Dataset schema (one record per source file; the stars/issues/forks column groups repeat the same path/name/head_hexsha/licenses fields):
- hexsha: string, length 40
- size: int64, 4 to 1.02M
- ext: string, 8 classes
- lang: string, 1 value (Python)
- max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: string, length 4 to 209
- max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: string, length 5 to 121
- max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: string, length 40
- max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime / max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_count: int64, 1 to 67k, nullable
- max_issues_repo_issues_event_min_datetime / max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime / max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 4 to 1.02M
- avg_line_length: float64, 1.07 to 66.1k
- max_line_length: int64, 4 to 266k
- alphanum_fraction: float64, 0.01 to 1
hexsha: 64b5fd679988d6740cfa11831e571513bb2efecd | size: 21,927 | ext: py | lang: Python
repo_path: instancehandler/views.py | repo_name: TCSOSM-20/LW-UI | repo_head_hexsha: 70c3331278f71d3b22fc3a090d526b4b8106d155 | repo_licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
#
# Copyright 2018 CNIT - Consorzio Nazionale Interuniversitario per le Telecomunicazioni
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse
import yaml
import json
import logging
from lib.osm.osmclient.clientv2 import Client
from lib.osm.osm_rdcl_parser import OsmParser
import authosm.utils as osmutils
from sf_t3d.decorators import login_required
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('instancehandler/view.py')
@login_required
def get_list(request, type=None):
user = osmutils.get_user(request)
project_id = user.project_id
client = Client()
result = {'type': type, 'project_id': project_id}
if "OSM_ERROR" in request.session:
result['alert_error'] = request.session["OSM_ERROR"]
del request.session["OSM_ERROR"]
raw_content_types = request.META.get('HTTP_ACCEPT', '*/*').split(',')
if 'application/json' not in raw_content_types:
return __response_handler(request, result, 'instance_list.html')
instance_list = None
if type == 'ns':
instance_list = client.ns_list(user.get_token())
elif type == 'vnf':
instance_list = client.vnf_list(user.get_token())
elif type == 'pdu':
instance_list = client.pdu_list(user.get_token())
elif type == 'nsi':
instance_list = client.nsi_list(user.get_token())
result['instances'] = instance_list['data'] if instance_list and instance_list['error'] is False else []
return __response_handler(request, result, 'instance_list.html')
@login_required
def create(request, type=None):
result = {}
config_vim_account_id = {}
config_wim_account_id = {}
user = osmutils.get_user(request)
client = Client()
def get_vim_account_id(vim_account):
if config_vim_account_id.get(vim_account):
return config_vim_account_id[vim_account]
result_client = client.vim_list(user.get_token())
vim_list = result_client['data'] if result_client and result_client['error'] is False else []
if vim_list is None or len(vim_list) == 0:
raise ValueError("cannot find vim account '{}'".format(vim_account))
for vim in vim_list:
if vim_account == vim['name']:
config_vim_account_id[vim_account] = vim['uuid']
return vim['uuid']
def get_wim_account_id(wim_account):
if config_wim_account_id.get(wim_account):
return config_wim_account_id[wim_account]
result_client = client.wim_list(user.get_token())
wim_list = result_client['data'] if result_client and result_client['error'] is False else []
if wim_list is None or len(wim_list) == 0:
raise ValueError("cannot find wim account '{}'".format(wim_account))
for wim in wim_list:
if wim_account == wim['name']:
config_wim_account_id[wim_account] = wim['uuid']
return wim['uuid']
if type == 'ns':
try:
ns_data = {
"nsName": request.POST.get('nsName', 'WithoutName'),
"nsDescription": request.POST.get('nsDescription', ''),
"nsdId": request.POST.get('nsdId', ''),
"vimAccountId": request.POST.get('vimAccountId', ''),
}
ns_data["ssh_keys"] = []
if 'ssh_key' in request.POST and request.POST.get('ssh_key') != '':
ns_data["ssh_keys"].append(request.POST.get('ssh_key'))
ssh_key_files = request.FILES.getlist('ssh_key_files')
for ssh_key_file in ssh_key_files:
ssh_key = ''
for line in ssh_key_file:
ssh_key = ssh_key + line.decode()
ns_data["ssh_keys"].append(ssh_key)
config_file = request.FILES.get('config_file')
if config_file is not None:
config = ''
for line in config_file:
config = config + line.decode()
ns_config = yaml.safe_load(config)  # safe_load: plain data is expected, and yaml.load() without an explicit Loader is deprecated
elif 'config' in request.POST and request.POST.get('config') != '':
ns_config = yaml.safe_load(request.POST.get('config'))
else:
ns_config = None
if ns_config is not None:
if isinstance(ns_config, dict):
if "vim-network-name" in ns_config:
ns_config["vld"] = ns_config.pop("vim-network-name")
if "vld" in ns_config:
for vld in ns_config["vld"]:
if vld.get("vim-network-name"):
if isinstance(vld["vim-network-name"], dict):
vim_network_name_dict = {}
for vim_account, vim_net in list(vld["vim-network-name"].items()):
vim_network_name_dict[get_vim_account_id(vim_account)] = vim_net
vld["vim-network-name"] = vim_network_name_dict
if "wim_account" in vld and vld["wim_account"] is not None:
vld["wimAccountId"] = get_wim_account_id(vld.pop("wim_account"))
ns_data["vld"] = ns_config["vld"]
if "vnf" in ns_config:
for vnf in ns_config["vnf"]:
if vnf.get("vim_account"):
vnf["vimAccountId"] = get_vim_account_id(vnf.pop("vim_account"))
ns_data["vnf"] = ns_config["vnf"]
if "additionalParamsForNs" in ns_config:
ns_data["additionalParamsForNs"] = ns_config.pop("additionalParamsForNs")
if not isinstance(ns_data["additionalParamsForNs"], dict):
raise ValueError("Error 'additionalParamsForNs' must be a dictionary")
if "additionalParamsForVnf" in ns_config:
ns_data["additionalParamsForVnf"] = ns_config.pop("additionalParamsForVnf")
if not isinstance(ns_data["additionalParamsForVnf"], list):
raise ValueError("Error 'additionalParamsForVnf' must be a list")
for additional_param_vnf in ns_data["additionalParamsForVnf"]:
if not isinstance(additional_param_vnf, dict):
raise ValueError("Error 'additionalParamsForVnf' items must be dictionaries")
if not additional_param_vnf.get("member-vnf-index"):
raise ValueError("Error 'additionalParamsForVnf' items must contain "
"'member-vnf-index'")
if not additional_param_vnf.get("additionalParams"):
raise ValueError("Error 'additionalParamsForVnf' items must contain "
"'additionalParams'")
if "wim_account" in ns_config:
wim_account = ns_config.pop("wim_account")
if wim_account is not None:
ns_data['wimAccountId'] = get_wim_account_id(wim_account)
except Exception as e:
return __response_handler(request, {'status': 400, 'code': 'BAD_REQUEST', 'detail': str(e)}, url=None, status=400)
result = client.ns_create(user.get_token(), ns_data)
if result['error']:
return __response_handler(request, result['data'], url=None,
status=result['data']['status'] if 'status' in result['data'] else 500)
else:
return __response_handler(request, {}, url=None, status=200)
elif type == 'nsi':
try:
nsi_data = {
"nsiName": request.POST.get('nsiName', 'WithoutName'),
"nsiDescription": request.POST.get('nsiDescription', ''),
"nstId": request.POST.get('nstId', ''),
"vimAccountId": request.POST.get('vimAccountId', ''),
}
nsi_data["ssh_keys"] = []
if 'ssh_key' in request.POST and request.POST.get('ssh_key') != '':
nsi_data["ssh_keys"].append(request.POST.get('ssh_key'))
ssh_key_files = request.FILES.getlist('ssh_key_files')
for ssh_key_file in ssh_key_files:
ssh_key = ''
for line in ssh_key_file:
ssh_key = ssh_key + line.decode()
nsi_data["ssh_keys"].append(ssh_key)
nsi_data["ssh_keys"] = ','.join(nsi_data["ssh_keys"])
config_file = request.FILES.get('config_file')
if config_file is not None:
config = ''
for line in config_file:
config = config + line.decode()
nsi_config = yaml.safe_load(config)  # safe_load, as above
elif 'config' in request.POST and request.POST.get('config') != '':
nsi_config = yaml.safe_load(request.POST.get('config'))
else:
nsi_config = None
if nsi_config is not None:
if "netslice-vld" in nsi_config:
for vld in nsi_config["netslice-vld"]:
if vld.get("vim-network-name"):
if isinstance(vld["vim-network-name"], dict):
vim_network_name_dict = {}
for vim_account, vim_net in list(vld["vim-network-name"].items()):
vim_network_name_dict[get_vim_account_id(vim_account)] = vim_net
vld["vim-network-name"] = vim_network_name_dict
nsi_data["netslice-vld"] = nsi_config["netslice-vld"]
if "netslice-subnet" in nsi_config:
for nssubnet in nsi_config["netslice-subnet"]:
if "vld" in nssubnet:
for vld in nssubnet["vld"]:
if vld.get("vim-network-name"):
if isinstance(vld["vim-network-name"], dict):
vim_network_name_dict = {}
for vim_account, vim_net in list(vld["vim-network-name"].items()):
vim_network_name_dict[get_vim_account_id(vim_account)] = vim_net
vld["vim-network-name"] = vim_network_name_dict
if "vnf" in nssubnet:
for vnf in nssubnet["vnf"]:  # the guard above checks nssubnet, so iterate its own "vnf" list
if vnf.get("vim_account"):
vnf["vimAccountId"] = get_vim_account_id(vnf.pop("vim_account"))
nsi_data["netslice-subnet"] = nsi_config["netslice-subnet"]
if "additionalParamsForNsi" in nsi_config:
nsi_data["additionalParamsForNsi"] = nsi_config.pop("additionalParamsForNsi")
if not isinstance(nsi_data["additionalParamsForNsi"], dict):
raise ValueError("Error at 'additionalParamsForNsi' must be a dictionary")
if "additionalParamsForSubnet" in nsi_config:
nsi_data["additionalParamsForSubnet"] = nsi_config.pop("additionalParamsForSubnet")
if not isinstance(nsi_data["additionalParamsForSubnet"], list):
raise ValueError("Error 'additionalParamsForSubnet' must be a list")
for additional_param_subnet in nsi_data["additionalParamsForSubnet"]:
if not isinstance(additional_param_subnet, dict):
raise ValueError("Error 'additionalParamsForSubnet' items must be dictionaries")
if not additional_param_subnet.get("id"):
raise ValueError("Error 'additionalParamsForSubnet' items must contain subnet 'id'")
if not additional_param_subnet.get("additionalParamsForNs") and\
not additional_param_subnet.get("additionalParamsForVnf"):
raise ValueError("Error 'additionalParamsForSubnet' items must contain "
"'additionalParamsForNs' and/or 'additionalParamsForVnf'")
except Exception as e:
return __response_handler(request, {'status': 400, 'code': 'BAD_REQUEST', 'detail': str(e)}, url=None, status=400)
result = client.nsi_create(user.get_token(), nsi_data)
if result['error']:
return __response_handler(request, result['data'], url=None,
status=result['data']['status'] if 'status' in result['data'] else 500)
else:
return __response_handler(request, {}, url=None, status=200)
elif type == 'pdu':
interface_param_name = request.POST.getlist('interfaces_name')
interface_param_ip = request.POST.getlist('interfaces_ip')
interface_param_mgmt = request.POST.getlist('interfaces_mgmt')
interface_param_netname = request.POST.getlist('interfaces_vimnetname')
pdu_payload = {
"name": request.POST.get('name'),
"type": request.POST.get('pdu_type'),
"vim_accounts": request.POST.getlist('pdu_vim_accounts'),
"description": request.POST.get('description'),
"interfaces": []
}
for i in range(len(interface_param_name)):  # iterate over every interface, not only index 0 and the last index
pdu_payload['interfaces'].append({
'name': interface_param_name[i],
'mgmt': True if interface_param_mgmt[i] == 'true' else False,
'ip-address': interface_param_ip[i],
'vim-network-name': interface_param_netname[i]
})
result = client.pdu_create(user.get_token(), pdu_payload)
if result['error']:
return __response_handler(request, result['data'], url=None,
status=result['data']['status'] if 'status' in result['data'] else 500)
else:
return __response_handler(request, {}, url=None, status=200)
@login_required
def ns_operations(request, instance_id=None, type=None):
user = osmutils.get_user(request)
project_id = user.project_id
result = {'type': type, 'project_id': project_id, 'instance_id': instance_id}
raw_content_types = request.META.get('HTTP_ACCEPT', '*/*').split(',')
if 'application/json' not in raw_content_types:
return __response_handler(request, result, 'instance_operations_list.html')
client = Client()
if type == 'ns':
op_list = client.ns_op_list(user.get_token(), instance_id)
elif type == 'nsi':
op_list = client.nsi_op_list(user.get_token(), instance_id)
result['operations'] = op_list['data'] if op_list and op_list['error'] is False else []
return __response_handler(request, result, 'instance_operations_list.html')
@login_required
def ns_operation(request, op_id, instance_id=None, type=None):
user = osmutils.get_user(request)
client = Client()
result = client.ns_op(user.get_token(), op_id)
return __response_handler(request, result['data'])
@login_required
def action(request, instance_id=None, type=None):
user = osmutils.get_user(request)
client = Client()
# result = client.ns_action(instance_id, action_payload)
primitive_param_keys = request.POST.getlist('primitive_params_name')
primitive_param_value = request.POST.getlist('primitive_params_value')
action_payload = {
"vnf_member_index": request.POST.get('vnf_member_index'),
"primitive": request.POST.get('primitive'),
"primitive_params": {k: v for k, v in zip(primitive_param_keys, primitive_param_value) if len(k) > 0}
}
result = client.ns_action(user.get_token(), instance_id, action_payload)
if result['error']:
return __response_handler(request, result['data'], url=None,
status=result['data']['status'] if 'status' in result['data'] else 500)
else:
return __response_handler(request, {}, url=None, status=200)
@login_required
def delete(request, instance_id=None, type=None):
force = request.GET.get('force', '').lower() in ('true', '1')  # bool() on any non-empty string, including "false", is True
result = {}
user = osmutils.get_user(request)
client = Client()
if type == 'ns':
result = client.ns_delete(user.get_token(), instance_id, force)
elif type == 'pdu':
result = client.pdu_delete(user.get_token(), instance_id)
elif type == 'nsi':
result = client.nsi_delete(user.get_token(), instance_id, force)
if result['error']:
return __response_handler(request, result['data'], url=None,
status=result['data']['status'] if 'status' in result['data'] else 500)
else:
return __response_handler(request, {}, url=None, status=200)
@login_required
def show_topology(request, instance_id=None, type=None):
user = osmutils.get_user(request)
project_id = user.project_id
raw_content_types = request.META.get('HTTP_ACCEPT', '*/*').split(',')
if 'application/json' in raw_content_types:
client = Client()
nsr_object = {'nsr': {}, 'vnfr': {}, 'vnfd': {}}
if type == 'ns':
nsr_resp = client.ns_get(user.get_token(), instance_id)
nsr_object['nsr'] = nsr_resp['data']
if 'constituent-vnfr-ref' in nsr_object['nsr'] :
for vnfr_id in nsr_object['nsr']['constituent-vnfr-ref']:
vnfr_resp = client.vnf_get(user.get_token(), vnfr_id)
vnfr = vnfr_resp['data']
nsr_object['vnfr'][vnfr['id']] = vnfr
if vnfr['vnfd-id'] not in nsr_object['vnfd']:
vnfd_resp = client.vnfd_get(user.get_token(), vnfr['vnfd-id'])
nsr_object['vnfd'][vnfr['vnfd-id']] = vnfd_resp['vnfd:vnfd-catalog']['vnfd'][0]
test = OsmParser()
result = test.nsr_to_graph(nsr_object)
return __response_handler(request, result)
else:
result = {'type': type, 'project_id': project_id, 'instance_id': instance_id}
return __response_handler(request, result, 'instance_topology_view.html')
@login_required
def show(request, instance_id=None, type=None):
# result = {}
user = osmutils.get_user(request)
project_id = user.project_id
client = Client()
if type == 'ns':
result = client.ns_get(user.get_token(), instance_id)
elif type == 'vnf':
result = client.vnf_get(user.get_token(), instance_id)
elif type == 'pdu':
result = client.pdu_get(user.get_token(), instance_id)
elif type == 'nsi':
result = client.nsi_get(user.get_token(), instance_id)
return __response_handler(request, result)
@login_required
def export_metric(request, instance_id=None, type=None):
metric_data = request.POST.dict()
user = osmutils.get_user(request)
project_id = user.project_id
client = Client()
keys = ["collection_period",
"vnf_member_index",
"metric_name",
"correlation_id",
"vdu_name",
"collection_unit"]
metric_data = dict(filter(lambda i: i[0] in keys and len(i[1]) > 0, metric_data.items()))
result = client.ns_metric_export(user.get_token(), instance_id, metric_data)
if result['error']:
return __response_handler(request, result['data'], url=None,
status=result['data']['status'] if 'status' in result['data'] else 500)
else:
return __response_handler(request, {}, url=None, status=200)
@login_required
def create_alarm(request, instance_id=None, type=None):
metric_data = request.POST.dict()
user = osmutils.get_user(request)
project_id = user.project_id
client = Client()
keys = ["threshold_value",
"vnf_member_index",
"metric_name",
"vdu_name",
"alarm_name",
"correlation_id",
"statistic",
"operation",
"severity"]
metric_data = dict(filter(lambda i: i[0] in keys and len(i[1]) > 0, metric_data.items()))
result = client.ns_alarm_create(user.get_token(), instance_id, metric_data)
if result['error']:
return __response_handler(request, result['data'], url=None,
status=result['data']['status'] if 'status' in result['data'] else 500)
else:
return __response_handler(request, {}, url=None, status=200)
def __response_handler(request, data_res, url=None, to_redirect=None, *args, **kwargs):
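"""Return a JSON response when the caller did not ask for a redirect and the client
accepts application/json (or no template url was given); redirect to url when
to_redirect is set; otherwise render the template named by url with data_res as context."""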
raw_content_types = request.META.get('HTTP_ACCEPT', '*/*').split(',')
if not to_redirect and ('application/json' in raw_content_types or url is None):
return HttpResponse(json.dumps(data_res), content_type="application/json", *args, **kwargs)
elif to_redirect:
return redirect(url, *args, **kwargs)
else:
return render(request, url, data_res)
avg_line_length: 47.771242 | max_line_length: 130 | alphanum_fraction: 0.585671
hexsha: 09cbc3e122baea74dc6e7e211bdf43133d2ae3b1 | size: 5,337 | ext: py | lang: Python
repo_path: MoneyTracker/purchases.py | repo_name: albert-dinh-01/MoneyTracker | repo_head_hexsha: 855145dbb9842096a70bef43c8c6431e8d04c7fe | repo_licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
"""
Author: Albert Dinh
Date: Oct 13, 2020
This file contains the basic template for
certain goods classes.
"""
from datetime import datetime, date
class BasicPurchases:
def __init__(self, price, onoff, item_name):
self.__time_stamp = date.today().strftime("%B %d, %Y")  # calendar date of the purchase, e.g. "October 13, 2020"
self.__date_stamp = datetime.now().strftime("%H:%M:%S")  # clock time of the purchase
self.__price = price
self.__online_or_offline = onoff
self.__type = str(self.__class__)
self.__name = item_name
self.__f = 'buffer.txt'
def __str__(self):
return self.__type
def get_f_out(self):
return self.__f  # buffer path set in __init__; the attribute __default_f_out was never defined
def get_time(self):
return self.__time_stamp
def get_date(self):
return self.__date_stamp
def get_price(self):
return self.__price
def get_off_on_line(self):
return self.__online_or_offline
def on_off_value(self):
if self.get_off_on_line():
a = 'online'
return a
else:
a = 'offline'
return a
def get_item_name(self):
return self.__name
def get_buffer_loc(self):
return self.__f
def get_f_obj_loc(self):
fout = open(self.get_buffer_loc(), 'a')
return fout
def set_buffer_loc(self, new_loc):
self.__f = new_loc
def print_item_info(self):
f = self.get_f_obj_loc()
print(file=f)
print("Purchased: {}".format(self.get_item_name()), file=f)
print('Price is: ${} CAD'.format(self.get_price()), file=f)
print('The item was purchased {}'.format(self.on_off_value()), file=f)
print('The item was purchased on {}'.format(self.get_time()), file=f)
print()
class Grocery(BasicPurchases):
def __init__(self, price, onoff, item_name, category, retailer):
BasicPurchases.__init__(self, price, onoff, item_name)
self.__food_category = category
self.__retailer = retailer
def get_retailer(self):
return self.__retailer
def get_food_category(self):
return self.__food_category
def print_retailer(self):
print()
print("======================*****======================")
print('Grocery item is', self.get_item_name())
print('Item was purchased in %s.' % self.get_retailer())
print('Food sub-category is %s.' % self.get_food_category())
print("======================*****======================")
print()
class EatingOut(BasicPurchases):
def __init__(self, price, onoff, item_name, restaurant, feedback, recommendation):
BasicPurchases.__init__(self, price, onoff, item_name)
self.__restaurant_name = restaurant
self.__feedback = feedback
self.__recommend_or_no = recommendation
def get_fb(self):
a = str(self.__feedback)
return a
def get_recommendation(self):
return self.__recommend_or_no
def get_restaurant_name(self):
return self.__restaurant_name
def decision_to_rec(self):
if self.get_recommendation():
a = 'to recommend'
return a
else:
a = 'to not recommend'
return a
def print_eat_out_info(self):
print()
print("======================*****======================")
print('Ate the following dish:', self.get_item_name())
print('Ate at:', self.get_restaurant_name())
print('Decided', self.decision_to_rec(), 'this restaurant.')
print('Rate this restaurant at level %s.' % self.get_fb())
print("======================*****======================")
print()
class Utilities(BasicPurchases):
def __init__(self, price, onoff, item_name, util_category, util_provider):
BasicPurchases.__init__(self, price, onoff, item_name)
self.__uti_category = util_category
self.__util_provider = util_provider
def get_category(self):
return self.__uti_category
def get_util_provider(self):
return self.__util_provider
def print_utilities_info(self):
print()
print("======================*****======================")
print('Paid for:', self.get_item_name(), 'ON', self.get_time())
print('The utility provider was:', self.get_util_provider())
print('The utility was classified as:', self.get_item_name())
print("======================*****======================")
print()
class MonthlySubscription(BasicPurchases):
def __init__(self, price, onoff, item_name, provider, category):
BasicPurchases.__init__(self, price, onoff, item_name)
self.__provider = provider
self.__category = category
def get_subscription_provider(self):
return self.__provider
def get_category_subscription(self):
return self.__category
def print_subscription_service(self):
print()
print("======================*****======================")
print('Paid', self.get_price(), 'to',
self.get_subscription_provider(), 'for item',
self.get_item_name())
print('The subscription is for', self.get_category_subscription())
print("======================*****======================")
print()
if __name__ == '__main__':
pass
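# A usage sketch (not part of the original module); the item names, prices and
# retailers below are made-up illustrations of how these classes appear intended to be used:
#
#   snack = Grocery(4.99, False, "Oranges", "Fruit", "Local Market")
#   snack.print_retailer()
#   dinner = EatingOut(25.50, False, "Pad Thai", "Some Restaurant", 4, True)
#   dinner.print_eat_out_info()
#   dinner.print_item_info()   # appends to buffer.txt via the inherited writer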
avg_line_length: 30.323864 | max_line_length: 86 | alphanum_fraction: 0.579914
hexsha: aa46b477ee9aabfa34c1730fc8ca5f905bc112c8 | size: 418 | ext: py | lang: Python
repo_path: tools/add_all_cohorts_to_teacher.py | repo_name: simonchristensen1/Zeeguu-Core | repo_head_hexsha: 76f0e4a73676e00e6023ccbb2017210982670da2 | repo_licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: 1 (events 2018-03-22T12:29:49.000Z to 2018-03-22T12:29:49.000Z)
max_issues_count: 82 (events 2017-12-09T16:15:02.000Z to 2020-11-12T11:34:09.000Z)
max_forks_count: 9 (events 2017-11-25T11:32:05.000Z to 2020-10-26T15:50:13.000Z)
content:
#!/usr/bin/env python
"""
Script that adds every existing cohort to the given teacher user.
To be called from a cron job.
"""
import zeeguu_core
from zeeguu_core.model import User, Cohort, TeacherCohortMap
session = zeeguu_core.db.session
big_teacher = User.query.filter_by(id=534).one()
for cohort in Cohort.query.all():
mapping = TeacherCohortMap.find_or_create(big_teacher, cohort, session)
session.add(mapping)
session.commit()
avg_line_length: 19.904762 | max_line_length: 75 | alphanum_fraction: 0.746411
hexsha: f044e345713dd7cdf17ec894fcaa78d631a74c26 | size: 1,223 | ext: py | lang: Python
repo_path: matchms/importing/load_from_usi.py | repo_name: sdrogers/matchms | repo_head_hexsha: 1ce16b45ca218fb86b4d6c24cf7dc338045f5cc1 | repo_licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
import json
import numpy as np
import requests
from matchms import Spectrum
def load_from_usi(usi: str, server='https://metabolomics-usi.ucsd.edu'):
"""Load spectrum from metabolomics USI.
USI returns JSON data with keys 'peaks', 'n_peaks' and 'precursor_mz'
Args:
----
usi: str
Provide the usi.
server: string
USI server
"""
# Create the url
url = server + '/json/?usi=' + usi
metadata = {'usi': usi, 'server': server}
response = requests.get(url)
if response.status_code == 404:
return None
# Extract data and create Spectrum object
try:
spectral_data = response.json()
if spectral_data is None or 'peaks' not in spectral_data:
return None
peaks = spectral_data['peaks']
if len(peaks) == 0:
return None
mz_list, intensity_list = zip(*peaks)
mz_array = np.array(mz_list)
intensity_array = np.array(intensity_list)
metadata['precursor_mz'] = spectral_data.get('precursor_mz', None)
s = Spectrum(mz_array, intensity_array, metadata)
return s
except json.decoder.JSONDecodeError:
# failed to unpack json
return None
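# Usage sketch (not part of the original module). The USI string below is a
# placeholder, not a real identifier:
#
#   spectrum = load_from_usi("mzspec:SOME_COLLECTION:some_file:scan:1")
#   if spectrum is None:
#       print("USI could not be resolved or returned no peaks")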
avg_line_length: 24.959184 | max_line_length: 74 | alphanum_fraction: 0.626329
hexsha: dc28e1adc57950d2a43e7e91d62b2937b4dcca4f | size: 1,424 | ext: py | lang: Python
repo_path: sandbox/lib/jumpscale/JumpscaleLibs/clients/gitea/GiteaMilestones.py | repo_name: threefoldtech/threebot_prebuilt | repo_head_hexsha: 1f0e1c65c14cef079cd80f73927d7c8318755c48 | repo_licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null (no star event datetimes)
max_issues_count: 117 (events 2019-09-01T11:59:19.000Z to 2020-07-14T11:10:08.000Z)
max_forks_count: 2 (events 2020-04-06T15:21:23.000Z to 2020-05-07T04:29:53.000Z)
content:
from Jumpscale import j
from .GiteaMilestone import GiteaMilestone
JSBASE = j.baseclasses.object
class GiteaMilestones(j.baseclasses.object):
def __init__(self, client, repo, user):
JSBASE.__init__(self)
self.client = client
self.repo = repo
self.user = user
self.position = 0
def new(self):
return GiteaMilestone(self.client, self.repo, self.user)
def get(self, id, fetch=False):
o = self.new()
if fetch:
resp = self.client.api.repos.issueGetMilestone(
id=str(id), repo=self.repo.name, owner=self.user.username
).json()
for k, v in resp.items():
setattr(o, k, v)
return o
def __next__(self):
if self.position < len(self._items):
item = self._items[self.position]
self.position += 1
return item
else:
self.position = 0
raise StopIteration()
def __iter__(self):
self._items = []
items = self.client.api.repos.issueGetMilestones(repo=self.repo.name, owner=self.user.username).json()
for item in items:
c = self.new()
for k, v in item.items():
setattr(c, k, v)
self._items.append(c)
return self
__str__ = __repr__ = lambda self: "Gitea Milestones Iterator for repo: {0}".format(self.repo.name)
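# Usage sketch (not part of the original module); client, repo and user are assumed
# to be already-initialised Jumpscale Gitea objects:
#
#   milestones = GiteaMilestones(client, repo, user)
#   for milestone in milestones:   # __iter__ fetches every milestone for the repo
#       print(milestone)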
avg_line_length: 29.061224 | max_line_length: 110 | alphanum_fraction: 0.572331
hexsha: 9fea3c352334c559cc051120cefdb84c20062945 | size: 2,415 | ext: py | lang: Python
repo_path: Tools/TextProcess.py | repo_name: camille1874/FinQA | repo_head_hexsha: 5e99cdee44e88a494c4cf6127c5c127ac80ab4db | repo_licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
#coding:utf8
import jieba
import jieba.posseg as pseg
import os,sys
'''
initialize jieba Segment
'''
def jieba_initialize():
jieba.load_userdict(os.path.dirname(os.path.split(os.path.realpath(__file__))[0])+'/resources/QAattrdic.txt')
jieba.initialize()
'''
Segment words by jieba
'''
def wordSegment(text):
text = text.strip()
seg_list = jieba.cut(text)
result = " ".join(seg_list)
return result
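# Example (assumes the default jieba dictionary; results may differ once QAattrdic.txt is loaded):
#   wordSegment("我爱北京天安门")  ->  "我 爱 北京 天安门"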
'''
POS Tagging
'''
def postag(text):
words = pseg.cut(text)
# for w in words:
# print w.word, w.flag
return words
'''
process xiaohuangji corpus
'''
def xiaohuangji_textprocess(fr_path,fw_path):
fr = open(fr_path,'r')
fw = open(fw_path,'a')
line = fr.readline()
i = 0
while line:
if line[0] == 'E':
question = fr.readline()[2:].strip()
answer = fr.readline()[2:]
print (question)
print (answer)
if len(question)<20 and len(answer)<30:
i +=1
qa_pair = question+":::"+answer
fw.write(qa_pair)
line = fr.readline()
fw.close()
fr.close()
print ('Finished')
'''
q:::a text processing
'''
def tp2(fr_path,fw_path):
fr = open(fr_path,'r')
fw = open(fw_path,'a')
line = fr.readline()
while line:
flag = 0
words = pseg.cut(line)
for w in words:
print (w.word + w.flag)
if w.flag == 'nr':
flag = 1
if flag == 0:
fw.write(line)
line = fr.readline()
fw.close()
fr.close()
print ('Finished')
'''
Load baike attribute names
'''
def load_baikeattr_name(attrdic):
fr = open(attrdic,'r')
attr = []
line = fr.readline()
while line:
attr.append(line.strip())
line = fr.readline()
fr.close()
return attr
'''
Synonym analysis: return the synonym of `word` that appears in the baike attribute list
word: the original word
synsdic: synonym dictionary file
attr: attribute list
'''
def load_synonyms_word_inattr(word,synsdic,attr):
fr = open(synsdic,'r')
tar_word = ''
line = fr.readline().strip()
while line:
words = line.split(" ")
if word in words:
for w in words:
if w in attr:
tar_word = w
break
if tar_word != '':
break
line = fr.readline()
fr.close()
if tar_word == '':
tar_word = 'Empty'
return tar_word
avg_line_length: 19.795082 | max_line_length: 113 | alphanum_fraction: 0.541201
hexsha: f4ab16fb93bd95e7490f31adb4487814378d740d | size: 17,657 | ext: py | lang: Python
repo_path: batchglm/unit_test/test_acc_glm_all_tf2.py | repo_name: le-ander/batchglm | repo_head_hexsha: 31b905b99b6baa7c94b82550d6a74f00d81966ea | repo_licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
import logging
import numpy as np
import scipy.sparse
import unittest
import batchglm.api as glm
glm.setup_logging(verbosity="WARNING", stream="STDOUT")
logger = logging.getLogger(__name__)
class _TestAccuracyGlmAllEstim:
def __init__(
self,
simulator,
quick_scale,
noise_model,
sparse,
init_mode
):
if noise_model is None:
raise ValueError("noise_model is None")
else:
if noise_model == "nb":
from batchglm.api.models.glm_nb import Estimator, InputDataGLM
elif noise_model == "norm":
from batchglm.api.models.glm_norm import Estimator, InputDataGLM
elif noise_model == "beta":
from batchglm.api.models.glm_beta import Estimator, InputDataGLM
else:
raise ValueError("noise_model not recognized")
batch_size = 2000
provide_optimizers = {
"gd": True,
"adam": True,
"adagrad": True,
"rmsprop": True,
"nr": True,
"nr_tr": True,
"irls": noise_model in ["nb", "norm"],
"irls_gd": noise_model in ["nb", "norm"],
"irls_tr": noise_model in ["nb", "norm"],
"irls_gd_tr": noise_model in ["nb", "norm"]
}
if sparse:
input_data = InputDataGLM(
data=scipy.sparse.csr_matrix(simulator.input_data.x),
design_loc=simulator.input_data.design_loc,
design_scale=simulator.input_data.design_scale,
constraints_loc=simulator.input_data.constraints_loc,
constraints_scale=simulator.input_data.constraints_scale,
size_factors=simulator.input_data.size_factors
)
else:
input_data = InputDataGLM(
data=simulator.input_data.x,
design_loc=simulator.input_data.design_loc,
design_scale=simulator.input_data.design_scale,
constraints_loc=simulator.input_data.constraints_loc,
constraints_scale=simulator.input_data.constraints_scale,
size_factors=simulator.input_data.size_factors
)
self.estimator = Estimator(
input_data=input_data,
#batch_size=batch_size,
quick_scale=quick_scale,
#provide_optimizers=provide_optimizers,
#provide_batched=True,
#provide_fim=noise_model in ["nb", "norm"],
#provide_hessian=True,
init_a=init_mode,
init_b=init_mode
)
self.sim = simulator
def estimate(
self,
algo,
batched,
acc,
lr
):
self.estimator.initialize()
self.estimator.train_sequence(training_strategy=[
{
"learning_rate": lr,
"convergence_criteria": "all_converged",
"stopping_criteria": acc,
"use_batching": batched,
"optim_algo": algo,
"featurewise": False
},
])
def eval_estimation(
self,
batched,
train_loc,
train_scale
):
if batched:
threshold_dev_a = 0.4
threshold_dev_b = 0.4
threshold_std_a = 2
threshold_std_b = 2
else:
threshold_dev_a = 0.2
threshold_dev_b = 0.2
threshold_std_a = 1
threshold_std_b = 1
success = True
if train_loc:
mean_rel_dev_a = np.mean((self.estimator.model.a_var - self.sim.a_var) / self.sim.a_var)
std_rel_dev_a = np.std((self.estimator.model.a_var - self.sim.a_var) / self.sim.a_var)
logging.getLogger("batchglm").info("mean_rel_dev_a %f" % mean_rel_dev_a)
logging.getLogger("batchglm").info("std_rel_dev_a %f" % std_rel_dev_a)
if np.abs(mean_rel_dev_a) > threshold_dev_a or std_rel_dev_a > threshold_std_a:
success = False
if train_scale:
mean_rel_dev_b = np.mean((self.estimator.model.b_var - self.sim.b_var) / self.sim.b_var)
std_rel_dev_b = np.std((self.estimator.model.b_var - self.sim.b_var) / self.sim.b_var)
logging.getLogger("batchglm").info("mean_rel_dev_b %f" % mean_rel_dev_b)
logging.getLogger("batchglm").info("std_rel_dev_b %f" % std_rel_dev_b)
if np.abs(mean_rel_dev_b) > threshold_dev_b or std_rel_dev_b > threshold_std_b:
success = False
return success
class _TestAccuracyGlmAll(
unittest.TestCase
):
"""
Test whether optimizers yield exact results.
Accuracy is evaluated via deviation from the simulated ground truth.
The unit tests test individual training graphs and multiple optimizers
(incl. one tensorflow internal optimizer and Newton-Raphson)
for each training graph. The training graphs tested are as follows:
- full data model
- train a and b model: test_full_global_a_and_b()
- train a model only: test_full_global_a_only()
- train b model only: test_full_global_b_only()
- batched data model
- train a and b model: test_batched_global_a_and_b()
- train a model only: test_batched_global_a_only()
- train b model only: test_batched_global_b_only()
The unit tests throw an assertion error if the required accuracy is
not met. Accuracy thresholds are fairly lenient so that unit_tests
pass even with noise inherent in fast optimisation and random
initialisation in simulation. Still, large biases (i.e. graph errors)
should be discovered here.
Note on settings by optimiser:
IRLS_TR: Needs slow TR collapse to converge.
"""
noise_model: str
optims_tested: dict
def simulate(self):
self.simulate1()
self.simulate2()
def get_simulator(self):
if self.noise_model is None:
raise ValueError("noise_model is None")
else:
if self.noise_model == "nb":
from batchglm.api.models.glm_nb import Simulator
elif self.noise_model == "norm":
from batchglm.api.models.glm_norm import Simulator
elif self.noise_model == "beta":
from batchglm.api.models.glm_beta import Simulator
else:
raise ValueError("noise_model not recognized")
return Simulator(num_observations=10000, num_features=10)
def simulate1(self):
self.sim1 = self.get_simulator()
self.sim1.generate_sample_description(num_batches=2, num_conditions=2)
def rand_fn_ave(shape):
if self.noise_model in ["nb", "norm"]:
theta = np.random.uniform(10, 1000, shape)
elif self.noise_model in ["beta"]:
theta = np.random.uniform(0.1, 0.7, shape)
else:
raise ValueError("noise model not recognized")
return theta
def rand_fn_loc(shape):
if self.noise_model in ["nb", "norm"]:
theta = np.random.uniform(1, 3, shape)
elif self.noise_model in ["beta"]:
theta = np.random.uniform(0, 0.15, shape)
else:
raise ValueError("noise model not recognized")
return theta
def rand_fn_scale(shape):
if self.noise_model in ["nb"]:
theta = np.random.uniform(1, 3, shape)
elif self.noise_model in ["norm"]:
theta = np.random.uniform(1, 3, shape)
elif self.noise_model in ["beta"]:
theta = np.random.uniform(0, 0.15, shape)
else:
raise ValueError("noise model not recognized")
return theta
self.sim1.generate_params(
rand_fn_ave=lambda shape: rand_fn_ave(shape),
rand_fn_loc=lambda shape: rand_fn_loc(shape),
rand_fn_scale=lambda shape: rand_fn_scale(shape)
)
self.sim1.generate_data()
def simulate2(self):
self.sim2 = self.get_simulator()
self.sim2.generate_sample_description(num_batches=0, num_conditions=2)
def rand_fn_ave(shape):
if self.noise_model in ["nb", "norm"]:
theta = np.random.uniform(10, 1000, shape)
elif self.noise_model in ["beta"]:
theta = np.random.uniform(0.1, 0.9, shape)
else:
raise ValueError("noise model not recognized")
return theta
def rand_fn_loc(shape):
if self.noise_model in ["nb", "norm"]:
theta = np.ones(shape)
elif self.noise_model in ["beta"]:
theta = np.zeros(shape)+0.05
else:
raise ValueError("noise model not recognized")
return theta
def rand_fn_scale(shape):
if self.noise_model in ["nb"]:
theta = np.ones(shape)
elif self.noise_model in ["norm"]:
theta = np.ones(shape)
elif self.noise_model in ["beta"]:
theta = np.ones(shape) - 0.8
else:
raise ValueError("noise model not recognized")
return theta
self.sim2.generate_params(
rand_fn_ave=lambda shape: rand_fn_ave(shape),
rand_fn_loc=lambda shape: rand_fn_loc(shape),
rand_fn_scale=lambda shape: rand_fn_scale(shape)
)
self.sim2.generate_data()
def simulator(self, train_loc):
if train_loc:
return self.sim1
else:
return self.sim2
def basic_test(
self,
batched,
train_loc,
train_scale,
sparse
):
self.optims_tested = {
"nb": ["ADAM", "IRLS_GD_TR"],
"beta": ["NR_TR"],
"norm": ["IRLS_TR"]
}
if self.noise_model in ["norm"]:
algos = self.optims_tested["norm"]
init_mode = "all_zero"
lr = {"ADAM": 1e-3, "NR_TR": 1, "IRLS_TR": 1}
elif self.noise_model in ["beta"]:
algos = self.optims_tested["beta"]
init_mode = "all_zero"
if batched:
lr = {"ADAM": 0.1, "NR_TR": 1}
else:
lr = {"ADAM": 1e-5, "NR_TR": 1}
elif self.noise_model in ["nb"]:
algos = self.optims_tested["nb"]
init_mode = "standard"
if batched:
lr = {"ADAM": 0.1, "IRLS_GD_TR": 1}
else:
lr = {"ADAM": 0.05, "IRLS_GD_TR": 1}
else:
raise ValueError("noise model %s not recognized" % self.noise_model)
for algo in algos:
logger.info("algorithm: %s" % algo)
if algo in ["ADAM", "RMSPROP", "GD"]:
if batched:
acc = 1e-4
else:
acc = 1e-6
glm.pkg_constants.JACOBIAN_MODE = "analytic"
elif algo in ["NR", "NR_TR"]:
if batched:
acc = 1e-12
else:
acc = 1e-14
if self.noise_model in ["beta"]:
glm.pkg_constants.TRUST_REGION_RADIUS_INIT = 1
else:
glm.pkg_constants.TRUST_REGION_RADIUS_INIT = 100
glm.pkg_constants.TRUST_REGION_T1 = 0.5
glm.pkg_constants.TRUST_REGION_T2 = 1.5
glm.pkg_constants.CHOLESKY_LSTSQS = True
glm.pkg_constants.CHOLESKY_LSTSQS_BATCHED = True
glm.pkg_constants.JACOBIAN_MODE = "analytic"
glm.pkg_constants.HESSIAN_MODE = "analytic"
elif algo in ["IRLS", "IRLS_TR", "IRLS_GD", "IRLS_GD_TR"]:
if batched:
acc = 1e-12
else:
acc = 1e-14
glm.pkg_constants.TRUST_REGION_T1 = 0.5
glm.pkg_constants.TRUST_REGION_T2 = 1.5
glm.pkg_constants.CHOLESKY_LSTSQS = True
glm.pkg_constants.CHOLESKY_LSTSQS_BATCHED = True
glm.pkg_constants.JACOBIAN_MODE = "analytic"
else:
raise ValueError("algo %s not recognized" % algo)
estimator = _TestAccuracyGlmAllEstim(
simulator=self.simulator(train_loc=train_loc),
quick_scale=False if train_scale else True,
noise_model=self.noise_model,
sparse=sparse,
init_mode=init_mode
)
estimator.estimate(
algo=algo,
batched=batched,
acc=acc,
lr=lr[algo]
)
estimator.estimator.finalize()
success = estimator.eval_estimation(
batched=batched,
train_loc=train_loc,
train_scale=train_scale,
)
assert success, "%s did not yield exact results" % algo
return True
def _test_full_a_and_b(self, sparse):
return self.basic_test(
batched=False,
train_loc=True,
train_scale=True,
sparse=sparse
)
def _test_full_a_only(self, sparse):
return self.basic_test(
batched=False,
train_loc=True,
train_scale=False,
sparse=sparse
)
def _test_full_b_only(self, sparse):
return self.basic_test(
batched=False,
train_loc=False,
train_scale=True,
sparse=sparse
)
def _test_batched_a_and_b(self, sparse):
return self.basic_test(
batched=True,
train_loc=True,
train_scale=True,
sparse=sparse
)
def _test_batched_a_only(self, sparse):
return self.basic_test(
batched=True,
train_loc=True,
train_scale=False,
sparse=sparse
)
def _test_batched_b_only(self, sparse):
return self.basic_test(
batched=True,
train_loc=False,
train_scale=True,
sparse=sparse
)
def _test_full(self, sparse):
self._test_full_a_and_b(sparse=sparse)
self._test_full_a_only(sparse=sparse)
self._test_full_b_only(sparse=sparse)
def _test_batched(self, sparse):
self._test_batched_a_and_b(sparse=sparse)
self._test_batched_a_only(sparse=sparse)
self._test_batched_b_only(sparse=sparse)
class TestAccuracyGlmNb(
_TestAccuracyGlmAll,
unittest.TestCase
):
"""
Test whether optimizers yield exact results for negative binomial distributed data.
"""
def test_full_nb(self):
logging.getLogger("tensorflow").setLevel(logging.INFO)
logging.getLogger("batchglm").setLevel(logging.INFO)
logger.error("TestAccuracyGlmNb.test_full_nb()")
np.random.seed(1)
self.noise_model = "nb"
self.simulate()
self._test_full(sparse=False)
self._test_full(sparse=True)
"""
def test_batched_nb(self):
logging.getLogger("tensorflow").setLevel(logging.INFO)
logging.getLogger("batchglm").setLevel(logging.INFO)
logger.error("TestAccuracyGlmNb.test_batched_nb()")
np.random.seed(1)
self.noise_model = "nb"
self.simulate()
self._test_batched(sparse=False)
self._test_batched(sparse=True)
"""
"""
class TestAccuracyGlmNorm(
_TestAccuracyGlmAll,
unittest.TestCase
):
Test whether optimizers yield exact results for normal distributed data.
def test_full_norm(self):
logging.getLogger("tensorflow").setLevel(logging.INFO)
logging.getLogger("batchglm").setLevel(logging.INFO)
logger.error("TestAccuracyGlmNorm.test_full_norm()")
np.random.seed(1)
self.noise_model = "norm"
self.simulate()
self._test_full(sparse=False)
self._test_full(sparse=True)
def test_batched_norm(self):
logging.getLogger("tensorflow").setLevel(logging.INFO)
logging.getLogger("batchglm").setLevel(logging.INFO)
logger.error("TestAccuracyGlmNorm.test_batched_norm()")
# TODO not working yet.
np.random.seed(1)
self.noise_model = "norm"
self.simulate()
self._test_batched(sparse=False)
self._test_batched(sparse=True)
class TestAccuracyGlmBeta(
_TestAccuracyGlmAll,
unittest.TestCase
):
Test whether optimizers yield exact results for beta distributed data.
TODO not working yet.
def test_full_beta(self):
logging.getLogger("tensorflow").setLevel(logging.INFO)
logging.getLogger("batchglm").setLevel(logging.INFO)
logger.error("TestAccuracyGlmBeta.test_full_beta()")
np.random.seed(1)
self.noise_model = "beta"
self.simulate()
self._test_full(sparse=False)
self._test_full(sparse=True)
def test_batched_beta(self):
logging.getLogger("tensorflow").setLevel(logging.INFO)
logging.getLogger("batchglm").setLevel(logging.INFO)
logger.error("TestAccuracyGlmBeta.test_batched_beta()")
np.random.seed(1)
self.noise_model = "beta"
self.simulate()
self._test_batched(sparse=False)
self._test_batched(sparse=True)
"""
if __name__ == '__main__':
unittest.main()
avg_line_length: 33.632381 | max_line_length: 100 | alphanum_fraction: 0.574107
hexsha: a9a61b31229bbca11e83f25b7d5b253cc7473a55 | size: 114 | ext: py | lang: Python
repo_path: pynuit/_enums.py | repo_name: louisdevie/pynuit | repo_head_hexsha: 4add2277529be8f577457202da6146f722c03caf | repo_licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
from enum import Enum
__all__ = ["Alignment"]
class Alignment(Enum):
LEFT = 1
RIGHT = 2
CENTER = 3
avg_line_length: 11.4 | max_line_length: 23 | alphanum_fraction: 0.614035
hexsha: b84db1f338fde3c3386860f1001420ec26f4d160 | size: 1,769 | ext: py | lang: Python
repo_path: yat-master/pymodule/yat/test/data_generator.py | repo_name: opengauss-mirror/Yat | repo_head_hexsha: aef107a8304b94e5d99b4f1f36eb46755eb8919e | repo_licenses: ["MulanPSL-1.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
#!/usr/bin/env python
# encoding=utf-8
"""
Copyright (c) 2021 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
class DataGenerator:
BATCH_COUNT = 500
def __init__(self, db, table_name, count, **column_metas):
self.db = db
self.table_name = table_name
self.count = count
self.column_metas = column_metas
def _make_sql_template(self):
return 'insert into {table_name} ({columns}) values ({values})'.format(
table_name=self.table_name,
columns=','.join(self.column_metas.keys()),
values=','.join('?'*len(self.column_metas)))
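# For example, with a hypothetical table_name='t_user' and column_metas keys
# ('id', 'name'), this returns: insert into t_user (id,name) values (?,?)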
def generate(self):
sql = self._make_sql_template()
cache = []
for i in range(self.count):
cache.append(self._gen_line())
if i % self.BATCH_COUNT == 0:
self.db.execute(sql, *cache)
cache = []
if len(cache) > 0:
self.db.execute_query(sql, *cache)
def _gen_line(self):
res = []
for name in self.column_metas:
meta = self.column_metas[name]
res.append(self._gen_element(meta))
return res
def _gen_element(self, meta):
if 'value' in meta:
return meta['value']
tp = meta['type']
if tp == 'str':
pass
avg_line_length: 28.532258 | max_line_length: 84 | alphanum_fraction: 0.610514
hexsha: 9c74f8eb6bceda188dd46722c8d72a8d13992dff | size: 2,005 | ext: py | lang: Python
repo_path: pysyte/cli/streams.py | repo_name: git-wwts/pysyte | repo_head_hexsha: 625658138cdb5affc1a6a89a9f2c7e3667ee80c2 | repo_licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: 1 (events 2021-11-10T15:24:36.000Z to 2021-11-10T15:24:36.000Z)
max_issues_count: 12 (events 2020-01-15T00:19:41.000Z to 2021-05-11T14:52:04.000Z)
max_forks_count: 2 (events 2015-01-31T11:51:06.000Z to 2015-01-31T21:29:19.000Z)
content:
"""Module to handle streams of text from cli arguments"""
import os
import sys
from six import StringIO
from pysyte import iteration
from pysyte.cli import arguments
from pysyte.oss.platforms import get_clipboard_data
def parse_args():
"""Parse out command line arguments"""
parser = arguments.parser(__doc__)
parser.args("streams", help="streams to use")
parser.opt("-p", "--paste", "paste text from clipboard")
parser.opt("-i", "--stdin", "wait for text from stdin")
return parser.parse_args()
def args(parsed_args, name=None, files_only=False):
"""Interpret parsed args to streams"""
strings = parsed_args.get_strings(name)
files = [s for s in strings if os.path.isfile(s)]
if files:
streams = [open(f) for f in files]
elif files_only:
return []
else:
streams = []
if "-" in files or not files or getattr(parsed_args, "stdin", False):
streams.append(sys.stdin)
if getattr(parsed_args, "paste", not files):
streams.append(clipboard_stream())
return streams
def files(parsed_args, name=None):
return args(parsed_args, name, True)
def all():
yielded = False
for path in _arg_files():
with open(path) as stream:
yield stream
yielded = True
if not yielded or "-" in sys.argv:
yield sys.stdin
def some():
if sys.argv[1:]:
assert _arg_files()
return _any()  # the module helper defined below; the builtin any() requires an iterable argument
def clipboard_stream(name=None):
stream = StringIO(get_clipboard_data())
stream.name = name or "<clipboard>"
return stream
def _arg_files():
return [a for a in sys.argv[1:] if os.path.isfile(a)]
def _arg_streams():
"""yield streams to all arg.isfile()"""
for path in _arg_files():
with open(path) as stream:
yield stream
def _any():
try:
stream = iteration.first(_arg_streams())
if stream:
return _arg_streams()
except ValueError:
return iter([clipboard_stream(), sys.stdin])
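# Usage sketch (not part of the original module), reading every file named on the
# command line plus stdin/clipboard according to the parsed flags:
#
#   parsed = parse_args()
#   for stream in args(parsed, "streams"):
#       print(stream.name)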
avg_line_length: 24.156627 | max_line_length: 73 | alphanum_fraction: 0.640898
hexsha: e4e24a9e93ed4fec68be1e2bef39f17a070e0095 | size: 9,536 | ext: py | lang: Python
repo_path: Overlap_Case/py_analysis/plot_microenvs.py | repo_name: furkankurtoglu/whole-well | repo_head_hexsha: cbafce776256f1a78ca52141a365f4690601c339 | repo_licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 27 14:41:55 2021
@author: Furkan
"""
import importlib.machinery
pyMCDS = importlib.machinery.SourceFileLoader('pyMCDS','./analysis/pyMCDS.py').load_module()
import os.path
from os import path
from pathlib import Path
import matplotlib.pyplot as plt
import matplotlib.animation
import numpy as np
import pandas as pd
#from fury import window, actor, utils, primitive, io, ui
#from fury.data import read_viz_textures, fetch_viz_textures
import itertools
# import vtk
import glob
import time
import random
import scipy.io as sio
import xml.etree.ElementTree as ET
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
saving_times = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 20.0])
main_path = Path(os.getcwd()).parent
out_path = os.path.join(main_path, "output")
os.chdir(out_path)
time_point = "output000000"
number_of_frames = len(saving_times)
Temporospatial_Plotting = 'N'
Total_Amount_Analysis = 'Y'
if Temporospatial_Plotting == 'Y':
def data_parser (time_point):
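# Returns (fine_tuple, coarse_tuple, transfer_tuple): fine_tuple holds three
# (X, Y, field) meshgrid tuples for oxygen, glucose and chemokine on the fine mesh
# at z = 16 um; coarse_tuple is (X, Y, oxy, glu, chem) for the coarse mesh, tiled so
# it can be plotted on the same x-grid; transfer_tuple is returned empty here
# (microenvironment2 is read but not packed).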
# Fine MicroEnv Data Parsing
fine_tuple = []
coarse_tuple = []
transfer_tuple = []
if path.exists(time_point + "_microenvironment0.mat"):
fine_data = sio.loadmat(time_point + "_microenvironment0.mat")['multiscale_microenvironment']
fine_x = np.unique(fine_data[0,:])
fine_y = np.unique(fine_data[1,:])
fine_X, fine_Y = np.meshgrid(fine_x, fine_y)
fine_oxy = fine_data[4,np.where(fine_data[2,:] == 16)]
fine_oxy = fine_oxy.reshape((len(fine_y),len(fine_x)))
fine_glu = fine_data[5,np.where(fine_data[2,:] == 16)]
fine_glu = fine_glu.reshape((len(fine_y),len(fine_x)))
fine_chem = fine_data[6,np.where(fine_data[2,:] == 16)]
fine_chem = fine_chem.reshape((len(fine_y),len(fine_x)))
fine_oxy_tuple = (fine_X, fine_Y, fine_oxy)
fine_glu_tuple = (fine_X, fine_Y, fine_glu)
fine_chem_tuple = (fine_X, fine_Y, fine_chem)
fine_tuple = (fine_oxy_tuple, fine_glu_tuple, fine_chem_tuple)
# Coarse MicroEnv Data Parsing
if path.exists(time_point + "_microenvironment1.mat"):
coarse_data = sio.loadmat(time_point + "_microenvironment1.mat")['multiscale_microenvironment']
coarse_y = coarse_data[0,:]
coarse_x = np.unique(fine_data[0,:])
coarse_X, coarse_Y = np.meshgrid(coarse_x, coarse_y)
coarse_oxy = coarse_data[4,:]
coarse_oxy = np.transpose(np.tile(coarse_oxy,(90,1)))
coarse_glu = coarse_data[5,:]
coarse_glu = np.transpose(np.tile(coarse_glu,(90,1)))
coarse_chem = coarse_data[6,:]
coarse_chem = np.transpose(np.tile(coarse_chem,(90,1)))
coarse_tuple = (coarse_X, coarse_Y, coarse_oxy, coarse_glu, coarse_chem)
if path.exists(time_point + "_microenvironment2.mat"):
transfer_region = sio.loadmat(time_point + "_microenvironment2.mat")['multiscale_microenvironment']
return fine_tuple, coarse_tuple, transfer_tuple
def get_subs_name ():
tree = ET.parse("initial.xml")
root = tree.getroot()
subs_names = []
for substrate in root.iter('variable'):
subs_names.append(substrate.attrib['name'])
return subs_names
subs_list = get_subs_name()
fig, axs = plt.subplots()
# color bar
tp = "output00000020"
ft, ct, tt = data_parser(tp)
fine_X, fine_Y, fine_oxy = ft[0]
cX, cY, cOxy, cGlu, cChem = ct
w_X = np.concatenate((fine_X,cX),axis=0)
w_Y = np.concatenate((fine_Y,cY),axis=0)
w_O = np.concatenate((fine_oxy,cOxy),axis=0)
zmin = min([min(zl) for zl in w_O])
zmax = max([max(zl) for zl in w_O])
levels = np.linspace(zmin, 0.28500001,41)
kw = dict(levels=levels, vmin=zmin, vmax=0.28500001, origin='lower')
cp = axs.contourf(w_Y,w_X,w_O, **kw)
cbar = plt.colorbar(cp,format='%0.4f')
axs.clear()
def animate(i):
time_p= time_point + '%02d'%(i)
ft, ct, tt = data_parser(time_p)
fine_X, fine_Y, fine_oxy = ft[0]
cX, cY, cOxy, cGlu, cChem = ct
w_X = np.concatenate((fine_X,cX),axis=0)
w_Y = np.concatenate((fine_Y,cY),axis=0)
w_O = np.concatenate((fine_oxy,cOxy),axis=0)
axs.clear()
axs.contourf(w_Y,w_X,w_O, **kw)
axs.set_title('Oxygen, Z=16 um, time = ' +str(saving_times[i])+ ' minutes')
axs.invert_xaxis()
axs.axis('scaled')
number_of_frames = len(saving_times)
ani = matplotlib.animation.FuncAnimation(fig,animate,blit=False, frames=number_of_frames,repeat=False)
plt.show()
ani.save('./oxygen.gif', writer='imagemagick', fps=4)
fig2, ax = plt.subplots()
# color bar
tp = "output00000020"
ft, ct, tt = data_parser(tp)
fine_X, fine_Y, fine_glu = ft[1]
cX, cY, cOxy, cGlu, cChem = ct
w_X = np.concatenate((fine_X,cX),axis=0)
w_Y = np.concatenate((fine_Y,cY),axis=0)
w_G = np.concatenate((fine_glu,cGlu),axis=0)
zmin2 = min([min(zl) for zl in w_G])
zmax2 = max([max(zl) for zl in w_G])
levels2 = np.linspace(zmin2, 16.897255)
kw2 = dict(levels=levels2, vmin=zmin2, vmax=16.897255, origin='lower')
cp2 = ax.contourf(w_X,w_Y,w_G, **kw2)
cbar2 = plt.colorbar(cp2,format='%0.2f')
ax.clear()
def animate2(i):
time_p= time_point + '%02d'%(i)
ft, ct, tt = data_parser(time_p)
fine_X, fine_Y, fine_glu = ft[1]
cX, cY, cOxy, cGlu, cChem = ct
w_X = np.concatenate((fine_X,cX),axis=0)
w_Y = np.concatenate((fine_Y,cY),axis=0)
w_G = np.concatenate((fine_glu,cGlu),axis=0)
ax.clear()
ax.contourf(w_Y,w_X,w_G, **kw2)
ax.set_title('Glucose, Z=16 um, time = ' +str(saving_times[i])+ ' minutes')
ax.invert_xaxis()
ax.axis('scaled')
ani2 = matplotlib.animation.FuncAnimation(fig2,animate2,blit=False, frames=number_of_frames,repeat=False)
plt.show()
ani2.save('./glucose.gif', writer='imagemagick', fps=4)
fig3, ax3 = plt.subplots()
# color bar
tp = "output00000020"
ft, ct, tt = data_parser(tp)
fine_X, fine_Y, fine_chem = ft[2]
cX, cY, cOxy, cGlu, cChem = ct
w_X = np.concatenate((fine_X,cX),axis=0)
w_Y = np.concatenate((fine_Y,cY),axis=0)
w_C = np.concatenate((fine_chem,cChem),axis=0)
zmin3 = min([min(zl) for zl in w_C])
zmax3 = max([max(zl) for zl in w_C])
levels3 = np.linspace(0, zmax3)
kw3 = dict(levels=levels3, vmin=0, vmax=zmax3, origin='lower')
cp3 = ax3.contourf(w_X,w_Y,w_C, **kw3)
cbar3 = plt.colorbar(cp3,format='%0.5f')
ax3.clear()
def animate3(i):
time_p= time_point + '%02d'%(i)
ft, ct, tt = data_parser(time_p)
fine_X, fine_Y, fine_chem = ft[2]
cX, cY, cOxy, cGlu, cChem = ct
w_X = np.concatenate((fine_X,cX),axis=0)
w_Y = np.concatenate((fine_Y,cY),axis=0)
w_C = np.concatenate((fine_chem,cChem),axis=0)
ax3.clear()
ax3.contourf(w_Y,w_X,w_C, **kw3)
ax3.set_title('Chemokine, Z=16 um, time = ' +str(saving_times[i])+ ' minutes')
ax3.invert_xaxis()
ax3.axis('scaled')
ani3 = matplotlib.animation.FuncAnimation(fig3,animate3,blit=False, frames=number_of_frames,repeat=False)
plt.show()
ani3.save('./chemokine.gif', writer='imagemagick', fps=4)
#%%
if Total_Amount_Analysis == 'Y':
o2_uptake_rate_per_cell = 0.005
glu_uptake_rate_per_cell = 0.01
chem_secretion_rate_per_cell_per_min = 0.01
number_of_cells = 170278
total_O2 = []
total_glu = []
total_chem = []
initial_O2= 0;
previous_data = np.array([0,0,0])
previous_time = 0;
for i in range(number_of_frames):
time_p = time_point + '%02d'%(i)
if path.exists(time_p + "_microenvironment0.mat"):
fine_data = sio.loadmat(time_p + "_microenvironment0.mat")['multiscale_microenvironment']
dx = fine_data[0,1]-fine_data[0,0]
micEnv_O2 =sum(fine_data[4,:])
micEnv_glu = sum(fine_data[5,:])
micEnv_chem = sum(fine_data[6,:])
if i == 0:
initial_O2 = micEnv_O2
initial_glu = micEnv_glu
initial_chem = micEnv_chem
uptaken_O2 = o2_uptake_rate_per_cell*dx*dx*dx * number_of_cells * saving_times[i]
uptaken_glu = glu_uptake_rate_per_cell*dx*dx*dx * number_of_cells * saving_times[i]
total_O2.append(micEnv_O2)
total_glu.append(micEnv_glu)
total_chem.append(micEnv_chem)
    plt.figure()
    plt.plot(saving_times, total_O2)
    plt.title('Oxygen')
    plt.xlabel('time(min)')
    plt.ylabel('Summed node concentration (mM)')
    plt.figure()
    plt.plot(saving_times, total_glu)
    plt.title('Glucose')
    plt.xlabel('time(min)')
    plt.ylabel('Summed node concentration (mM)')
    plt.figure()
    plt.plot(saving_times, total_chem)
    plt.title('Chemokine')
    plt.xlabel('time(min)')
    plt.ylabel('Summed node concentration (mM)')
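# The curves above plot sums of per-node concentrations. A minimal sketch, assuming
# a uniform Cartesian mesh with spacing dx (as used in the loop above), of converting
# such a summed value into a physical amount; the function name and the example
# numbers are illustrative, not part of the original analysis.
def summed_concentration_to_amount(concentration_sum, dx):
    """Convert a sum of node concentrations (mM) into an amount (mM * um^3)."""
    node_volume = dx * dx * dx
    return concentration_sum * node_volume

# example: 1000 nodes at 0.05 mM on a 20 um mesh ->
# summed_concentration_to_amount(1000 * 0.05, 20.0) == 400000.0  (mM * um^3)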
| 31.681063
| 132
| 0.604761
|
6bcbe98eeec1d5c4bbab52dd539e3877dde5367c
| 3,309
|
py
|
Python
|
ros_radar_mine/neuro_learning/controller/evol_main/evol_main_ANN.py
|
tudelft/blimp_snn
|
23acbef8822337387aee196a3a10854e82bb4f80
|
[
"Apache-2.0"
] | 3
|
2021-11-08T20:20:21.000Z
|
2021-12-29T09:05:37.000Z
|
ros_radar_mine/neuro_learning/controller/evol_main/evol_main_ANN.py
|
tudelft/blimp_snn
|
23acbef8822337387aee196a3a10854e82bb4f80
|
[
"Apache-2.0"
] | null | null | null |
ros_radar_mine/neuro_learning/controller/evol_main/evol_main_ANN.py
|
tudelft/blimp_snn
|
23acbef8822337387aee196a3a10854e82bb4f80
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 30 17:41:45 2021
@author: marina
"""
# Set absolute package path
import sys, os
sys.path.append(os.path.abspath(".."))
import torch
import array
import numpy as np
import random
import copy
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from evol_algo.evol_algo_ANN import myeaSimple, myeaPID
from evol_funcs.evol_funcs_ANN import initializeIndividual, mutation, evaluate, evaluate_ANNyPID
import multiprocessing
#from torch import multiprocessing as mp
import extra.aux_funcs as af # :)
import time
import sys
############################################################
# CONFIG GLOBAL VARIABLE
############################################################
cf = af.set_config('../config/config.yaml')
#cf = af.set_config(curr_path + '/' + str(sys.argv[1]))
#########################################################
# DEAP FRAMEWORK DEFINITIONS
#########################################################
# Basic initializations
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("attr_params", initializeIndividual, cf = cf)
toolbox.register("individual", tools.initRepeat, creator.Individual,
toolbox.attr_params, 1)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# Evaluation and mutation functions
toolbox.register("evaluate", evaluate_ANNyPID)
toolbox.register("mutate", mutation)
# Selection function: modify??
toolbox.register("select", tools.selTournament, tournsize = cf["evol"]["tourn_size"])
"""
toolbox.register("select", tools.selBest)
toolbox.register("select", tools.selNSGA2)
"""
t1 = time.time()
if cf["evol"]["parallel"]:
#os.environ['MKL_THREADING_LAYER'] = 'GNU'
#multiprocessing.set_start_method('spawn', force =True)
    CPU_count = multiprocessing.cpu_count()  # optionally use cpu_count() - 1 to keep one core free
pool = multiprocessing.Pool(CPU_count)
toolbox.register("map", pool.starmap)
pop = toolbox.population(n=cf["evol"]["n_pop"])
hof = tools.HallOfFame(cf["evol"]["n_hof"])
fit_stats = tools.Statistics(lambda ind: ind.fitness.values)
fit_stats.register("avg", np.mean)
fit_stats.register("std", np.std)
fit_stats.register("min", np.min)
fit_stats.register("max", np.max)
# Create directory to save networks
mainDir = cf["evol"]["save_name"]
# myeaSimple, myeaPID, myeaJPaper, myeaMuPlusLambda
pop, hof, log = myeaSimple(pop, toolbox, mutpb=cf["evol"]["p_mut_ind"], ngen=cf["evol"]["n_gen"],
mainDir = mainDir, cf = cf, stats=fit_stats, halloffame=hof, verbose=True)
#pop, hof, log = myeaMuPlusLambda(pop, toolbox, mu=cf["evol"]["n_pop"], lambda_=cf["evol"]["n_pop"]*2, mutpb=cf["evol"]["p_mut_ind"], ngen=cf["evol"]["n_gen"], mainDir = mainDir, stats=fit_stats, halloffame=hof, verbose=True)
if cf["evol"]["save"]:
df = pd.DataFrame(log)
df.to_csv(mainDir + '_log.csv')
t2 = time.time()
if cf["evol"]["parallel"]:
pool.close()
pool.join()
print("Multiprocessing with",CPU_count,"core(s) took",round((t2-t1),2),"s")
else:
print("Non-parallel processing took",round((t2-t1),2),"s")
| 32.126214
| 225
| 0.668782
|
0e39ee85c4f49a03591b8e56bfbe3a2012baf902
| 5,193
|
py
|
Python
|
ojos_ca/domain/value_object/binary/core.py
|
ojos/python-ca
|
dba9e9c61fd997c8c2ed60a6bd6f076c5f216265
|
[
"MIT"
] | null | null | null |
ojos_ca/domain/value_object/binary/core.py
|
ojos/python-ca
|
dba9e9c61fd997c8c2ed60a6bd6f076c5f216265
|
[
"MIT"
] | null | null | null |
ojos_ca/domain/value_object/binary/core.py
|
ojos/python-ca
|
dba9e9c61fd997c8c2ed60a6bd6f076c5f216265
|
[
"MIT"
] | null | null | null |
import base64
import re
from typing import Any
from ojos_ca.domain.value_object.core import IsInstance
class Binary(IsInstance):
CLASS_INFO = bytes
@property
def base64(self) -> str:
return base64.b64encode(self.value).decode()
def pre_set(self, value: Any):
if value is None:
return value
if isinstance(value, str):
return base64.b64decode(value.encode())
else:
return value
class BinaryFile(Binary):
@property
def mime_type(self):
return self._mime_type
@property
def extension(self):
return self._extension
class Image(BinaryFile):
def _is_jpg(self, value: bytes) -> bool:
if bool(re.match(br"^\xff\xd8", value[:2])):
self._mime_type = 'image/jpeg'
self._extension = 'jpg'
return True
return False
def _is_png(self, value: bytes) -> bool:
if bool(re.match(br"^\x89\x50\x4e\x47", value[:4])):
self._mime_type = 'image/png'
self._extension = 'png'
return True
return False
def _is_gif(self, value: bytes) -> bool:
if bool(re.match(br"^\x47\x49\x46\x38", value[:4])):
self._mime_type = 'image/gif'
self._extension = 'gif'
return True
return False
def _is_bmp(self, value: bytes) -> bool:
if bool(re.match(br"^\x42\x4d", value[:2])):
self._mime_type = 'image/bmp'
self._extension = 'bmp'
return True
return False
    def _is_tiff(self, value: bytes) -> bool:
        # TIFF magic bytes: "II*\0" (little-endian) or "MM\0*" (big-endian)
        if bool(re.match(br"^\x49\x49\x2a\x00|^\x4d\x4d\x00\x2a", value[:4])):
self._mime_type = 'image/tiff'
self._extension = 'tiff'
return True
return False
def conditions(self, value: Any) -> bool:
return self._is_jpg(value) or self._is_png(value) or self._is_gif(value) or\
self._is_bmp(value) or self._is_tiff(value)
class Video(BinaryFile):
def _is_avi(self, value: bytes) -> bool:
if bool(re.match(br"^\x52\x49\x46\x46", value[:4])):
self._mime_type = 'video/x-msvideo'
self._extension = 'avi'
return True
return False
def _is_wmv(self, value: bytes) -> bool:
if bool(re.match(br"^\x30\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C", value[:16])):
self._mime_type = 'video/x-ms-wmv'
self._extension = 'wmv'
return True
return False
def _is_mov(self, value: bytes) -> bool:
if bool(re.match(br"^\x00\x00\x00\x14\x66\x74\x79\x70\x71\x74\x20\x20\x00\x00\x00\x00\x71\x74\x20\x20\x00\x00\x00\x08\x77\x69\x64\x65", value[:28])):
self._mime_type = 'video/quicktime'
self._extension = 'mov'
return True
return False
def _is_mp4(self, value: bytes) -> bool:
if bool(re.match(br"^\x00\x00\x00\x20\x66\x74\x79\x70\x69\x73\x6F\x6D\x00\x00\x02\x00", value[:16])):
self._mime_type = 'video/mp4'
self._extension = 'mp4'
return True
return False
def _is_webm(self, value: bytes) -> bool:
if bool(re.match(br"^\x1a\x45\xdf\xa3", value[:4])):
self._mime_type = 'video/webm'
self._extension = 'webm'
return True
return False
def conditions(self, value: Any) -> bool:
return self._is_avi(value) or self._is_wmv(value) or self._is_mov(value) or\
self._is_mp4(value) or self._is_webm(value)
# class Audio(BinaryFile):
# def _is_midi(self, value: bytes) -> bool:
# if bool(re.match(br"^\x52\x49\x46\x46", value[:4])):
# self._mime_type = 'audio/midi'
# self._extension = 'midi'
# return True
# return False
# def _is_wav(self, value: bytes) -> bool:
# if bool(re.match(br"^\x1a\x45\xdf\xa3", value[:4])):
# self._mime_type = 'audio/x-wav'
# self._extension = 'wav'
# return True
# return False
# def _is_mp3(self, value: bytes) -> bool:
# if bool(re.match(br"^\x30\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C", value[:16])):
# self._mime_type = 'audio/mpeg'
# self._extension = 'mp3'
# return True
# return False
# def _is_mp4(self, value: bytes) -> bool:
# if bool(re.match(br"^\x00\x00\x00\x20\x66\x74\x79\x70\x69\x73\x6F\x6D\x00\x00\x02\x00", value[:16])):
# self._mime_type = 'audio/mp4'
# self._extension = 'mp4'
# return True
# return False
# def _is_aiff(self, value: bytes) -> bool:
# if bool(re.match(br"^\x00\x00\x00\x14\x66\x74\x79\x70\x71\x74\x20\x20\x00\x00\x00\x00\x71\x74\x20\x20\x00\x00\x00\x08\x77\x69\x64\x65", value[:28])):
# self._mime_type = 'audio/x-aiff'
# self._extension = 'aiff'
# return True
# return False
# def conditions(self, value: Any) -> bool:
# return self._is_midi(value) or self._is_wav(value) or self._is_mp3(value) or\
# self._is_mp4(value) or self._is_aiff(value)
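# A minimal standalone sketch of the magic-byte sniffing approach used by the
# Image class above, independent of the IsInstance base class (whose constructor
# is not shown in this module). The prefixes repeat the ones already used above;
# the helper name and mapping are illustrative.
_IMAGE_MAGIC = {
    "image/jpeg": re.compile(br"^\xff\xd8"),
    "image/png": re.compile(br"^\x89\x50\x4e\x47"),
    "image/gif": re.compile(br"^\x47\x49\x46\x38"),
    "image/bmp": re.compile(br"^\x42\x4d"),
}


def sniff_image_mime(data: bytes):
    """Return the detected image MIME type for raw bytes, or None if unknown."""
    for mime, pattern in _IMAGE_MAGIC.items():
        if pattern.match(data):
            return mime
    return None

# example: sniff_image_mime(b"\x89PNG\r\n\x1a\n...") -> "image/png"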
| 34.164474
| 159
| 0.569998
|
b2b309dcee406c56131a3698a26a7b4095c8d363
| 2,143
|
py
|
Python
|
tottle/types/objects/chat.py
|
muffleo/tottle
|
69a5bdda879ab56d43505d517d3369a687c135a2
|
[
"MIT"
] | 12
|
2020-09-06T15:31:34.000Z
|
2021-02-27T20:30:34.000Z
|
tottle/types/objects/chat.py
|
cyanlabs-org/tottle
|
6cf02022ed7b445c9b5af475c6e854b91780d792
|
[
"MIT"
] | 2
|
2021-04-13T06:43:42.000Z
|
2021-07-07T20:52:39.000Z
|
tottle/types/objects/chat.py
|
cyanlabs-org/tottle
|
6cf02022ed7b445c9b5af475c6e854b91780d792
|
[
"MIT"
] | 4
|
2020-09-12T03:09:25.000Z
|
2021-03-22T08:52:04.000Z
|
from typing import Optional
from pydantic import BaseModel
from tottle.types.objects.user import User
class ChatPhoto(BaseModel):
small_file_id: Optional[str] = None
small_file_unique_id: Optional[str] = None
big_file_id: Optional[str] = None
big_file_unique_id: Optional[str] = None
class ChatPermissions(BaseModel):
can_send_messages: Optional[bool] = None
can_send_media_messages: Optional[bool] = None
can_send_polls: Optional[bool] = None
can_send_other_messages: Optional[bool] = None
can_add_web_page_previews: Optional[bool] = None
can_change_info: Optional[bool] = None
can_invite_users: Optional[bool] = None
can_pin_messages: Optional[bool] = None
class ChatMember(BaseModel):
user: Optional["User"] = None
status: Optional[str] = None
custom_title: Optional[str] = None
until_date: Optional[int] = None
can_be_edited: Optional[bool] = None
can_post_messages: Optional[bool] = None
can_edit_messages: Optional[bool] = None
can_delete_messages: Optional[bool] = None
can_restrict_members: Optional[bool] = None
can_promote_members: Optional[bool] = None
can_change_info: Optional[bool] = None
can_invite_users: Optional[bool] = None
can_pin_messages: Optional[bool] = None
is_member: Optional[bool] = None
can_send_messages: Optional[bool] = None
can_send_media_messages: Optional[bool] = None
can_send_polls: Optional[bool] = None
can_send_other_messages: Optional[bool] = None
can_add_web_page_previews: Optional[bool] = None
class Chat(BaseModel):
id: Optional[int] = None
type: Optional[str] = None
title: Optional[str] = None
username: Optional[str] = None
first_name: Optional[str] = None
last_name: Optional[str] = None
photo: Optional["ChatPhoto"] = None
description: Optional[str] = None
invite_link: Optional[str] = None
pinned_message: Optional["Message"] = None
permissions: Optional["ChatPermissions"] = None
slow_mode_delay: Optional[int] = None
sticker_set_name: Optional[str] = None
can_set_sticker_set: Optional[bool] = None
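# A small hedged usage sketch (pydantic v1 style, matching the Optional-field
# models above): missing keys simply fall back to None. Guarded so it only runs
# when this module is executed directly.
if __name__ == "__main__":
    perms = ChatPermissions.parse_obj(
        {"can_send_messages": True, "can_pin_messages": False}
    )
    print(perms.can_send_messages)  # True
    print(perms.can_send_polls)     # None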
| 33.484375
| 52
| 0.722818
|
dc0b010bf0bb5bb0495b49ea59ff4f9704fcdbeb
| 2,083
|
py
|
Python
|
experiments/state_distance/iclr2018/ant_distance_3_to_5.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
experiments/state_distance/iclr2018/ant_distance_3_to_5.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
experiments/state_distance/iclr2018/ant_distance_3_to_5.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
from rlkit.misc.data_processing import Experiment
import matplotlib.pyplot as plt
import numpy as np
from rlkit.misc.visualization_util import sliding_mean
def main():
tdm_trials = Experiment(
"/home/vitchyr/git/railrl/data/doodads3/01-02-ddpg-tdm-ant-nupo-sweep/",
criteria={
'exp_id': '27', # 23 for NUPO = 20, 27 for NUPO = 10
}
).get_trials()
mb_trials = Experiment(
"/home/vitchyr/git/railrl/data/doodads3/01-02-ant-distance-3-to-5/",
criteria={
'exp_id': '0',
'algorithm': 'Model-Based-Dagger',
}
).get_trials()
ddpg_trials = Experiment(
"/home/vitchyr/git/railrl/data/doodads3/01-02-ant-distance-3-to-5/",
criteria={
'exp_id': '3',
'algorithm': 'DDPG',
}
).get_trials()
MAX_ITERS = 1000000
plt.figure()
base_key = 'Final Distance to goal Mean'
for trials, name, key in [
(tdm_trials, 'TDMs', base_key),
(ddpg_trials, 'DDPG', base_key),
(mb_trials, 'Model-Based', base_key),
]:
key = key.replace(" ", "_")
all_values = []
for trial in trials:
            try:
                values_ts = trial.data[key]
            except KeyError:
                # drop into a debugger if the expected metric column is missing
                import ipdb; ipdb.set_trace()
values_ts = sliding_mean(values_ts, window=10)
all_values.append(values_ts)
min_len = min(map(len, all_values))
costs = np.vstack([
values[:min_len]
for values in all_values
])
costs = costs[:, :min(costs.shape[1], MAX_ITERS)]
mean = np.mean(costs, axis=0)
std = np.std(costs, axis=0)
epochs = np.arange(0, len(costs[0]))
plt.fill_between(epochs, mean - std, mean + std, alpha=0.1)
plt.plot(epochs, mean, label=name)
plt.xlabel("Environment Samples (x1,000)")
plt.ylabel("Final Euclidean Distance to Goal Position")
plt.legend()
plt.savefig('results/iclr2018/ant-distance-3-to-5.jpg')
plt.show()
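# A minimal sketch of the moving-average smoothing assumed to be performed by
# rlkit's sliding_mean above (the real helper's edge handling may differ); shown
# for reference only and not called in main().
def moving_average(values, window=10):
    kernel = np.ones(window) / window
    return np.convolve(values, kernel, mode="same")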
if __name__ == '__main__':
main()
| 30.188406
| 80
| 0.571771
|
2f00fe0d16c59791bfebd29338977740406a12ed
| 1,488
|
py
|
Python
|
test.py
|
ahwhbc/LookIntoPerson
|
448cb265b4a834c678cbcabe9c7f5c2a68de20dc
|
[
"MIT"
] | 79
|
2018-06-27T07:58:51.000Z
|
2022-03-18T08:55:50.000Z
|
test.py
|
ahwhbc/LookIntoPerson
|
448cb265b4a834c678cbcabe9c7f5c2a68de20dc
|
[
"MIT"
] | 6
|
2018-08-07T14:35:31.000Z
|
2022-03-26T02:21:32.000Z
|
test.py
|
ahwhbc/LookIntoPerson
|
448cb265b4a834c678cbcabe9c7f5c2a68de20dc
|
[
"MIT"
] | 19
|
2018-07-18T08:36:58.000Z
|
2021-03-05T03:20:18.000Z
|
# import the necessary packages
import argparse
import os
import cv2 as cv
import keras.backend as K
import numpy as np
from config import num_classes
from data_generator import random_choice, safe_crop, to_bgr
from model import build_model
if __name__ == '__main__':
# Parse arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help="path to the image to be processed")
args = vars(ap.parse_args())
filename = args["image"]
img_rows, img_cols = 320, 320
channel = 3
model_weights_path = 'models/model.54-2.2507.hdf5'
model = build_model()
model.load_weights(model_weights_path)
print(model.summary())
image = cv.imread(filename)
    image = cv.resize(image, (img_rows, img_cols), interpolation=cv.INTER_CUBIC)
image_size = image.shape[:2]
x, y = random_choice(image_size)
image = safe_crop(image, x, y)
print('Start processing image: {}'.format(filename))
x_test = np.empty((1, img_rows, img_cols, 3), dtype=np.float32)
x_test[0, :, :, 0:3] = image / 255.
out = model.predict(x_test)
out = np.reshape(out, (img_rows, img_cols, num_classes))
out = np.argmax(out, axis=2)
out = to_bgr(out)
ret = image * 0.6 + out * 0.4
ret = ret.astype(np.uint8)
if not os.path.exists('images'):
os.makedirs('images')
cv.imwrite('images/test_image.png', image)
cv.imwrite('images/test_merged.png', ret)
cv.imwrite('images/test_out.png', out)
K.clear_session()
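# A hedged alternative to the manual blend above (image * 0.6 + out * 0.4) using
# OpenCV's saturating weighted sum; defined for reference only and not called here.
def blend_overlay(base, overlay, alpha=0.6):
    """Blend two same-sized uint8 images: alpha * base + (1 - alpha) * overlay."""
    return cv.addWeighted(base, alpha, overlay, 1.0 - alpha, 0)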
| 26.571429
| 78
| 0.668683
|
e99e63e70be655cf48b17ba6f3eaea700c5ae0c0
| 279
|
py
|
Python
|
dynamic_progamme/__init__.py
|
tianyuningmou/Algorithm
|
30b52da686cf037133488de0068a2d8739a210e2
|
[
"MIT"
] | 1
|
2018-04-11T14:40:33.000Z
|
2018-04-11T14:40:33.000Z
|
dynamic_progamme/__init__.py
|
tianyuningmou/Algorithm
|
30b52da686cf037133488de0068a2d8739a210e2
|
[
"MIT"
] | null | null | null |
dynamic_progamme/__init__.py
|
tianyuningmou/Algorithm
|
30b52da686cf037133488de0068a2d8739a210e2
|
[
"MIT"
] | 1
|
2018-03-21T14:01:59.000Z
|
2018-03-21T14:01:59.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright () 2018
All rights reserved
FILE: __init__.py.py
AUTHOR: tianyuningmou
DATE CREATED: @Time : 2018/3/12 11:54 AM
DESCRIPTION: .
VERSION: : #1
CHANGED By: : tianyuningmou
CHANGE: :
MODIFIED: : @Time : 2018/3/12 11:54 AM
"""
# Dynamic programming problems
| 13.285714
| 40
| 0.65233
|
bf538f7c7880d7a8d2c56c3951262c1aa4a67ded
| 46,203
|
py
|
Python
|
src/prefect/client/client.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/prefect/client/client.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/prefect/client/client.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import datetime
import json
import os
import re
import uuid
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union
from urllib.parse import urljoin
import pendulum
import toml
import time
from slugify import slugify
import prefect
from prefect.utilities.exceptions import AuthorizationError, ClientError
from prefect.utilities.graphql import (
EnumValue,
GraphQLResult,
compress,
parse_graphql,
with_args,
)
from prefect.utilities.logging import create_diagnostic_logger
if TYPE_CHECKING:
from prefect.core import Flow
import requests
JSONLike = Union[bool, dict, list, str, int, float, None]
# type definitions for GraphQL results
TaskRunInfoResult = NamedTuple(
"TaskRunInfoResult",
[
("id", str),
("task_id", str),
("task_slug", str),
("version", int),
("state", "prefect.engine.state.State"),
],
)
FlowRunInfoResult = NamedTuple(
"FlowRunInfoResult",
[
("id", str),
("name", str),
("flow_id", str),
("parameters", Dict[str, Any]),
("context", Dict[str, Any]),
("version", int),
("scheduled_start_time", datetime.datetime),
("state", "prefect.engine.state.State"),
("task_runs", List[TaskRunInfoResult]),
],
)
class Client:
"""
Client for communication with Prefect Cloud
If the arguments aren't specified the client initialization first checks the prefect
configuration and if the server is not set there it checks the current context. The
token will only be present in the current context.
Args:
- api_server (str, optional): the URL to send all GraphQL requests
to; if not provided, will be pulled from `cloud.graphql` config var
- api_token (str, optional): a Prefect Cloud API token, taken from
`config.cloud.auth_token` if not provided. If this token is USER-scoped, it may
be used to log in to any tenant that the user is a member of. In that case,
ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself
will be used as authorization.
"""
def __init__(self, api_server: str = None, api_token: str = None):
self._access_token = None
self._refresh_token = None
self._access_token_expires_at = pendulum.now()
self._active_tenant_id = None
self._attached_headers = {} # type: Dict[str, str]
self.logger = create_diagnostic_logger("Diagnostics")
# store api server
self.api_server = api_server or prefect.context.config.cloud.get("graphql")
# store api token
self._api_token = api_token or prefect.context.config.cloud.get(
"auth_token", None
)
# if no api token was passed, attempt to load state from local storage
if not self._api_token and prefect.config.backend == "cloud":
settings = self._load_local_settings()
self._api_token = settings.get("api_token")
if self._api_token:
self._active_tenant_id = settings.get("active_tenant_id")
if self._active_tenant_id:
try:
self.login_to_tenant(tenant_id=self._active_tenant_id)
except AuthorizationError:
# if an authorization error is raised, then the token is invalid and should
# be cleared
self.logout_from_tenant()
# -------------------------------------------------------------------------
# Utilities
def get(
self,
path: str,
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
token: str = None,
) -> dict:
"""
Convenience function for calling the Prefect API with token auth and GET request
Args:
- path (str): the path of the API url. For example, to GET
http://prefect-server/v1/auth/login, path would be 'auth/login'.
- server (str, optional): the server to send the GET request to;
defaults to `self.api_server`
- headers (dict, optional): Headers to pass with the request
- params (dict): GET parameters
- token (str): an auth token. If not supplied, the `client.access_token` is used.
Returns:
- dict: Dictionary representation of the request made
"""
response = self._request(
method="GET",
path=path,
params=params,
server=server,
headers=headers,
token=token,
)
if response.text:
return response.json()
else:
return {}
def post(
self,
path: str,
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
token: str = None,
) -> dict:
"""
Convenience function for calling the Prefect API with token auth and POST request
Args:
- path (str): the path of the API url. For example, to POST
http://prefect-server/v1/auth/login, path would be 'auth/login'.
- server (str, optional): the server to send the POST request to;
defaults to `self.api_server`
- headers(dict): headers to pass with the request
- params (dict): POST parameters
- token (str): an auth token. If not supplied, the `client.access_token` is used.
Returns:
- dict: Dictionary representation of the request made
"""
response = self._request(
method="POST",
path=path,
params=params,
server=server,
headers=headers,
token=token,
)
if response.text:
return response.json()
else:
return {}
def graphql(
self,
query: Any,
raise_on_error: bool = True,
headers: Dict[str, str] = None,
variables: Dict[str, JSONLike] = None,
token: str = None,
) -> GraphQLResult:
"""
Convenience function for running queries against the Prefect GraphQL API
Args:
- query (Any): A representation of a graphql query to be executed. It will be
parsed by prefect.utilities.graphql.parse_graphql().
- raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL
returns any `errors`.
- headers (dict): any additional headers that should be passed as part of the
request
- variables (dict): Variables to be filled into a query with the key being
equivalent to the variables that are accepted by the query
- token (str): an auth token. If not supplied, the `client.access_token` is used.
Returns:
- dict: Data returned from the GraphQL query
Raises:
- ClientError if there are errors raised by the GraphQL mutation
"""
result = self.post(
path="",
server=self.api_server,
headers=headers,
params=dict(query=parse_graphql(query), variables=json.dumps(variables)),
token=token,
)
if raise_on_error and "errors" in result:
if "UNAUTHENTICATED" in str(result["errors"]):
raise AuthorizationError(result["errors"])
elif "Malformed Authorization header" in str(result["errors"]):
raise AuthorizationError(result["errors"])
raise ClientError(result["errors"])
else:
return GraphQLResult(result) # type: ignore
def _request(
self,
method: str,
path: str,
params: Dict[str, JSONLike] = None,
server: str = None,
headers: dict = None,
token: str = None,
) -> "requests.models.Response":
"""
Runs any specified request (GET, POST, DELETE) against the server
Args:
- method (str): The type of request to be made (GET, POST, DELETE)
- path (str): Path of the API URL
- params (dict, optional): Parameters used for the request
- server (str, optional): The server to make requests against, base API
server is used if not specified
- headers (dict, optional): Headers to pass with the request
- token (str): an auth token. If not supplied, the `client.access_token` is used.
Returns:
- requests.models.Response: The response returned from the request
Raises:
- ClientError: if the client token is not in the context (due to not being logged in)
- ValueError: if a method is specified outside of the accepted GET, POST, DELETE
- requests.HTTPError: if a status code is returned that is not `200` or `401`
"""
if server is None:
server = self.api_server
assert isinstance(server, str) # mypy assert
if token is None:
token = self.get_auth_token()
# 'import requests' is expensive time-wise, we should do this just-in-time to keep
# the 'import prefect' time low
import requests
url = urljoin(server, path.lstrip("/")).rstrip("/")
params = params or {}
headers = headers or {}
if token:
headers["Authorization"] = "Bearer {}".format(token)
headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__)
if self._attached_headers:
headers.update(self._attached_headers)
session = requests.Session()
retries = requests.packages.urllib3.util.retry.Retry(
total=6,
backoff_factor=1,
status_forcelist=[500, 502, 503, 504],
method_whitelist=["DELETE", "GET", "POST"],
)
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries))
if prefect.context.config.cloud.get("diagnostics") is True:
self.logger.debug(f"Preparing request to {url}")
clean_headers = {
head: re.sub("Bearer .*", "Bearer XXXX", val)
for head, val in headers.items()
}
self.logger.debug(f"Headers: {clean_headers}")
self.logger.debug(f"Request: {params}")
start_time = time.time()
if method == "GET":
response = session.get(url, headers=headers, params=params, timeout=30)
elif method == "POST":
response = session.post(url, headers=headers, json=params, timeout=30)
elif method == "DELETE":
response = session.delete(url, headers=headers, timeout=30)
else:
raise ValueError("Invalid method: {}".format(method))
if prefect.context.config.cloud.get("diagnostics") is True:
end_time = time.time()
self.logger.debug(f"Response: {response.json()}")
self.logger.debug(
f"Request duration: {round(end_time - start_time, 4)} seconds"
)
# Check if request returned a successful status
response.raise_for_status()
return response
def attach_headers(self, headers: dict) -> None:
"""
Set headers to be attached to this Client
Args:
- headers (dict): A dictionary of headers to attach to this client. These headers
get added on to the existing dictionary of headers.
"""
self._attached_headers.update(headers)
# -------------------------------------------------------------------------
# Auth
# -------------------------------------------------------------------------
@property
def _local_settings_path(self) -> Path:
"""
Returns the local settings directory corresponding to the current API servers
"""
path = "{home}/client/{server}".format(
home=prefect.context.config.home_dir,
server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"),
)
return Path(os.path.expanduser(path)) / "settings.toml"
def _save_local_settings(self, settings: dict) -> None:
"""
Writes settings to local storage
"""
self._local_settings_path.parent.mkdir(exist_ok=True, parents=True)
with self._local_settings_path.open("w+") as f:
toml.dump(settings, f)
def _load_local_settings(self) -> dict:
"""
Loads settings from local storage
"""
if self._local_settings_path.exists():
with self._local_settings_path.open("r") as f:
return toml.load(f) # type: ignore
return {}
def save_api_token(self) -> None:
"""
Saves the API token in local storage.
"""
settings = self._load_local_settings()
settings["api_token"] = self._api_token
self._save_local_settings(settings)
def get_auth_token(self) -> str:
"""
Returns an auth token:
- if no explicit access token is stored, returns the api token
- if there is an access token:
- if there's a refresh token and the access token expires in the next 30 seconds,
then we refresh the access token and store the result
- return the access token
Returns:
- str: the access token
"""
if not self._access_token:
return self._api_token
expiration = self._access_token_expires_at or pendulum.now()
if self._refresh_token and pendulum.now().add(seconds=30) > expiration:
self._refresh_access_token()
return self._access_token
def get_available_tenants(self) -> List[Dict]:
"""
Returns a list of available tenants.
NOTE: this should only be called by users who have provided a USER-scoped API token.
Returns:
- List[Dict]: a list of dictionaries containing the id, slug, and name of
available tenants
"""
result = self.graphql(
{"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}},
# use the API token to see all available tenants
token=self._api_token,
) # type: ignore
return result.data.tenant # type: ignore
def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool:
"""
Log in to a specific tenant
NOTE: this should only be called by users who have provided a USER-scoped API token.
Args:
- tenant_slug (str): the tenant's slug
- tenant_id (str): the tenant's id
Returns:
- bool: True if the login was successful
Raises:
            - ValueError: if neither `tenant_slug` nor `tenant_id` is provided
- ValueError: if the `tenant_id` is not a valid UUID
- ValueError: if no matching tenants are found
"""
if tenant_slug is None and tenant_id is None:
raise ValueError(
"At least one of `tenant_slug` or `tenant_id` must be provided."
)
elif tenant_id:
try:
uuid.UUID(tenant_id)
except ValueError:
raise ValueError("The `tenant_id` must be a valid UUID.")
tenant = self.graphql(
{
"query($slug: String, $id: uuid)": {
"tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"}
}
},
variables=dict(slug=tenant_slug, id=tenant_id),
# use the API token to query the tenant
token=self._api_token,
) # type: ignore
if not tenant.data.tenant: # type: ignore
raise ValueError("No matching tenants found.")
tenant_id = tenant.data.tenant[0].id # type: ignore
payload = self.graphql(
{
"mutation($input: switch_tenant_input!)": {
"switch_tenant(input: $input)": {
"access_token",
"expires_at",
"refresh_token",
}
}
},
variables=dict(input=dict(tenant_id=tenant_id)),
# Use the API token to switch tenants
token=self._api_token,
) # type: ignore
self._access_token = payload.data.switch_tenant.access_token # type: ignore
self._access_token_expires_at = pendulum.parse( # type: ignore
payload.data.switch_tenant.expires_at # type: ignore
) # type: ignore
self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore
self._active_tenant_id = tenant_id
# save the tenant setting
settings = self._load_local_settings()
settings["active_tenant_id"] = self._active_tenant_id
self._save_local_settings(settings)
return True
def logout_from_tenant(self) -> None:
self._access_token = None
self._refresh_token = None
self._active_tenant_id = None
# remove the tenant setting
settings = self._load_local_settings()
settings["active_tenant_id"] = None
self._save_local_settings(settings)
def _refresh_access_token(self) -> bool:
"""
Refresh the client's JWT access token.
NOTE: this should only be called by users who have provided a USER-scoped API token.
Returns:
- bool: True if the refresh succeeds
"""
payload = self.graphql(
{
"mutation($input: refresh_token_input!)": {
"refresh_token(input: $input)": {
"access_token",
"expires_at",
"refresh_token",
}
}
},
variables=dict(input=dict(access_token=self._access_token)),
# pass the refresh token as the auth header
token=self._refresh_token,
) # type: ignore
self._access_token = payload.data.refresh_token.access_token # type: ignore
self._access_token_expires_at = pendulum.parse( # type: ignore
payload.data.refresh_token.expires_at # type: ignore
) # type: ignore
self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore
return True
# -------------------------------------------------------------------------
# Actions
# -------------------------------------------------------------------------
def register(
self,
flow: "Flow",
project_name: str = None,
build: bool = True,
set_schedule_active: bool = True,
version_group_id: str = None,
compressed: bool = True,
no_url: bool = False,
) -> str:
"""
Push a new flow to Prefect Cloud
Args:
- flow (Flow): a flow to register
- project_name (str, optional): the project that should contain this flow.
- build (bool, optional): if `True`, the flow's environment is built
prior to serialization; defaults to `True`
- set_schedule_active (bool, optional): if `False`, will set the
schedule to inactive in the database to prevent auto-scheduling runs (if the Flow has a schedule).
Defaults to `True`. This can be changed later.
- version_group_id (str, optional): the UUID version group ID to use for versioning this Flow
in Cloud; if not provided, the version group ID associated with this Flow's project and name
will be used.
            - compressed (bool, optional): if `True`, the serialized flow will be
                compressed; defaults to `True`
- no_url (bool, optional): if `True`, the stdout from this function will not contain the
URL link to the newly-registered flow in the Cloud UI
Returns:
- str: the ID of the newly-registered flow
Raises:
- ClientError: if the register failed
"""
required_parameters = {p for p in flow.parameters() if p.required}
if flow.schedule is not None and required_parameters:
required_names = {p.name for p in required_parameters}
if not all(
[
required_names <= set(c.parameter_defaults.keys())
for c in flow.schedule.clocks
]
):
raise ClientError(
"Flows with required parameters can not be scheduled automatically."
)
if any(e.key for e in flow.edges) and flow.result_handler is None:
warnings.warn(
"No result handler was specified on your Flow. Cloud features such as input caching and resuming task runs from failure may not work properly.",
UserWarning,
)
if compressed:
create_mutation = {
"mutation($input: create_flow_from_compressed_string_input!)": {
"create_flow_from_compressed_string(input: $input)": {"id"}
}
}
else:
create_mutation = {
"mutation($input: create_flow_input!)": {
"create_flow(input: $input)": {"id"}
}
}
project = None
if prefect.config.backend == "cloud":
if project_name is None:
raise TypeError(
"'project_name' is a required field when registering a flow with Cloud. "
"If you are attempting to register a Flow with a local Prefect server you may need to run `prefect backend server` first."
)
query_project = {
"query": {
with_args("project", {"where": {"name": {"_eq": project_name}}}): {
"id": True
}
}
}
project = self.graphql(query_project).data.project # type: ignore
if not project:
raise ValueError(
'Project {} not found. Run `client.create_project("{}")` to create it.'.format(
project_name, project_name
)
)
serialized_flow = flow.serialize(build=build) # type: Any
# verify that the serialized flow can be deserialized
try:
prefect.serialization.flow.FlowSchema().load(serialized_flow)
except Exception as exc:
raise ValueError(
"Flow could not be deserialized successfully. Error was: {}".format(
repr(exc)
)
)
if compressed:
serialized_flow = compress(serialized_flow)
res = self.graphql(
create_mutation,
variables=dict(
input=dict(
project_id=project[0].id if project else None,
serialized_flow=serialized_flow,
set_schedule_active=set_schedule_active,
version_group_id=version_group_id,
)
),
) # type: Any
flow_id = (
res.data.create_flow_from_compressed_string.id
if compressed
else res.data.create_flow.id
)
if not no_url:
# Generate direct link to Cloud flow
flow_url = self.get_cloud_url("flow", flow_id)
print("Flow: {}".format(flow_url))
return flow_id
def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str:
"""
Convenience method for creating Prefect Cloud URLs for a given subdirectory.
Args:
- subdirectory (str): the subdirectory to use (e.g., `"flow-run"`)
- id (str): the ID of the page
- as_user (bool, optional): whether this query is being made from a USER scoped token;
defaults to `True`. Only used internally for queries made from RUNNERs
Returns:
- str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory and ID
Example:
```python
from prefect import Client
client = Client()
client.get_cloud_url("flow-run", "424242-ca-94611-111-55")
# returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55"
```
"""
# Generate direct link to UI
if prefect.config.backend == "cloud":
tenant_slug = self.get_default_tenant_slug(as_user=as_user)
else:
tenant_slug = ""
base_url = (
re.sub("api-", "", prefect.config.cloud.api)
if re.search("api-", prefect.config.cloud.api)
else re.sub("api", "cloud", prefect.config.cloud.api)
)
full_url = prefect.config.cloud.api
if tenant_slug:
full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id])
elif prefect.config.backend == "server":
full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id,])
return full_url
def get_default_tenant_slug(self, as_user: bool = True) -> str:
"""
Get the default tenant slug for the currently authenticated user
Args:
- as_user (bool, optional): whether this query is being made from a USER scoped token;
defaults to `True`. Only used internally for queries made from RUNNERs
Returns:
- str: the slug of the current default tenant for this user
"""
if as_user:
query = {
"query": {"user": {"default_membership": {"tenant": "slug"}}}
} # type: dict
else:
query = {"query": {"tenant": {"slug"}}}
res = self.graphql(query)
if as_user:
user = res.get("data").user[0]
slug = user.default_membership.tenant.slug
else:
slug = res.get("data").tenant[0].slug
return slug
def create_project(self, project_name: str, project_description: str = None) -> str:
"""
Create a new Project
Args:
- project_name (str): the project that should contain this flow
- project_description (str, optional): the project description
Returns:
- str: the ID of the newly-created project
Raises:
- ClientError: if the project creation failed
"""
project_mutation = {
"mutation($input: create_project_input!)": {
"create_project(input: $input)": {"id"}
}
}
res = self.graphql(
project_mutation,
variables=dict(
input=dict(name=project_name, description=project_description)
),
) # type: Any
return res.data.create_project.id
def create_flow_run(
self,
flow_id: str = None,
context: dict = None,
parameters: dict = None,
scheduled_start_time: datetime.datetime = None,
idempotency_key: str = None,
run_name: str = None,
version_group_id: str = None,
) -> str:
"""
        Create a new flow run for the given flow id. If `scheduled_start_time` is not provided, the flow run will be scheduled to start immediately.
If both `flow_id` and `version_group_id` are provided, only the `flow_id` will be used.
Args:
- flow_id (str, optional): the id of the Flow you wish to schedule
- context (dict, optional): the run context
- parameters (dict, optional): a dictionary of parameter values to pass to the flow run
- scheduled_start_time (datetime, optional): the time to schedule the execution for; if not provided, defaults to now
- idempotency_key (str, optional): an idempotency key; if provided, this run will be cached for 24
hours. Any subsequent attempts to create a run with the same idempotency key
will return the ID of the originally created run (no new run will be created after the first).
An error will be raised if parameters or context are provided and don't match the original.
Each subsequent request will reset the TTL for 24 hours.
- run_name (str, optional): The name assigned to this flow run
- version_group_id (str, optional): if provided, the unique unarchived flow within this version group will be scheduled
to run. This input can be used as a stable API for running flows which are regularly updated.
Returns:
- str: the ID of the newly-created flow run
Raises:
- ClientError: if the GraphQL query is bad for any reason
"""
create_mutation = {
"mutation($input: create_flow_run_input!)": {
"create_flow_run(input: $input)": {"id": True}
}
}
if not flow_id and not version_group_id:
raise ValueError("One of flow_id or version_group_id must be provided")
inputs = (
dict(flow_id=flow_id) if flow_id else dict(version_group_id=version_group_id) # type: ignore
)
if parameters is not None:
inputs.update(parameters=parameters) # type: ignore
if context is not None:
inputs.update(context=context) # type: ignore
if idempotency_key is not None:
inputs.update(idempotency_key=idempotency_key) # type: ignore
if scheduled_start_time is not None:
inputs.update(
scheduled_start_time=scheduled_start_time.isoformat()
) # type: ignore
if run_name is not None:
inputs.update(flow_run_name=run_name) # type: ignore
res = self.graphql(create_mutation, variables=dict(input=inputs))
return res.data.create_flow_run.id # type: ignore
def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult:
"""
Retrieves version and current state information for the given flow run.
Args:
- flow_run_id (str): the id of the flow run to get information for
Returns:
- GraphQLResult: an object representing information about the flow run
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
"""
query = {
"query": {
with_args("flow_run_by_pk", {"id": flow_run_id}): {
"id": True,
"name": True,
"flow_id": True,
"parameters": True,
"context": True,
"version": True,
"scheduled_start_time": True,
"serialized_state": True,
# load all task runs except dynamic task runs
with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): {
"id": True,
"task": {"id": True, "slug": True},
"version": True,
"serialized_state": True,
},
}
}
}
result = self.graphql(query).data.flow_run_by_pk # type: ignore
if result is None:
raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id))
# convert scheduled_start_time from string to datetime
result.scheduled_start_time = pendulum.parse(result.scheduled_start_time)
# create "state" attribute from serialized_state
result.state = prefect.engine.state.State.deserialize(
result.pop("serialized_state")
)
# reformat task_runs
task_runs = []
for tr in result.task_runs:
tr.state = prefect.engine.state.State.deserialize(
tr.pop("serialized_state")
)
task_info = tr.pop("task")
tr.task_id = task_info["id"]
tr.task_slug = task_info["slug"]
task_runs.append(TaskRunInfoResult(**tr))
result.task_runs = task_runs
result.context = (
result.context.to_dict() if result.context is not None else None
)
result.parameters = (
result.parameters.to_dict() if result.parameters is not None else None
)
return FlowRunInfoResult(**result)
def update_flow_run_heartbeat(self, flow_run_id: str) -> None:
"""
Convenience method for heartbeating a flow run.
Does NOT raise an error if the update fails.
Args:
- flow_run_id (str): the flow run ID to heartbeat
"""
mutation = {
"mutation": {
with_args(
"update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}}
): {"success"}
}
}
self.graphql(mutation, raise_on_error=True)
def update_task_run_heartbeat(self, task_run_id: str) -> None:
"""
Convenience method for heartbeating a task run.
Does NOT raise an error if the update fails.
Args:
- task_run_id (str): the task run ID to heartbeat
"""
mutation = {
"mutation": {
with_args(
"update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}}
): {"success"}
}
}
self.graphql(mutation, raise_on_error=True)
def set_flow_run_state(
self, flow_run_id: str, version: int, state: "prefect.engine.state.State"
) -> None:
"""
Sets new state for a flow run in the database.
Args:
- flow_run_id (str): the id of the flow run to set state for
- version (int): the current version of the flow run state
- state (State): the new state for this flow run
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
"""
mutation = {
"mutation($input: set_flow_run_states_input!)": {
"set_flow_run_states(input: $input)": {"states": {"id"}}
}
}
serialized_state = state.serialize()
result = self.graphql(
mutation,
variables=dict(
input=dict(
states=[
dict(
state=serialized_state,
flow_run_id=flow_run_id,
version=version,
)
]
)
),
) # type: Any
def get_latest_cached_states(
self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime
) -> List["prefect.engine.state.State"]:
"""
Pulls all Cached states for the given task that were created after the provided date.
Args:
- task_id (str): the task id for this task run
- cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the task id alone will be used
- created_after (datetime.datetime): the earliest date the state should have been created at
Returns:
- List[State]: a list of Cached states created after the given date
"""
where_clause = {
"where": {
"state": {"_eq": "Cached"},
"_or": [
{
"_and": [
{"cache_key": {"_eq": cache_key}},
{"cache_key": {"_is_null": False}},
]
},
{"task_id": {"_eq": task_id}},
],
"state_timestamp": {"_gte": created_after.isoformat()},
},
"order_by": {"state_timestamp": EnumValue("desc")},
}
query = {"query": {with_args("task_run", where_clause): "serialized_state"}}
result = self.graphql(query) # type: Any
deserializer = prefect.engine.state.State.deserialize
valid_states = [
deserializer(res.serialized_state) for res in result.data.task_run
]
return valid_states
def get_task_run_info(
self, flow_run_id: str, task_id: str, map_index: Optional[int] = None
) -> TaskRunInfoResult:
"""
Retrieves version and current state information for the given task run.
Args:
- flow_run_id (str): the id of the flow run that this task run lives in
- task_id (str): the task id for this task run
- map_index (int, optional): the mapping index for this task run; if
`None`, it is assumed this task is _not_ mapped
Returns:
            - NamedTuple: a tuple containing `id, task_id, task_slug, version, state`
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
"""
mutation = {
"mutation": {
with_args(
"get_or_create_task_run",
{
"input": {
"flow_run_id": flow_run_id,
"task_id": task_id,
"map_index": -1 if map_index is None else map_index,
}
},
): {
"id": True,
}
}
}
result = self.graphql(mutation) # type: Any
if result is None:
raise ClientError("Failed to create task run.")
task_run_id = result.data.get_or_create_task_run.id
query = {
"query": {
with_args("task_run_by_pk", {"id": task_run_id}): {
"version": True,
"serialized_state": True,
"task": {"slug": True},
}
}
}
task_run = self.graphql(query).data.task_run_by_pk # type: ignore
if task_run is None:
raise ClientError('Task run ID not found: "{}"'.format(task_run_id))
state = prefect.engine.state.State.deserialize(task_run.serialized_state)
return TaskRunInfoResult(
id=task_run_id,
task_id=task_id,
task_slug=task_run.task.slug,
version=task_run.version,
state=state,
)
def set_task_run_state(
self,
task_run_id: str,
version: int,
state: "prefect.engine.state.State",
cache_for: datetime.timedelta = None,
) -> "prefect.engine.state.State":
"""
Sets new state for a task run.
Args:
- task_run_id (str): the id of the task run to set state for
- version (int): the current version of the task run state
- state (State): the new state for this task run
- cache_for (timedelta, optional): how long to store the result of this task for, using the
serializer set in config; if not provided, no caching occurs
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
Returns:
- State: the state the current task run should be considered in
"""
mutation = {
"mutation($input: set_task_run_states_input!)": {
"set_task_run_states(input: $input)": {
"states": {"id", "status", "message"}
}
}
}
serialized_state = state.serialize()
result = self.graphql(
mutation,
variables=dict(
input=dict(
states=[
dict(
state=serialized_state,
task_run_id=task_run_id,
version=version,
)
]
)
),
) # type: Any
state_payload = result.data.set_task_run_states.states[0]
if state_payload.status == "QUEUED":
# If appropriate, the state attribute of the Queued state can be
# set by the caller of this method
return prefect.engine.state.Queued(
message=state_payload.get("message"),
start_time=pendulum.now("UTC").add(
seconds=prefect.context.config.cloud.queue_interval
),
)
return state
def set_secret(self, name: str, value: Any) -> None:
"""
Set a secret with the given name and value.
Args:
- name (str): the name of the secret; used for retrieving the secret
during task runs
- value (Any): the value of the secret
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the secret-setting was unsuccessful
"""
mutation = {
"mutation($input: set_secret_input!)": {
"set_secret(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(name=name, value=value))
) # type: Any
if not result.data.set_secret.success:
raise ValueError("Setting secret failed.")
def get_task_tag_limit(self, tag: str) -> Optional[int]:
"""
Retrieve the current task tag concurrency limit for a given tag.
Args:
- tag (str): the tag to update
Raises:
- ClientError: if the GraphQL query fails
"""
query = {
"query": {
with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): {
"limit": True
}
}
}
result = self.graphql(query) # type: Any
if result.data.task_tag_limit:
return result.data.task_tag_limit[0].limit
else:
return None
def update_task_tag_limit(self, tag: str, limit: int) -> None:
"""
Update the task tag concurrency limit for a given tag; requires tenant admin permissions.
Args:
- tag (str): the tag to update
- limit (int): the concurrency limit to enforce on the tag; should be a value >= 0
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided
"""
if limit < 0:
raise ValueError("Concurrency limits must be >= 0")
mutation = {
"mutation($input: update_task_tag_limit_input!)": {
"update_task_tag_limit(input: $input)": {"id"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(tag=tag, limit=limit))
) # type: Any
if not result.data.update_task_tag_limit.id:
raise ValueError("Updating the task tag concurrency limit failed.")
def delete_task_tag_limit(self, limit_id: str) -> None:
"""
Deletes a given task tag concurrency limit; requires tenant admin permissions.
Args:
- limit_id (str): the ID of the tag to delete
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided
"""
mutation = {
"mutation($input: delete_task_tag_limit_input!)": {
"delete_task_tag_limit(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(limit_id=limit_id))
) # type: Any
if not result.data.delete_task_tag_limit.success:
raise ValueError("Deleting the task tag concurrency limit failed.")
def write_run_logs(self, logs: List[Dict]) -> None:
"""
Uploads a collection of logs to Cloud.
Args:
- logs (List[Dict]): a list of log entries to add
Raises:
- ValueError: if uploading the logs fail
"""
mutation = {
"mutation($input: write_run_logs_input!)": {
"write_run_logs(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(logs=logs))
) # type: Any
if not result.data.write_run_logs.success:
raise ValueError("Writing logs failed.")
def register_agent(
self, agent_type: str, name: str = None, labels: List[str] = None
) -> str:
"""
Register an agent with Cloud
Args:
- agent_type (str): The type of agent being registered
- name: (str, optional): The name of the agent being registered
- labels (List[str], optional): A list of any present labels on the agent
being registered
Returns:
- The agent ID as a string
"""
mutation = {
"mutation($input: register_agent_input!)": {
"register_agent(input: $input)": {"id"}
}
}
result = self.graphql(
mutation,
variables=dict(input=dict(type=agent_type, name=name, labels=labels)),
)
if not result.data.register_agent.id:
raise ValueError("Error registering agent")
return result.data.register_agent.id
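# A brief usage sketch (illustrative only: the server URL and token are
# placeholders, and the query simply mirrors the dict-based GraphQL format that
# Client.graphql accepts above):
#
#     client = Client(api_server="https://api.prefect.io", api_token="<API_TOKEN>")
#     result = client.graphql({"query": {"flow": {"id", "name"}}})
#     print(result.data.flow)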
| 36.180893
| 160
| 0.552064
|
501339b8d673885c749fd1421c34050899a6cefa
| 2,031
|
py
|
Python
|
package/fast_svd.py
|
AyoubBelhadji/random_matrix_factorization
|
44ee6cd01b1d3f5d70d8392b3b7c1ccdb93e2d89
|
[
"MIT"
] | null | null | null |
package/fast_svd.py
|
AyoubBelhadji/random_matrix_factorization
|
44ee6cd01b1d3f5d70d8392b3b7c1ccdb93e2d89
|
[
"MIT"
] | null | null | null |
package/fast_svd.py
|
AyoubBelhadji/random_matrix_factorization
|
44ee6cd01b1d3f5d70d8392b3b7c1ccdb93e2d89
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 12 11:45:41 2017
@author: ayoubbelhadji1
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pyplot
from scipy.stats import chi2
import pylab as mp
### Parameters
N = 1000 # Number of points
d = 2 # Dimension
s_n = 10 # Number of leverage sampling size
s_num_iterations = 1000 # Number of leverage sampling iterations
mean = [0, 0]
cov = [[1, 0], [0, 1]]
### Data Generation
r_X = np.random.multivariate_normal(mean, cov, N).T
r_X_index = list(range(0,N))
### Computing leverage scores
leverage_scores_r_X = np.sum(r_X*r_X, axis=0)/(np.linalg.norm(r_X)**2)
leverage_sampling = np.zeros((s_n,s_num_iterations))
C = np.zeros((d,s_n))
delta_quadratic_norm_sum = 0
delta_matrix = np.zeros((d,d))
for l in range(s_num_iterations):  # iterate over all trials; the averages below divide by s_num_iterations
### Sampling according to leverage scores
leverage_sampling[:,l] = np.random.choice(r_X_index, s_n, p=leverage_scores_r_X,replace=False)
sqrt_p_vector = np.divide(np.ones((d,s_n)),np.sqrt(leverage_scores_r_X[np.ndarray.tolist(leverage_sampling[:,l].astype(int))]))
C = (1/np.sqrt(s_n))*(np.multiply(r_X[:,np.ndarray.tolist(leverage_sampling[:,l].astype(int))],sqrt_p_vector))
delta_quadratic_norm_sum = delta_quadratic_norm_sum + (np.linalg.norm(np.dot(C,C.T) - np.dot(r_X,r_X.T)))**2
delta_matrix = delta_matrix + np.dot(C,C.T)-np.dot(r_X,r_X.T)
delta_quadratic_norm_sum = delta_quadratic_norm_sum/s_num_iterations
delta_matrix = delta_matrix/s_num_iterations
norm_sum_bound = (1/s_n)*np.linalg.norm(r_X)**4
print(delta_quadratic_norm_sum/norm_sum_bound)
print(np.linalg.norm(delta_matrix))
## Plots
#matplotlib.pyplot.scatter(C[0,:],C[1,:])
#matplotlib.pyplot.show()
#matplotlib.pyplot.scatter(r_X[leverage_sampling,0],r_X[leverage_sampling,1])
#matplotlib.pyplot.show()
##empirical_cov = np.cov(r_X.T)
##plot_ellipse(r_X,cov=empirical_cov)
##leverage_empirical_cov = np.cov(r_X[leverage_sampling,:].T)
##plot_ellipse(r_X[leverage_sampling,:],cov=leverage_empirical_cov)
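# A compact sketch wrapping the column-sampling step from the loop above as a
# reusable function (the name is illustrative; the scaling mirrors the code above
# so that C @ C.T approximates X @ X.T):
def leverage_score_column_sample(X, s):
    scores = np.sum(X * X, axis=0) / (np.linalg.norm(X) ** 2)
    idx = np.random.choice(X.shape[1], s, p=scores, replace=False)
    C = X[:, idx] / np.sqrt(s * scores[idx])
    return C, idx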
| 34.423729
| 131
| 0.742491
|
14aeb5f61da104831d8ff04ebbafa904dbe340af
| 1,709
|
py
|
Python
|
Sprint_Challenge/aq_dashboard.py
|
hughjafro/DS-Unit-3-Sprint-4-Productization-and-Cloud
|
b14b6779b02db7585ec5d9d6e741c7d53b555741
|
[
"MIT"
] | null | null | null |
Sprint_Challenge/aq_dashboard.py
|
hughjafro/DS-Unit-3-Sprint-4-Productization-and-Cloud
|
b14b6779b02db7585ec5d9d6e741c7d53b555741
|
[
"MIT"
] | null | null | null |
Sprint_Challenge/aq_dashboard.py
|
hughjafro/DS-Unit-3-Sprint-4-Productization-and-Cloud
|
b14b6779b02db7585ec5d9d6e741c7d53b555741
|
[
"MIT"
] | null | null | null |
"""OpenAQ Air Quality Dashboard with Flask."""
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import openaq
"""Create and configure an instance of the flask app"""
APP = Flask(__name__)
api = openaq.OpenAQ()
APP.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
APP.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB = SQLAlchemy(APP)
def la_pm(city='Los Angeles', parameter='pm25'):
    status, body = api.measurements(city=city, parameter=parameter)
utc_datetime_value = []
for result in body['results']:
date = result['date']['utc']
value = result['value']
utc_datetime_value.append((date, value))
return utc_datetime_value
@APP.route('/')
def root():
"""Base view."""
# utc_datetime_value = la_pm(city, parameter)
value_10 = Record.query.filter(Record.value >= 10).all()
return render_template('base.html', title='Air Quality', value_10=value_10)
class Record(DB.Model):
id = DB.Column(DB.Integer, primary_key=True)
datetime = DB.Column(DB.String(25))
value = DB.Column(DB.Float, nullable=False)
def __repr__(self):
return '<Time {} --- Value {}>'.format(self.datetime, self.value)
@APP.route('/refresh')
def refresh():
"""Pull fresh data from Open AQ and replace existing data."""
DB.drop_all()
DB.create_all()
# TODO Get data from OpenAQ, make Record objects with it, and add to db
utc_datetime_value = la_pm('Los Angeles', 'pm25')
for x in utc_datetime_value:
record = Record(datetime=x[0], value=x[1])
DB.session.add(record)
DB.session.commit()
return render_template('base.html', title='Data refreshed!')
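# Optional convenience for local development (an addition, not part of the original
# assignment): allow running this module directly with `python aq_dashboard.py`.
if __name__ == '__main__':
    APP.run(debug=True)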
| 31.072727
| 79
| 0.680515
|
e6f277ff8747c830839ca30e23e75460fbb325b0
| 62,042
|
py
|
Python
|
no_rendering_mode.py
|
aleallievi/scenario_runner
|
3744fc15dadf169cfa25e3192fb2257a5ec3b557
|
[
"MIT"
] | 447
|
2021-03-26T09:29:17.000Z
|
2022-03-30T03:03:35.000Z
|
no_rendering_mode.py
|
aleallievi/scenario_runner
|
3744fc15dadf169cfa25e3192fb2257a5ec3b557
|
[
"MIT"
] | 56
|
2021-04-21T03:12:50.000Z
|
2022-03-30T13:34:16.000Z
|
no_rendering_mode.py
|
aleallievi/scenario_runner
|
3744fc15dadf169cfa25e3192fb2257a5ec3b557
|
[
"MIT"
] | 82
|
2021-04-14T04:34:04.000Z
|
2022-03-29T07:35:15.000Z
|
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
# Allows visualising a 2D map generated by vehicles.
"""
Welcome to CARLA No-Rendering Mode Visualizer
TAB : toggle hero mode
Mouse Wheel : zoom in / zoom out
Mouse Drag : move map (map mode only)
W : throttle
S : brake
AD : steer
Q : toggle reverse
Space : hand-brake
P : toggle autopilot
M : toggle manual transmission
,/. : gear up/down
F1 : toggle HUD
I : toggle actor ids
H/? : toggle help
ESC : quit
"""
# ==============================================================================
# -- find carla module ---------------------------------------------------------
# ==============================================================================
import glob
import os
import sys
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
# ==============================================================================
# -- imports -------------------------------------------------------------------
# ==============================================================================
import carla
from carla import TrafficLightState as tls
import argparse
import logging
import datetime
import weakref
import math
import random
try:
import pygame
from pygame.locals import KMOD_CTRL
from pygame.locals import KMOD_SHIFT
from pygame.locals import K_COMMA
from pygame.locals import K_DOWN
from pygame.locals import K_ESCAPE
from pygame.locals import K_F1
from pygame.locals import K_LEFT
from pygame.locals import K_PERIOD
from pygame.locals import K_RIGHT
from pygame.locals import K_SLASH
from pygame.locals import K_SPACE
from pygame.locals import K_TAB
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_d
from pygame.locals import K_h
from pygame.locals import K_i
from pygame.locals import K_m
from pygame.locals import K_p
from pygame.locals import K_q
from pygame.locals import K_s
from pygame.locals import K_w
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
# ==============================================================================
# -- Constants -----------------------------------------------------------------
# ==============================================================================
# Colors
# We will use the color palette used in Tango Desktop Project (Each color is indexed depending on brightness level)
# See: https://en.wikipedia.org/wiki/Tango_Desktop_Project
COLOR_BUTTER_0 = pygame.Color(252, 233, 79)
COLOR_BUTTER_1 = pygame.Color(237, 212, 0)
COLOR_BUTTER_2 = pygame.Color(196, 160, 0)
COLOR_ORANGE_0 = pygame.Color(252, 175, 62)
COLOR_ORANGE_1 = pygame.Color(245, 121, 0)
COLOR_ORANGE_2 = pygame.Color(209, 92, 0)
COLOR_CHOCOLATE_0 = pygame.Color(233, 185, 110)
COLOR_CHOCOLATE_1 = pygame.Color(193, 125, 17)
COLOR_CHOCOLATE_2 = pygame.Color(143, 89, 2)
COLOR_CHAMELEON_0 = pygame.Color(138, 226, 52)
COLOR_CHAMELEON_1 = pygame.Color(115, 210, 22)
COLOR_CHAMELEON_2 = pygame.Color(78, 154, 6)
COLOR_SKY_BLUE_0 = pygame.Color(114, 159, 207)
COLOR_SKY_BLUE_1 = pygame.Color(52, 101, 164)
COLOR_SKY_BLUE_2 = pygame.Color(32, 74, 135)
COLOR_PLUM_0 = pygame.Color(173, 127, 168)
COLOR_PLUM_1 = pygame.Color(117, 80, 123)
COLOR_PLUM_2 = pygame.Color(92, 53, 102)
COLOR_SCARLET_RED_0 = pygame.Color(239, 41, 41)
COLOR_SCARLET_RED_1 = pygame.Color(204, 0, 0)
COLOR_SCARLET_RED_2 = pygame.Color(164, 0, 0)
COLOR_ALUMINIUM_0 = pygame.Color(238, 238, 236)
COLOR_ALUMINIUM_1 = pygame.Color(211, 215, 207)
COLOR_ALUMINIUM_2 = pygame.Color(186, 189, 182)
COLOR_ALUMINIUM_3 = pygame.Color(136, 138, 133)
COLOR_ALUMINIUM_4 = pygame.Color(85, 87, 83)
COLOR_ALUMINIUM_4_5 = pygame.Color(66, 62, 64)
COLOR_ALUMINIUM_5 = pygame.Color(46, 52, 54)
COLOR_WHITE = pygame.Color(255, 255, 255)
COLOR_BLACK = pygame.Color(0, 0, 0)
# Module Defines
MODULE_WORLD = 'WORLD'
MODULE_HUD = 'HUD'
MODULE_INPUT = 'INPUT'
PIXELS_PER_METER = 12
MAP_DEFAULT_SCALE = 0.1
HERO_DEFAULT_SCALE = 1.0
PIXELS_AHEAD_VEHICLE = 150
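# PIXELS_PER_METER fixes the resolution of the pre-rendered map surface; MAP_DEFAULT_SCALE and
# HERO_DEFAULT_SCALE are the initial zoom levels for map mode and hero mode, and
# PIXELS_AHEAD_VEHICLE shifts the hero-mode view ahead of the vehicle along its heading.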
# ==============================================================================
# -- Util -----------------------------------------------------------
# ==============================================================================
def get_actor_display_name(actor, truncate=250):
name = ' '.join(actor.type_id.replace('_', '.').title().split('.')[1:])
return (name[:truncate - 1] + u'\u2026') if len(name) > truncate else name
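# e.g. an actor with type_id 'vehicle.tesla.model3' is displayed as 'Tesla Model3'; names longer
# than `truncate` characters are cut and get a '…' appended.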
class Util(object):
@staticmethod
def blits(destination_surface, source_surfaces, rect=None, blend_mode=0):
for surface in source_surfaces:
destination_surface.blit(surface[0], surface[1], rect, blend_mode)
@staticmethod
def length(v):
return math.sqrt(v.x**2 + v.y**2 + v.z**2)
@staticmethod
def get_bounding_box(actor):
bb = actor.trigger_volume.extent
corners = [carla.Location(x=-bb.x, y=-bb.y),
carla.Location(x=bb.x, y=-bb.y),
carla.Location(x=bb.x, y=bb.y),
carla.Location(x=-bb.x, y=bb.y),
carla.Location(x=-bb.x, y=-bb.y)]
corners = [x + actor.trigger_volume.location for x in corners]
t = actor.get_transform()
t.transform(corners)
return corners
# ==============================================================================
# -- ModuleManager -------------------------------------------------------------
# ==============================================================================
class ModuleManager(object):
def __init__(self):
self.modules = []
def register_module(self, module):
self.modules.append(module)
def clear_modules(self):
del self.modules[:]
def tick(self, clock):
# Update all the modules
for module in self.modules:
module.tick(clock)
def render(self, display):
display.fill(COLOR_ALUMINIUM_4)
for module in self.modules:
module.render(display)
def get_module(self, name):
for module in self.modules:
if module.name == name:
return module
def start_modules(self):
for module in self.modules:
module.start()
# ==============================================================================
# -- FadingText ----------------------------------------------------------------
# ==============================================================================
class FadingText(object):
def __init__(self, font, dim, pos):
self.font = font
self.dim = dim
self.pos = pos
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
def set_text(self, text, color=COLOR_WHITE, seconds=2.0):
text_texture = self.font.render(text, True, color)
self.surface = pygame.Surface(self.dim)
self.seconds_left = seconds
self.surface.fill(COLOR_BLACK)
self.surface.blit(text_texture, (10, 11))
def tick(self, clock):
delta_seconds = 1e-3 * clock.get_time()
self.seconds_left = max(0.0, self.seconds_left - delta_seconds)
self.surface.set_alpha(500.0 * self.seconds_left)
def render(self, display):
display.blit(self.surface, self.pos)
# ==============================================================================
# -- HelpText ------------------------------------------------------------------
# ==============================================================================
class HelpText(object):
def __init__(self, font, width, height):
lines = __doc__.split('\n')
self.font = font
self.dim = (680, len(lines) * 22 + 12)
self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
self.surface.fill(COLOR_BLACK)
for n, line in enumerate(lines):
text_texture = self.font.render(line, True, COLOR_WHITE)
self.surface.blit(text_texture, (22, n * 22))
self._render = False
self.surface.set_alpha(220)
def toggle(self):
self._render = not self._render
def render(self, display):
if self._render:
display.blit(self.surface, self.pos)
# ==============================================================================
# -- ModuleHUD -----------------------------------------------------------------
# ==============================================================================
class ModuleHUD (object):
def __init__(self, name, width, height):
self.name = name
self.dim = (width, height)
self._init_hud_params()
self._init_data_params()
def start(self):
pass
def _init_hud_params(self):
fonts = [x for x in pygame.font.get_fonts() if 'mono' in x]
default_font = 'ubuntumono'
mono = default_font if default_font in fonts else fonts[0]
mono = pygame.font.match_font(mono)
self._font_mono = pygame.font.Font(mono, 14)
self._header_font = pygame.font.SysFont('Arial', 14, True)
self.help = HelpText(pygame.font.Font(mono, 24), *self.dim)
self._notifications = FadingText(
pygame.font.Font(pygame.font.get_default_font(), 20),
(self.dim[0], 40), (0, self.dim[1] - 40))
def _init_data_params(self):
self.show_info = True
self.show_actor_ids = False
self._info_text = {}
def notification(self, text, seconds=2.0):
self._notifications.set_text(text, seconds=seconds)
def tick(self, clock):
self._notifications.tick(clock)
def add_info(self, module_name, info):
self._info_text[module_name] = info
def render_vehicles_ids(self, vehicle_id_surface, list_actors, world_to_pixel, hero_actor, hero_transform):
vehicle_id_surface.fill(COLOR_BLACK)
if self.show_actor_ids:
vehicle_id_surface.set_alpha(150)
for actor in list_actors:
x, y = world_to_pixel(actor[1].location)
angle = 0
if hero_actor is not None:
angle = -hero_transform.rotation.yaw - 90
color = COLOR_SKY_BLUE_0
if int(actor[0].attributes['number_of_wheels']) == 2:
color = COLOR_CHOCOLATE_0
if actor[0].attributes['role_name'] == 'hero':
color = COLOR_CHAMELEON_0
font_surface = self._header_font.render(str(actor[0].id), True, color)
rotated_font_surface = pygame.transform.rotate(font_surface, angle)
rect = rotated_font_surface.get_rect(center=(x, y))
vehicle_id_surface.blit(rotated_font_surface, rect)
return vehicle_id_surface
def render(self, display):
if self.show_info:
info_surface = pygame.Surface((240, self.dim[1]))
info_surface.set_alpha(100)
display.blit(info_surface, (0, 0))
v_offset = 4
bar_h_offset = 100
bar_width = 106
i = 0
for module_name, module_info in self._info_text.items():
if not module_info:
continue
surface = self._header_font.render(module_name, True, COLOR_ALUMINIUM_0).convert_alpha()
display.blit(surface, (8 + bar_width / 2, 18 * i + v_offset))
v_offset += 12
i += 1
for item in module_info:
if v_offset + 18 > self.dim[1]:
break
if isinstance(item, list):
if len(item) > 1:
points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]
pygame.draw.lines(display, (255, 136, 0), False, points, 2)
item = None
elif isinstance(item, tuple):
if isinstance(item[1], bool):
rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))
pygame.draw.rect(display, COLOR_ALUMINIUM_0, rect, 0 if item[1] else 1)
else:
rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))
pygame.draw.rect(display, COLOR_ALUMINIUM_0, rect_border, 1)
f = (item[1] - item[2]) / (item[3] - item[2])
if item[2] < 0.0:
rect = pygame.Rect((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6))
else:
rect = pygame.Rect((bar_h_offset, v_offset + 8), (f * bar_width, 6))
pygame.draw.rect(display, COLOR_ALUMINIUM_0, rect)
item = item[0]
if item: # At this point has to be a str.
surface = self._font_mono.render(item, True, COLOR_ALUMINIUM_0).convert_alpha()
display.blit(surface, (8, 18 * i + v_offset))
v_offset += 18
v_offset += 24
self._notifications.render(display)
self.help.render(display)
# ==============================================================================
# -- TrafficLightSurfaces ------------------------------------------------------
# ==============================================================================
class TrafficLightSurfaces(object):
"""Holds the surfaces (scaled and rotated) for painting traffic lights"""
def __init__(self):
def make_surface(tl):
w = 40
surface = pygame.Surface((w, 3 * w), pygame.SRCALPHA)
surface.fill(COLOR_ALUMINIUM_5 if tl != 'h' else COLOR_ORANGE_2)
if tl != 'h':
hw = int(w / 2)
off = COLOR_ALUMINIUM_4
red = COLOR_SCARLET_RED_0
yellow = COLOR_BUTTER_0
green = COLOR_CHAMELEON_0
pygame.draw.circle(surface, red if tl == tls.Red else off, (hw, hw), int(0.4 * w))
pygame.draw.circle(surface, yellow if tl == tls.Yellow else off, (hw, w + hw), int(0.4 * w))
pygame.draw.circle(surface, green if tl == tls.Green else off, (hw, 2 * w + hw), int(0.4 * w))
return pygame.transform.smoothscale(surface, (15, 45) if tl != 'h' else (19, 49))
self._original_surfaces = {
'h': make_surface('h'),
tls.Red: make_surface(tls.Red),
tls.Yellow: make_surface(tls.Yellow),
tls.Green: make_surface(tls.Green),
tls.Off: make_surface(tls.Off),
tls.Unknown: make_surface(tls.Unknown)
}
self.surfaces = dict(self._original_surfaces)
def rotozoom(self, angle, scale):
for key, surface in self._original_surfaces.items():
self.surfaces[key] = pygame.transform.rotozoom(surface, angle, scale)
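    # rotozoom() always starts from the untouched originals, so per-frame rotation and zoom do not
    # accumulate resampling blur in self.surfaces.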
# ==============================================================================
# -- World ---------------------------------------------------------------------
# ==============================================================================
class MapImage(object):
def __init__(self, carla_world, carla_map, pixels_per_meter, show_triggers, show_connections, show_spawn_points):
self._pixels_per_meter = pixels_per_meter
self.scale = 1.0
self.show_triggers = show_triggers
self.show_connections = show_connections
self.show_spawn_points = show_spawn_points
waypoints = carla_map.generate_waypoints(2)
margin = 50
max_x = max(waypoints, key=lambda x: x.transform.location.x).transform.location.x + margin
max_y = max(waypoints, key=lambda x: x.transform.location.y).transform.location.y + margin
min_x = min(waypoints, key=lambda x: x.transform.location.x).transform.location.x - margin
min_y = min(waypoints, key=lambda x: x.transform.location.y).transform.location.y - margin
self.width = max(max_x - min_x, max_y - min_y)
self._world_offset = (min_x, min_y)
width_in_pixels = int(self._pixels_per_meter * self.width)
self.big_map_surface = pygame.Surface((width_in_pixels, width_in_pixels)).convert()
self.draw_road_map(self.big_map_surface, carla_world, carla_map, self.world_to_pixel, self.world_to_pixel_width)
self.surface = self.big_map_surface
def draw_road_map(self, map_surface, carla_world, carla_map, world_to_pixel, world_to_pixel_width):
map_surface.fill(COLOR_ALUMINIUM_4)
precision = 0.05
def lane_marking_color_to_tango(lane_marking_color):
tango_color = COLOR_BLACK
if lane_marking_color == carla.LaneMarkingColor.White:
tango_color = COLOR_ALUMINIUM_2
elif lane_marking_color == carla.LaneMarkingColor.Blue:
tango_color = COLOR_SKY_BLUE_0
elif lane_marking_color == carla.LaneMarkingColor.Green:
tango_color = COLOR_CHAMELEON_0
elif lane_marking_color == carla.LaneMarkingColor.Red:
tango_color = COLOR_SCARLET_RED_0
elif lane_marking_color == carla.LaneMarkingColor.Yellow:
tango_color = COLOR_ORANGE_0
return tango_color
def draw_solid_line(surface, color, closed, points, width):
if len(points) >= 2:
pygame.draw.lines(surface, color, closed, points, width)
def draw_broken_line(surface, color, closed, points, width):
broken_lines = [x for n, x in enumerate(zip(*(iter(points),) * 20)) if n % 3 == 0]
for line in broken_lines:
pygame.draw.lines(surface, color, closed, line, width)
def get_lane_markings(lane_marking_type, lane_marking_color, waypoints, sign):
margin = 0.20
if lane_marking_type == carla.LaneMarkingType.Broken or (lane_marking_type == carla.LaneMarkingType.Solid):
marking_1 = [world_to_pixel(lateral_shift(w.transform, sign * w.lane_width * 0.5)) for w in waypoints]
return [(lane_marking_type, lane_marking_color, marking_1)]
elif lane_marking_type == carla.LaneMarkingType.SolidBroken or lane_marking_type == carla.LaneMarkingType.BrokenSolid:
marking_1 = [world_to_pixel(lateral_shift(w.transform, sign * w.lane_width * 0.5)) for w in waypoints]
marking_2 = [world_to_pixel(lateral_shift(w.transform,
sign * (w.lane_width * 0.5 + margin * 2))) for w in waypoints]
return [(carla.LaneMarkingType.Solid, lane_marking_color, marking_1),
(carla.LaneMarkingType.Broken, lane_marking_color, marking_2)]
elif lane_marking_type == carla.LaneMarkingType.BrokenBroken:
marking = [world_to_pixel(lateral_shift(w.transform,
sign * (w.lane_width * 0.5 - margin))) for w in waypoints]
return [(carla.LaneMarkingType.Broken, lane_marking_color, marking)]
elif lane_marking_type == carla.LaneMarkingType.SolidSolid:
marking = [world_to_pixel(lateral_shift(w.transform,
sign * ((w.lane_width * 0.5) - margin))) for w in waypoints]
return [(carla.LaneMarkingType.Solid, lane_marking_color, marking)]
return [(carla.LaneMarkingType.NONE, carla.LaneMarkingColor.Other, [])]
def draw_lane_marking(surface, waypoints, is_left):
sign = -1 if is_left else 1
lane_marking = None
marking_type = carla.LaneMarkingType.NONE
previous_marking_type = carla.LaneMarkingType.NONE
marking_color = carla.LaneMarkingColor.Other
previous_marking_color = carla.LaneMarkingColor.Other
waypoints_list = []
temp_waypoints = []
current_lane_marking = carla.LaneMarkingType.NONE
for sample in waypoints:
lane_marking = sample.left_lane_marking if sign < 0 else sample.right_lane_marking
if lane_marking is None:
continue
marking_type = lane_marking.type
marking_color = lane_marking.color
if current_lane_marking != marking_type:
markings = get_lane_markings(
previous_marking_type,
lane_marking_color_to_tango(previous_marking_color),
temp_waypoints,
sign)
current_lane_marking = marking_type
for marking in markings:
waypoints_list.append(marking)
temp_waypoints = temp_waypoints[-1:]
else:
temp_waypoints.append((sample))
previous_marking_type = marking_type
previous_marking_color = marking_color
# Add last marking
last_markings = get_lane_markings(
previous_marking_type,
lane_marking_color_to_tango(previous_marking_color),
temp_waypoints,
sign)
for marking in last_markings:
waypoints_list.append(marking)
for markings in waypoints_list:
if markings[0] == carla.LaneMarkingType.Solid:
draw_solid_line(surface, markings[1], False, markings[2], 2)
elif markings[0] == carla.LaneMarkingType.Broken:
draw_broken_line(surface, markings[1], False, markings[2], 2)
def draw_arrow(surface, transform, color=COLOR_ALUMINIUM_2):
transform.rotation.yaw += 180
forward = transform.get_forward_vector()
transform.rotation.yaw += 90
right_dir = transform.get_forward_vector()
end = transform.location
start = end - 2.0 * forward
right = start + 0.8 * forward + 0.4 * right_dir
left = start + 0.8 * forward - 0.4 * right_dir
pygame.draw.lines(
surface, color, False, [
world_to_pixel(x) for x in [
start, end]], 4)
pygame.draw.lines(
surface, color, False, [
world_to_pixel(x) for x in [
left, start, right]], 4)
def draw_traffic_signs(surface, font_surface, actor, color=COLOR_ALUMINIUM_2, trigger_color=COLOR_PLUM_0):
transform = actor.get_transform()
waypoint = carla_map.get_waypoint(transform.location)
angle = -waypoint.transform.rotation.yaw - 90.0
font_surface = pygame.transform.rotate(font_surface, angle)
pixel_pos = world_to_pixel(waypoint.transform.location)
offset = font_surface.get_rect(center=(pixel_pos[0], pixel_pos[1]))
surface.blit(font_surface, offset)
# Draw line in front of stop
forward_vector = carla.Location(waypoint.transform.get_forward_vector())
left_vector = carla.Location(-forward_vector.y, forward_vector.x,
forward_vector.z) * waypoint.lane_width / 2 * 0.7
line = [(waypoint.transform.location + (forward_vector * 1.5) + (left_vector)),
(waypoint.transform.location + (forward_vector * 1.5) - (left_vector))]
line_pixel = [world_to_pixel(p) for p in line]
pygame.draw.lines(surface, color, True, line_pixel, 2)
# draw bounding box
if self.show_triggers:
corners = Util.get_bounding_box(actor)
corners = [world_to_pixel(p) for p in corners]
pygame.draw.lines(surface, trigger_color, True, corners, 2)
def lateral_shift(transform, shift):
transform.rotation.yaw += 90
return transform.location + shift * transform.get_forward_vector()
def draw_topology(carla_topology, index):
topology = [x[index] for x in carla_topology]
topology = sorted(topology, key=lambda w: w.transform.location.z)
for waypoint in topology:
# if waypoint.road_id == 150 or waypoint.road_id == 16:
waypoints = [waypoint]
nxt = waypoint.next(precision)
if len(nxt) > 0:
nxt = nxt[0]
while nxt.road_id == waypoint.road_id:
waypoints.append(nxt)
nxt = nxt.next(precision)
if len(nxt) > 0:
nxt = nxt[0]
else:
break
# Draw Road
road_left_side = [lateral_shift(w.transform, -w.lane_width * 0.5) for w in waypoints]
road_right_side = [lateral_shift(w.transform, w.lane_width * 0.5) for w in waypoints]
polygon = road_left_side + [x for x in reversed(road_right_side)]
polygon = [world_to_pixel(x) for x in polygon]
if len(polygon) > 2:
pygame.draw.polygon(map_surface, COLOR_ALUMINIUM_5, polygon, 5)
pygame.draw.polygon(map_surface, COLOR_ALUMINIUM_5, polygon)
# Draw Shoulders and Parkings
PARKING_COLOR = COLOR_ALUMINIUM_4_5
SHOULDER_COLOR = COLOR_ALUMINIUM_5
final_color = SHOULDER_COLOR
# Draw Right
shoulder = []
for w in waypoints:
r = w.get_right_lane()
if r is not None and (
r.lane_type == carla.LaneType.Shoulder or r.lane_type == carla.LaneType.Parking):
if r.lane_type == carla.LaneType.Parking:
final_color = PARKING_COLOR
shoulder.append(r)
shoulder_left_side = [lateral_shift(w.transform, -w.lane_width * 0.5) for w in shoulder]
shoulder_right_side = [lateral_shift(w.transform, w.lane_width * 0.5) for w in shoulder]
polygon = shoulder_left_side + [x for x in reversed(shoulder_right_side)]
polygon = [world_to_pixel(x) for x in polygon]
if len(polygon) > 2:
pygame.draw.polygon(map_surface, final_color, polygon, 5)
pygame.draw.polygon(map_surface, final_color, polygon)
draw_lane_marking(
map_surface,
shoulder,
False)
# Draw Left
shoulder = []
for w in waypoints:
r = w.get_left_lane()
if r is not None and (
r.lane_type == carla.LaneType.Shoulder or r.lane_type == carla.LaneType.Parking):
if r.lane_type == carla.LaneType.Parking:
final_color = PARKING_COLOR
shoulder.append(r)
shoulder_left_side = [lateral_shift(w.transform, -w.lane_width * 0.5) for w in shoulder]
shoulder_right_side = [lateral_shift(w.transform, w.lane_width * 0.5) for w in shoulder]
polygon = shoulder_left_side + [x for x in reversed(shoulder_right_side)]
polygon = [world_to_pixel(x) for x in polygon]
if len(polygon) > 2:
pygame.draw.polygon(map_surface, final_color, polygon, 5)
pygame.draw.polygon(map_surface, final_color, polygon)
draw_lane_marking(
map_surface,
shoulder,
True)
# Draw Lane Markings and Arrows
if not waypoint.is_intersection:
draw_lane_marking(
map_surface,
waypoints,
True)
draw_lane_marking(
map_surface,
waypoints,
False)
for n, wp in enumerate(waypoints):
if ((n + 1) % 400) == 0:
draw_arrow(map_surface, wp.transform)
topology = carla_map.get_topology()
draw_topology(topology, 0)
draw_topology(topology, 1)
if self.show_spawn_points:
for sp in carla_map.get_spawn_points():
draw_arrow(map_surface, sp, color=COLOR_CHOCOLATE_0)
if self.show_connections:
dist = 1.5
to_pixel = lambda wp: world_to_pixel(wp.transform.location)
for wp in carla_map.generate_waypoints(dist):
col = (0, 255, 255) if wp.is_intersection else (0, 255, 0)
for nxt in wp.next(dist):
pygame.draw.line(map_surface, col, to_pixel(wp), to_pixel(nxt), 2)
if wp.lane_change & carla.LaneChange.Right:
r = wp.get_right_lane()
if r and r.lane_type == carla.LaneType.Driving:
pygame.draw.line(map_surface, col, to_pixel(wp), to_pixel(r), 2)
if wp.lane_change & carla.LaneChange.Left:
l = wp.get_left_lane()
if l and l.lane_type == carla.LaneType.Driving:
pygame.draw.line(map_surface, col, to_pixel(wp), to_pixel(l), 2)
actors = carla_world.get_actors()
# Draw Traffic Signs
font_size = world_to_pixel_width(1)
font = pygame.font.SysFont('Arial', font_size, True)
stops = [actor for actor in actors if 'stop' in actor.type_id]
yields = [actor for actor in actors if 'yield' in actor.type_id]
stop_font_surface = font.render("STOP", False, COLOR_ALUMINIUM_2)
stop_font_surface = pygame.transform.scale(
stop_font_surface, (stop_font_surface.get_width(), stop_font_surface.get_height() * 2))
yield_font_surface = font.render("YIELD", False, COLOR_ALUMINIUM_2)
yield_font_surface = pygame.transform.scale(
yield_font_surface, (yield_font_surface.get_width(), yield_font_surface.get_height() * 2))
for ts_stop in stops:
draw_traffic_signs(map_surface, stop_font_surface, ts_stop, trigger_color=COLOR_SCARLET_RED_1)
for ts_yield in yields:
draw_traffic_signs(map_surface, yield_font_surface, ts_yield, trigger_color=COLOR_ORANGE_1)
def world_to_pixel(self, location, offset=(0, 0)):
x = self.scale * self._pixels_per_meter * (location.x - self._world_offset[0])
y = self.scale * self._pixels_per_meter * (location.y - self._world_offset[1])
return [int(x - offset[0]), int(y - offset[1])]
def world_to_pixel_width(self, width):
return int(self.scale * self._pixels_per_meter * width)
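    # Illustrative conversion (the offsets are made-up numbers): with PIXELS_PER_METER = 12,
    # scale = 1.0 and _world_offset = (-200, -180), the world location (x=-150, y=-100)
    # maps to pixel [600, 960].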
def scale_map(self, scale):
if scale != self.scale:
self.scale = scale
width = int(self.big_map_surface.get_width() * self.scale)
self.surface = pygame.transform.smoothscale(self.big_map_surface, (width, width))
class ModuleWorld(object):
def __init__(self, name, args, timeout):
self.client = None
self.name = name
self.args = args
self.timeout = timeout
self.server_fps = 0.0
self.simulation_time = 0
self.server_clock = pygame.time.Clock()
# World data
self.world = None
self.town_map = None
self.actors_with_transforms = []
# Store necessary modules
self.module_hud = None
self.module_input = None
self.surface_size = [0, 0]
self.prev_scaled_size = 0
self.scaled_size = 0
# Hero actor
self.hero_actor = None
self.spawned_hero = None
self.hero_transform = None
self.scale_offset = [0, 0]
self.vehicle_id_surface = None
self.result_surface = None
self.traffic_light_surfaces = TrafficLightSurfaces()
self.affected_traffic_light = None
# Map info
self.map_image = None
self.border_round_surface = None
self.original_surface_size = None
self.hero_surface = None
self.actors_surface = None
def _get_data_from_carla(self):
try:
self.client = carla.Client(self.args.host, self.args.port)
self.client.set_timeout(self.timeout)
if self.args.map is None:
world = self.client.get_world()
else:
world = self.client.load_world(self.args.map)
town_map = world.get_map()
return (world, town_map)
except RuntimeError as ex:
logging.error(ex)
exit_game()
def start(self):
self.world, self.town_map = self._get_data_from_carla()
# Create Surfaces
self.map_image = MapImage(
carla_world=self.world,
carla_map=self.town_map,
pixels_per_meter=PIXELS_PER_METER,
show_triggers=self.args.show_triggers,
show_connections=self.args.show_connections,
show_spawn_points=self.args.show_spawn_points)
# Store necessary modules
self.module_hud = module_manager.get_module(MODULE_HUD)
self.module_input = module_manager.get_module(MODULE_INPUT)
self.original_surface_size = min(self.module_hud.dim[0], self.module_hud.dim[1])
self.surface_size = self.map_image.big_map_surface.get_width()
self.scaled_size = int(self.surface_size)
self.prev_scaled_size = int(self.surface_size)
# Render Actors
self.actors_surface = pygame.Surface((self.map_image.surface.get_width(), self.map_image.surface.get_height()))
self.actors_surface.set_colorkey(COLOR_BLACK)
self.vehicle_id_surface = pygame.Surface((self.surface_size, self.surface_size)).convert()
self.vehicle_id_surface.set_colorkey(COLOR_BLACK)
self.border_round_surface = pygame.Surface(self.module_hud.dim, pygame.SRCALPHA).convert()
self.border_round_surface.set_colorkey(COLOR_WHITE)
self.border_round_surface.fill(COLOR_BLACK)
center_offset = (int(self.module_hud.dim[0] / 2), int(self.module_hud.dim[1] / 2))
pygame.draw.circle(self.border_round_surface, COLOR_ALUMINIUM_1, center_offset, int(self.module_hud.dim[1] / 2))
pygame.draw.circle(self.border_round_surface, COLOR_WHITE, center_offset, int((self.module_hud.dim[1] - 8) / 2))
scaled_original_size = self.original_surface_size * (1.0 / 0.9)
self.hero_surface = pygame.Surface((scaled_original_size, scaled_original_size)).convert()
self.result_surface = pygame.Surface((self.surface_size, self.surface_size)).convert()
self.result_surface.set_colorkey(COLOR_BLACK)
# Start hero mode by default
self.select_hero_actor()
self.hero_actor.set_autopilot(False)
self.module_input.wheel_offset = HERO_DEFAULT_SCALE
self.module_input.control = carla.VehicleControl()
weak_self = weakref.ref(self)
self.world.on_tick(lambda timestamp: ModuleWorld.on_world_tick(weak_self, timestamp))
def select_hero_actor(self):
hero_vehicles = [actor for actor in self.world.get_actors(
) if 'vehicle' in actor.type_id and actor.attributes['role_name'] == 'hero']
if len(hero_vehicles) > 0:
self.hero_actor = random.choice(hero_vehicles)
self.hero_transform = self.hero_actor.get_transform()
else:
self._spawn_hero()
def _spawn_hero(self):
# Get a random blueprint.
blueprint = random.choice(self.world.get_blueprint_library().filter(self.args.filter))
blueprint.set_attribute('role_name', 'hero')
if blueprint.has_attribute('color'):
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
# Spawn the player.
while self.hero_actor is None:
spawn_points = self.world.get_map().get_spawn_points()
spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()
self.hero_actor = self.world.try_spawn_actor(blueprint, spawn_point)
self.hero_transform = self.hero_actor.get_transform()
# Save it in order to destroy it when closing program
self.spawned_hero = self.hero_actor
def tick(self, clock):
actors = self.world.get_actors()
self.actors_with_transforms = [(actor, actor.get_transform()) for actor in actors]
if self.hero_actor is not None:
self.hero_transform = self.hero_actor.get_transform()
self.update_hud_info(clock)
def update_hud_info(self, clock):
hero_mode_text = []
if self.hero_actor is not None:
hero_speed = self.hero_actor.get_velocity()
hero_speed_text = 3.6 * math.sqrt(hero_speed.x ** 2 + hero_speed.y ** 2 + hero_speed.z ** 2)
affected_traffic_light_text = 'None'
if self.affected_traffic_light is not None:
state = self.affected_traffic_light.state
if state == carla.TrafficLightState.Green:
affected_traffic_light_text = 'GREEN'
elif state == carla.TrafficLightState.Yellow:
affected_traffic_light_text = 'YELLOW'
else:
affected_traffic_light_text = 'RED'
affected_speed_limit_text = self.hero_actor.get_speed_limit()
hero_mode_text = [
'Hero Mode: ON',
'Hero ID: %7d' % self.hero_actor.id,
'Hero Vehicle: %14s' % get_actor_display_name(self.hero_actor, truncate=14),
'Hero Speed: %3d km/h' % hero_speed_text,
'Hero Affected by:',
' Traffic Light: %12s' % affected_traffic_light_text,
' Speed Limit: %3d km/h' % affected_speed_limit_text
]
else:
hero_mode_text = ['Hero Mode: OFF']
self.server_fps = self.server_clock.get_fps()
self.server_fps = 'inf' if self.server_fps == float('inf') else round(self.server_fps)
module_info_text = [
'Server: % 16s FPS' % self.server_fps,
'Client: % 16s FPS' % round(clock.get_fps()),
'Simulation Time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)),
'Map Name: %10s' % self.town_map.name,
]
module_hud = module_manager.get_module(MODULE_HUD)
module_hud.add_info(self.name, module_info_text)
module_hud.add_info('HERO', hero_mode_text)
@staticmethod
def on_world_tick(weak_self, timestamp):
self = weak_self()
if not self:
return
self.server_clock.tick()
self.server_fps = self.server_clock.get_fps()
self.simulation_time = timestamp.elapsed_seconds
def _split_actors(self):
vehicles = []
traffic_lights = []
speed_limits = []
walkers = []
for actor_with_transform in self.actors_with_transforms:
actor = actor_with_transform[0]
if 'vehicle' in actor.type_id:
vehicles.append(actor_with_transform)
elif 'traffic_light' in actor.type_id:
traffic_lights.append(actor_with_transform)
elif 'speed_limit' in actor.type_id:
speed_limits.append(actor_with_transform)
elif 'walker' in actor.type_id:
walkers.append(actor_with_transform)
info_text = []
if self.hero_actor is not None and len(vehicles) > 1:
location = self.hero_transform.location
vehicle_list = [x[0] for x in vehicles if x[0].id != self.hero_actor.id]
def distance(v): return location.distance(v.get_location())
for n, vehicle in enumerate(sorted(vehicle_list, key=distance)):
if n > 15:
break
vehicle_type = get_actor_display_name(vehicle, truncate=22)
info_text.append('% 5d %s' % (vehicle.id, vehicle_type))
module_manager.get_module(MODULE_HUD).add_info(
'NEARBY VEHICLES',
info_text)
return (vehicles, traffic_lights, speed_limits, walkers)
def _render_traffic_lights(self, surface, list_tl, world_to_pixel):
self.affected_traffic_light = None
for tl in list_tl:
world_pos = tl.get_location()
pos = world_to_pixel(world_pos)
if self.args.show_triggers:
corners = Util.get_bounding_box(tl)
corners = [world_to_pixel(p) for p in corners]
pygame.draw.lines(surface, COLOR_BUTTER_1, True, corners, 2)
if self.hero_actor is not None:
corners = Util.get_bounding_box(tl)
corners = [world_to_pixel(p) for p in corners]
tl_t = tl.get_transform()
transformed_tv = tl_t.transform(tl.trigger_volume.location)
hero_location = self.hero_actor.get_location()
d = hero_location.distance(transformed_tv)
s = Util.length(tl.trigger_volume.extent) + Util.length(self.hero_actor.bounding_box.extent)
if (d <= s):
# Highlight traffic light
self.affected_traffic_light = tl
srf = self.traffic_light_surfaces.surfaces['h']
surface.blit(srf, srf.get_rect(center=pos))
srf = self.traffic_light_surfaces.surfaces[tl.state]
surface.blit(srf, srf.get_rect(center=pos))
def _render_speed_limits(self, surface, list_sl, world_to_pixel, world_to_pixel_width):
font_size = world_to_pixel_width(2)
radius = world_to_pixel_width(2)
font = pygame.font.SysFont('Arial', font_size)
for sl in list_sl:
x, y = world_to_pixel(sl.get_location())
# Render speed limit
white_circle_radius = int(radius * 0.75)
pygame.draw.circle(surface, COLOR_SCARLET_RED_1, (x, y), radius)
pygame.draw.circle(surface, COLOR_ALUMINIUM_0, (x, y), white_circle_radius)
limit = sl.type_id.split('.')[2]
font_surface = font.render(limit, True, COLOR_ALUMINIUM_5)
if self.args.show_triggers:
corners = Util.get_bounding_box(sl)
corners = [world_to_pixel(p) for p in corners]
pygame.draw.lines(surface, COLOR_PLUM_2, True, corners, 2)
# Blit
if self.hero_actor is not None:
# Rotate font surface with respect to hero vehicle front
angle = -self.hero_transform.rotation.yaw - 90.0
font_surface = pygame.transform.rotate(font_surface, angle)
offset = font_surface.get_rect(center=(x, y))
surface.blit(font_surface, offset)
else:
surface.blit(font_surface, (x - radius / 2, y - radius / 2))
def _render_walkers(self, surface, list_w, world_to_pixel):
for w in list_w:
color = COLOR_PLUM_0
# Compute bounding box points
bb = w[0].bounding_box.extent
corners = [
carla.Location(x=-bb.x, y=-bb.y),
carla.Location(x=bb.x, y=-bb.y),
carla.Location(x=bb.x, y=bb.y),
carla.Location(x=-bb.x, y=bb.y)]
w[1].transform(corners)
corners = [world_to_pixel(p) for p in corners]
pygame.draw.polygon(surface, color, corners)
def _render_vehicles(self, surface, list_v, world_to_pixel):
for v in list_v:
color = COLOR_SKY_BLUE_0
if int(v[0].attributes['number_of_wheels']) == 2:
color = COLOR_CHOCOLATE_1
if v[0].attributes['role_name'] == 'hero':
color = COLOR_CHAMELEON_0
# Compute bounding box points
bb = v[0].bounding_box.extent
corners = [carla.Location(x=-bb.x, y=-bb.y),
carla.Location(x=bb.x - 0.8, y=-bb.y),
carla.Location(x=bb.x, y=0),
carla.Location(x=bb.x - 0.8, y=bb.y),
carla.Location(x=-bb.x, y=bb.y),
carla.Location(x=-bb.x, y=-bb.y)
]
v[1].transform(corners)
corners = [world_to_pixel(p) for p in corners]
pygame.draw.lines(surface, color, False, corners, int(math.ceil(4.0 * self.map_image.scale)))
def render_actors(self, surface, vehicles, traffic_lights, speed_limits, walkers):
# Static actors
self._render_traffic_lights(surface, [tl[0] for tl in traffic_lights], self.map_image.world_to_pixel)
self._render_speed_limits(surface, [sl[0] for sl in speed_limits], self.map_image.world_to_pixel,
self.map_image.world_to_pixel_width)
# Dynamic actors
self._render_vehicles(surface, vehicles, self.map_image.world_to_pixel)
self._render_walkers(surface, walkers, self.map_image.world_to_pixel)
def clip_surfaces(self, clipping_rect):
self.actors_surface.set_clip(clipping_rect)
self.vehicle_id_surface.set_clip(clipping_rect)
self.result_surface.set_clip(clipping_rect)
def _compute_scale(self, scale_factor):
m = self.module_input.mouse_pos
# Percentage of surface where mouse position is actually
px = (m[0] - self.scale_offset[0]) / float(self.prev_scaled_size)
py = (m[1] - self.scale_offset[1]) / float(self.prev_scaled_size)
# Offset will be the previously accumulated offset added with the
# difference of mouse positions in the old and new scales
diff_between_scales = ((float(self.prev_scaled_size) * px) - (float(self.scaled_size) * px),
(float(self.prev_scaled_size) * py) - (float(self.scaled_size) * py))
self.scale_offset = (self.scale_offset[0] + diff_between_scales[0],
self.scale_offset[1] + diff_between_scales[1])
# Update previous scale
self.prev_scaled_size = self.scaled_size
# Scale performed
self.map_image.scale_map(scale_factor)
def render(self, display):
if self.actors_with_transforms is None:
return
self.result_surface.fill(COLOR_BLACK)
vehicles, traffic_lights, speed_limits, walkers = self._split_actors()
scale_factor = self.module_input.wheel_offset
self.scaled_size = int(self.map_image.width * scale_factor)
if self.scaled_size != self.prev_scaled_size:
self._compute_scale(scale_factor)
# Render Actors
self.actors_surface.fill(COLOR_BLACK)
self.render_actors(
self.actors_surface,
vehicles,
traffic_lights,
speed_limits,
walkers)
# Render Ids
self.module_hud.render_vehicles_ids(self.vehicle_id_surface, vehicles,
self.map_image.world_to_pixel, self.hero_actor, self.hero_transform)
# Blit surfaces
surfaces = ((self.map_image.surface, (0, 0)),
(self.actors_surface, (0, 0)),
(self.vehicle_id_surface, (0, 0)),
)
angle = 0.0 if self.hero_actor is None else self.hero_transform.rotation.yaw + 90.0
self.traffic_light_surfaces.rotozoom(-angle, self.map_image.scale)
center_offset = (0, 0)
if self.hero_actor is not None:
hero_location_screen = self.map_image.world_to_pixel(self.hero_transform.location)
hero_front = self.hero_transform.get_forward_vector()
translation_offset = (
hero_location_screen[0] -
self.hero_surface.get_width() /
2 +
hero_front.x *
PIXELS_AHEAD_VEHICLE,
(hero_location_screen[1] -
self.hero_surface.get_height() /
2 +
hero_front.y *
PIXELS_AHEAD_VEHICLE))
# Apply clipping rect
clipping_rect = pygame.Rect(translation_offset[0],
translation_offset[1],
self.hero_surface.get_width(),
self.hero_surface.get_height())
self.clip_surfaces(clipping_rect)
Util.blits(self.result_surface, surfaces)
self.border_round_surface.set_clip(clipping_rect)
self.hero_surface.fill(COLOR_ALUMINIUM_4)
self.hero_surface.blit(self.result_surface, (-translation_offset[0],
-translation_offset[1]))
rotated_result_surface = pygame.transform.rotozoom(self.hero_surface, angle, 0.9).convert()
center = (display.get_width() / 2, display.get_height() / 2)
rotation_pivot = rotated_result_surface.get_rect(center=center)
display.blit(rotated_result_surface, rotation_pivot)
display.blit(self.border_round_surface, (0, 0))
else:
# Translation offset
translation_offset = (self.module_input.mouse_offset[0] * scale_factor + self.scale_offset[0],
self.module_input.mouse_offset[1] * scale_factor + self.scale_offset[1])
center_offset = (abs(display.get_width() - self.surface_size) / 2 * scale_factor, 0)
# Apply clipping rect
clipping_rect = pygame.Rect(-translation_offset[0] - center_offset[0], -translation_offset[1],
self.module_hud.dim[0], self.module_hud.dim[1])
self.clip_surfaces(clipping_rect)
Util.blits(self.result_surface, surfaces)
display.blit(self.result_surface, (translation_offset[0] + center_offset[0],
translation_offset[1]))
def destroy(self):
if self.spawned_hero is not None:
self.spawned_hero.destroy()
# ==============================================================================
# -- Input -----------------------------------------------------------
# ==============================================================================
class ModuleInput(object):
def __init__(self, name):
self.name = name
self.mouse_pos = (0, 0)
self.mouse_offset = [0.0, 0.0]
self.wheel_offset = 0.1
self.wheel_amount = 0.025
self._steer_cache = 0.0
self.control = None
self._autopilot_enabled = False
def start(self):
hud = module_manager.get_module(MODULE_HUD)
hud.notification("Press 'H' or '?' for help.", seconds=4.0)
def render(self, display):
pass
def tick(self, clock):
self.parse_input(clock)
def _parse_events(self):
self.mouse_pos = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit_game()
elif event.type == pygame.KEYUP:
if self._is_quit_shortcut(event.key):
exit_game()
elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):
module_hud = module_manager.get_module(MODULE_HUD)
module_hud.help.toggle()
elif event.key == K_TAB:
module_world = module_manager.get_module(MODULE_WORLD)
module_hud = module_manager.get_module(MODULE_HUD)
if module_world.hero_actor is None:
module_world.select_hero_actor()
self.wheel_offset = HERO_DEFAULT_SCALE
self.control = carla.VehicleControl()
module_hud.notification('Hero Mode')
else:
self.wheel_offset = MAP_DEFAULT_SCALE
self.mouse_offset = [0, 0]
self.mouse_pos = [0, 0]
module_world.scale_offset = [0, 0]
module_world.hero_actor = None
module_hud.notification('Map Mode')
elif event.key == K_F1:
module_hud = module_manager.get_module(MODULE_HUD)
module_hud.show_info = not module_hud.show_info
elif event.key == K_i:
module_hud = module_manager.get_module(MODULE_HUD)
module_hud.show_actor_ids = not module_hud.show_actor_ids
elif isinstance(self.control, carla.VehicleControl):
if event.key == K_q:
self.control.gear = 1 if self.control.reverse else -1
elif event.key == K_m:
self.control.manual_gear_shift = not self.control.manual_gear_shift
world = module_manager.get_module(MODULE_WORLD)
self.control.gear = world.hero_actor.get_control().gear
module_hud = module_manager.get_module(MODULE_HUD)
module_hud.notification('%s Transmission' % (
'Manual' if self.control.manual_gear_shift else 'Automatic'))
elif self.control.manual_gear_shift and event.key == K_COMMA:
self.control.gear = max(-1, self.control.gear - 1)
elif self.control.manual_gear_shift and event.key == K_PERIOD:
self.control.gear = self.control.gear + 1
elif event.key == K_p:
world = module_manager.get_module(MODULE_WORLD)
if world.hero_actor is not None:
self._autopilot_enabled = not self._autopilot_enabled
world.hero_actor.set_autopilot(self._autopilot_enabled)
module_hud = module_manager.get_module(MODULE_HUD)
module_hud.notification('Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 4:
self.wheel_offset += self.wheel_amount
if self.wheel_offset >= 1.0:
self.wheel_offset = 1.0
elif event.button == 5:
self.wheel_offset -= self.wheel_amount
if self.wheel_offset <= 0.1:
self.wheel_offset = 0.1
def _parse_keys(self, milliseconds):
keys = pygame.key.get_pressed()
self.control.throttle = 1.0 if keys[K_UP] or keys[K_w] else 0.0
steer_increment = 5e-4 * milliseconds
if keys[K_LEFT] or keys[K_a]:
self._steer_cache -= steer_increment
elif keys[K_RIGHT] or keys[K_d]:
self._steer_cache += steer_increment
else:
self._steer_cache = 0.0
self._steer_cache = min(0.7, max(-0.7, self._steer_cache))
self.control.steer = round(self._steer_cache, 1)
self.control.brake = 1.0 if keys[K_DOWN] or keys[K_s] else 0.0
self.control.hand_brake = keys[K_SPACE]
def _parse_mouse(self):
if pygame.mouse.get_pressed()[0]:
x, y = pygame.mouse.get_pos()
self.mouse_offset[0] += (1.0 / self.wheel_offset) * (x - self.mouse_pos[0])
self.mouse_offset[1] += (1.0 / self.wheel_offset) * (y - self.mouse_pos[1])
self.mouse_pos = (x, y)
def parse_input(self, clock):
self._parse_events()
self._parse_mouse()
if not self._autopilot_enabled:
if isinstance(self.control, carla.VehicleControl):
self._parse_keys(clock.get_time())
self.control.reverse = self.control.gear < 0
world = module_manager.get_module(MODULE_WORLD)
if (world.hero_actor is not None):
world.hero_actor.apply_control(self.control)
@staticmethod
def _is_quit_shortcut(key):
return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)
# ==============================================================================
# -- Global Objects ------------------------------------------------------------
# ==============================================================================
module_manager = ModuleManager()
# ==============================================================================
# -- Game Loop ---------------------------------------------------------------
# ==============================================================================
def game_loop(args):
    # Initialised to None so the cleanup in the finally block is safe even if setup fails early.
    world_module = None
    try:
# Init Pygame
pygame.init()
display = pygame.display.set_mode(
(args.width, args.height),
pygame.HWSURFACE | pygame.DOUBLEBUF)
pygame.display.set_caption(args.description)
font = pygame.font.Font(pygame.font.get_default_font(), 20)
text_surface = font.render('Rendering map...', True, COLOR_WHITE)
display.blit(text_surface, text_surface.get_rect(center=(args.width / 2, args.height / 2)))
pygame.display.flip()
# Init modules
input_module = ModuleInput(MODULE_INPUT)
hud_module = ModuleHUD(MODULE_HUD, args.width, args.height)
world_module = ModuleWorld(MODULE_WORLD, args, timeout=2.0)
# Register Modules
module_manager.register_module(world_module)
module_manager.register_module(hud_module)
module_manager.register_module(input_module)
module_manager.start_modules()
clock = pygame.time.Clock()
while True:
clock.tick_busy_loop(60)
module_manager.tick(clock)
module_manager.render(display)
pygame.display.flip()
except KeyboardInterrupt:
print('\nCancelled by user. Bye!')
finally:
if world_module is not None:
world_module.destroy()
def exit_game():
module_manager.clear_modules()
pygame.quit()
sys.exit()
# ==============================================================================
# -- Main --------------------------------------------------------------------
# ==============================================================================
def main():
# Parse arguments
argparser = argparse.ArgumentParser(
description='CARLA No Rendering Mode Visualizer')
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'--host',
metavar='H',
default='127.0.0.1',
help='IP of the host server (default: 127.0.0.1)')
argparser.add_argument(
'-p', '--port',
metavar='P',
default=2000,
type=int,
help='TCP port to listen to (default: 2000)')
argparser.add_argument(
'--res',
metavar='WIDTHxHEIGHT',
default='1280x720',
help='window resolution (default: 1280x720)')
argparser.add_argument(
'--filter',
metavar='PATTERN',
default='vehicle.*',
help='actor filter (default: "vehicle.*")')
argparser.add_argument(
'--map',
metavar='TOWN',
default=None,
help='start a new episode at the given TOWN')
argparser.add_argument(
'--no-rendering',
action='store_true',
default=True,
help='switch off server rendering')
argparser.add_argument(
'--show-triggers',
action='store_true',
help='show trigger boxes of traffic signs')
argparser.add_argument(
'--show-connections',
action='store_true',
help='show waypoint connections')
argparser.add_argument(
'--show-spawn-points',
action='store_true',
help='show recommended spawn points')
args = argparser.parse_args()
args.description = argparser.description
args.width, args.height = [int(x) for x in args.res.split('x')]
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('listening to server %s:%s', args.host, args.port)
print(__doc__)
game_loop(args)
if __name__ == '__main__':
main()
| 41.499666
| 130
| 0.563151
|
3f5bb894a0d45f017022d6ce7677f45d51047df3
| 3,384
|
py
|
Python
|
parse_location_history.py
|
palmdalian/parse-location-history
|
4c03976884824bd42c5ff36f2ca1207d966d1c50
|
[
"BSD-3-Clause"
] | null | null | null |
parse_location_history.py
|
palmdalian/parse-location-history
|
4c03976884824bd42c5ff36f2ca1207d966d1c50
|
[
"BSD-3-Clause"
] | null | null | null |
parse_location_history.py
|
palmdalian/parse-location-history
|
4c03976884824bd42c5ff36f2ca1207d966d1c50
|
[
"BSD-3-Clause"
] | null | null | null |
import sys, os, json, datetime, time, xml.dom.minidom
from decimal import *
from optparse import OptionParser
parser = OptionParser(usage='usage: %prog [-f | --file] inputJSON [-s | --start] month/day/year [-e | --end] month/day/year [-o | --output] outputKML')
parser.add_option("-s", "--start", action="store", type="string", dest="startDate")
parser.add_option("-e", "--end", action="store", type="string", dest="endDate")
parser.add_option("-f", "--file", action="store", type="string", dest="file")
parser.add_option("-o", "--output", action="store", type="string", dest="output")
(options, args) = parser.parse_args()
if not options.output: # if filename is not given
parser.error('Output file not given')
if not options.startDate:
parser.error('Start date not given')
if not options.endDate:
options.endDate = options.startDate
if not options.file:
parser.error('Input JSON not given')
getcontext().prec = 7
dates = []
#from here: http://stackoverflow.com/questions/1060279/iterating-through-a-range-of-dates-in-python
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + datetime.timedelta(n)
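# e.g. daterange(datetime.date(2014, 1, 30), datetime.date(2014, 2, 2)) yields Jan 30, Jan 31 and
# Feb 1 (the end date itself is excluded, which is why createKML() adds one day to it below).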
def createKML(locations):
# This constructs the KML document from the CSV file.
kmlDoc = xml.dom.minidom.Document()
kmlElement = kmlDoc.createElementNS('http://earth.google.com/kml/2.2', 'kml')
kmlElement.setAttribute('xmlns', 'http://earth.google.com/kml/2.2')
kmlElement = kmlDoc.appendChild(kmlElement)
documentElement = kmlDoc.createElement('Document')
startDate = map (int, options.startDate.split("/"))
endDate = map (int, options.endDate.split("/"))
startDateTime = datetime.date(startDate[2],startDate[0],startDate[1])
# minTime = time.mktime(startDateTime.timetuple()) * 1000
    endDateTime = datetime.date(endDate[2],endDate[0],endDate[1]) + datetime.timedelta(days=1)  # one day past the end date; stays valid at month boundaries
# maxTime = time.mktime(endDateTime.timetuple()) * 1000
for singledate in daterange(startDateTime, endDateTime):
dates.append(singledate)
for i in range(0,len(dates)):
if i < len(dates)-1:
print dates[i]
print dates[i+1]
minTime = time.mktime(dates[i].timetuple()) * 1000
maxTime = time.mktime(dates[i+1].timetuple()) * 1000
placemarkElement = kmlDoc.createElement('Placemark')
trackElement = kmlDoc.createElement('gx:Track')
placemarkElement.appendChild(trackElement)
for point in locations:
timestampMs = int(point["timestampMs"])
if minTime < timestampMs < maxTime:
# if "activitys" in point:
# if point["activitys"][0]["activities"][0]["type"] == "onFoot":
whereElement = kmlDoc.createElement('gx:coord')
whenElement = kmlDoc.createElement('when')
whereText = kmlDoc.createTextNode(str(Decimal(point["longitudeE7"]) / Decimal(10000000)) + " " + str(Decimal(point["latitudeE7"]) / Decimal(10000000)) + " 0")
whenText = kmlDoc.createTextNode(str(timestampMs))
whereElement.appendChild(whereText)
whenElement.appendChild(whenText)
trackElement.appendChild(whereElement)
trackElement.appendChild(whenElement)
documentElement.appendChild(placemarkElement)
documentElement = kmlElement.appendChild(documentElement)
kmlFile = open(options.output, 'w')
kmlFile.write(kmlDoc.toprettyxml(' ', newl = '\n', encoding = 'utf-8'))
with open(options.file, 'rb') as f:
data = json.load(f)
createKML(data["locations"])
| 40.771084
| 165
| 0.70656
|
9d0d100373bebc70103a51bc4c0f2e34bf41daef
| 3,122
|
py
|
Python
|
google/cloud/iot/v1/iot-v1-py/google/cloud/iot_v1/types/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/iot/v1/iot-v1-py/google/cloud/iot_v1/types/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/iot/v1/iot-v1-py/google/cloud/iot_v1/types/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
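# This package module only re-exports the generated request/response messages and resource types
# so they can be imported directly from google.cloud.iot_v1.types.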
from .device_manager import (
BindDeviceToGatewayRequest,
BindDeviceToGatewayResponse,
CreateDeviceRegistryRequest,
CreateDeviceRequest,
DeleteDeviceRegistryRequest,
DeleteDeviceRequest,
GatewayListOptions,
GetDeviceRegistryRequest,
GetDeviceRequest,
ListDeviceConfigVersionsRequest,
ListDeviceConfigVersionsResponse,
ListDeviceRegistriesRequest,
ListDeviceRegistriesResponse,
ListDevicesRequest,
ListDevicesResponse,
ListDeviceStatesRequest,
ListDeviceStatesResponse,
ModifyCloudToDeviceConfigRequest,
SendCommandToDeviceRequest,
SendCommandToDeviceResponse,
UnbindDeviceFromGatewayRequest,
UnbindDeviceFromGatewayResponse,
UpdateDeviceRegistryRequest,
UpdateDeviceRequest,
)
from .resources import (
Device,
DeviceConfig,
DeviceCredential,
DeviceRegistry,
DeviceState,
EventNotificationConfig,
GatewayConfig,
HttpConfig,
MqttConfig,
PublicKeyCertificate,
PublicKeyCredential,
RegistryCredential,
StateNotificationConfig,
X509CertificateDetails,
GatewayAuthMethod,
GatewayType,
HttpState,
LogLevel,
MqttState,
PublicKeyCertificateFormat,
PublicKeyFormat,
)
__all__ = (
'BindDeviceToGatewayRequest',
'BindDeviceToGatewayResponse',
'CreateDeviceRegistryRequest',
'CreateDeviceRequest',
'DeleteDeviceRegistryRequest',
'DeleteDeviceRequest',
'GatewayListOptions',
'GetDeviceRegistryRequest',
'GetDeviceRequest',
'ListDeviceConfigVersionsRequest',
'ListDeviceConfigVersionsResponse',
'ListDeviceRegistriesRequest',
'ListDeviceRegistriesResponse',
'ListDevicesRequest',
'ListDevicesResponse',
'ListDeviceStatesRequest',
'ListDeviceStatesResponse',
'ModifyCloudToDeviceConfigRequest',
'SendCommandToDeviceRequest',
'SendCommandToDeviceResponse',
'UnbindDeviceFromGatewayRequest',
'UnbindDeviceFromGatewayResponse',
'UpdateDeviceRegistryRequest',
'UpdateDeviceRequest',
'Device',
'DeviceConfig',
'DeviceCredential',
'DeviceRegistry',
'DeviceState',
'EventNotificationConfig',
'GatewayConfig',
'HttpConfig',
'MqttConfig',
'PublicKeyCertificate',
'PublicKeyCredential',
'RegistryCredential',
'StateNotificationConfig',
'X509CertificateDetails',
'GatewayAuthMethod',
'GatewayType',
'HttpState',
'LogLevel',
'MqttState',
'PublicKeyCertificateFormat',
'PublicKeyFormat',
)
| 27.628319
| 74
| 0.741832
|
f20d2cd5e758caa42d89cc293f5b4c3b3604c378
| 3,215
|
py
|
Python
|
alljoyn/services/controlpanel/cpp/tools/CPSAppGenerator/GeneratorUtils/containerWidget.py
|
octoblu/alljoyn
|
a74003fa25af1d0790468bf781a4d49347ec05c4
|
[
"ISC"
] | 37
|
2015-01-18T21:27:23.000Z
|
2018-01-12T00:33:43.000Z
|
alljoyn/services/controlpanel/cpp/tools/CPSAppGenerator/GeneratorUtils/containerWidget.py
|
octoblu/alljoyn
|
a74003fa25af1d0790468bf781a4d49347ec05c4
|
[
"ISC"
] | 14
|
2015-02-24T11:44:01.000Z
|
2020-07-20T18:48:44.000Z
|
alljoyn/services/controlpanel/cpp/tools/CPSAppGenerator/GeneratorUtils/containerWidget.py
|
octoblu/alljoyn
|
a74003fa25af1d0790468bf781a4d49347ec05c4
|
[
"ISC"
] | 29
|
2015-01-23T16:40:52.000Z
|
2019-10-21T12:22:30.000Z
|
# Copyright AllSeen Alliance. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys
import propertyWidget as pw
import listPropertyWidget as lpw
import actionWidget as aw
import labelWidget as lw
import commonWidget as common
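# Code-generation widget for a ControlPanel "container" element: generate() emits the init code
# for the container itself and then recurses into its child elements (actions, nested containers,
# properties and labels).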
class Container (common.Widget):
def __init__(self, generated, element, parentName, languageSetName, isRoot = 0) :
common.Widget.__init__(self, generated, element, parentName, languageSetName)
self.isRoot = isRoot
self.widgetName = "Container"
if isRoot :
self.parentAddFunc = "setRootWidget"
def generate(self) :
common.Widget.generate(self)
if self.isRoot:
self.setDismissable()
self.generateChildElements()
def setDismissable(self) :
if not hasattr(self.element, "dismissable") :
return
self.generated.initCode += " {0}->setIsDismissable({1});\n".format(self.name, self.element.dismissable)
def generateChildElements (self) :
elements = self.element.elements.sub_nodes
for element in elements:
elementType = element._name
if elementType == "action" :
action = aw.Action(self.generated, element, self.name, self.languageSetName)
action.generate()
elif elementType == "container" :
container = Container(self.generated, element, self.name, self.languageSetName)
container.generate()
elif elementType == "scalarProperty" or elementType == "stringProperty" or elementType == "booleanProperty" :
propertyW = pw.Property(self.generated, element, self.name, self.languageSetName)
propertyW.generate()
elif elementType == "dateProperty" or elementType == "timeProperty" :
propertyW = pw.Property(self.generated, element, self.name, self.languageSetName)
propertyW.generate()
elif elementType == "labelProperty" :
label = lw.Label(self.generated, element, self.name, self.languageSetName)
label.generate()
# elif elementType == "listProperty" :
# listProp = lpw.ListProperty(self.generated, element, (self.parentObjectPath + self.objectPathSuffix), self.languageSetName)
# listProp.generate()
else :
print >> sys.stderr, "ERROR - This type is not supported. Exiting " + elementType
sys.exit(2)
| 44.041096
| 140
| 0.661586
|
8164375a191b18a0b4496e5ada3ce9d7b2538fa1
| 3,834
|
py
|
Python
|
insights/parsers/tests/test_hammer_ping.py
|
mglantz/insights-core
|
6f20bbbe03f53ee786f483b2a28d256ff1ad0fd4
|
[
"Apache-2.0"
] | 1
|
2020-02-19T06:36:22.000Z
|
2020-02-19T06:36:22.000Z
|
insights/parsers/tests/test_hammer_ping.py
|
mglantz/insights-core
|
6f20bbbe03f53ee786f483b2a28d256ff1ad0fd4
|
[
"Apache-2.0"
] | 10
|
2018-04-16T15:38:04.000Z
|
2018-05-15T18:43:02.000Z
|
insights/parsers/tests/test_hammer_ping.py
|
mglantz/insights-core
|
6f20bbbe03f53ee786f483b2a28d256ff1ad0fd4
|
[
"Apache-2.0"
] | null | null | null |
from insights.parsers.hammer_ping import HammerPing
from insights.tests import context_wrap
HAMMERPING_ERR_1 = """
Error: Connection refused - connect(2) for "localhost" port 443
"""
HAMMERPING_ERR_2 = """
Could not load the API description from the server
- is the server down?
- was 'foreman-rake apipie:cache' run on the server when using apipie cache? (typical production settings)
Warning: An error occured while loading module hammer_cli_csv
Could not load the API description from the server
- is the server down?
- was 'foreman-rake apipie:cache' run on the server when using apipie cache? (typical production settings)
Warning: An error occured while loading module hammer_cli_foreman
"""
HAMMERPING_ERR_3 = """
candlepin:
Status: ok
Server Response: Duration: 61ms
candlepin_auth:
Status: ok
"""
HAMMERPING_OK = """
candlepin:
Status: ok
Server Response: Duration: 61ms
candlepin_auth:
Status: ok
Server Response: Duration: 61ms
pulp:
Status: ok
Server Response: Duration: 61ms
pulp_auth:
Status: ok
Server Response: Duration: 61ms
elasticsearch:
Status: ok
Server Response: Duration: 35ms
foreman_tasks:
Status: ok
server Response: Duration: 1ms
""".strip()
HAMMERPING = """
candlepin:
Status: FAIL
Server Response:Message:404 Resource Not Found
candlepin_auth:
Status: FAIL
Server Response: Message: Katello::Resources::Candlepin::CandlepinPing: 404 Resource Not Found
pulp:
Status: ok
Server Response: Duration: 61ms
pulp_auth:
Status:
Server Response:
elasticsearch:
Status: ok
Server Response: Duration: 35ms
foreman_tasks:
Status: ok
server Response: Duration: 1ms
""".strip()
def test_hammer_ping_err_1():
status = HammerPing(context_wrap(HAMMERPING_ERR_1))
assert not status.are_all_ok
    assert status.errors != []
def test_hammer_ping_err_2():
status = HammerPing(context_wrap(HAMMERPING_ERR_2))
assert not status.are_all_ok
    assert status.errors != []
def test_hammer_ping_err_3():
status = HammerPing(context_wrap(HAMMERPING_ERR_3))
assert not status.are_all_ok
    assert status.errors != []
def test_hammer_ping_ok():
status = HammerPing(context_wrap(HAMMERPING_OK))
assert status.are_all_ok
assert status.service_list == [
'candlepin', 'candlepin_auth', 'pulp', 'pulp_auth',
'elasticsearch', 'foreman_tasks'
]
assert status.services_of_status('FAIL') == []
assert 'nonexistent' not in status.service_list
assert 'nonexistent' not in status.status_of_service
assert 'nonexistent' not in status.response_of_service
def test_hammer_ping():
status = HammerPing(context_wrap(HAMMERPING))
assert not status.are_all_ok
assert status.service_list == [
'candlepin', 'candlepin_auth', 'pulp', 'pulp_auth',
'elasticsearch', 'foreman_tasks'
]
assert status.services_of_status('OK') == [
'pulp', 'elasticsearch', 'foreman_tasks'
]
assert status.services_of_status('FAIL') == [
'candlepin', 'candlepin_auth'
]
assert status.status_of_service['pulp_auth'] == ''
assert status.status_of_service['candlepin'] == 'fail'
assert status.status_of_service['elasticsearch'] == 'ok'
assert status.response_of_service['pulp_auth'] == ''
assert status.response_of_service['candlepin_auth'] == 'Message: Katello::Resources::Candlepin::CandlepinPing: 404 Resource Not Found'
assert status.response_of_service['elasticsearch'] == 'Duration: 35ms'
assert 'nonexistent' not in status.service_list
assert 'nonexistent' not in status.status_of_service
assert 'nonexistent' not in status.response_of_service
| 30.428571
| 138
| 0.701617
|
fdaf461cd99168c39df3093f83e366e1f08176d7
| 12,163
|
py
|
Python
|
examples/libtest/StringTest.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 739
|
2015-01-01T02:05:11.000Z
|
2022-03-30T15:26:16.000Z
|
examples/libtest/StringTest.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 33
|
2015-03-25T23:17:04.000Z
|
2021-08-19T08:25:22.000Z
|
examples/libtest/StringTest.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 167
|
2015-01-01T22:27:47.000Z
|
2022-03-17T13:29:19.000Z
|
# -*- coding: utf-8 -*-
from UnitTest import UnitTest
import write
class StringTest(UnitTest):
def testBasestring(self):
s = 'A string'
self.assertTrue(isinstance(s, str), "isinstance(s, str)")
self.assertTrue(isinstance(s, basestring), "isinstance(s, basestring)")
def testToString(self):
# TODO: this fails on IE, because we can not override toString
# in the normal way
# we need to do something like this
# http://webreflection.blogspot.com/2007/07/quick-fix-internet-explorer-and.html
o = ClassWithOwnToString()
self.assertEquals(o.toString(), 'ClassWithOwnToString as a String')
o = ClassWithOwnToString2()
try:
self.assertEquals(o.toString(), 'ClassWithOwnToString2 as a String')
except AttributeError, e:
#AttributeError: 'ClassWithOwnToString2' object has no attribute 'toString
# mapping of toString to __str__ is not available in normal python
pass
def testReplace(self):
text="this is a rather long string"
expected_result1="th--- --- a rather long string"
expected_result2="thi-- is a rather long string"
expected_result3="this_is_a_rather_long_string"
result=text.replace("is", "---")
self.assertEquals(result, expected_result1)
result=text.replace("s", "--", 1)
self.assertEquals(result, expected_result2)
result=text.replace(" ", "_")
self.assertEquals(result, expected_result3)
def testRFind(self):
text="this is a yes it is a rather long string"
result=text.rfind("not found")
self.assertEquals(result, -1)
result=text.rfind("is")
self.assertEquals(result, 17)
result=text.rfind("is", 18)
self.assertEquals(result, -1)
result=text.rfind("is", 17)
self.assertEquals(result, 17)
result=text.rfind("is", 16)
self.assertEquals(result, 17)
result=text.rfind("is", 2, 3)
self.assertEquals(result, -1)
def testFind(self):
text="this is a rather long string"
result=text.find("not found")
self.assertEquals(result, -1)
result=text.find("is")
self.assertEquals(result, 2)
result=text.find("is", 3)
self.assertEquals(result, 5)
result=text.find("is", 2, 3)
self.assertEquals(result, -1)
def testJoin(self):
data="this is a rather long string"
data=data.split(" ")
sep1=", "
sep2=""
expected_result1="this, is, a, rather, long, string"
expected_result2="thisisaratherlongstring"
result=sep1.join(data)
self.assertEquals(result, expected_result1)
result=sep2.join(data)
self.assertEquals(result, expected_result2)
def testSplit(self):
text=" this is a rather long string "
space=" "
empty=""
expected_result1=" this is a rather long string "
expected_result2="thisis a rather long string "
expected_result3="this is a rather long string"
t = text.split(space)
self.assertEquals(t[0], '')
self.assertEquals(t[1], 'this')
self.assertEquals(t[2], 'is')
self.assertEquals(t[3], '')
self.assertEquals(t[4], 'a')
result=space.join(t)
self.assertEquals(result, expected_result1)
result=empty.join(text.split(space, 2))
self.assertEquals(result, expected_result2)
result=space.join(text.split())
self.assertEquals(result, expected_result3)
result=empty.split()
self.assertEquals(result, [])
result=empty.split(None)
self.assertEquals(result, [])
result=empty.split(' ')
self.assertEquals(result, [''])
self.assertEquals('1.2.3'.rsplit('.', 1), ['1.2', '3'])
self.assertEquals('1.2.3'.rsplit('.', 2), ['1', '2', '3'])
def testStrip(self):
text=" this is a rather long string "
expected_result1="this is a rather long string"
expected_result2="a rather long string"
result=text.strip()
self.assertEquals(result, expected_result1)
result=text.strip(" sthi")
self.assertEquals(result, expected_result2)
result=text.strip("")
self.assertEquals(result, text)
def testUnicode(self):
text=u"""Liebe 'hallo' "grüsse" Grüsse"""
self.assertEqual(text, text[:])
def testIsDigit(self):
self.assertEqual("123".isdigit(), True)
self.assertEqual("-123".isdigit(), False)
self.assertEqual("123.45".isdigit(), False)
self.assertEqual("1a".isdigit(), False)
self.assertEqual(" ".isdigit(), False)
def testStringIterate(self):
text=" this is a rather long string "
t = ''
for x in text:
t += x
self.assertEqual(text, t)
def testStrTuple(self):
self.assertEqual(str((5,6)), "(5, 6)")
def testStrList(self):
self.assertEqual(str([5,6]), "[5, 6]")
def testStrFloat(self):
f1 = 1.5
self.assertEqual(str(f1), "1.5")
self.assertEqual(f1.__str__(), "1.5", "float.__str__() returns type instead of value, bug #487")
def testStartsWith(self):
s = 'abcd'
self.assertEqual(s.startswith('ab'), True)
self.assertEqual(s.startswith('ab', 0), True)
self.assertEqual(s.startswith('ab', 1), False)
self.assertEqual(s.startswith('bc', 1), True)
self.assertEqual(s.startswith('ab', 0, 8), True)
self.assertEqual(s.startswith('ab', 0, 3), True)
self.assertEqual(s.startswith('ab', 0, 2), True)
self.assertEqual(s.startswith('ab', 0, 1), False)
def testEndsWith(self):
s = 'abcd'
self.assertEqual(s.endswith('cd'), True)
self.assertEqual(s.endswith('cd', 0), True)
self.assertEqual(s.endswith('cd', 2), True)
self.assertEqual(s.endswith('cd', 3), False)
self.assertEqual(s.endswith('cd', 0, 3), False)
self.assertEqual(s.endswith('bc', 0, 3), True)
def testLjust(self):
self.assertEqual('a'.ljust(0), 'a')
self.assertEqual('a'.ljust(4), 'a ')
self.assertEqual('a'.ljust(4, 'b'), 'abbb')
def testRjust(self):
self.assertEqual('a'.rjust(4, 'b'), 'bbba')
def testCenter(self):
self.assertEqual('a'.center(4, '1'), '1a11')
def testZfill(self):
self.assertEqual('a'.zfill(4), '000a')
def testSprintfList(self):
self.assertEqual("%s" % 'foo', "foo")
self.assertEqual("%% %s" % '', "% ")
self.assertEqual("[%% %s]" % '', "[% ]")
self.assertEqual("[%c]" % 0x20, '[ ]')
self.assertEqual("[%r]" % 11, "[11]")
self.assertEqual("[%s]" % 11, "[11]")
self.assertEqual("[%d]" % 11, "[11]")
self.assertEqual("[%i]" % 11, "[11]")
self.assertEqual("[%u]" % 11, "[11]")
self.assertEqual("[%e]" % 11, "[1.100000e+01]")
self.assertEqual("[%E]" % 11, "[1.100000E+01]")
self.assertEqual("[%f]" % 11, "[11.000000]")
self.assertEqual("[%.2f]" % 11, "[11.00]")
self.assertEqual("[%F]" % 11, "[11.000000]")
self.assertEqual("[%g]" % 11, "[11]")
self.assertEqual("[%G]" % 11, "[11]")
self.assertEqual("[%o]" % 11, "[13]")
self.assertEqual("[%x]" % 11, "[b]")
self.assertEqual("[%X]" % 11, "[B]")
self.assertEqual("%*g,%10f" % (6, 1.234, 1.234), " 1.234, 1.234000")
self.assertEqual("%0*g,%010f" % (6, 1.234, 1.234), "01.234,001.234000")
self.assertEqual("[%04x]" % 1234, "[04d2]")
# FIXME: Next line fails. Slightly different output.
#self.assertEqual("[%g,%g,%g,%g,%g]" % (1.234567, 123456.7, 1234567, 0.0001234, 0.00001234), "[1.23457,123457,1.23457e+06,0.0001234,1.234e-05]")
self.assertEqual("[%3% %s]" % 'a', "[ % a]")
try:
s = "%*g,%10f" % (1, 2)
self.fail('Failed to raise error for "%*g,%10f" % (1, 2)')
except TypeError, e:
self.assertEqual(str(e), "not enough arguments for format string")
try:
s = "%*g,%10f" % (1, 2, 3, 4)
self.fail('Failed to raise error for "%*g,%10f" % (1, 2, 3, 4)')
except TypeError, e:
self.assertEqual(str(e), "not all arguments converted during string formatting")
# Check for handling of newlines in format string
self.assertEqual("\n%s\n%s\n" % ('s1', 's2'), '\ns1\ns2\n')
def testSprintfDict(self):
testdict = {'s1': 'string',
's2': 'another string',
'v0': 0,
'v1': 1,
'v2': 1.234,
}
self.assertEqual("[%(v1)12s|%(v1)-12s]" % testdict, '[ 1|1 ]')
self.assertEqual("[%(v1)012o|%(v1)-012o]" % testdict, '[000000000001|1 ]')
self.assertEqual("[%(v1)#012o|%(v1)#-012o]" % testdict, '[000000000001|01 ]')
self.assertEqual("[%(v0)#012o|%(v0)#-012o]" % testdict, '[000000000000|0 ]')
self.assertEqual("[%(v1)#012x|%(v1)#-012x]" % testdict, '[0x0000000001|0x1 ]')
self.assertEqual("%(s1)3% %(s1)s" % testdict, ' % string')
#FIXME: next line failes, since it's a mixture of Dict/Tuple format
#self.assertEqual("%3% %(s1)s" % testdict, ' % string')
self.assertEqual("%(v1)#g" % testdict, '1.00000')
try:
s = "%(not-there)s" % testdict
self.fail('Failed to raise error for "%(not-there)s" % testdict')
except KeyError, e:
self.assertEqual(str(e), "'not-there'")
# Check for handling of newlines in format string
self.assertEqual("\n%(s1)s\n%(s1)s\n" % testdict, '\nstring\nstring\n')
self.assertEqual("%%s %(foo)s" % {'foo': 1}, "%s 1")
self.assertEqual("%s %%(foo)s" % {'foo': 1}, "{'foo': 1} %(foo)s")
self.assertEqual("%s %(foo)s" % {'foo': 1}, "{'foo': 1} 1")
def testSprintfVar(self):
f = "%s"
self.assertEqual(f % 'test', 'test')
def testIndex(self):
s = "12345"
self.assertEqual(s[0], '1')
self.assertEqual(s[-1], '5')
self.assertEqual(s[1:-1], '234')
try:
a = s[200]
self.fail("Failed to raise an IndexError")
except IndexError, e:
self.assertEqual(e[0], 'string index out of range')
try:
a = s[-200]
self.fail("Failed to raise an IndexError")
except IndexError, e:
self.assertEqual(e[0], 'string index out of range')
def testOperator(self):
self.assertEqual("1".__add__("2"), "12")
self.assertEqual("1".__mul__(2), "11")
self.assertEqual("1".__rmul__(3), "111")
self.assertEqual("2" * 3, "222")
self.assertEqual(3 * "3", "333")
def testIsAlnum(self):
self.assertTrue("abc".isalnum())
self.assertTrue("0bc".isalnum())
self.assertFalse(".?abc".isalnum())
self.assertFalse(" 0bc".isalnum())
def testIsAlpha(self):
self.assertTrue("abc".isalpha())
self.assertFalse("0bc".isalpha())
def testIsUpper(self):
self.assertTrue("ABC".isupper(), "ABC")
self.assertFalse("AbC".isupper(), "AbC")
self.assertTrue("A0C".isupper(), "A0C")
self.assertFalse("A0c".isupper(), "A0c")
self.assertTrue("A C".isupper(), "A C")
self.assertFalse("A c".isupper(), "A c")
def testIsLower(self):
self.assertTrue("abc".islower(), "abc")
self.assertFalse("AbC".islower(), "AbC")
self.assertTrue("a0c".islower(), "a0c")
self.assertFalse("A0c".islower(), "A0c")
self.assertTrue("a c".islower(), "a c")
self.assertFalse("A c".islower(), "A c")
class ClassWithOwnToString(object):
def toString(self):
return 'ClassWithOwnToString as a String'
class ClassWithOwnToString2(object):
def __str__(self):
return 'ClassWithOwnToString2 as a String'
| 35.460641
| 152
| 0.559648
|
0f50f0f9cdfef21a54ffa6a0b78cca439552fdbd
| 1,797
|
py
|
Python
|
Bank ATM Application/Python_ATM_Bank_App.py
|
KelvinBrannonJr/Python-Applications
|
5efe8c4d149db4feda6ef3e019cef4ab9d912c64
|
[
"MIT"
] | null | null | null |
Bank ATM Application/Python_ATM_Bank_App.py
|
KelvinBrannonJr/Python-Applications
|
5efe8c4d149db4feda6ef3e019cef4ab9d912c64
|
[
"MIT"
] | null | null | null |
Bank ATM Application/Python_ATM_Bank_App.py
|
KelvinBrannonJr/Python-Applications
|
5efe8c4d149db4feda6ef3e019cef4ab9d912c64
|
[
"MIT"
] | null | null | null |
class Bank_Account:
account_balance = 0
amount = 0
name = input("Enter your name: ")
def __init__(self,starting_amount = 0.00):
self.account_balance = starting_amount
def deposit_funds(self):
pass
def withdraw_funds(self):
pass
def transaction_history(self,transaction_string):
with open('bank_transaction_history','a') as file:
file.write(f"$ {transaction_string}\t Balance: {self.account_balance} \n")
print(f"Welcome {name} to Bank app")
class Deposit(Bank_Account):
def deposit_funds():
deposit_amount = float(input("Enter the amount to deposit: "))
Bank_Account.account_balance = Bank_Account.account_balance + deposit_amount
Bank_Account.transaction_history(Deposit,f"Deposited {deposit_amount}")
print("You have deposited: ",deposit_amount)
print("Your new account balance is: ", Bank_Account.account_balance)
class Withdraw(Bank_Account):
def withdraw_funds():
withdraw_amount = float(input("Enter the amount to withdraw: "))
Bank_Account.account_balance = Bank_Account.account_balance - withdraw_amount
Bank_Account.transaction_history(Withdraw,f"Withdrew {withdraw_amount}")
print("You have withdrawn: ",withdraw_amount)
print("Your new account balance is: ",Bank_Account.account_balance)
is_Exited = False
while(is_Exited == False):
user_selection = input("Would you like to deposit, withdraw, or exit? ").lower()
if(user_selection == "deposit"):
Deposit.deposit_funds()
elif(user_selection == "withdraw"):
Withdraw.withdraw_funds()
elif(user_selection == 'exit'):
is_Exited = True
break
else:
print("Sorry that is not a valid input... ")
| 32.089286
| 86
| 0.679466
|
34a7ecfee9507180fa66e6c79b180eacc1e9e3e4
| 1,331
|
py
|
Python
|
run/sestoft.py
|
lrecht/ParadigmComparison
|
4bf8d5f90241b569b57266146dc64ba844d5c774
|
[
"MIT"
] | null | null | null |
run/sestoft.py
|
lrecht/ParadigmComparison
|
4bf8d5f90241b569b57266146dc64ba844d5c774
|
[
"MIT"
] | 8
|
2020-10-13T07:07:58.000Z
|
2020-12-14T12:55:03.000Z
|
run/sestoft.py
|
lrecht/ParadigmComparison
|
4bf8d5f90241b569b57266146dc64ba844d5c774
|
[
"MIT"
] | null | null | null |
from run.cochran import run_benchmark
from . import benchmark_utils as bm_utils
import subprocess
#Performs the list of benchmarks and saves to results to output csv file
def perform_benchmarks(benchmarks, experiment_iterations, output_file):
statistics, csv_output = bm_utils.setup(output_file)
benchmark_count = len(benchmarks)
for index, b in enumerate(benchmarks):
print('\r' + "Performing benchmark " + str(index + 1) + " of " + str(benchmark_count), end='', flush=True)
print("\n", b.path, flush=True)
subprocess.run(b.get_build_command(),
shell=True, check=True, stdout=subprocess.DEVNULL)
statistics.clear()
#The measuring equipment
current = 0
while(current < experiment_iterations):
run_benchmark(b, current, experiment_iterations, csv_output, statistics)
current += 1
bm_utils.save(statistics, csv_output, b.path)
print("", flush=True)
print('\n', flush=True)
def run_benchmark(benchmark, i, iterations, csv_output, statistics):
print("\r" + str(i + 1) + " of " + str(iterations), end="", flush=True)
bm_utils.run(benchmark)
results = bm_utils.collect_results(bm_utils.RESULT_FILE_PATH)
bm_utils.handle_results(results, benchmark.path, csv_output, statistics)
| 39.147059
| 114
| 0.682945
|
76c694e30ac59ee1c185c8a7818ead80bc2f57ff
| 6,443
|
py
|
Python
|
app/main/views/letter_branding.py
|
alphagov-mirror/notifications-admin
|
04d051df6b85cf596a7d6d0f28474b04673e420a
|
[
"MIT"
] | null | null | null |
app/main/views/letter_branding.py
|
alphagov-mirror/notifications-admin
|
04d051df6b85cf596a7d6d0f28474b04673e420a
|
[
"MIT"
] | 3
|
2021-03-31T19:52:53.000Z
|
2021-12-13T20:39:53.000Z
|
app/main/views/letter_branding.py
|
alphagov-mirror/notifications-admin
|
04d051df6b85cf596a7d6d0f28474b04673e420a
|
[
"MIT"
] | null | null | null |
from botocore.exceptions import ClientError as BotoClientError
from flask import (
current_app,
redirect,
render_template,
request,
session,
url_for,
)
from notifications_python_client.errors import HTTPError
from app import letter_branding_client
from app.main import main
from app.main.forms import (
SearchByNameForm,
ServiceLetterBrandingDetails,
SVGFileUpload,
)
from app.s3_client.s3_logo_client import (
LETTER_TEMP_TAG,
delete_letter_temp_file,
delete_letter_temp_files_created_by,
letter_filename_for_db,
permanent_letter_logo_name,
persist_logo,
upload_letter_temp_logo,
)
from app.utils import get_logo_cdn_domain, user_is_platform_admin
@main.route("/letter-branding", methods=['GET'])
@user_is_platform_admin
def letter_branding():
brandings = letter_branding_client.get_all_letter_branding()
return render_template(
'views/letter-branding/select-letter-branding.html',
letter_brandings=brandings,
search_form=SearchByNameForm()
)
@main.route("/letter-branding/<uuid:branding_id>/edit", methods=['GET', 'POST'])
@main.route("/letter-branding/<uuid:branding_id>/edit/<path:logo>", methods=['GET', 'POST'])
@user_is_platform_admin
def update_letter_branding(branding_id, logo=None):
letter_branding = letter_branding_client.get_letter_branding(branding_id)
file_upload_form = SVGFileUpload()
letter_branding_details_form = ServiceLetterBrandingDetails(
name=letter_branding['name'],
)
file_upload_form_submitted = file_upload_form.file.data
details_form_submitted = request.form.get('operation') == 'branding-details'
logo = logo if logo else permanent_letter_logo_name(letter_branding['filename'], 'svg')
if file_upload_form_submitted and file_upload_form.validate_on_submit():
upload_filename = upload_letter_temp_logo(
file_upload_form.file.data.filename,
file_upload_form.file.data,
current_app.config['AWS_REGION'],
user_id=session["user_id"]
)
if logo.startswith(LETTER_TEMP_TAG.format(user_id=session['user_id'])):
delete_letter_temp_file(logo)
return redirect(url_for('.update_letter_branding', branding_id=branding_id, logo=upload_filename))
if details_form_submitted and letter_branding_details_form.validate_on_submit():
db_filename = letter_filename_for_db(logo, session['user_id'])
try:
if db_filename == letter_branding['filename']:
letter_branding_client.update_letter_branding(
branding_id=branding_id,
filename=db_filename,
name=letter_branding_details_form.name.data,
)
return redirect(url_for('main.letter_branding'))
else:
letter_branding_client.update_letter_branding(
branding_id=branding_id,
filename=db_filename,
name=letter_branding_details_form.name.data,
)
upload_letter_svg_logo(logo, db_filename, session['user_id'])
return redirect(url_for('main.letter_branding'))
except HTTPError as e:
if 'name' in e.message:
letter_branding_details_form.name.errors.append(e.message['name'][0])
else:
raise e
except BotoClientError:
# we had a problem saving the file - rollback the db changes
letter_branding_client.update_letter_branding(
branding_id=branding_id,
filename=letter_branding['filename'],
name=letter_branding['name'],
)
file_upload_form.file.errors = ['Error saving uploaded file - try uploading again']
return render_template(
'views/letter-branding/manage-letter-branding.html',
file_upload_form=file_upload_form,
letter_branding_details_form=letter_branding_details_form,
cdn_url=get_logo_cdn_domain(),
logo=logo,
is_update=True
)
@main.route("/letter-branding/create", methods=['GET', 'POST'])
@main.route("/letter-branding/create/<path:logo>", methods=['GET', 'POST'])
@user_is_platform_admin
def create_letter_branding(logo=None):
file_upload_form = SVGFileUpload()
letter_branding_details_form = ServiceLetterBrandingDetails()
file_upload_form_submitted = file_upload_form.file.data
details_form_submitted = request.form.get('operation') == 'branding-details'
if file_upload_form_submitted and file_upload_form.validate_on_submit():
upload_filename = upload_letter_temp_logo(
file_upload_form.file.data.filename,
file_upload_form.file.data,
current_app.config['AWS_REGION'],
user_id=session["user_id"]
)
if logo and logo.startswith(LETTER_TEMP_TAG.format(user_id=session['user_id'])):
delete_letter_temp_file(logo)
return redirect(url_for('.create_letter_branding', logo=upload_filename))
if details_form_submitted and letter_branding_details_form.validate_on_submit():
if logo:
db_filename = letter_filename_for_db(logo, session['user_id'])
try:
letter_branding_client.create_letter_branding(
filename=db_filename,
name=letter_branding_details_form.name.data,
)
upload_letter_svg_logo(logo, db_filename, session['user_id'])
return redirect(url_for('main.letter_branding'))
except HTTPError as e:
if 'name' in e.message:
letter_branding_details_form.name.errors.append(e.message['name'][0])
else:
raise e
else:
# Show error on upload form if trying to submit with no logo
file_upload_form.validate()
return render_template(
'views/letter-branding/manage-letter-branding.html',
file_upload_form=file_upload_form,
letter_branding_details_form=letter_branding_details_form,
cdn_url=get_logo_cdn_domain(),
logo=logo
)
def upload_letter_svg_logo(old_filename, new_filename, user_id):
persist_logo(old_filename, permanent_letter_logo_name(new_filename, 'svg'))
delete_letter_temp_files_created_by(user_id)
| 35.794444
| 106
| 0.680118
|
5c15351b18b5f02802d7663526a574b0aebfe40c
| 3,569
|
py
|
Python
|
tatk/policy/mle/camrest/camrest_data_loader.py
|
yqzhangthu/tatk
|
4d27e89604a33f19f1c7b8fe5dc92d4ba6c6f10a
|
[
"Apache-2.0"
] | null | null | null |
tatk/policy/mle/camrest/camrest_data_loader.py
|
yqzhangthu/tatk
|
4d27e89604a33f19f1c7b8fe5dc92d4ba6c6f10a
|
[
"Apache-2.0"
] | null | null | null |
tatk/policy/mle/camrest/camrest_data_loader.py
|
yqzhangthu/tatk
|
4d27e89604a33f19f1c7b8fe5dc92d4ba6c6f10a
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
import pickle
import zipfile
import torch
import torch.utils.data as data
from tatk.util.camrest.state import default_state
from tatk.policy.vector.vector_camrest import CamrestVector
class PolicyDataLoaderCamrest():
def __init__(self):
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
voc_file = os.path.join(root_dir, 'data/camrest/sys_da_voc.txt')
voc_opp_file = os.path.join(root_dir, 'data/camrest/usr_da_voc.txt')
self.vector = CamrestVector(voc_file, voc_opp_file)
processed_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'processed_data')
if os.path.exists(processed_dir):
print('Load processed data file')
self._load_data(processed_dir)
else:
print('Start preprocessing the dataset')
self._build_data(root_dir, processed_dir)
def _build_data(self, root_dir, processed_dir):
raw_data = {}
for part in ['train', 'val', 'test']:
archive = zipfile.ZipFile(os.path.join(root_dir, 'data/camrest/{}.json.zip'.format(part)), 'r')
with archive.open('{}.json'.format(part), 'r') as f:
raw_data[part] = json.load(f)
self.data = {}
for part in ['train', 'val', 'test']:
self.data[part] = []
for key in raw_data[part]:
sess = key['dial']
state = default_state()
action = {}
for i, turn in enumerate(sess):
state['user_action'] = turn['usr']['dialog_act']
if i + 1 == len(sess):
state['terminal'] = True
for da in turn['usr']['slu']:
if da['slots'][0][0] != 'slot':
state['belief_state'][da['slots'][0][0]] = da['slots'][0][1]
action = turn['sys']['dialog_act']
self.data[part].append([self.vector.state_vectorize(state),
self.vector.action_vectorize(action)])
state['system_action'] = turn['sys']['dialog_act']
os.makedirs(processed_dir)
for part in ['train', 'val', 'test']:
with open(os.path.join(processed_dir, '{}.pkl'.format(part)), 'wb') as f:
pickle.dump(self.data[part], f)
def _load_data(self, processed_dir):
self.data = {}
for part in ['train', 'val', 'test']:
with open(os.path.join(processed_dir, '{}.pkl'.format(part)), 'rb') as f:
self.data[part] = pickle.load(f)
def create_dataset(self, part, batchsz):
print('Start creating {} dataset'.format(part))
s = []
a = []
for item in self.data[part]:
s.append(torch.Tensor(item[0]))
a.append(torch.Tensor(item[1]))
s = torch.stack(s)
a = torch.stack(a)
dataset = Dataset(s, a)
dataloader = data.DataLoader(dataset, batchsz, True)
print('Finish creating {} dataset'.format(part))
return dataloader
class Dataset(data.Dataset):
def __init__(self, s_s, a_s):
self.s_s = s_s
self.a_s = a_s
self.num_total = len(s_s)
def __getitem__(self, index):
s = self.s_s[index]
a = self.a_s[index]
return s, a
def __len__(self):
return self.num_total
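# A minimal usage sketch (assumes the tatk camrest data files are present under
# data/camrest/ as expected by the loader above).
if __name__ == "__main__":
    loader = PolicyDataLoaderCamrest()
    train_loader = loader.create_dataset('train', 32)
    # each batch is a (state, action) pair of stacked tensors
    states, actions = next(iter(train_loader))
    print(states.shape, actions.shape)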
| 38.793478
| 129
| 0.546091
|
3ba6bb1db4bdca300d540025b3523289595b01e6
| 7,706
|
py
|
Python
|
qcodes_contrib_drivers/drivers/Attocube/ANC300sim.py
|
ThorstenGroh/Qcodes_contrib_drivers
|
97e05f8f5d8762953ee9db9bc461d0814eef657d
|
[
"MIT"
] | 1
|
2021-12-03T18:04:57.000Z
|
2021-12-03T18:04:57.000Z
|
qcodes_contrib_drivers/drivers/Attocube/ANC300sim.py
|
ThorstenGroh/Qcodes_contrib_drivers
|
97e05f8f5d8762953ee9db9bc461d0814eef657d
|
[
"MIT"
] | 2
|
2020-05-29T11:00:52.000Z
|
2020-10-09T06:18:11.000Z
|
qcodes_contrib_drivers/drivers/Attocube/ANC300sim.py
|
ThorstenGroh/Qcodes_contrib_drivers
|
97e05f8f5d8762953ee9db9bc461d0814eef657d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""QCoDeS- Simulation for the Attocube ANC300 controller.
Simulation for the Attocube ANC300 driver in the same way as it is used in our lab.
Author:
Michael Wagener, FZJ / ZEA-2, m.wagener@fz-juelich.de
"""
import pyvisa
from qcodes.instrument.visa import VisaInstrument
# if set to True, every communication line is printed
_USE_DEBUG = True
# The ANC300 driver script expects an echo from the device.
# The AttocubeController script expects no echo!
_USE_ECHO = True
class MockVisa(VisaInstrument):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def set_address(self, address):
self.visa_handle = MockVisaHandle()
class MockVisaHandle:
"""
Simulate the API needed for the communication.
"""
    # Map of known commands to canned answers, so the simulated instrument responds realistically.
cmddef = {'ver': ['attocube ANC300 controller version 1.1.0-1304 2013-10-17 08:16',
'ANC150 compatibillity console'],
'getcser' : ['ANC300B-C-1514-3006076'],
'getser 1': ['ANM150A-M-1545-3010045'],
'getser 2': ['ANM150A-M-1545-3010041'],
'getser 3': ['Wrong axis type','ERROR'],
'getser 4': ['Wrong axis type','ERROR'],
'getser 5': ['Wrong axis type','ERROR'],
'getser 6': ['Wrong axis type','ERROR'],
'getser 7': ['Wrong axis type','ERROR'],
'getf 1': ['frequency = 210 Hz'],
'getf 2': ['frequency = 210 Hz'],
'getf 3': ['Wrong axis type','ERROR'],
'getf 4': ['Wrong axis type','ERROR'],
'getf 5': ['Wrong axis type','ERROR'],
'getf 6': ['Wrong axis type','ERROR'],
'getf 7': ['Wrong axis type','ERROR'],
'getv 1': ['voltage = 20.000000 V'],
'getv 2': ['voltage = 20.000000 V'],
'getv 3': ['Wrong axis type','ERROR'],
'getv 4': ['Wrong axis type','ERROR'],
'getv 5': ['Wrong axis type','ERROR'],
'getv 6': ['Wrong axis type','ERROR'],
'getv 7': ['Wrong axis type','ERROR'],
'geta 1': ['voltage = 0.000000 V'],
'geta 2': ['voltage = 0.000000 V'],
'geta 3': ['Wrong axis type','ERROR'],
'geta 4': ['Wrong axis type','ERROR'],
'geta 5': ['Wrong axis type','ERROR'],
'geta 6': ['Wrong axis type','ERROR'],
'geta 7': ['Wrong axis type','ERROR'],
'getm 1': ['mode = gnd'],
'getm 2': ['mode = gnd'],
'getm 3': ['Wrong axis type','ERROR'],
'getm 4': ['Wrong axis type','ERROR'],
'getm 5': ['Wrong axis type','ERROR'],
'getm 6': ['Wrong axis type','ERROR'],
'getm 7': ['Wrong axis type','ERROR'],
'getaci 1': ['acin = off'],
'getaci 2': ['acin = off'],
'getaci 3': ['Wrong axis type','ERROR'],
'getaci 4': ['Wrong axis type','ERROR'],
'getaci 5': ['Wrong axis type','ERROR'],
'getaci 6': ['Wrong axis type','ERROR'],
'getaci 7': ['Wrong axis type','ERROR'],
'getdci 1': ['dcin = off'],
'getdci 2': ['dcin = off'],
'getdci 3': ['Wrong axis type','ERROR'],
'getdci 4': ['Wrong axis type','ERROR'],
'getdci 5': ['Wrong axis type','ERROR'],
'getdci 6': ['Wrong axis type','ERROR'],
'getdci 7': ['Wrong axis type','ERROR'],
'gettu 1': ['trigger = off'],
'gettu 2': ['trigger = 2'],
'gettu 3': ['Wrong axis type','ERROR'],
'gettu 4': ['Wrong axis type','ERROR'],
'gettu 5': ['Wrong axis type','ERROR'],
'gettu 6': ['Wrong axis type','ERROR'],
'gettu 7': ['Wrong axis type','ERROR'],
'gettd 1': ['trigger = 1'],
'gettd 2': ['trigger = 3'],
'gettd 3': ['Wrong axis type','ERROR'],
'gettd 4': ['Wrong axis type','ERROR'],
'gettd 5': ['Wrong axis type','ERROR'],
'gettd 6': ['Wrong axis type','ERROR'],
'gettd 7': ['Wrong axis type','ERROR'],
'getc 1': ['cap = 5 nF'], # TODO
'getc 2': ['cap = 5 nF'], # TODO
'getc 3': ['Wrong axis type','ERROR'],
'getc 4': ['Wrong axis type','ERROR'],
'getc 5': ['Wrong axis type','ERROR'],
'getc 6': ['Wrong axis type','ERROR'],
'getc 7': ['Wrong axis type','ERROR'],
'stepu 1': ['0'],
'stepu 2': ['0'],
'stepd 1': ['0'],
'stepd 2': ['0'],
# There is no simulation for the correct movement
}
def __init__(self):
if _USE_DEBUG:
print("DBG-Mock: init")
self.closed = False
self.answer = []
def clear(self):
if _USE_DEBUG:
print("DBG-Mock: clear")
self.answer = []
def close(self):
# make it an error to ask or write after close
if _USE_DEBUG:
print("DBG-Mock: close")
self.closed = True
def write(self, data):
"""
Writes data to device or interface synchronously.
"""
if self.closed:
raise RuntimeError("Trying to write to a closed instrument")
cmd = data.rstrip()
if _USE_DEBUG:
print("DBG-Mock: write", cmd)
# setxx <axis> <val> --> <kenn> = <val> <unit>
# getxx <axis> <val> --> <kenn> = <val> <unit>
if cmd.startswith('set'):
if _USE_ECHO:
self.answer = [ cmd, 'OK' ]
else:
self.answer = [ 'OK' ]
tmp = cmd.split()
cmd = tmp[0].replace('set','get') + ' ' + tmp[1]
if cmd in self.cmddef:
val = self.cmddef[cmd]
if isinstance( val, (list, tuple) ):
val = val[0]
else:
val = ""
val = val.split(' = ')
if len(val) == 2:
unit = val[1].split()
if len(unit) > 1:
unit = unit[1]
else:
unit = ""
setval = [ val[0]+' = '+tmp[2]+' '+unit ]
self.cmddef.update( {cmd: setval} )
else:
self.cmddef.update( {cmd: tmp[2]} )
elif cmd in self.cmddef:
if _USE_ECHO:
self.answer.append(cmd)
for c in self.cmddef[cmd]:
self.answer.append(c)
if self.answer[-1] != 'ERROR':
self.answer.append( 'OK' )
else:
if _USE_ECHO:
self.answer.append(cmd)
self.answer.append('OK')
return len(cmd), pyvisa.constants.StatusCode.success
def read(self):
"""
Reads data from device or interface synchronously.
"""
if self.closed:
raise RuntimeError("Trying to read from a closed instrument")
if _USE_DEBUG:
print("DBG-Mock: read", self.answer)
if len(self.answer) > 0:
return self.answer.pop(0)
return 'ERROR'
def ask(self, cmd):
print("DBG-Mock: MockVisaHandle ask", cmd)
if self.closed:
raise RuntimeError("Trying to ask a closed instrument")
self.write(cmd)
return self.read()
def query(self, cmd):
self.write(cmd)
return self.read()
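# Minimal usage sketch: drive the mock handle directly, without a VISA resource
# or a QCoDeS station.
if __name__ == "__main__":
    handle = MockVisaHandle()
    print(handle.ask('getf 1'))   # echoes the command first while _USE_ECHO is set
    print(handle.read())          # 'frequency = 210 Hz'
    print(handle.read())          # 'OK'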
| 36.009346
| 87
| 0.47171
|
2c6dea30ba3dccbc726b623d2eb54b6359949ded
| 4,673
|
py
|
Python
|
brainscore/metrics/xarray_utils.py
|
dmayo/brain-score
|
3ab4258152c9e3f8c7d29afb10158b184dbcebbe
|
[
"MIT"
] | 52
|
2019-12-13T06:43:44.000Z
|
2022-02-21T07:47:39.000Z
|
brainscore/metrics/xarray_utils.py
|
dmayo/brain-score
|
3ab4258152c9e3f8c7d29afb10158b184dbcebbe
|
[
"MIT"
] | 104
|
2019-12-06T18:08:54.000Z
|
2022-03-31T23:57:51.000Z
|
brainscore/metrics/xarray_utils.py
|
dmayo/brain-score
|
3ab4258152c9e3f8c7d29afb10158b184dbcebbe
|
[
"MIT"
] | 32
|
2019-12-05T14:31:14.000Z
|
2022-03-10T02:04:45.000Z
|
import numpy as np
from brainio.assemblies import NeuroidAssembly, array_is_element, walk_coords
from brainscore.metrics import Score
class Defaults:
expected_dims = ('presentation', 'neuroid')
stimulus_coord = 'image_id'
neuroid_dim = 'neuroid'
neuroid_coord = 'neuroid_id'
class XarrayRegression:
"""
Adds alignment-checking, un- and re-packaging, and comparison functionality to a regression.
"""
def __init__(self, regression, expected_dims=Defaults.expected_dims, neuroid_dim=Defaults.neuroid_dim,
neuroid_coord=Defaults.neuroid_coord, stimulus_coord=Defaults.stimulus_coord):
self._regression = regression
self._expected_dims = expected_dims
self._neuroid_dim = neuroid_dim
self._neuroid_coord = neuroid_coord
self._stimulus_coord = stimulus_coord
self._target_neuroid_values = None
def fit(self, source, target):
source, target = self._align(source), self._align(target)
source, target = source.sortby(self._stimulus_coord), target.sortby(self._stimulus_coord)
self._regression.fit(source, target)
self._target_neuroid_values = {}
for name, dims, values in walk_coords(target):
if self._neuroid_dim in dims:
assert array_is_element(dims, self._neuroid_dim)
self._target_neuroid_values[name] = values
def predict(self, source):
source = self._align(source)
predicted_values = self._regression.predict(source)
prediction = self._package_prediction(predicted_values, source=source)
return prediction
def _package_prediction(self, predicted_values, source):
coords = {coord: (dims, values) for coord, dims, values in walk_coords(source)
if not array_is_element(dims, self._neuroid_dim)}
# re-package neuroid coords
dims = source.dims
# if there is only one neuroid coordinate, it would get discarded and the dimension would be used as coordinate.
# to avoid this, we can build the assembly first and then stack on the neuroid dimension.
neuroid_level_dim = None
if len(self._target_neuroid_values) == 1: # extract single key: https://stackoverflow.com/a/20145927/2225200
(neuroid_level_dim, _), = self._target_neuroid_values.items()
dims = [dim if dim != self._neuroid_dim else neuroid_level_dim for dim in dims]
for target_coord, target_value in self._target_neuroid_values.items():
# this might overwrite values which is okay
coords[target_coord] = (neuroid_level_dim or self._neuroid_dim), target_value
prediction = NeuroidAssembly(predicted_values, coords=coords, dims=dims)
if neuroid_level_dim:
prediction = prediction.stack(**{self._neuroid_dim: [neuroid_level_dim]})
return prediction
def _align(self, assembly):
assert set(assembly.dims) == set(self._expected_dims), \
f"Expected {set(self._expected_dims)}, but got {set(assembly.dims)}"
return assembly.transpose(*self._expected_dims)
class XarrayCorrelation:
def __init__(self, correlation, correlation_coord=Defaults.stimulus_coord, neuroid_coord=Defaults.neuroid_coord):
self._correlation = correlation
self._correlation_coord = correlation_coord
self._neuroid_coord = neuroid_coord
def __call__(self, prediction, target):
# align
prediction = prediction.sortby([self._correlation_coord, self._neuroid_coord])
target = target.sortby([self._correlation_coord, self._neuroid_coord])
assert np.array(prediction[self._correlation_coord].values == target[self._correlation_coord].values).all()
assert np.array(prediction[self._neuroid_coord].values == target[self._neuroid_coord].values).all()
# compute correlation per neuroid
neuroid_dims = target[self._neuroid_coord].dims
assert len(neuroid_dims) == 1
correlations = []
for i, coord_value in enumerate(target[self._neuroid_coord].values):
target_neuroids = target.isel(**{neuroid_dims[0]: i}) # `isel` is about 10x faster than `sel`
prediction_neuroids = prediction.isel(**{neuroid_dims[0]: i})
r, p = self._correlation(target_neuroids, prediction_neuroids)
correlations.append(r)
# package
result = Score(correlations,
coords={coord: (dims, values)
for coord, dims, values in walk_coords(target) if dims == neuroid_dims},
dims=neuroid_dims)
return result
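# A minimal usage sketch with synthetic data; the regression/correlation choices
# (sklearn LinearRegression, scipy pearsonr) and the coordinate names follow the
# Defaults above, but the toy assembly construction here is an assumption.
if __name__ == "__main__":
    from scipy.stats import pearsonr
    from sklearn.linear_model import LinearRegression

    n_stimuli, n_neuroids = 50, 10
    coords = {'image_id': ('presentation', [f'im{i}' for i in range(n_stimuli)]),
              'object_name': ('presentation', ['object'] * n_stimuli),
              'neuroid_id': ('neuroid', [f'n{i}' for i in range(n_neuroids)]),
              'region': ('neuroid', ['IT'] * n_neuroids)}
    source = NeuroidAssembly(np.random.rand(n_stimuli, n_neuroids),
                             coords=coords, dims=['presentation', 'neuroid'])
    target = NeuroidAssembly(np.random.rand(n_stimuli, n_neuroids),
                             coords=coords, dims=['presentation', 'neuroid'])

    regression = XarrayRegression(LinearRegression())
    regression.fit(source, target)
    prediction = regression.predict(source)

    correlation = XarrayCorrelation(pearsonr)
    print(correlation(prediction, target))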
| 47.20202
| 120
| 0.685641
|
42579d11e62e618d7714ee977cd4e5d990c7bb3d
| 520
|
py
|
Python
|
pykeyset/core/profile/__init__.py
|
staticintlucas/pykeyset
|
8581252c85dfceebe22926af4640164a0895e7a0
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-07-06T16:43:25.000Z
|
2021-07-06T16:43:25.000Z
|
pykeyset/core/profile/__init__.py
|
staticintlucas/pykeyset
|
8581252c85dfceebe22926af4640164a0895e7a0
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
pykeyset/core/profile/__init__.py
|
staticintlucas/pykeyset
|
8581252c85dfceebe22926af4640164a0895e7a0
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import os.path
from pathlib import Path
from ... import resources
from .load import load_builtin, load_file
from .profile import Profile
__all__ = ["Profile", "load_builtin", "load_file"]
# TODO this function is used by the cmdlist parser. Move it somewhere more appropriate?
def load(ctx, file):
"""load a built in profile or a profile config file"""
if not os.path.isfile(file) and file in resources.profiles:
ctx.profile = load_builtin(file)
else:
ctx.profile = load_file(Path(file))
| 27.368421
| 87
| 0.717308
|
d5b0d5f0441aaf583d4c8a98676a868e10f282c4
| 5,337
|
py
|
Python
|
social-tags/src/data/delicious_t140.py
|
queirozfcom/auto-tagger
|
d9c0339648562ceca2d7cd10a02aaf56d353ae7b
|
[
"MIT"
] | null | null | null |
social-tags/src/data/delicious_t140.py
|
queirozfcom/auto-tagger
|
d9c0339648562ceca2d7cd10a02aaf56d353ae7b
|
[
"MIT"
] | 1
|
2016-02-19T03:08:47.000Z
|
2016-02-19T03:08:47.000Z
|
social-tags/src/data/delicious_t140.py
|
queirozfcom/auto-tagger
|
d9c0339648562ceca2d7cd10a02aaf56d353ae7b
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import xml.etree.ElementTree as ET
from joblib import Parallel, delayed
import pickle
import os
import gc
from src.helpers.labels import filter_tag, truncate_labels
from src.helpers.delicious_t140 import load_contents, make_path_to_file
from src.features.delicious_t140 import clean_text_delicious
def get_sample_from_cache(interim_data_root,sample_frac):
full_path = interim_data_root.rstrip('/') + "/docs_df_with_clean_content_SAMPLE_FRAC_{}_SEED_42.p".format(sample_frac)
if os.path.isfile(full_path):
docs_df = pickle.load(open(full_path, "rb"))
return docs_df
else:
raise FileNotFoundError(full_path)
def get_full_from_cache(interim_data_root, with_contents=None):
if with_contents is None:
with_contents = False
if with_contents:
full_path = interim_data_root.rstrip('/') + "/docs_df_with_content.p"
else:
full_path = interim_data_root.rstrip('/') + "/docs_df_no_content.p"
if os.path.isfile(full_path):
docs_df = pickle.load(open(full_path, "rb"))
return docs_df
else:
raise FileNotFoundError(full_path)
def load_or_get_from_cache(path_to_file, interim_data_root):
TAG_MIN_DF = 10
if os.path.isfile(interim_data_root.rstrip('/') + "/docs_df_no_content.p"):
docs_df = pickle.load(open(interim_data_root.rstrip('/') + "/docs_df_no_content.p", "rb"))
else:
tree = ET.parse(path_to_file)
dataset = tree.getroot()
elements = Parallel(n_jobs=-1)(
delayed(__get_attribute_dict_from_document_node)(document) for document in dataset)
docs_df = pd.DataFrame.from_records(elements)
# TRUNCATE LABELS HERE
labels = docs_df["tags"].map(lambda tagstring: tagstring.split(","))
labelsets = truncate_labels(labels, TAG_MIN_DF)
joined_labelsets = [",".join(labelset) for labelset in labelsets]
docs_df["tags"] = joined_labelsets
docs_df = docs_df[docs_df["filetype"] == "html"].reindex()
docs_df['num_tags'] = docs_df['tags'].apply(lambda tags: len(tags.split(',')))
docs_df.rename(columns={'users': 'num_users'}, inplace=True)
docs_df['num_users'] = docs_df['num_users'].astype('int64')
docs_df['num_tags'] = docs_df['num_tags'].astype('int64')
pickle.dump(docs_df, open(interim_data_root.rstrip('/') + "/docs_df_no_content.p", "wb"))
return docs_df
def load_or_get_from_cache_with_contents(source_dataframe_with_no_contents, interim_data_root, data_root, sample_frac=None):
"""
extracts a random sample of source_dataframe and loads text contents for each document in the sampled dataframe.
:param source_dataframe_with_no_contents: the full delicious-t140 dataframe, with document tags and IDs, but no contents yet
or None if you just want to use this method to load the dataset with contents from cache
:param interim_data_root: path to the directory where interim data is kept. This is our cache directory.
:param data_root: path to the directory where the original files were downloaded, or None if you just want
to use this method to load the dataset with contents from disk
:param sample_frac: sample fraction. default is 1.0 or 100%
:return: a sample
"""
if sample_frac is None:
sample_frac = 1.0
if os.path.isfile(interim_data_root.rstrip('/') + "/docs_df_with_content.p"):
loaded_dataframe = pickle.load(open(interim_data_root.rstrip('/') + "/docs_df_with_content.p", "rb"))
random_indices = np.random.choice(loaded_dataframe.index.values, int(len(loaded_dataframe) * sample_frac),
replace=False)
sample_df_src = loaded_dataframe.loc[random_indices]
# https://stackoverflow.com/a/27680109/436721
sample_df = sample_df_src.reset_index().drop(['index'], axis=1).copy()
else:
random_indices = np.random.choice(source_dataframe_with_no_contents.index.values, int(len(source_dataframe_with_no_contents) * sample_frac),
replace=False)
sample_df = source_dataframe_with_no_contents.loc[random_indices]
sample_df = sample_df.reset_index().drop(['index'], axis=1)
sample_df['contents'] = sample_df['hash'].map(
lambda hash: clean_text_delicious(load_contents(make_path_to_file(data_root, hash))))
pickle.dump(sample_df, open(interim_data_root.rstrip('/') + "/docs_df_with_content.p", "wb"))
return sample_df
# read the tag-assignment file (taginfo.xml) into a dataframe
# no contents
def __get_attribute_dict_from_document_node(document):
attrs_dict = dict()
for attribute_node in document:
# each unique tag is only counted once
tags = set()
if attribute_node.tag == 'tags':
for tag_node in attribute_node:
for subnode in tag_node:
if subnode.tag == 'name':
if subnode.text is not None:
tag_text = subnode.text
tags.add(filter_tag(tag_text).strip())
attrs_dict['tags'] = ','.join(tags)
else:
attrs_dict[attribute_node.tag] = attribute_node.text
return attrs_dict
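# A minimal sketch of the intended call order; the paths below are placeholders
# (assumptions), not the project's actual directory layout.
if __name__ == "__main__":
    docs_df = load_or_get_from_cache("data/raw/delicioust140/taginfo.xml",
                                     "data/interim/delicioust140")
    sample_df = load_or_get_from_cache_with_contents(docs_df,
                                                     "data/interim/delicioust140",
                                                     "data/raw/delicioust140",
                                                     sample_frac=0.1)
    print(sample_df.shape)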
| 38.956204
| 148
| 0.681469
|
81f481a81e3b98d4fc468b81817b74141b40b6ca
| 1,560
|
py
|
Python
|
src/generate_makeshift_hood.py
|
hirmiura/cdda-mod-Yararezon
|
41bda74d6196fbac3e67ef326dde7d65e3e8daf9
|
[
"MIT"
] | 1
|
2021-09-28T17:53:06.000Z
|
2021-09-28T17:53:06.000Z
|
src/generate_makeshift_hood.py
|
hirmiura/cdda-mod-Yararezon
|
41bda74d6196fbac3e67ef326dde7d65e3e8daf9
|
[
"MIT"
] | null | null | null |
src/generate_makeshift_hood.py
|
hirmiura/cdda-mod-Yararezon
|
41bda74d6196fbac3e67ef326dde7d65e3e8daf9
|
[
"MIT"
] | 1
|
2021-09-28T17:53:08.000Z
|
2021-09-28T17:53:08.000Z
|
#!/usr/bin/env -S python
# -*- coding: utf-8 -*-
import io
import json
import sys
from cdda_gettext import gt
# Workaround for garbled characters when running under MSYS2
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
hood_prefix = 'makeshift_hood'
# IDs of the bedding items used as hood materials
compos = [
'sheet',
'blanket',
'down_blanket',
'fur_blanket',
'quilt',
'quilt_patchwork',
]
# Load the bedding (material) data
with open('data/json/items/generic/bedding.json') as f:
bedding = json.load(f)
data = []
for bid in compos:
    # Material data
mat = next((b for b in bedding if b['id'] == bid), None)
    name = gt.ngettext(mat['name']['str'], None, 1)  # Japanese name via gettext
encumbrance = mat['armor'][0]['encumbrance']
    # Hood item
hood = {
"id": f"{hood_prefix}_{bid}",
"type": "ARMOR",
"copy-from": bid,
"name": {"str": f"簡易フード({name})"},
"description": f"{name}で作った簡易フードです。嵩張りますが暖かいです。",
"armor": [
{
"encumbrance": encumbrance,
"coverage": 100,
"covers": ["head", "mouth"]
}
]
}
data.append(hood)
    # Crafting recipe
recipe = {
"result": f"{hood_prefix}_{bid}",
"type": "recipe",
"category": "CC_ARMOR",
"subcategory": "CSC_ARMOR_HEAD",
"skill_used": "survival",
"difficulty": 0,
"time": "3 m",
"reversible": True,
"autolearn": True,
"components": [
[[bid, 1]]
]
}
data.append(recipe)
# Dump as JSON
json_text = json.dumps(data, ensure_ascii=False)
print(json_text)
| 21.369863
| 66
| 0.532692
|
f0a7ad27736f97d85e9936b4fdb7a6a7ad85843f
| 3,877
|
py
|
Python
|
week1/utilities/click_models.py
|
gsingers/search_with_machine_learning_course
|
94622c24eb07e52793cf5e2289f2f69359bb87af
|
[
"Apache-2.0"
] | 11
|
2021-12-30T18:55:56.000Z
|
2022-03-15T17:33:10.000Z
|
week1/utilities/click_models.py
|
gsingers/search_with_machine_learning_course
|
94622c24eb07e52793cf5e2289f2f69359bb87af
|
[
"Apache-2.0"
] | 9
|
2021-12-31T05:24:33.000Z
|
2022-03-08T07:44:45.000Z
|
week1/utilities/click_models.py
|
gsingers/search_with_machine_learning_course
|
94622c24eb07e52793cf5e2289f2f69359bb87af
|
[
"Apache-2.0"
] | 127
|
2021-12-24T17:03:26.000Z
|
2022-03-13T17:20:21.000Z
|
# Implements various click models
import pandas as pd
import numpy as np
def binary_func(x):
if x > 0:
return 1
return 0
def step(x):
if x < 0.05: return 0
elif x >= 0.05 and x < 0.10: return 0.5
elif x >= 0.10 and x < 0.3: return 0.75
else: return 1
# Given a click model type, transform the "grade" into an appropriate value between 0 and 1, inclusive
# This operates on the data frame and adds a "grade" column
#
def apply_click_model(data_frame, click_model_type="binary", downsample=True, prior=1000, alpha=30, beta=70, quantiles=10):
if click_model_type == "binary":
print("Binary click model")
data_frame["grade"] = data_frame["clicks"].apply(lambda x: binary_func(x))
if downsample:
data_frame = down_sample_buckets(data_frame)
elif click_model_type == "ctr":
print("CTR click model")
data_frame["grade"] = (data_frame["clicks"] / (data_frame["num_impressions"] + prior)).fillna(0)
if downsample:
data_frame = down_sample_continuous(data_frame)
elif click_model_type == "beta":
print("Beta click model")
clicks_alpha = data_frame["clicks"] + alpha
data_frame["grade"] = ((clicks_alpha) / ((data_frame["num_impressions"] + beta) + (clicks_alpha))).fillna(0)
if downsample:
data_frame = down_sample_continuous(data_frame)
elif click_model_type == "quantiles": #similar to step, but quantiles
print("CTR Quantiles click model")
data_frame["grade"] = pd.qcut((data_frame["clicks"] / (data_frame["num_impressions"] + prior)).fillna(0), quantiles, labels=False) / quantiles
if downsample:
data_frame = down_sample_continuous(data_frame)
elif click_model_type == "beta_quantiles": #similar to step, but quantiles
print("Beta quantiles click model")
clicks_alpha = data_frame["clicks"] + alpha
data_frame["grade"] = pd.qcut(((clicks_alpha) / ((data_frame["num_impressions"] + beta) + (clicks_alpha))).fillna(0), quantiles, labels=False) / quantiles
if downsample:
data_frame = down_sample_continuous(data_frame)
elif click_model_type == "heuristic":
print("Heuristic click model")
data_frame["grade"] = (data_frame["clicks"] / (data_frame["num_impressions"] + prior)).fillna(0).apply(lambda x: step(x))
if downsample:
#print("Size pre-downsample: %s\nVal Counts: %s\n" % (len(data_frame), data_frame['grade'].value_counts()))
data_frame = down_sample_buckets(data_frame)
#print("Size post-downsample: %s\nVal Counts: %s\n" % (len(data_frame), data_frame['grade'].value_counts()))
return data_frame
# https://stackoverflow.com/questions/55119651/downsampling-for-more-than-2-classes
def down_sample_buckets(data_frame):
g = data_frame.groupby('grade', group_keys=False)
return pd.DataFrame(g.apply(lambda x: x.sample(g.size().min()))).reset_index(drop=True)
# Generate the probabilities for our grades and then use that to sample from
# from: https://stackoverflow.com/questions/63738389/pandas-sampling-from-a-dataframe-according-to-a-target-distribution
# If you want to learn more about this, see http://www.seas.ucla.edu/~vandenbe/236C/lectures/smoothing.pdf
def down_sample_continuous(data_frame):
x = np.sort(data_frame['grade'])
f_x = np.gradient(x)*np.exp(-x**2/2)
sample_probs = f_x/np.sum(f_x)
    try:  # if we have too many zeros we can get value errors, so try sampling without replacement and fall back to the original frame
sample = data_frame.sort_values('grade').sample(frac=0.8, weights=sample_probs, replace=False)
except Exception as e:
print("Unable to downsample, keeping original:\n%s" % e)
sample = data_frame #data_frame.sort_values('grade').sample(frac=0.8, weights=sample_probs, replace=True)
return sample
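# A minimal sketch of how the click models are applied: a toy judgments frame with
# 'clicks' and 'num_impressions' columns (the schema assumed by apply_click_model).
if __name__ == "__main__":
    toy = pd.DataFrame({"clicks": [0, 2, 40], "num_impressions": [50, 100, 400]})
    graded = apply_click_model(toy, click_model_type="ctr", downsample=False, prior=100)
    print(graded[["clicks", "num_impressions", "grade"]])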
| 50.350649
| 162
| 0.681713
|
2e78ccf25ae5c623060572a172cb80502b1b4d34
| 5,061
|
py
|
Python
|
okex/context.py
|
bopo/okex
|
55107b4b6e02b0aa0d24003d095e118f7fc51f4b
|
[
"BSD-2-Clause"
] | 10
|
2017-12-14T15:28:33.000Z
|
2021-07-04T13:00:14.000Z
|
okex/context.py
|
bopo/okex
|
55107b4b6e02b0aa0d24003d095e118f7fc51f4b
|
[
"BSD-2-Clause"
] | 1
|
2018-05-26T11:20:40.000Z
|
2018-06-11T07:11:34.000Z
|
okex/context.py
|
bopo/okex
|
55107b4b6e02b0aa0d24003d095e118f7fc51f4b
|
[
"BSD-2-Clause"
] | 5
|
2018-01-29T03:44:31.000Z
|
2018-11-15T10:08:33.000Z
|
# -*- coding: utf-8 -*-
CONTRACT_TYPE = ('this_week', 'next_week', 'quarter')
SYMBOLS = ('btc_usd', 'ltc_usd', 'ltc_btc')
TYPES = ('1min', '3min', '5min', '15min', '30min', '1day', '3day', '1week', '1hour', '2hour',
'4hour', '6hour', '12hour')
ENDPOINT = {
'ticker': ('/api/v1/{}ticker.do', 'get'),
'depth': ('/api/v1/{}depth.do', 'get'),
'kline': ('/api/v1/{}kline.do', 'get'),
'trades': ('/api/v1/{}trades.do', 'get'),
'index': ('/api/v1/future_index.do', 'get'),
'exchange_rate': ('/api/v1/exchange_rate.do', 'post'),
'estimated_price': '/api/v1/future_estimated_price.do',
'hold_amount': '/api/v1/future_hold_amount.do',
'price_limit': '/api/v1/future_price_limit.do',
'user_info': '/api/v1/{}userinfo.do',
'position': '/api/v1/future_position.do',
'trades_history': '/api/v1/future_trades_history.do',
'batch_trade': '/api/v1/{}batch_trade.do',
'cancel': '/api/v1/future_cancel.do',
'order_info': '/api/v1/future_order_info.do',
'orders_info': '/api/v1/future_orders_info.do',
'user_info_4fix': '/api/v1/future_userinfo_4fix.do',
'position_4fix': '/api/v1/future_position_4fix.do',
'explosive': '/api/v1/future_explosive.do',
'withdraw': '/api/v1/withdraw.do',
'cancel_withdraw': '/api/v1/cancel_withdraw.do',
'withdraw_info': '/api/v1/withdraw_info.do',
'account_records': '/api/v1/account_records.do',
'order_history': '/api/v1/order_history.do',
'cancel_order': '/api/v1/cancel_order.do'
}
# Error codes (spot trading)
ERROR_CODE = {
'10000': '必选参数不能为空',
'10001': '用户请求频率过快,超过该接口允许的限额',
'10002': '系统错误',
'10004': '请求失败',
'10005': 'SecretKey不存在',
'10006': 'Api_key不存在',
'10007': '签名不匹配',
'10008': '非法参数',
'10009': '订单不存在',
'10010': '余额不足',
'10011': '买卖的数量小于BTC/LTC最小买卖额度',
'10012': '当前网站暂时只支持btc_usd ltc_usd',
'10013': '此接口只支持https请求',
'10014': '下单价格不得≤0或≥1000000',
'10015': '下单价格与最新成交价偏差过大',
'10016': '币数量不足',
'10017': 'API鉴权失败',
'10018': '借入不能小于最低限额[usd:100,btc:0.1,ltc:1]',
'10019': '页面没有同意借贷协议',
'10020': '费率不能大于1%',
'10021': '费率不能小于0.01%',
'10023': '获取最新成交价错误',
'10024': '可借金额不足',
'10025': '额度已满,暂时无法借款',
'10026': '借款(含预约借款)及保证金部分不能提出',
'10027': '修改敏感提币验证信息,24小时内不允许提现',
'10028': '提币金额已超过今日提币限额',
'10029': '账户有借款,请撤消借款或者还清借款后提币',
'10031': '存在BTC/LTC充值,该部分等值金额需6个网络确认后方能提出',
'10032': '未绑定手机或谷歌验证',
'10033': '服务费大于最大网络手续费',
'10034': '服务费小于最低网络手续费',
'10035': '可用BTC/LTC不足',
'10036': '提币数量小于最小提币数量',
'10037': '交易密码未设置',
'10040': '取消提币失败',
'10041': '提币地址不存在或未认证',
'10042': '交易密码错误',
'10043': '合约权益错误,提币失败',
'10044': '取消借款失败',
'10047': '当前为子账户,此功能未开放',
'10048': '提币信息不存在',
'10049': '小额委托(<0.15BTC)的未成交委托数量不得大于50个',
'10050': '重复撤单',
'10052': '提币受限',
'10064': '美元充值后的48小时内,该部分资产不能提出',
'10100': '账户被冻结',
'10101': '订单类型错误',
'10102': '不是本用户的订单',
'10103': '私密订单密钥错误',
'10216': '非开放API',
'1002': '交易金额大于余额',
'1003': '交易金额小于最小交易值',
'1004': '交易金额小于0',
'1007': '没有交易市场信息',
'1008': '没有最新行情信息',
'1009': '没有订单',
'1010': '撤销订单与原订单用户不一致',
'1011': '没有查询到该用户',
'1013': '没有订单类型',
'1014': '没有登录',
'1015': '没有获取到行情深度信息',
'1017': '日期参数错误',
'1018': '下单失败',
'1019': '撤销订单失败',
'1024': '币种不存在',
'1025': '没有K线类型',
'1026': '没有基准币数量',
'1027': '参数不合法可能超出限制',
'1028': '保留小数位失败',
'1029': '正在准备中',
'1030': '有融资融币无法进行交易',
'1031': '转账余额不足',
'1032': '该币种不能转账',
'1035': '密码不合法',
'1036': '谷歌验证码不合法',
'1037': '谷歌验证码不正确',
'1038': '谷歌验证码重复使用',
'1039': '短信验证码输错限制',
'1040': '短信验证码不合法',
'1041': '短信验证码不正确',
'1042': '谷歌验证码输错限制',
'1043': '登陆密码不允许与交易密码一致',
'1044': '原密码错误',
'1045': '未设置二次验证',
'1046': '原密码未输入',
'1048': '用户被冻结',
'1201': '账号零时删除',
'1202': '账号不存在',
'1203': '转账金额大于余额',
'1204': '不同种币种不能转账',
'1205': '账号不存在主从关系',
'1206': '提现用户被冻结',
'1207': '不支持转账',
'1208': '没有该转账用户',
'1209': '当前api不可用',
'1216': '市价交易暂停,请选择限价交易',
'1217': '您的委托价格超过最新成交价的±5%,存在风险,请重新下单',
'1218': '下单失败,请稍后再试',
'20001': '用户不存在',
'20002': '用户被冻结',
'20003': '用户被爆仓冻结',
'20004': '合约账户被冻结',
'20005': '用户合约账户不存在',
'20006': '必填参数为空',
'20007': '参数错误',
'20008': '合约账户余额为空',
'20009': '虚拟合约状态错误',
'20010': '合约风险率信息不存在',
'20011': '10倍/20倍杠杆开BTC前保证金率低于90%/80%,10倍/20倍杠杆开LTC前保证金率低于80%/60%',
'20012': '10倍/20倍杠杆开BTC后保证金率低于90%/80%,10倍/20倍杠杆开LTC后保证金率低于80%/60%',
'20013': '暂无对手价',
'20014': '系统错误',
'20015': '订单信息不存在',
'20016': '平仓数量是否大于同方向可用持仓数量',
'20017': '非本人操作',
'20018': '下单价格高于前一分钟的103%或低于97%',
'20019': '该IP限制不能请求该资源',
'20020': '密钥不存在',
'20021': '指数信息不存在',
'20022': '接口调用错误(全仓模式调用全仓接口,逐仓模式调用逐仓接口)',
'20023': '逐仓用户',
'20024': 'sign签名不匹配',
'20025': '杠杆比率错误',
'20026': 'API鉴权错误',
'20027': '无交易记录',
'20028': '合约不存在',
'20029': '转出金额大于可转金额',
'20030': '账户存在借款',
'20038': '根据相关法律,您所在的国家或地区不能使用该功能。',
'20049': '用户请求接口过于频繁',
}
| 29.946746
| 93
| 0.562735
|
762f30d1bd634ff9cae7704cf73ecd8fd7f1a5f7
| 733
|
py
|
Python
|
bookstore_project/urls.py
|
SumnanAzadi/django-professional
|
445b519dcae848e92f14b908c89e15ee8c2ff870
|
[
"MIT"
] | null | null | null |
bookstore_project/urls.py
|
SumnanAzadi/django-professional
|
445b519dcae848e92f14b908c89e15ee8c2ff870
|
[
"MIT"
] | null | null | null |
bookstore_project/urls.py
|
SumnanAzadi/django-professional
|
445b519dcae848e92f14b908c89e15ee8c2ff870
|
[
"MIT"
] | null | null | null |
from django.conf import settings # new
from django.conf.urls.static import static # new
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
# Django admin
path('anything-but-admin/', admin.site.urls),
# User management
path('accounts/', include('allauth.urls')), # new
# Local apps
path('accounts/', include('users.urls')), # new
path('', include('pages.urls')),
path('books/', include('books.urls')),
path('orders/', include('orders.urls')), # new
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| 31.869565
| 65
| 0.672578
|
305cfb340cdae00823f040d3b7a2c91fdb100972
| 2,799
|
py
|
Python
|
nb2page.py
|
dfdazac/nb2page
|
ffec71a2176875fc85849d137d6ba1e43327d215
|
[
"MIT"
] | null | null | null |
nb2page.py
|
dfdazac/nb2page
|
ffec71a2176875fc85849d137d6ba1e43327d215
|
[
"MIT"
] | 2
|
2018-10-02T08:15:02.000Z
|
2018-10-02T08:35:07.000Z
|
nb2page.py
|
dfdazac/nb2page
|
ffec71a2176875fc85849d137d6ba1e43327d215
|
[
"MIT"
] | null | null | null |
from sys import argv
import os
import datetime
import shutil
ASSETS_PATH = 'assets/img/'
POSTS_PATH = '_posts/'
# Read header with front matter, MathJax settings, etc.
with open('header.txt') as file:
header = file.read()
# Read markdown body
nb_path = argv[1]
nb_name = os.path.basename(nb_path)
name = os.path.splitext(nb_name)[0]
dir_name = os.path.dirname(nb_path)
files_folder = name + '_files'
md_path = os.path.join(dir_name, name + '.md')
body = ''
asset_start = '!['
assets_to_copy = []
with open(md_path) as markdown:
for line in markdown:
new_line = line
# Check for own or external assets
if new_line.startswith(asset_start):
# png are assumed to be in the files folder already
            # NOTE: the image-link prefixes below assume nbconvert-style '![png](...)'
            # and '![](...)' markdown links
            if new_line.startswith('![png]('):
                new_line = new_line.replace('![png](', '![png](/' + ASSETS_PATH)
            # External assets have to be copied to the files folder
            elif new_line.startswith('![]('):
                asset = new_line[new_line.find('(') + 1:new_line.find(')')]
                new_line = '![](/' + ASSETS_PATH + files_folder + '/' + os.path.basename(asset) + ')\n'
                assets_to_copy.append(asset)
body += new_line
# Save updated file
print('Adding extra content')
with open(md_path, 'w') as markdown:
markdown.write(header + body)
# Rename to include timestamp
date_str = '{:%Y-%m-%d}-'.format(datetime.date.today())
new_md_name = date_str + os.path.basename(md_path)
new_path = os.path.join(dir_name, new_md_name)
os.rename(md_path, new_path)
# Copy external assets to files folder
files_folder_path = os.path.join(dir_name, files_folder)
for asset in assets_to_copy:
shutil.copy(os.path.join(dir_name, asset), files_folder_path)
# Move files to folders in page root
page_path = argv[2]
print('Moving to', page_path)
def move_replace(src, dest):
""" Move src to dest (either files or directories recursively),
asking for overwrite when required.
"""
proceed = True
if os.path.exists(dest):
answer = ''
while answer not in ('y', 'n'):
answer = input('Warning:\n\t{:s}\nalready exists, do you want to overwrite it? (y/n): '.format(dest))
if answer == 'y':
# Check whether destination to be removed is
# file or directory to proceed accordingly
if os.path.isdir(dest):
shutil.rmtree(dest)
elif os.path.isfile(dest):
os.remove(dest)
else:
proceed = False
print('Cancelled writing.')
if proceed:
shutil.move(src, dest)
move_replace(new_path, os.path.join(page_path, POSTS_PATH, new_md_name))
move_replace(files_folder_path, os.path.join(page_path, ASSETS_PATH, files_folder))
print('Done')
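# Usage sketch (not part of the original script): it expects the notebook path and the
# Jekyll site root as arguments, e.g.
#   python nb2page.py notebooks/my_post.ipynb ~/my-jekyll-site
# assuming `jupyter nbconvert --to markdown notebooks/my_post.ipynb` has already produced
# my_post.md and my_post_files/ next to the notebook (the paths above are placeholders).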
| 32.929412
| 113
| 0.645588
|
c59f808c6715c704e036bb3390f56e1011e095c9
| 545
|
py
|
Python
|
app/__init__.py
|
OmegaM/story
|
421c457bc121d6ca418468199108da7bd9f96435
|
[
"BSD-3-Clause"
] | null | null | null |
app/__init__.py
|
OmegaM/story
|
421c457bc121d6ca418468199108da7bd9f96435
|
[
"BSD-3-Clause"
] | null | null | null |
app/__init__.py
|
OmegaM/story
|
421c457bc121d6ca418468199108da7bd9f96435
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Created with Pycharm IDEA
@Create on 2015/9/12 16:30
@my_story __init__.py
@author : OmegaMiao"""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from config import config
loginManager = LoginManager()
loginManager.session_protection = 'strong'
app = Flask(__name__)
app.config.from_object(config['dev'])
db = SQLAlchemy(app)
loginManager.init_app(app)
from controller import app
from models import Author, Story, Category
| 18.793103
| 43
| 0.765138
|
6037afbb32c94f56c41b4f9d3134ae40bfd05406
| 456
|
py
|
Python
|
Medium/merge.py
|
a-shah8/LeetCode
|
a654e478f51b2254f7b49055beba6b5675bc5223
|
[
"MIT"
] | 1
|
2021-06-02T15:03:41.000Z
|
2021-06-02T15:03:41.000Z
|
Medium/merge.py
|
a-shah8/LeetCode
|
a654e478f51b2254f7b49055beba6b5675bc5223
|
[
"MIT"
] | null | null | null |
Medium/merge.py
|
a-shah8/LeetCode
|
a654e478f51b2254f7b49055beba6b5675bc5223
|
[
"MIT"
] | null | null | null |
## Sort first
from typing import List  # required for the List[...] annotations below
class Solution:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
intervals.sort(key=lambda x: x[0])
merged = []
for interval in intervals:
if not merged or merged[-1][1] < interval[0]:
merged.append(interval)
else:
merged[-1][1] = max(merged[-1][1], interval[1])
return merged
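# Usage sketch (not in the original solution file): the class targets the LeetCode
# harness, but it can also be exercised directly.
if __name__ == '__main__':
    print(Solution().merge([[1, 3], [2, 6], [8, 10], [15, 18]]))  # [[1, 6], [8, 10], [15, 18]]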
| 25.333333
| 67
| 0.464912
|
72c9ebd15d8a8f99f977783108eb41c203a61579
| 6,002
|
py
|
Python
|
short_lived_tokens/endec/engine.py
|
FriedBotStudio/short_lived_tokens
|
dd823cfd81ae6e211f9281826bb367a8fcb6fd5a
|
[
"MIT"
] | null | null | null |
short_lived_tokens/endec/engine.py
|
FriedBotStudio/short_lived_tokens
|
dd823cfd81ae6e211f9281826bb367a8fcb6fd5a
|
[
"MIT"
] | null | null | null |
short_lived_tokens/endec/engine.py
|
FriedBotStudio/short_lived_tokens
|
dd823cfd81ae6e211f9281826bb367a8fcb6fd5a
|
[
"MIT"
] | null | null | null |
import abc
from short_lived_tokens.endec.time_utils import get_timestamp_ms, in_range
import zlib
from abc import ABCMeta
import base64
from typing import Tuple
class EndecEngine(metaclass=ABCMeta):
"""Abstract Class `EndecEngine` can be used to define custom Encryption and Decryption Engine as per this Endec Specifications.
`Endec` is a portmanteau.
It works with a very specific Encypted Token Structure out of the box. Although the structure can be overriden.
Out of the box, a valid token is bytes array and has following structure:
`<payload> <sep_token> <unix_timestamp> <sep_token> <crc32>`
Any token with this structure is a valid token for EndecEngine.
Although you can override the `sleeve`, `unsleeve`, and `validate` methods to
define your own token structure and validation method. (Author doesn't recommended it though).
"""
@property
def token_life_ms(self) -> int:
return self._token_life_ms
def __init__(self, token_life_ms: int = 1000, key=None, sep_token: bytes = "::".encode(), is_private: bool = False) -> None:
"""constructor function initializes the `endec engine`
Args:
token_life_ms (int, optional): Lifetime of Token in milliseconds after which is considered invalid. Defaults to 1 seconds (1000 ms)
key (str|bytes, optional): PEM file in bytes or file path for it. Defaults to None
sep_token (bytes, optional): Seperation Token for payload structure. Defaults to '::' utf-8 bytes
is_private (bool, optional): Mark if Engine has Private Key loaded. Defaults to False
"""
self.sep_token = sep_token
self._token_life_ms = token_life_ms
self.is_private = is_private
self.key = None
if key:
if isinstance(key, bytes):
self.key = self.set_key(key, is_private)
elif isinstance(key, str):
self.key = self.load_key(key, is_private)
super().__init__()
@abc.abstractmethod
def decrypt(self, encrypted_token: bytes) -> bytes:
"""`decrypt` decrypts a encrypted token
Args:
encrypted_token (bytes): encrypted token as bytes
Returns:
bytes: Decrypted plain text token as bytes
"""
return None
@abc.abstractmethod
def encrypt(self, token: bytes) -> bytes:
"""`encrypt` encrypts a plain text token into a Endec Specified encrypted token
Args:
token (bytes): Plain text token as bytes
Returns:
bytes: Encrypted token as bytes
"""
return None
@abc.abstractmethod
def generate_keypair(self, key_length: int = 2048) -> Tuple[bytes, bytes]:
"""`generate_keypair` generates a public/private keypair with specified `key_length` and exports PEM file according to RFC1421_/RFC1423
Args:
key_length (int, optional): Keylength. Defaults to 2048 bits.
Returns:
Tuple[bytes, bytes]: Export Generated (PublicKey, PrivateKey) pair as PEM files. Text encoding, done according to RFC1421_/RFC1423
"""
return None
@abc.abstractmethod
def load_key(self, pemfile_path_abs: str, set_priv=False) -> None:
"""`load_key` loads a RSA Key from file path
Args:
pemfile_path_abs (str): Absolute File Path
passphrase (str, optional): Passphrase for PEM file
set_priv (bool, optional): Set True if you're reading from a Private PEM file. Defaults to False.
Raises:
FileNotFoundError
When File Path is incorrect
ValueError/IndexError/TypeError
When given key cannot be parsed or if passphrase is wrong
"""
return None
def save_key(self, pemfile_path: str, key: bytes = None):
if self.key is None and key is None:
raise Exception("Key is not set or provied")
with open(pemfile_path, 'wb') as fp:
fp.write(self.key) if key is None else fp.write(key)
def set_key(self, key: bytes, set_priv=False) -> None:
self.key = key
if set_priv:
self.is_private = True
def sleeve(self, payload: str) -> bytes:
"""`sleeve` method takes in a plain text payload as string and generates a token as per Endec Specification.
Returns:
bytes: Endec Specified Token
"""
payload_bytes = payload.encode()
timestamp_ms = get_timestamp_ms()
ts_bytes = timestamp_ms.to_bytes(8, 'big')
crc = zlib.crc32(payload_bytes + ts_bytes).to_bytes(8, 'big')
return payload_bytes + self.sep_token + ts_bytes + self.sep_token + crc
def unsleeve(self, encrypted_token: str) -> Tuple[bytes, int, int]:
"""`unsleeve` method takes in a Base64 Encoded Endec Specified token
Args:
encrypted_token (bytes): [description]
Returns:
bytes: [description]
"""
b64_decoded_token = base64.b64decode(encrypted_token)
decrypted_token = self.decrypt(b64_decoded_token)
payload, timestamp_ms, crc = tuple(
decrypted_token.split(self.sep_token))
return payload, int.from_bytes(timestamp_ms, 'big'), int.from_bytes(crc, 'big')
def validate(self, encrypted_token: str) -> bool:
"""`validate` validates an encrypted token and checks based on UNIX timestamp and returns a boolean value
Args:
encrypted_token (str): Base64 Encoded Encrypted Token. See Endec for Token Structure specification
Returns:
bool: validity state of token
"""
payload, timestamp_ms, crc = self.unsleeve(encrypted_token)
ts_bytes = timestamp_ms.to_bytes(8, 'big')
computed_crc = zlib.crc32(payload + ts_bytes)
if crc == computed_crc:
return in_range(timestamp_ms, deadline=self.token_life_ms)
return False
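# Minimal usage sketch (not part of the original module): a no-op engine that satisfies
# the abstract interface so sleeve/unsleeve/validate can be exercised. The class name and
# the pass-through "encryption" are illustrative assumptions only, not a real implementation.
class PlainEndecEngine(EndecEngine):
    def encrypt(self, token: bytes) -> bytes:
        return token  # no real encryption, demonstration only
    def decrypt(self, encrypted_token: bytes) -> bytes:
        return encrypted_token
    def generate_keypair(self, key_length: int = 2048) -> Tuple[bytes, bytes]:
        return b'', b''
    def load_key(self, pemfile_path_abs: str, set_priv=False) -> None:
        return None
if __name__ == '__main__':
    engine = PlainEndecEngine(token_life_ms=5000)
    token = base64.b64encode(engine.encrypt(engine.sleeve('user-42')))
    print(engine.validate(token))  # True while the token is younger than 5 seconds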
| 35.94012
| 143
| 0.646618
|
cb0b3559e2cfa105cb9f44beafec5d3319868764
| 704
|
py
|
Python
|
snapshotServer/migrations/0005_auto_20200408_1537.py
|
bhecquet/seleniumRobot-server
|
b5930a21a25d63f2071dd57a55855b62808800d1
|
[
"Apache-2.0"
] | null | null | null |
snapshotServer/migrations/0005_auto_20200408_1537.py
|
bhecquet/seleniumRobot-server
|
b5930a21a25d63f2071dd57a55855b62808800d1
|
[
"Apache-2.0"
] | 95
|
2017-05-04T09:00:52.000Z
|
2022-03-11T23:19:20.000Z
|
snapshotServer/migrations/0005_auto_20200408_1537.py
|
bhecquet/seleniumRobot-server
|
b5930a21a25d63f2071dd57a55855b62808800d1
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-04-08 13:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('snapshotServer', '0004_auto_20200320_1059'),
]
operations = [
migrations.AddField(
model_name='snapshot',
name='computed',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='snapshot',
name='refSnapshot',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='snapshotServer.Snapshot'),
),
]
| 28.16
| 140
| 0.609375
|
e4365a83b2780a111ca1f22c463048c0fd75c8f0
| 673
|
py
|
Python
|
Lintcode/Ladder_59_A1806/1481. Unique Substring.py
|
ctc316/algorithm-python
|
ac4580d55e05e93e407c6156c9bb801808027d60
|
[
"MIT"
] | null | null | null |
Lintcode/Ladder_59_A1806/1481. Unique Substring.py
|
ctc316/algorithm-python
|
ac4580d55e05e93e407c6156c9bb801808027d60
|
[
"MIT"
] | null | null | null |
Lintcode/Ladder_59_A1806/1481. Unique Substring.py
|
ctc316/algorithm-python
|
ac4580d55e05e93e407c6156c9bb801808027d60
|
[
"MIT"
] | null | null | null |
# Version 1: hash set, Time: O(n * k + m log m), where m is the number of unique substrings (sorting dominates)
class Solution:
"""
@param s: a string
@param k: an integer
@return: all unique substring
"""
def uniqueSubstring(self, s, k):
records = set()
for i in range(len(s) - k + 1):
word = s[i: i + k]
if word in records:
continue
records.add(word)
return sorted(list(records))
# Version 2: Short
class Solution:
"""
@param s: a string
@param k: an integer
@return: all unique substring
"""
def uniqueSubstring(self, s, k):
return sorted(list(set(s[i:i + k] for i in range(len(s) - k + 1))))
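# Usage sketch (not in the original file): both versions return the same result.
if __name__ == '__main__':
    print(Solution().uniqueSubstring("caaab", 2))  # ['aa', 'ab', 'ca']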
| 24.035714
| 75
| 0.534918
|
246778ce6a68256846f394047a07cd94ffea56c8
| 771
|
py
|
Python
|
programmers/Lv3/12.py
|
JeongHoLim/practice
|
5f8914ba42b2ae01e0a00e92a7af9fcf63c8b7c2
|
[
"MIT"
] | 1
|
2022-01-16T19:57:28.000Z
|
2022-01-16T19:57:28.000Z
|
programmers/Lv3/12.py
|
JeongHoLim/practice
|
5f8914ba42b2ae01e0a00e92a7af9fcf63c8b7c2
|
[
"MIT"
] | null | null | null |
programmers/Lv3/12.py
|
JeongHoLim/practice
|
5f8914ba42b2ae01e0a00e92a7af9fcf63c8b7c2
|
[
"MIT"
] | null | null | null |
# https://programmers.co.kr/learn/courses/30/lessons/42628?language=python3
import heapq
def solution(operations):
min_heap = []
max_heap = []
for op in operations:
o1,o2 = op.split(" ")
if o1 == "I":
heapq.heappush(min_heap,int(o2))
heapq.heappush(max_heap,-int(o2))
elif min_heap:
if o2 =="1":
pop = heapq.heappop(max_heap)
min_heap.remove(-pop)
heapq.heapify(min_heap)
else:
pop = heapq.heappop(min_heap)
max_heap.remove(-pop)
heapq.heapify(max_heap)
if not min_heap:
return [0,0]
else:
return [-heapq.heappop(max_heap),heapq.heappop(min_heap)]
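# Usage sketch (not in the original file): operations follow the double-ended priority
# queue format from the linked problem ("I n" inserts n, "D 1" pops the max, "D -1" pops the min).
if __name__ == '__main__':
    print(solution(["I 16", "I -5643", "D -1", "D 1", "D 1", "I 123", "D -1"]))  # [0, 0]
    print(solution(["I -45", "I 653", "D 1", "I -642", "I 45", "I 97", "D 1", "D -1", "I 333"]))  # [333, -45]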
| 28.555556
| 75
| 0.526589
|
7c63464420f071408bff99dc9995594087b726c7
| 3,978
|
py
|
Python
|
thsr_voice_reminder/app_settings.py
|
sc420/thsr-voice-reminder
|
1112d02905cfcc5738b8da32eb34e72fb31b5f61
|
[
"MIT"
] | null | null | null |
thsr_voice_reminder/app_settings.py
|
sc420/thsr-voice-reminder
|
1112d02905cfcc5738b8da32eb34e72fb31b5f61
|
[
"MIT"
] | null | null | null |
thsr_voice_reminder/app_settings.py
|
sc420/thsr-voice-reminder
|
1112d02905cfcc5738b8da32eb34e72fb31b5f61
|
[
"MIT"
] | null | null | null |
import yaml
from thsr_voice_reminder.base import Base
class AppSettings(Base):
def __init__(self, args):
super().__init__(self, args)
self._init_settings_state()
def has_settings_changed(self):
return self._has_settings_changed
def iterate_schedule_items(self):
for schedule_item in self._schedule_items:
yield schedule_item
def get_alert_sound(self):
alert = self._settings.get('alert', {})
return alert.get('sound', None)
def load(self):
with open(self._args.settings, 'r', encoding='utf-8') as stream:
try:
self._settings = yaml.safe_load(stream)
except yaml.YAMLError:
self._logger.exception('Unable to read the settings file')
raise
self._update_settings_change()
self._build_schedule_items()
def _init_settings_state(self):
self._last_settings = None
self._has_settings_changed = False
def _update_settings_change(self):
self._has_settings_changed = (self._settings != self._last_settings)
if self._has_settings_changed:
self._logger.info('The settings have been changed')
self._last_settings = self._settings
def _build_schedule_items(self):
obj_list = self._settings.get('schedule', [])
self._schedule_items = [ScheduleItem(index, obj)
for index, obj in enumerate(obj_list)]
def __eq__(self, other):
if other is None:
return False
return self._schedule_items == other._schedule_items
class ScheduleItem:
def __init__(self, index, obj):
self._index = index
self._obj = obj
self._build_reminders()
def get_index(self):
return self._index
def iterate_reminders(self):
for reminder in self._reminders:
yield reminder
def get_orig_dest(self):
return (self._obj['orig'], self._obj['dest'])
def get_time(self):
return self._obj['time']
def get_occasion_target(self):
return (self._obj['target']['where'], self._obj['target']['when'])
def is_enabled(self):
return self._obj['enabled']
def get_active_weekday(self):
return self._obj['active_weekday']
def get_reminders(self):
return self._reminders
def _build_reminders(self):
obj_list = self._obj.get('reminders', [])
self._reminders = [Reminder(index, obj)
for index, obj in enumerate(obj_list)]
def __eq__(self, other):
if other is None:
return False
return self._obj == other._obj
def __str__(self):
return 'obj={}, reminders={}'.format(self._obj, self._reminders)
class Reminder:
def __init__(self, index, obj):
self._index = index
self._obj = obj
def get_index(self):
return self._index
def get_remind_time_range(self, target_time):
first_before_min = self._obj['before_min']
last_before_min = self._obj.get('last_before_min', 0)
first_remind_time = target_time - first_before_min
last_remind_time = target_time - last_before_min
return (first_remind_time, last_remind_time)
def get_repeat(self):
return self._obj.get('repeat', 1)
def get_sound_before(self):
return self._obj.get('sound_before', None)
def get_formatted_voice_message(self, m):
voice = self._obj.get('voice', {})
message = voice.get('message', None)
if message is None:
return None
else:
return message.format_map(m)
def get_voice_lang(self):
voice = self._obj.get('voice', {})
return voice.get('lang', None)
def __eq__(self, other):
if other is None:
return False
return self._obj == other._obj
def __str__(self):
return 'obj={}'.format(self._obj)
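# Usage sketch (not part of the original file): the accessors above imply a settings layout
# roughly like the dict below. Field names come from the getters; the concrete values and
# the exact YAML shipped with the project are assumptions.
if __name__ == '__main__':
    sample_schedule = [{
        'orig': 'Taipei', 'dest': 'Taichung', 'time': '08:30',
        'target': {'where': 'Taipei', 'when': '08:30'},
        'enabled': True, 'active_weekday': [0, 1, 2, 3, 4],
        'reminders': [{'before_min': 30, 'repeat': 2,
                       'voice': {'message': 'Train to {dest} at {time}', 'lang': 'en'}}],
    }]
    items = [ScheduleItem(i, obj) for i, obj in enumerate(sample_schedule)]
    for item in items:
        print(item.get_orig_dest(), item.get_time(),
              [r.get_repeat() for r in item.iterate_reminders()])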
| 28.212766
| 76
| 0.621418
|
4b1fea97a53d399f6ba16e116d9bd0ab87b1007e
| 724
|
py
|
Python
|
Notakto/Result.py
|
oalieno/Notakto
|
e32d4a8cc2e9a91eadd30d3a0a972f1dd67666bb
|
[
"MIT"
] | 5
|
2018-01-02T13:29:02.000Z
|
2019-12-30T04:10:36.000Z
|
Notakto/Result.py
|
OAlienO/Notakto
|
e32d4a8cc2e9a91eadd30d3a0a972f1dd67666bb
|
[
"MIT"
] | 1
|
2021-10-30T12:42:50.000Z
|
2021-10-30T12:42:50.000Z
|
Notakto/Result.py
|
OAlienO/Notakto
|
e32d4a8cc2e9a91eadd30d3a0a972f1dd67666bb
|
[
"MIT"
] | null | null | null |
from .constants import BOARD_TEMPLATE
class Result:
def __init__(self, status, index, x, y):
self.status = status
self.index = index
self.x = x
self.y = y
@property
def move(self):
return (self.index, self.x, self.y)
def __repr__(self):
return "Result({}, {}, {}, {})".format(self.status, self.index, self.x, self.y)
def __str__(self):
ans = ("Current Monoid: {}\n"
"Winning Position? {}\n"
"Last move was at the {} board\n").format(self.status, self.status.is_win(), self.index)
stones = [' '] * 9
stones[self.x * 3 + self.y] = 'X'
ans += BOARD_TEMPLATE.format(*stones)
return ans
| 28.96
| 103
| 0.542818
|
93ba97d9afb5355b5ffa7dae9f866a1aabdb2f9c
| 783
|
py
|
Python
|
Sample03_schematic_io.py
|
CMA2401PT/BDXWorkShop
|
b42e8d72a6e19ed7d3ee12c3c2469472cc62c690
|
[
"MIT"
] | null | null | null |
Sample03_schematic_io.py
|
CMA2401PT/BDXWorkShop
|
b42e8d72a6e19ed7d3ee12c3c2469472cc62c690
|
[
"MIT"
] | null | null | null |
Sample03_schematic_io.py
|
CMA2401PT/BDXWorkShop
|
b42e8d72a6e19ed7d3ee12c3c2469472cc62c690
|
[
"MIT"
] | null | null | null |
import os
from canvas import IR
from canvas import irio
input_schematic_file = 'data/城市书店1.schematic'
ir_in_bdx = 'data/silo.bdx'
output_bdx_file = 'output/sample03/output.bdx'
output_schematic_file = 'output/sample03/output_schematic.schematic'
output_bdx_file_converted_from_bdx = 'output/sample03/from_bdx.schematic'
os.makedirs('output/sample03', exist_ok=True)
# First, demonstrate loading a schematic and saving it as bdx
ir = irio.create_ir_from_schematic(input_schematic_file, ignore=['air'])
irio.dump_ir_to_bdx(ir, out_bdx_file=output_bdx_file, author='2401PT')
# The loaded data (ir) can also be saved back as a schematic
irio.dump_ir_to_schematic(ir, output_schematic_file)
# Of course, a bdx file can also be loaded as ir first and then saved as a schematic
ir = irio.create_ir_from_bdx(ir_in_bdx, need_verify=True)
irio.dump_ir_to_schematic(ir, output_bdx_file_converted_from_bdx)
| 35.590909
| 73
| 0.83014
|
94cd12460a7094869c210b2b361c84840922d435
| 2,933
|
py
|
Python
|
src/video_processor.py
|
artemlunev2000/winter-road-detection
|
5ad20a404e1dd2940a22bbd7e8b82425d8b1a782
|
[
"Apache-2.0"
] | null | null | null |
src/video_processor.py
|
artemlunev2000/winter-road-detection
|
5ad20a404e1dd2940a22bbd7e8b82425d8b1a782
|
[
"Apache-2.0"
] | null | null | null |
src/video_processor.py
|
artemlunev2000/winter-road-detection
|
5ad20a404e1dd2940a22bbd7e8b82425d8b1a782
|
[
"Apache-2.0"
] | null | null | null |
from cv2 import waitKey, destroyAllWindows, line, LINE_8, bitwise_and, cvtColor, COLOR_BGR2GRAY, \
threshold, THRESH_BINARY, dilate, imshow, VideoCapture
import numpy as np
from src.frame_processor import process_frame
from src.frame_preprocessor import preprocess_frame
def detect_hood_ending(images):
result = bitwise_and(images[0], images[1])
for image in images[2:]:
result = bitwise_and(result, image)
threshold(result, 70, 255, THRESH_BINARY, result)
kernel = np.ones((7, 7), np.uint8)
result = dilate(result, kernel)
current_white_pixels = 0
for height in range(result.shape[0] - 1, 0, -1):
if result[height][int(result.shape[1]/2)] == 0:
current_white_pixels = 0
else:
current_white_pixels += 1
if current_white_pixels == 10:
return result.shape[0] - height
return 0
def process_video(path):
cap = VideoCapture(path)
frame_counter = 0
x1 = x2 = None
possible_hood_area_images = []
needed_hood_area_images = 40
hood_ending = None
while cap.isOpened():
frame_counter += 1
        ret, frame = cap.read()
        # bail out before touching the frame if the capture has ended
        if not ret:
            print("Can't receive frame. Exiting.")
            break
        # in case banner in frame
        # frame = frame[39:frame.shape[0], 0:frame.shape[1]]
        if hood_ending is None:
            if len(possible_hood_area_images) < needed_hood_area_images:
                possible_hood_area_images.append(
                    cvtColor(
                        frame[int(frame.shape[0]*2/3):frame.shape[0], 0:frame.shape[1]],
                        COLOR_BGR2GRAY
                    )
                )
            else:
                hood_ending = frame.shape[0] - detect_hood_ending(possible_hood_area_images) - 20
if frame_counter % 10 == 0:
markers_image, found_labels = preprocess_frame(frame)
frame, x1, x2 = process_frame(frame, hood_ending, markers_image, found_labels)
imshow('frame', frame)
frame_counter = 0
elif x1 is not None and x2 is not None:
frame = line(
frame,
(0, int(x1[0])),
(400, int(x1[0] + x1[1] * 400)),
(100, 100, 200),
5,
LINE_8
)
frame = line(
frame,
(frame.shape[1], int(x2[0] + x2[1] * frame.shape[1])),
(frame.shape[1] - 400, int(x2[0] + x2[1] * (frame.shape[1] - 400))),
(100, 100, 200),
5,
LINE_8
)
imshow('frame', frame)
else:
imshow('frame', frame)
if waitKey(1) == ord('q'):
break
waitKey(0)
destroyAllWindows()
cap.release()
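# Usage sketch (not in the original module); the video path is a placeholder.
if __name__ == '__main__':
    process_video('data/winter_drive.mp4')  # press 'q' in the preview window to stop early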
| 31.537634
| 99
| 0.522332
|
e906c70feb4946ad6b0b9447591cf55da06e75b4
| 940
|
py
|
Python
|
010_functions/calculator.py
|
rafael-torraca/python-100-days-of-code
|
3a5b3e32c5a3fd66a4fd726d378e0d2f746a3f30
|
[
"MIT"
] | null | null | null |
010_functions/calculator.py
|
rafael-torraca/python-100-days-of-code
|
3a5b3e32c5a3fd66a4fd726d378e0d2f746a3f30
|
[
"MIT"
] | null | null | null |
010_functions/calculator.py
|
rafael-torraca/python-100-days-of-code
|
3a5b3e32c5a3fd66a4fd726d378e0d2f746a3f30
|
[
"MIT"
] | null | null | null |
from os import system
from art import logo
def add(n1, n2):
return n1 + n2
def subtract(n1, n2):
return n1 - n2
def multiply(n1, n2):
return n1 * n2
def divide(n1, n2):
return n1 / n2
operations = {
"+": add,
"-": subtract,
"*": multiply,
"/": divide
}
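# Extension sketch (not in the original lesson file): because the calculator dispatches
# through the `operations` dict, adding an operator only needs a function plus a dict entry.
# The '**' key below is an illustrative choice, not part of the course code.
def power(n1, n2):
    return n1 ** n2
operations["**"] = power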
def calculator():
print(logo)
num1 = float(input("What's the first number?: "))
for symbol in operations:
print(symbol)
should_continue = True
while should_continue:
operation_symbol = input("Pick an operation: ")
num2 = float(input("What's the next number?: "))
calculation_function = operations[operation_symbol]
answer = calculation_function(num1, num2)
print(f"{num1} {operation_symbol} {num2} = {answer}")
if input(f"Type 'y' to continue calculating with {answer}, or type 'n' to start a new calculation: ") == 'y':
num1 = answer
else:
should_continue = False
system("clear || cls")
calculator()
| 21.363636
| 113
| 0.643617
|
1c72380ff608ba818fe91fa149246a01fddb7071
| 9,919
|
py
|
Python
|
getProxy.py
|
solomonxie/autohotkey
|
c77587b5065d0f91cf951175554d2c38cae42f96
|
[
"MIT"
] | null | null | null |
getProxy.py
|
solomonxie/autohotkey
|
c77587b5065d0f91cf951175554d2c38cae42f96
|
[
"MIT"
] | null | null | null |
getProxy.py
|
solomonxie/autohotkey
|
c77587b5065d0f91cf951175554d2c38cae42f96
|
[
"MIT"
] | null | null | null |
# Python2
# coding:utf-8
'''
# Title : proxy
# Author: Solomon Xie
# Usage :
# Notes :
# Update:
'''
# === Required modules ===
import urllib2, urllib, re, os, sys, time, random, datetime, getopt
import requests # third-party
from threading import Thread
from bs4 import BeautifulSoup # third-party
def main():
# print Proxy('192.168.1.1:8080').ieProxy('ProxyOnly')
opts, args = getopt.getopt(sys.argv[1:], 'p:a',['pac=', 'off'])
    # print opts # for debugging
for o,a in opts:
if o == '-p': print Proxy(a).ieProxy('ProxyOnly')
elif o == '-a': print Proxy().ieProxy('PacOnly')
elif o == '--pac': print Proxy(pac=a).ieProxy('PacOnly')
elif o == '--off': print Proxy().ieProxy('Off')
# ProxyPool().update()
# for i in ProxyPool().getProxies(): print Proxy(i).check()
class Proxy():
def __init__(self, uri='', pac=''):
self.pto = 'https' if 'https' in uri else 'http'
self.host = ''.join(re.findall('\d+\.\d+\.\d+\.\d+', uri))
self.port = ''.join(re.findall('[:\s]+(\d+)', uri))
self.adr = (self.host +':'+ self.port) if self.host and self.port else ''
self.proxy = {self.pto: self.adr}
self.uri = self.pto+'://'+self.adr
self.pac = pac if pac else 'https://pac.itzmx.com/abc.pac'
def check(self):
        '''
        Function: checks the proxy IP online or against a local server page (for speed): whether it is usable, consistent, highly anonymous, etc.
        Notes   : fetches this machine's real public IP first and compares it with the result.
                  Returns the ping time on success, 0 or -1 on failure.
        '''
        if not self.adr: return -1
        # Turn off any locally configured proxies before starting verification
        # self.ieProxy('Off')
        # First check: ping
        print 'Pinging %s'%self.host
        icmp = os.popen('ping %s'%self.host).read()
        resu = re.findall( '(\d+)ms[^\d]+(\d+)ms[^\d]+(\d+)ms', icmp )
        # print icmp
        if len(resu) < 1: return -1 # if the host cannot be pinged, abort the remaining checks
        speed = resu[0][-1]
        # Second check: open Google to verify the proxy can get past the GFW
        print 'Connecting web outside GFW by %s'%self.adr
        try: r = requests.get('http://www.ip.cn', headers=getHeader(), proxies=self.proxy, timeout=3)
        except Exception as e: return 0
        os.popen('start chrome https://www.google.com/#newwindow=1&q=%s'%self.adr)
        # Third check: open a site that reports the public IP
        print 'Connecting web inside GFW by %s'%self.adr
        try: html = requests.get('http://www.ip.cn', headers=getHeader(), proxies=self.proxy, timeout=3).text
        except Exception as e: return 0
        print 'Checking anonymity of %s'%self.adr
        # Fourth check: verify whether the proxy is highly anonymous
        resu = ''.join( re.findall('<code>([^<>]+)</code>', html) )
        if self.host not in resu: return 0
        # print 'My IP detected on the Internet is %s.'%resu
print '---OK----%s'%self.uri
return speed
def ieProxy(self, op):
if not op : return 'No assigned operation.'
        # Only apply the proxy to this machine if the address passed the check
if op == 'ProxyOnly':
if not self.check(): return 'Proxy did not change because [%s] failed on test.'%self.uri
def __toHex(obj):
if obj == '': return ''
elif obj == 0 or obj == '0' or obj == '00': return '00'
if isinstance(obj, str):
return ','.join([ str(hex(ord(s))).replace('0x','') for s in obj ])
elif isinstance(obj, int):
num = str(hex(obj)).replace('0x', '')
# 如果是一位数则自动补上0,7为07,e为0e
return num if len(num)>1 else '0'+num
options = {'On':'0F','Off':'01','ProxyOnly':'03','PacOnly':'05','ProxyAndPac':'07','D':'09','DIP':'0B','DS':'0D'}
if op == 'Off': # 关闭所有代理设置
reg_value = '46,00,00,00,00,00,00,00,01'
else: # 根据选项设置代理
switcher = options.get(op)
if not switcher: return '---Unexpected Option.---'
            noLocal = False # leave the "bypass proxy server for local addresses" option unset by default
skipLocal = '07,00,00,00,%s'%__toHex('<local>') if noLocal else '00'
reg_value = '46,00,00,00,00,00,00,00,%(swi)s,00,00,00,%(ipLen)s,00,00,00,%(ip)s00,00,00,%(ski)s,21,00,00,00%(pac)s' % ({
'swi':switcher,
'ipLen':__toHex(len(self.adr)),
'ip':__toHex(self.adr) + ',' if self.adr else '',
'infoLen':__toHex(len('<local>')),
'ski':skipLocal,
'pac':','+__toHex(self.pac) if self.pac else ''
})
            # print options[op] +'\n'+ __toHex(self.adr) +'\n'+ __toHex(self.pac) # for debugging
settings = 'Windows Registry Editor Version 5.00\n[HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Internet Settings\Connections]\n"DefaultConnectionSettings"=hex:%s' % reg_value
        # print settings # for debugging
        # === Generate the .reg file and import it into the registry ===
        filePath = os.environ['TEMP'] + '\DefaultConnectionSettings.reg' # written to the TEMP directory (portable across systems)
with open(filePath, 'w') as f:
f.write( settings )
cmd = 'reg import "%s"' % filePath
if len(os.popen(cmd).readlines()) > 1: return '---Failed on registering reg file.---'
        # Delete the temporary .reg file (not needed here since it already lives in the temp folder)
# if os.path.exists(filePath): os.remove(filePath)
return 'Successfully registered proxy settings.'
class ProxyPool():
def __init__(self):
        self.lines = [] # proxy addresses scraped from the web
        self.uris = [] # proxy addresses that passed verification
        self.online = True # fetch online if True, otherwise parse local HTML files
def getProxies(self):
with open('proxyJungle.txt', 'r') as f:
return [line for line in f.readlines() if Proxy(line).uri]
def extractIP(self, site):
        # Choose between fetching online and reading a local HTML file
        if self.online:
            r = requests.get(site['url'], headers=getHeader(), timeout=3)
            content = r.text
        else:
            with open(site['loc'], 'r') as f: content = f.read()
        # Start extracting proxies from the HTML source
        ptn = site.get('re') if site.get('re') else site.get('ptn')
        if site.get('re'): # generic extraction via regular expression
resu = re.findall(ptn[0], content)
if len(resu) < 1: return
for m in resu:
pto = 'https://' if 'https' in m[ptn[3]].lower() else 'http://'
self.lines.append( pto + m[ptn[1]] +':'+ m[ptn[2]] )
        else: # site-specific extraction
soup = BeautifulSoup(content, 'html5lib')
if ptn[0] == 'goubanjia' or ptn[0] == 'qiaodm':
rows = soup.select(ptn[1])
for ro in rows:
port = ''.join([ ':'+t.get_text() for t in ro.select(ptn[2]) ])
pto = ''.join([ t.get_text() for t in ro.select(ptn[3]) ])
pto = 'https://' if 'https' in pto else 'http://'
chaos = re.findall( ptn[5], str(ro.select(ptn[4])) )
prx = ''.join( [c[2] for c in chaos if c])
mix = pto + prx + port
if mix: self.lines.append(mix)
def update(self):
        # Configure the proxy-list sites to scrape and their regular expressions
sites = []
sites.append({'url': 'http://www.kuaidaili.com/free/outha/',
'loc':'proxyHTML/kuaidaili.com.html',
're': ['<td>(\d+\.\d+\.\d+\.\d+)</td>\s*<td>(\d+)</td>\s*<td>[^<>]*</td>\s*<td>([^<>]*)</td>', 0, 1, 2] })
sites.append({'url': 'http://www.xicidaili.com/nn/',
'loc':'proxyHTML/xicidaili.com.html',
're': ['<td>(\d+\.\d+\.\d+\.\d+)</td>\s*<td>(\d+)</td>\s*<td>[^<>]*</td>\s*<td>[^<>]*</td>\s*<td>([^<>]*)</td>', 0,1,2]})
# sites.append({'url': 'http://www.fengyunip.com/free/china-high.html',
# 'loc':'proxyHTML/fengyunip.com.html',
# 're': ['<td>(\d+\.\d+\.\d+\.\d+)</td>\s*<td>(\d+)</td>\s*<td>[^<>]*</td>\s*<td>[^<>]*</td>\s*<td>([^<>]*)</td>', 0,1,2]})
def __checkIPs(prx):
if Proxy(prx).check() > 0:
self.uris.append(prx)
multiThread(func=self.extractIP, prams=sites)
        print 'Retrieved %d proxies, now start to test each of them.'%len(self.lines)
        # Start validity checks; only keep working proxies
        # uris = [i for i in self.lines if Proxy(i).check() > 0] # single-threaded check
        multiThread(func=__checkIPs, prams=self.lines) # multi-threaded check
        print 'Got %d verified proxies.'%len(self.uris)
with open('proxyJungle.txt', 'w') as f:
f.write('\n'.join(self.uris))
# print self.uris
        print '-----Stored %d proxies for this time.'%len(self.uris)
# def update2(self, func):
#
def getHeader():
    # Pick a random browser identity (user agent)
agents = []
agents.append('Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))') # IE
agents.append('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36') # Chrome
agents.append('Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25') # Safari Mobile
agents.append('Mozilla/5.0 (Linux; U; Android 4.0.3; ko-kr; LG-L160L Build/IML74K) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30') # Android Webkit Browser
agents.append('Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25') # Safari Mobile
# if not ag.has_key('User-Agent'): ag['User-Agent'] = agents[ random.randint(0,len(agents)-1) ]
ag = { 'User-Agent':agents[random.randint(0,len(agents)-1)] }
return ag
def multiThread(func, prams):
threadList = []
for p in prams:
t = Thread(target=func, args=(p,))
t.start()
threadList.append(t)
print 'Now there are %d threads.'%len(threadList)
for sub in threadList:
sub.join()
# ---------------------------------------------------------------------------------
if __name__ == '__main__':
main()
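# Usage sketch (not part of the original script), based on the getopt handling in main();
# the proxy address and PAC URL below are placeholders:
#   python getProxy.py -p 1.2.3.4:8080                 # verify the proxy, then set it as the IE proxy
#   python getProxy.py -a                              # switch IE to the default PAC script
#   python getProxy.py --pac=http://example.com/x.pac  # switch IE to a custom PAC script
#   python getProxy.py --off                           # turn all IE proxy settings off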
| 47.917874
| 199
| 0.52475
|
f7904ee21c6148a588edb6d7483f529f1af85bcb
| 1,365
|
py
|
Python
|
src/scripts/roam_with_joy.py
|
ericchen321/ros_x_habitat
|
f256b62fe8dda059baaf9bad87cf53f7d769f2f9
|
[
"CC-BY-4.0"
] | 24
|
2021-09-10T23:35:53.000Z
|
2022-03-31T18:12:20.000Z
|
src/scripts/roam_with_joy.py
|
ericchen321/ros_x_habitat
|
f256b62fe8dda059baaf9bad87cf53f7d769f2f9
|
[
"CC-BY-4.0"
] | 4
|
2021-12-11T06:56:58.000Z
|
2022-02-23T03:05:00.000Z
|
src/scripts/roam_with_joy.py
|
ericchen321/ros_x_habitat
|
f256b62fe8dda059baaf9bad87cf53f7d769f2f9
|
[
"CC-BY-4.0"
] | 7
|
2021-12-17T14:13:27.000Z
|
2022-03-31T16:39:28.000Z
|
import argparse
from src.roamers.joy_habitat_roamer import JoyHabitatRoamer
def main():
# parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument("--launch-file-path", default="launch/teleop.launch", type=str)
parser.add_argument(
"--hab-env-node-path", default="src/nodes/habitat_env_node.py", type=str
)
parser.add_argument(
"--hab-env-config-path", default="configs/pointnav_rgbd_roam.yaml", type=str
)
parser.add_argument("--hab-env-node-name", default="roamer_env_node", type=str)
parser.add_argument("--episode-id", type=str, default="-1")
parser.add_argument(
"--scene-id",
type=str,
default="data/scene_datasets/habitat-test-scenes/skokloster-castle.glb",
)
parser.add_argument(
"--video-frame-period",
type=int,
default=60,
)
args = parser.parse_args()
# start the roamer nodes
roamer = JoyHabitatRoamer(
launch_file_path=args.launch_file_path,
hab_env_node_path=args.hab_env_node_path,
hab_env_config_path=args.hab_env_config_path,
hab_env_node_name=args.hab_env_node_name,
video_frame_period=args.video_frame_period,
)
# get to the specified episode
roamer.roam_until_shutdown(args.episode_id, args.scene_id)
if __name__ == "__main__":
main()
| 31.022727
| 87
| 0.683516
|
4ecc52a195ed40ad93a0d1fcd935da734316172d
| 1,498
|
py
|
Python
|
rchabro/users/tests/test_views.py
|
Digbigpig/rchabro
|
8c8af557443f81fa8dab91e57829e20f98d2759f
|
[
"MIT"
] | null | null | null |
rchabro/users/tests/test_views.py
|
Digbigpig/rchabro
|
8c8af557443f81fa8dab91e57829e20f98d2759f
|
[
"MIT"
] | null | null | null |
rchabro/users/tests/test_views.py
|
Digbigpig/rchabro
|
8c8af557443f81fa8dab91e57829e20f98d2759f
|
[
"MIT"
] | null | null | null |
import pytest
from django.conf import settings
from django.test import RequestFactory
from rchabro.users.views import UserRedirectView, UserUpdateView
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def test_get_success_url(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.username}/"
def test_get_object(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_object() == user
class TestUserRedirectView:
def test_get_redirect_url(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserRedirectView()
request = request_factory.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.username}/"
| 28.264151
| 77
| 0.6749
|
ec6ede8545dc74f97c00f40055e25a8e80d1cc8b
| 34,562
|
py
|
Python
|
badwords/profanity_filter.py
|
Jeyhun023/python
|
aec0ba266543d52d7cecf49221fe38eafaf50e09
|
[
"MIT"
] | null | null | null |
badwords/profanity_filter.py
|
Jeyhun023/python
|
aec0ba266543d52d7cecf49221fe38eafaf50e09
|
[
"MIT"
] | null | null | null |
badwords/profanity_filter.py
|
Jeyhun023/python
|
aec0ba266543d52d7cecf49221fe38eafaf50e09
|
[
"MIT"
] | null | null | null |
import re
from collections import defaultdict
from contextlib import suppress, contextmanager
from copy import deepcopy
from itertools import chain
from math import floor
from pathlib import Path
from typing import Dict, Union, List, Tuple, Set, Collection, ContextManager, Optional
import poetry_version
import spacy
import spacy.attrs
import spacy.language
import spacy.tokens
from cached_property import cached_property
from more_itertools import substrings_indexes
from ordered_set import OrderedSet
from redis import Redis
from profanity_filter import spacy_utlis
from profanity_filter.config import Config, DEFAULT_CONFIG
from profanity_filter.spacy_component import SpacyProfanityFilterComponent
from profanity_filter.types_ import (Words, Language, ProfaneWordDictionaries, ProfaneWordDictionariesAcceptable,
Languages, LanguagesAcceptable, Nlps, Morphs, Spells, Substrings,
TextSplittedByLanguage, ProfanityFilterError, Word, AnalysisType, AnalysesTypes)
class DummyHunSpell:
def __init__(self, *args):
pass
@staticmethod
def spell(word: str) -> str:
return word
@staticmethod
def stem(word: str) -> List[bytes]:
return [word.encode('utf8')]
@staticmethod
def get_dic_encoding():
return 'utf8'
class DummyMorphAnalyzer:
def __init__(self):
pass
@staticmethod
def parse(word):
class ParseResult:
def __init__(self):
self.normal_form = word
return [ParseResult()]
# Defining variables in case of unavailable analyses
HunSpell = DummyHunSpell
HunSpellError = None
Trie = None
MorphAnalyzer = DummyMorphAnalyzer
_available_analyses_list = []
with suppress(ImportError):
from profanity_filter.analysis.deep import *
_available_analyses_list.append(AnalysisType.DEEP)
with suppress(ImportError):
from profanity_filter.analysis.morphological import *
_available_analyses_list.append(AnalysisType.MORPHOLOGICAL)
with suppress(ImportError):
from profanity_filter.analysis.multilingual import *
_available_analyses_list.append(AnalysisType.MULTILINGUAL)
AVAILABLE_ANALYSES: AnalysesTypes = frozenset(_available_analyses_list)
APP_NAME = 'profanity-filter'
__version__ = poetry_version.extract(source_file=__file__)
class ProfanityFilter:
def __init__(self,
languages: LanguagesAcceptable = tuple(DEFAULT_CONFIG.languages),
*,
analyses: AnalysesTypes = frozenset(DEFAULT_CONFIG.analyses),
cache_redis_connection_url: Optional[str] = None,
censor_char: str = DEFAULT_CONFIG.censor_char,
censor_whole_words: bool = DEFAULT_CONFIG.censor_whole_words,
custom_profane_word_dictionaries: ProfaneWordDictionariesAcceptable = None,
extra_profane_word_dictionaries: ProfaneWordDictionariesAcceptable = None,
max_relative_distance: float = DEFAULT_CONFIG.max_relative_distance,
morphs: Optional[Morphs] = None,
nlps: Optional[Nlps] = None,
spells: Optional[Spells] = None,
):
# Path to data dir
self._BASE_DIR = Path(__file__).absolute().parent
self._DATA_DIR: Path = self._BASE_DIR / 'data'
self._MAX_MAX_DISTANCE = 3
# Set dummy values to satisfy the linter (they will be overwritten in `config`)
self._analyses: AnalysesTypes = frozenset()
self._cache_clearing_disabled: bool = False
self._cache_redis: Optional[Redis] = None
self._cache_redis_connection_url: Optional[str] = None
self._censor_char: str = ''
self._censor_whole_words: bool = False
self._custom_profane_word_dictionaries: ProfaneWordDictionaries = {}
self._extra_profane_word_dictionaries: ProfaneWordDictionaries = {}
self._languages: Languages = OrderedSet()
self._max_relative_distance: float = 0.0
self._morphs: Morphs = {}
self._nlps: Nlps = {}
self._profane_word_dictionary_files: Dict[Language, Path] = {}
self._spells: Spells = {}
# For Levenshtein automata
self._alphabet = set()
self._trie = {}
# Cache of censored words
self._censored_words: Words = {}
# Cache of words with no profanity inside that is generated after censoring
# (include words that are not in the dictionary)
self._words_with_no_profanity_inside: Set[str] = set()
# What to be censored - should not be modified by user
self._censor_dictionaries: ProfaneWordDictionaries = {}
with self._disabled_cache_clearing():
self.config(
languages=languages,
analyses=analyses,
cache_redis_connection_url=cache_redis_connection_url,
censor_char=censor_char,
censor_whole_words=censor_whole_words,
custom_profane_word_dictionaries=custom_profane_word_dictionaries,
extra_profane_word_dictionaries=extra_profane_word_dictionaries,
max_relative_distance=max_relative_distance,
morphs=morphs,
nlps=nlps,
spells=spells,
)
self.clear_cache()
def config(self,
languages: LanguagesAcceptable = tuple(DEFAULT_CONFIG.languages),
*,
analyses: AnalysesTypes = frozenset(DEFAULT_CONFIG.analyses),
cache_redis_connection_url: Optional[str] = DEFAULT_CONFIG.cache_redis_connection_url,
censor_char: str = DEFAULT_CONFIG.censor_char,
censor_whole_words: bool = DEFAULT_CONFIG.censor_whole_words,
custom_profane_word_dictionaries: ProfaneWordDictionariesAcceptable = None,
extra_profane_word_dictionaries: ProfaneWordDictionariesAcceptable = None,
max_relative_distance: float = DEFAULT_CONFIG.max_relative_distance,
morphs: Optional[Morphs] = None,
nlps: Optional[Nlps] = None,
spells: Optional[Spells] = None,
):
self.analyses = analyses
self.cache_redis_connection_url = cache_redis_connection_url
self.censor_char = censor_char
self.censor_whole_words = censor_whole_words
self.custom_profane_word_dictionaries = custom_profane_word_dictionaries
self.extra_profane_word_dictionaries = extra_profane_word_dictionaries
self.max_relative_distance = max_relative_distance
self._set_languages(languages, load_morphs=morphs is None, load_nlps=nlps is None, load_spells=spells is None)
if morphs is not None:
self.morphs = morphs
if nlps is not None:
self.nlps = nlps
if spells is not None:
self.spells = spells
@classmethod
def from_config(cls, config: Config) -> 'ProfanityFilter':
return cls(
languages=config.languages,
analyses=frozenset(config.analyses),
cache_redis_connection_url=config.cache_redis_connection_url,
censor_char=config.censor_char,
censor_whole_words=config.censor_whole_words,
max_relative_distance=config.max_relative_distance,
)
@classmethod
def from_yaml(cls, path: Union[Path, str]) -> 'ProfanityFilter':
return cls.from_config(Config.from_yaml(path))
def censor(self, text: str) -> str:
"""Returns text with any profane words censored"""
return self._censor(text=text, return_bool=False)
def censor_word(self, word: Union[str, spacy.tokens.Token], language: Language = None) -> Word:
"""Returns censored word"""
word = self._make_spacy_token(language=language, word=word)
return self._censor_word(language=language, word=word)
def is_clean(self, text: str) -> bool:
"""Returns True if text doesn't contain any profane words, False otherwise"""
return not self.is_profane(text=text)
def is_profane(self, text: str) -> bool:
"""Returns True if input_text contains any profane words, False otherwise"""
return self._censor(text=text, return_bool=True)
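    # Usage sketch (not part of the original source): with an English spaCy model installed,
    # the public methods above are typically driven like this:
    #   pf = ProfanityFilter(languages=('en',))
    #   pf.censor(text)        # text with profane words masked by censor_char
    #   pf.is_profane(text)    # True if any profane word was found
    #   pf.censor_word(word)   # a Word tuple with uncensored/censored forms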
@cached_property
def spacy_component(self, language: Language = None) -> SpacyProfanityFilterComponent:
nlp = self._get_nlp(language)
[language] = [language for language, nlp_ in self.nlps.items() if nlp_ == nlp]
return SpacyProfanityFilterComponent(profanity_filter=self, nlp=nlp, language=language)
@property
def analyses(self) -> AnalysesTypes:
return self._analyses
@analyses.setter
def analyses(self, value: Collection[AnalysisType]) -> None:
self._analyses = AVAILABLE_ANALYSES.intersection(value)
self.clear_cache()
@property
def cache_redis_connection_url(self) -> Optional[str]:
return self._cache_redis_connection_url
@cache_redis_connection_url.setter
def cache_redis_connection_url(self, value: Optional[str]) -> None:
self._cache_redis_connection_url = value
if value is not None:
self._cache_redis = Redis.from_url(value)
@property
def censor_char(self) -> str:
"""What to censor the words with"""
return self._censor_char
@censor_char.setter
def censor_char(self, value: str) -> None:
"""Replaces the original censor char '*' with value"""
if len(value) != 1:
raise ValueError("Censor char must be str of length 1")
self._censor_char = value
self.clear_cache()
@property
def censor_whole_words(self) -> bool:
return self._censor_whole_words
@censor_whole_words.setter
def censor_whole_words(self, value: bool) -> None:
self._censor_whole_words = value
self.clear_cache()
@property
def custom_profane_word_dictionaries(self) -> ProfaneWordDictionaries:
"""If defined, use this instead of _censor_lists"""
return self._custom_profane_word_dictionaries
@custom_profane_word_dictionaries.setter
def custom_profane_word_dictionaries(self, value: ProfaneWordDictionariesAcceptable) -> None:
if value is None:
value = {}
else:
value = {language: OrderedSet(custom_censor_dictionary)
for language, custom_censor_dictionary in value.items()}
self._custom_profane_word_dictionaries = defaultdict(lambda: OrderedSet(), **value)
self.clear_cache()
@property
def extra_profane_word_dictionaries(self) -> ProfaneWordDictionaries:
"""Words to be used in conjunction with _censor_dictionaries"""
return self._extra_profane_word_dictionaries
@extra_profane_word_dictionaries.setter
def extra_profane_word_dictionaries(self, value: ProfaneWordDictionariesAcceptable) -> None:
if value is None:
value = {}
else:
value = {language: OrderedSet(extra_profane_word_dictionary)
for language, extra_profane_word_dictionary in value.items()}
self._extra_profane_word_dictionaries = defaultdict(lambda: OrderedSet(), **value)
self.clear_cache()
@property
def languages(self) -> Languages:
"""Languages"""
return self._languages
@languages.setter
def languages(self, value: LanguagesAcceptable) -> None:
self._set_languages(value)
@cached_property
def languages_str(self) -> str:
return ', '.join(self.languages)
@property
def max_relative_distance(self) -> float:
"""Max relative distance to profane words"""
return self._max_relative_distance
@max_relative_distance.setter
def max_relative_distance(self, value: float) -> None:
self._max_relative_distance = value
self.clear_cache()
@property
def morphs(self) -> Morphs:
return self._morphs
@morphs.setter
def morphs(self, value: Optional[Morphs]) -> None:
if AnalysisType.MORPHOLOGICAL in self.analyses:
self.clear_cache()
if value is not None:
self._morphs = value
else:
self._morphs = {}
for language in self.languages:
with suppress(ValueError):
self._morphs[language] = MorphAnalyzer(lang=language)
if not self._morphs:
self.analyses -= {AnalysisType.MORPHOLOGICAL}
@property
def nlps(self) -> Nlps:
return self._nlps
@nlps.setter
def nlps(self, value: Optional[Nlps]) -> None:
self.clear_cache()
if value is not None:
self._nlps = value
else:
self._nlps = {}
for language in self.languages:
with suppress(OSError):
self._nlps[language] = spacy.load(language, disable=['parser', 'ner'])
self._nlps[language].add_pipe(self.spacy_component, last=True)
if not self._nlps:
raise ProfanityFilterError(f"Couldn't load Spacy model for any of languages: {self.languages_str}")
@cached_property
def profane_word_dictionaries(self) -> ProfaneWordDictionaries:
"""Gets profane word dictionaries"""
if self.custom_profane_word_dictionaries:
result = deepcopy(self.custom_profane_word_dictionaries)
else:
self._load_profane_word_dictionaries()
result = deepcopy(self._censor_dictionaries)
for language in self.languages.intersection(list(self.extra_profane_word_dictionaries.keys())):
result[language] |= self.extra_profane_word_dictionaries[language]
if AnalysisType.DEEP in self.analyses:
self._trie = {language: Trie(words=result[language], alphabet=self._alphabet)
for language in self.languages}
for length in range(self._MAX_MAX_DISTANCE + 1):
generate_automaton_to_file(length)
return result
@property
def spells(self) -> Spells:
return self._spells
@spells.setter
def spells(self, value: Optional[Spells]) -> None:
if AnalysisType.DEEP in self.analyses:
self.clear_cache()
if value is not None:
self._spells = value
else:
self._spells = {}
for language in self._languages:
with suppress(HunSpellError):
self._spells[language] = HunSpell(self._DATA_DIR / f'{language}.dic',
self._DATA_DIR / f'{language}.aff')
if not self._spells:
self.analyses -= {AnalysisType.DEEP}
def clear_cache(self) -> None:
if self._cache_clearing_disabled:
return
self._update_profane_word_dictionary_files()
self._update_profane_word_dictionaries()
self._clear_words_cache()
def restore_profane_word_dictionaries(self) -> None:
""" Clears all custom censor lists """
self.custom_profane_word_dictionaries = None
self.extra_profane_word_dictionaries = None
@contextmanager
def _disabled_cache_clearing(self) -> ContextManager[None]:
self._cache_clearing_disabled = True
yield
self._cache_clearing_disabled = False
def _clear_words_cache(self):
self._censored_words = {}
self._words_with_no_profanity_inside = set()
if self._cache_redis is not None:
self._cache_redis.flushdb()
def _update_languages_str(self) -> None:
if self._cache_clearing_disabled:
return
with suppress(KeyError):
del self.__dict__['languages_str']
_ = self.languages_str
def _set_languages(self, value: LanguagesAcceptable, load_morphs: bool = True, load_nlps: bool = True,
load_spells: bool = True) -> None:
self._languages = OrderedSet(value)
self._update_languages_str()
if load_morphs:
self.morphs = None
if load_nlps:
self.nlps = None
if load_spells:
self.spells = None
self.clear_cache()
def _update_profane_word_dictionary_files(self):
# Paths to profane word dictionaries
self._profane_word_dictionary_files = {}
for language in self.languages:
profane_word_file = self._DATA_DIR / f'{language}_profane_words.txt'
if profane_word_file.is_file():
self._profane_word_dictionary_files[language] = profane_word_file
if not self._profane_word_dictionary_files:
raise ProfanityFilterError(f"Couldn't load profane words for any of languages: {self.languages_str}")
def _update_profane_word_dictionaries(self) -> None:
if self._cache_clearing_disabled:
return
with suppress(KeyError):
del self.__dict__['profane_word_dictionaries']
_ = self.profane_word_dictionaries
def _load_profane_word_dictionaries(self) -> None:
"""Loads the dictionaries of profane words from files"""
self._update_profane_word_dictionary_files()
self._censor_dictionaries = defaultdict(lambda: OrderedSet())
for language, words_file in self._profane_word_dictionary_files.items():
with open(str(words_file)) as f:
self._censor_dictionaries[language] = OrderedSet(line.strip() for line in f.readlines())
def _get_max_distance(self, length: int) -> float:
return min(self._MAX_MAX_DISTANCE, floor(self.max_relative_distance * length))
def _make_spacy_token(self, language: Language, word: str) -> spacy.tokens.Token:
return spacy_utlis.make_token(nlp=self._get_nlp(language), word=word)
def _drop_fully_censored_words(self, substrings: Substrings) -> Substrings:
return ((word, start, finish)
for word, start, finish in substrings
if not all(char == self.censor_char for char in word))
@staticmethod
def _drop_substrings(substrings: Substrings) -> Substrings:
drop_intervals = set()
for word, start, finish in substrings:
if all(start < drop_start or finish > drop_finish for drop_start, drop_finish in drop_intervals):
result = (word, start, finish)
drop = yield result
drop_start, drop_finish = drop
if drop_start is not None and drop_finish is not None:
drop_intervals.add((drop_start, drop_finish))
def _generate_fully_censored_word(self, word: Union[str, spacy.tokens.Token]) -> str:
with suppress(AttributeError):
word = word.text
return len(word) * self.censor_char
def _generate_partly_censored_word(self, word: Union[str, spacy.tokens.Token], profane_word: str) -> str:
def is_delete_or_insert(opcode):
return opcode[0] in ('delete', 'insert')
# noinspection PyShadowingNames
def find_word_part(word: str, word_part: str) -> str:
word_to_word_part_opcodes = Levenshtein.opcodes(word, word_part)
word_part_in_word_start = (
word_to_word_part_opcodes[0][2] if is_delete_or_insert(word_to_word_part_opcodes[0]) else 0)
word_part_in_word_finish = (
word_to_word_part_opcodes[-1][1] if is_delete_or_insert(word_to_word_part_opcodes[-1]) else len(word))
return word[word_part_in_word_start:word_part_in_word_finish]
with suppress(AttributeError):
word = word.text
word_part_for_censoring = find_word_part(word.lower(), profane_word)
return regex.sub(pattern=re.escape(word_part_for_censoring),
repl=self._generate_fully_censored_word(word=word_part_for_censoring),
string=word,
flags=regex.IGNORECASE)
def _get_nlp(self, language: Language) -> spacy.language.Language:
# noinspection PyTypeChecker
languages = OrderedSet([language]) | self.languages
for nlp_language in languages:
with suppress(KeyError):
return self.nlps[nlp_language]
def _parse(self,
language: Language,
text: str,
use_profanity_filter: bool = True) -> spacy.tokens.Doc:
nlp = self._get_nlp(language)
return spacy_utlis.parse(nlp=nlp, text=text, language=language, use_profanity_filter=use_profanity_filter)
def _get_spells(self, language: Language) -> 'OrderedSet[HunSpell]':
result = OrderedSet([DummyHunSpell()])
if AnalysisType.DEEP not in self.analyses:
return result
if language is None:
return OrderedSet(self.spells.values())
# noinspection PyTypeChecker
languages = OrderedSet([language]) | self.languages
for language in languages:
with suppress(KeyError):
result = OrderedSet([self.spells[language]])
break
return result
def _stems(self, language: Language, word: str) -> 'OrderedSet[str]':
spells = self._get_spells(language=language)
try:
return OrderedSet([stem_bytes.decode(spell.get_dic_encoding())
for spell in spells for stem_bytes in spell.stem(word)])
except UnicodeEncodeError:
return OrderedSet()
def _normal_forms(self, language: Language, word: str) -> 'OrderedSet[str]':
morph = DummyMorphAnalyzer
if AnalysisType.MORPHOLOGICAL in self.analyses:
# noinspection PyTypeChecker
languages = OrderedSet([language]) | self.languages
for language in languages:
with suppress(KeyError):
morph = self.morphs[language]
break
return OrderedSet([morph.parse(word=word)[0].normal_form])
def _lemmas(self, language: Language, word: Union[str, spacy.tokens.Token]) -> 'OrderedSet[str]':
result = OrderedSet()
if not word:
return result
word = self._make_spacy_token(language=language, word=word)
spacy_lemma = word.lemma_
result.add(word.text)
spacy_lemma = spacy_lemma.lower() if spacy_lemma != '-PRON-' else word.lower_
result.add(spacy_lemma)
result |= self._stems(language=language, word=word.text)
result |= self._normal_forms(language=language, word=word.text)
return result
def _is_dictionary_word(self, language: Language, word: str) -> bool:
try:
return any(spell.spell(word) for spell in self._get_spells(language=language))
except UnicodeEncodeError:
return False
def _keep_only_letters_or_dictionary_word(self, language: Language, word: Union[str, spacy.tokens.Token]) -> str:
with suppress(AttributeError):
word = word.text
if language is None:
language = self.languages[0]
if AnalysisType.DEEP in self.analyses and self._is_dictionary_word(language=language, word=word):
return word
else:
return ''.join(regex.findall(r'\p{letter}', word))
def _get_words_with_no_profanity_inside(self) -> Set[str]:
if self._cache_redis is None:
return self._words_with_no_profanity_inside
else:
return {word.decode('utf8') for word in self._cache_redis.smembers('_words_with_no_profanity_inside')}
def _has_no_profanity(self, words: Collection[str]) -> bool:
return any(word in word_with_no_profanity_inside
for word in words
for word_with_no_profanity_inside in self._get_words_with_no_profanity_inside())
def _get_trie(self, language: Language) -> Trie:
result = None
# noinspection PyTypeChecker
languages = OrderedSet([language]) | self.languages
for language in languages:
with suppress(KeyError):
result = self._trie[language]
break
return result
def _is_profane_word(self, language: Language, word: str) -> bool:
profane_word_dictionaries = (self.profane_word_dictionaries.values()
if language is None else
[self.profane_word_dictionaries[language]])
return any(word in profane_word_dictionary for profane_word_dictionary in profane_word_dictionaries)
def _get_censored_word(self, word: spacy.tokens.Token) -> Optional[Word]:
if self._cache_redis is None:
return self._censored_words.get(word.text)
else:
d = self._cache_redis.hgetall(word.text)
if not d:
return None
uncensored, censored, original_profane_word = d[b'uncensored'], d[b'censored'], d[b'original_profane_word']
if not original_profane_word:
original_profane_word = None
return Word(uncensored=uncensored, censored=censored, original_profane_word=original_profane_word)
def _save_censored_word(self, word: Word) -> None:
if self._cache_redis is None:
self._censored_words[word.uncensored] = word
else:
d = dict(word)
if not word.original_profane_word:
d['original_profane_word'] = ''
self._cache_redis.hmset(word.uncensored, d)
def _censor_word_part(self, language: Language, word: spacy.tokens.Token) -> Tuple[Word, bool]:
"""
:return: Tuple of censored word and flag of no profanity inside
"""
lemmas = self._lemmas(word=word, language=language)
if AnalysisType.DEEP in self.analyses:
lemmas_only_letters = OrderedSet([
self._keep_only_letters_or_dictionary_word(language=language, word=lemma) for lemma in lemmas])
if lemmas_only_letters != lemmas:
lemmas_only_letters = [
*chain(*(self._lemmas(word=lemma, language=language) for lemma in lemmas_only_letters))]
lemmas.update(lemmas_only_letters)
if self._has_no_profanity(lemmas):
return Word(uncensored=word.text, censored=word.text), True
censored_word = self._get_censored_word(word)
if censored_word is not None:
return censored_word, False
for lemma in lemmas:
if self._is_profane_word(language=language, word=lemma):
if self.censor_whole_words:
censored = self._generate_fully_censored_word(word=word)
else:
censored = self._generate_partly_censored_word(word=word, profane_word=lemma)
censored_word = Word(uncensored=word.text, censored=censored, original_profane_word=lemma)
self._save_censored_word(censored_word)
return censored_word, False
if AnalysisType.DEEP in self.analyses:
for lemma in lemmas:
if self._is_dictionary_word(language=language, word=lemma):
return Word(uncensored=word.text, censored=word.text), True
automaton = LevenshteinAutomaton(tolerance=self._get_max_distance(len(lemma)),
query_word=lemma,
alphabet=self._alphabet)
matching_bad_words = trie_automaton_intersection(automaton=automaton,
trie=self._get_trie(language=language),
include_error=False)
if matching_bad_words:
bad_word = matching_bad_words[0]
if self.censor_whole_words:
censored = self._generate_fully_censored_word(word=word)
else:
censored = self._generate_partly_censored_word(word=word, profane_word=bad_word)
censored_word = Word(uncensored=word.text, censored=censored, original_profane_word=bad_word)
self._save_censored_word(censored_word)
return censored_word, False
return Word(uncensored=word.text, censored=word.text), False
def _save_word_with_no_profanity_inside(self, word: spacy.tokens.Token) -> None:
if self._cache_redis is None:
self._words_with_no_profanity_inside.add(word.text)
else:
self._cache_redis.sadd('_words_with_no_profanity_inside', word.text)
def _censor_word(self, language: Language, word: spacy.tokens.Token) -> Word:
"""Returns censored word"""
censored_word_prev = None
censored_word = Word(uncensored=word.text, censored=word.text)
while censored_word != censored_word_prev:
censored_word_prev = censored_word
substrings = (
self._drop_substrings(
self._drop_fully_censored_words(
substrings_indexes(censored_word_prev.censored, reverse=True)
)
)
)
no_profanity_start, no_profanity_finish = None, None
try:
substring = next(substrings)
censored_part, start, finish = substring
except StopIteration:
break
while True:
try:
censored_part = self._make_spacy_token(language=language, word=censored_part)
censored_censored_part, no_profanity_inside = self._censor_word_part(language=language,
word=censored_part)
if no_profanity_inside:
no_profanity_start, no_profanity_finish = start, finish
if censored_censored_part.censored != censored_part.text:
if self.censor_whole_words:
censored = self._generate_fully_censored_word(word=word)
else:
censored = censored_word_prev.censored.replace(
censored_part.text, censored_censored_part.censored)
censored_word = Word(
uncensored=word.text,
censored=censored,
original_profane_word=censored_censored_part.original_profane_word,
)
# Stop after first iteration (with word part equal word) when deep analysis is disabled
# Also stop if word was partly censored
if AnalysisType.DEEP not in self.analyses or (censored_word != censored_word_prev):
break
censored_part, start, finish = substrings.send((no_profanity_start, no_profanity_finish))
except StopIteration:
break
if censored_word.censored == word.text:
if AnalysisType.DEEP in self.analyses and not self._is_dictionary_word(language=language, word=word.text):
self._save_word_with_no_profanity_inside(word)
else:
self._save_censored_word(censored_word)
return censored_word
def _detect_languages(self, text: str) -> Languages:
fallback_language = self.languages[0]
fallback_result = OrderedSet([fallback_language])
if AnalysisType.MULTILINGUAL in self.analyses:
polyglot_output = polyglot.detect.Detector(text, quiet=True)
result = OrderedSet([language.code for language in polyglot_output.languages if language.code != 'un'])
if not result:
result = fallback_result
else:
result = fallback_result
result = result.intersection(self.languages)
return result
@staticmethod
def _merge_by_language(parts: TextSplittedByLanguage) -> TextSplittedByLanguage:
result = []
language = parts[0][0]
merged = parts[0][1]
i = 1
while i < len(parts):
if parts[i][0] != language:
result.append((language, merged))
language = parts[i][0]
merged = parts[i][1]
else:
merged += parts[i][1]
i += 1
result.append((language, merged))
return result
def _split_by_language(self, text: str) -> TextSplittedByLanguage:
languages = self._detect_languages(text=text)
tokens = re.split(r'(\W)', text)
if len(languages) == 0:
return [(None, text)]
elif len(languages) == 1 or len(tokens) <= 1:
# noinspection PyTypeChecker
return [(languages[0], text)]
else:
middle_index = len(tokens) // 2
left_text, right_text, = ''.join(tokens[:middle_index]), ''.join(tokens[middle_index:])
left = self._split_by_language(text=left_text)
right = self._split_by_language(text=right_text)
return ProfanityFilter._merge_by_language(left + right)
@staticmethod
def _replace_token(text: str, old: spacy.tokens.Token, new: str) -> str:
return text[:old.idx] + new + text[old.idx + len(old.text):]
# noinspection PyProtectedMember
def _censor(self, text: str, return_bool=False) -> Union[str, bool]:
""":return: text with any profane words censored or bool (True - text has profane words, False otherwise) if
return_bool=True"""
result = ''
text_parts = self._split_by_language(text=text)
for language, text_part in text_parts:
result_part = text_part
doc = self._parse(language=language, text=text_part)
for token in doc:
if token._.is_profane:
if return_bool:
return True
else:
result_part = self._replace_token(text=result_part, old=token, new=token._.censored)
result += result_part
if return_bool:
return False
else:
return result
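# A minimal, self-contained sketch (not part of the original module) of the "deep
# analysis" idea used in _censor_word_part above: a lemma counts as profane if it
# lies within an edit-distance tolerance of a known profane word. The real code
# intersects a Levenshtein automaton with a trie; this illustration uses a plain
# dynamic-programming edit distance over a word list, purely for clarity.
def _edit_distance_sketch(a: str, b: str) -> int:
    # classic Levenshtein distance via dynamic programming
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        cur = [i]
        for j, cb in enumerate(b, start=1):
            cur.append(min(prev[j] + 1,                    # deletion
                           cur[j - 1] + 1,                 # insertion
                           prev[j - 1] + (ca != cb)))      # substitution
        prev = cur
    return prev[-1]

def _fuzzy_profane_match_sketch(lemma: str, profane_words, tolerance: int):
    """Return the first profane word within `tolerance` edits of `lemma`, else None."""
    for bad_word in profane_words:
        if _edit_distance_sketch(lemma.lower(), bad_word.lower()) <= tolerance:
            return bad_word
    return None

# e.g. _fuzzy_profane_match_sketch('he11o', ['hello'], tolerance=2) -> 'hello'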
| 42.827757
| 119
| 0.634714
|
d44b6639550bc06702135a7f8addef958bc84ac6
| 3,534
|
py
|
Python
|
src/cone/app/browser/utils.py
|
lenadax/cone.app
|
b25c55aedb85e45a962003d2767a22a927cc61c0
|
[
"BSD-3-Clause"
] | null | null | null |
src/cone/app/browser/utils.py
|
lenadax/cone.app
|
b25c55aedb85e45a962003d2767a22a927cc61c0
|
[
"BSD-3-Clause"
] | null | null | null |
src/cone/app/browser/utils.py
|
lenadax/cone.app
|
b25c55aedb85e45a962003d2767a22a927cc61c0
|
[
"BSD-3-Clause"
] | null | null | null |
from cone.app import compat
from cone.app.utils import app_config
from cone.app.utils import format_traceback as _format_traceback
from cone.app.utils import safe_encode
from cone.app.utils import safe_decode
from pyramid.i18n import TranslationStringFactory
import copy
import datetime
import re
_ = TranslationStringFactory('cone.app')
# B/C. use ``authenticated_userid`` directly.
def authenticated(request):
return request.authenticated_userid
def node_path(node):
# XXX: implement in ``BaseNode``.
return [safe_decode(p) for p in node.path if p is not None]
# B/C, removed as of cone.app 1.1
nodepath = node_path
# default query parameters to quote
QUOTE_PARAMS = ('came_from',)
def make_query(quote_params=QUOTE_PARAMS, **kw):
query = list()
for name, param in sorted(kw.items()):
if param is None:
continue
if isinstance(param, compat.STR_TYPE):
param = [param]
if type(param) in compat.NUMBER_TYPES:
param = [str(param)]
quote = name in quote_params
for p in param:
p = safe_encode(p) if compat.IS_PY2 else p
query.append('{}={}'.format(name, compat.quote(p) if quote else p))
query = '&'.join(query)
if query:
return '?{}'.format(query)
def make_url(request, path=None, node=None, resource=None, query=None):
# if path=[] in signature, path gets aggregated in recursive calls ???
# happens on icon lookup in navtree.
# ^^^ that is because the [] (a list, mutable) is generated at compile
# time. mutable values should not be in function signatures to avoid this.
if path is None:
path = []
else:
path = copy.copy(path)
if node is not None:
path = node_path(node)
if resource is not None:
path.append(resource)
path = [compat.quote(safe_encode(it)) for it in path]
url = '{}/{}'.format(request.application_url, '/'.join(path))
if not query:
return url
return '{}{}'.format(url, query)
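# A minimal usage sketch (not part of the original module). ``DummyRequest`` is a
# hypothetical stand-in providing only the ``application_url`` attribute read by
# ``make_url``; query parameters are sorted by name and ``came_from`` values are
# URL-quoted by default.
class DummyRequest(object):
    application_url = 'http://example.com'

_query = make_query(foo='bar', count=2)                       # '?count=2&foo=bar'
_url = make_url(DummyRequest(), path=['a', 'b'], query=_query)
# _url == 'http://example.com/a/b?count=2&foo=bar'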
def choose_name(container, name):
name = re.sub(
r'-{2,}', '-',
re.sub(r'^\w-|-\w-|-\w$', '-',
re.sub(r'\W', '-', name.strip()))).strip('-').lower()
n = name
i = 0
while n in container:
i += 1
n = u'{}-{}'.format(name, i)
return n.replace('/', '-').lstrip('+@')
def format_date(dt, long=True):
if not isinstance(dt, datetime.datetime):
return _('unknown', default='Unknown')
return long and dt.strftime('%d.%m.%Y %H:%M') or dt.strftime('%d.%m.%Y')
def node_icon(node):
if node.properties.icon:
return node.properties.icon
info = node.nodeinfo
if not info.icon:
return app_config().default_node_icon
return info.icon
def request_property(func):
"""Decorator like ``property``, but underlying function is only called once
per request.
Cache attribute on request.environ under key
``instanceid.classname.funcname``.
Works only on instances providing a request attribute.
"""
def wrapper(self):
cache_key = '{}.{}.{}'.format(
str(id(self)),
self.__class__.__name__,
func.__name__
)
try:
return self.request.environ[cache_key]
except KeyError:
val = self.request.environ[cache_key] = func(self)
return val
wrapper.__doc__ = func.__doc__
return property(wrapper)
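# A minimal usage sketch (not part of the original module). ``_DummyRequest`` is a
# hypothetical stand-in; any object exposing an ``environ`` dict works. The wrapped
# function runs once and the result is then served from ``request.environ``.
class _DummyRequest(object):
    def __init__(self):
        self.environ = {}

class _ExampleView(object):
    calls = 0

    def __init__(self, request):
        self.request = request

    @request_property
    def expensive_value(self):
        _ExampleView.calls += 1
        return 42

_view = _ExampleView(_DummyRequest())
assert _view.expensive_value == 42
assert _view.expensive_value == 42
assert _ExampleView.calls == 1          # underlying function ran only once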
def format_traceback():
return '<pre>{}</pre>'.format(_format_traceback())
| 28.272
| 79
| 0.624222
|
0deac2c1cb47006d1e5e2a3c51d681581edd71bc
| 245
|
py
|
Python
|
Python/Python (Basic) Skills Certification Test/average_function.py
|
paurav11/HackerRank
|
80c91c5cc55dd56671a5906be7a106ad4f1db95e
|
[
"MIT"
] | 1
|
2021-05-19T06:44:03.000Z
|
2021-05-19T06:44:03.000Z
|
Python/Python (Basic) Skills Certification Test/average_function.py
|
paurav11/HackerRank
|
80c91c5cc55dd56671a5906be7a106ad4f1db95e
|
[
"MIT"
] | null | null | null |
Python/Python (Basic) Skills Certification Test/average_function.py
|
paurav11/HackerRank
|
80c91c5cc55dd56671a5906be7a106ad4f1db95e
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
def avg(*nums):
return sum(nums)/len(nums)
if __name__ == '__main__':
nums = list(map(int, input().split()))
res = avg(*nums)
print('%.2f' % res + '\n')
| 14.411765
| 42
| 0.612245
|
4584d84f19f96559416e608dde6bbfa3740afe3a
| 12,804
|
py
|
Python
|
notebooks/py/ISS.py
|
haoxusci/starfish
|
d7bd856024c75f2ce41504406f2a663566c3814b
|
[
"MIT"
] | null | null | null |
notebooks/py/ISS.py
|
haoxusci/starfish
|
d7bd856024c75f2ce41504406f2a663566c3814b
|
[
"MIT"
] | null | null | null |
notebooks/py/ISS.py
|
haoxusci/starfish
|
d7bd856024c75f2ce41504406f2a663566c3814b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
#
# EPY: stripped_notebook: {"metadata": {"hide_input": false, "kernelspec": {"display_name": "starfish", "language": "python", "name": "starfish"}, "language_info": {"codemirror_mode": {"name": "ipython", "version": 3}, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.5"}, "toc": {"nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "toc_cell": false, "toc_position": {}, "toc_section_display": "block", "toc_window_display": false}}, "nbformat": 4, "nbformat_minor": 2}
# EPY: START markdown
### Reproduce In-situ Sequencing results with Starfish
#
#In Situ Sequencing (ISS) is an image-based transcriptomics technique that can spatially resolve hundreds of RNA species and their expression levels in situ. The protocol and data analysis are described in this [publication](https://www.ncbi.nlm.nih.gov/pubmed/23852452). This notebook walks through how to use Starfish to process the raw images from an ISS experiment into a spatially resolved cell-by-gene expression matrix. We verify that Starfish can accurately reproduce the results from the authors' original [pipeline](https://cellprofiler.org/previous_examples/#sequencing-rna-molecules-in-situ-combining-cellprofiler-with-imagej-plugins)
#
#Please see [documentation](https://spacetx-starfish.readthedocs.io/en/stable/) for detailed descriptions of all the data structures and methods used here.
# EPY: END markdown
# EPY: START code
# EPY: ESCAPE %matplotlib inline
import numpy as np
import os
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import pprint
from starfish import data, FieldOfView
from starfish.types import Axes, Features, FunctionSource
from starfish.util.plot import imshow_plane
# EPY: END code
# EPY: START code
matplotlib.rcParams["figure.dpi"] = 150
# EPY: END code
# EPY: START markdown
### Load Data into Starfish from the Cloud
#
#The primary data from one field of view correspond to 16 images from 4 hybridization rounds (r), 4 color channels (c), and one z plane (z). Each image is 1044 x 1390 (y, x). These data arise from human breast tissue. O(10) transcripts are barcoded for subsequent spatial resolution. Average pixel intensity values for one 'spot' in the image, across all rounds and channels, can be decoded into the nearest barcode, thus resolving each pixel into a particular gene.
# EPY: END markdown
# EPY: START code
use_test_data = os.getenv("USE_TEST_DATA") is not None
# An experiment contains a codebook, primary images, and auxiliary images
experiment = data.ISS(use_test_data=use_test_data)
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(experiment._src_doc)
# EPY: END code
# EPY: START code
fov = experiment.fov()
# note the structure of the 5D tensor containing the raw imaging data
imgs = fov.get_image(FieldOfView.PRIMARY_IMAGES)
print(imgs)
# EPY: END code
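# Illustrative sketch (not a cell from the original notebook): the primary images
# form a 5D tensor indexed as (round, channel, zplane, y, x). A plain numpy
# stand-in, scaled down from the 1044 x 1390 tiles described above and filled with
# random values, makes the max projections used later easy to picture.
import numpy as np

fake_stack = np.random.rand(4, 4, 1, 128, 128).astype(np.float32)  # (r, c, z, y, x)
dots_like = fake_stack.max(axis=(0, 1, 2))      # max over rounds/channels/z -> (y, x)
per_round = fake_stack.max(axis=(1, 2))         # per-round channel/z projection -> (r, y, x)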
# EPY: START markdown
### Visualize Codebook
#
# The ISS codebook maps each barcode to a gene. This protocol asserts that genes are encoded with
# a length-4 quaternary barcode that can be read out from the images. Each round encodes a position in the codeword.
# The maximum signal in each color channel (columns in the above image) corresponds to a letter in the codeword.
# The channels, in order, correspond to the letters: 'T', 'G', 'C', 'A'.
# EPY: END markdown
# EPY: START code
experiment.codebook
# EPY: END code
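# Illustrative sketch (not a cell from the original notebook) of the per-round
# max-channel decoding described above: in each round the brightest channel
# contributes one letter of the barcode. The mini codebook here is hypothetical;
# the real mapping lives in ``experiment.codebook``.
import numpy as np

channel_letters = ['T', 'G', 'C', 'A']                    # channel order quoted above
mini_codebook = {'GTCA': 'GENE_X', 'TTAC': 'GENE_Y'}      # made-up barcodes

spot_intensities = np.array([                             # (n_rounds, n_channels)
    [0.1, 0.9, 0.2, 0.1],                                 # round 0 -> 'G'
    [0.8, 0.1, 0.1, 0.2],                                 # round 1 -> 'T'
    [0.1, 0.2, 0.7, 0.1],                                 # round 2 -> 'C'
    [0.0, 0.1, 0.2, 0.9],                                 # round 3 -> 'A'
])
barcode = ''.join(channel_letters[c] for c in spot_intensities.argmax(axis=1))
decoded_gene = mini_codebook.get(barcode)                 # 'GENE_X'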
# EPY: START markdown
### Visualize raw data
#
#A nice way to page through all this data is to use the display command. We have commented this out for now, because it will not render in Github. Instead, we simply show an image from the first round and color channel.
# EPY: END markdown
# EPY: START code
# # Display all the data in an interactive pop-up window. Uncomment to have this version work.
# %gui qt5
# display(imgs)
# Display a single plane of data
sel={Axes.ROUND: 0, Axes.CH: 0, Axes.ZPLANE: 0}
single_plane = imgs.sel(sel)
imshow_plane(single_plane, title="Round: 0, Channel: 0")
# EPY: END code
# EPY: START markdown
#'dots' is a general stain for all possible transcripts. This image should correspond to the maximum projection of all color channels within a single imaging round. This auxiliary image is useful for registering images from multiple imaging rounds to this reference image. We'll see an example of this further on in the notebook
# EPY: END markdown
# EPY: START code
from starfish.image import Filter
dots = fov.get_image("dots")
dots_single_plane = dots.reduce({Axes.ROUND, Axes.CH, Axes.ZPLANE}, func="max")
imshow_plane(dots_single_plane, title="Anchor channel, all RNA molecules")
# EPY: END code
# EPY: START markdown
#Below is a DAPI image, which specifically marks nuclei. This is useful for cell segmentation later on in the processing.
# EPY: END markdown
# EPY: START code
nuclei = fov.get_image("nuclei")
nuclei_single_plane = nuclei.reduce({Axes.ROUND, Axes.CH, Axes.ZPLANE}, func="max")
imshow_plane(nuclei_single_plane, title="Nuclei (DAPI) channel")
# EPY: END code
# EPY: START markdown
### Filter raw data before decoding into spatially resolved gene expression
#
#A White-Tophat filter can be used to enhance spots while minimizing background autofluorescence. The ```masking_radius``` parameter specifies the expected radius, in pixels, of each spot.
# EPY: END markdown
# EPY: START code
# filter raw data
masking_radius = 15
filt = Filter.WhiteTophat(masking_radius, is_volume=False)
filtered_imgs = filt.run(imgs, verbose=True, in_place=False)
filt.run(dots, verbose=True, in_place=True)
filt.run(nuclei, verbose=True, in_place=True)
# EPY: END code
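# Illustrative sketch (not a cell from the original notebook): the same white
# top-hat idea with plain scikit-image on a synthetic image. A top-hat with a disk
# footprint of radius ``masking_radius`` keeps structures smaller than the disk
# (the spots) and suppresses the smooth background.
import numpy as np
from skimage.morphology import white_tophat, disk

background = np.linspace(0, 0.5, 128)[None, :] * np.ones((128, 128))  # smooth gradient
synthetic = background.copy()
synthetic[60:63, 60:63] += 1.0                            # one small bright "spot"
enhanced = white_tophat(synthetic, disk(masking_radius))  # background removed, spot kept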
# EPY: START code
single_plane_filtered = filtered_imgs.sel(sel)
f, (ax1, ax2) = plt.subplots(ncols=2)
vmin, vmax = np.percentile(single_plane.xarray.values.data, [5, 99])
imshow_plane(
single_plane, ax=ax1, vmin=vmin, vmax=vmax,
title="Original data\nRound: 0, Channel: 0"
)
vmin, vmax = np.percentile(single_plane_filtered.xarray.values.data, [5, 99])
imshow_plane(
single_plane_filtered, ax=ax2, vmin=vmin, vmax=vmax,
title="Filtered data\nRound: 0, Channel: 0"
)
# EPY: END code
# EPY: START markdown
### Register data
# EPY: END markdown
# EPY: START markdown
#Images may have shifted between imaging rounds. This needs to be corrected for before decoding, since this shift in the images will corrupt the barcodes, thus hindering decoding accuracy. A simple procedure can correct for this shift. For each imaging round, the max projection across color channels should look like the dots stain. Below, we simply shift all images in each round to match the dots stain by learning the shift that maximizes the cross-correlation between the images and the dots stain.
# EPY: END markdown
# EPY: START code
from starfish.image import ApplyTransform, LearnTransform
learn_translation = LearnTransform.Translation(reference_stack=dots, axes=Axes.ROUND, upsampling=1000)
transforms_list = learn_translation.run(imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max"))
warp = ApplyTransform.Warp()
registered_imgs = warp.run(filtered_imgs, transforms_list=transforms_list, in_place=False, verbose=True)
# EPY: END code
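# Illustrative sketch (not a cell from the original notebook): translation
# registration by maximizing cross-correlation, using plain scikit-image/scipy
# instead of LearnTransform/ApplyTransform. The arrays are synthetic stand-ins.
import numpy as np
from scipy.ndimage import shift as nd_shift
from skimage.registration import phase_cross_correlation

reference = np.zeros((64, 64))
reference[30:34, 30:34] = 1.0
moving = nd_shift(reference, (3, -2))             # simulate a (3, -2) pixel drift

estimated_shift, _error, _phase = phase_cross_correlation(reference, moving,
                                                          upsample_factor=100)
registered = nd_shift(moving, estimated_shift)    # estimated_shift ~= (-3, 2)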
# EPY: START markdown
### Decode the processed data into spatially resolved gene expression profiles
#
#To decode, first we find spots and record, for each spot, the average pixel intensities across rounds and channels. This spot detection can be achieved with the ```BlobDetector``` algorithm
# EPY: END markdown
# EPY: START code
import warnings
from starfish.spots import FindSpots, DecodeSpots
bd = FindSpots.BlobDetector(
min_sigma=1,
max_sigma=10,
num_sigma=30,
threshold=0.01,
measurement_type='mean',
)
dots_max = dots.reduce((Axes.ROUND, Axes.ZPLANE), func=FunctionSource.np("max"))
spots = bd.run(image_stack=registered_imgs, reference_image=dots_max)
decoder = DecodeSpots.PerRoundMaxChannel(codebook=experiment.codebook)
decoded = decoder.run(spots=spots)
# Besides housekeeping genes, VIM and HER2 should be the most highly expressed, which is consistent here.
genes, counts = np.unique(decoded.loc[decoded[Features.PASSES_THRESHOLDS]][Features.TARGET], return_counts=True)
table = pd.Series(counts, index=genes).sort_values(ascending=False)
table
# EPY: END code
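# Illustrative sketch (not a cell from the original notebook): Laplacian-of-Gaussian
# blob detection with scikit-image, the same family of algorithm that BlobDetector
# wraps. Parameters mirror the call above; the image is a synthetic stand-in.
import numpy as np
from skimage.feature import blob_log

toy_image = np.zeros((128, 128))
toy_image[40:44, 40:44] = 1.0                     # a bright spot
toy_image[90:93, 20:23] = 0.8                     # a dimmer spot

blobs = blob_log(toy_image, min_sigma=1, max_sigma=10, num_sigma=30, threshold=0.01)
# each row is (y, x, sigma); sigma * sqrt(2) approximates the spot radius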
# EPY: START markdown
### Segment Cells and create Cell by Gene Expression Matrix
#
#After calling spots and decoding their gene information, cells must be segmented to assign genes to cells. This paper used a seeded watershed approach to segment the cells, which we also use here.
# EPY: END markdown
# EPY: START code
from starfish.morphology import Binarize, Filter, Merge, Segment
from starfish.types import Levels
dapi_thresh = .18 # binary mask for cell (nuclear) locations
stain_thresh = .22 # binary mask for overall cells // binarization of stain
min_dist = 57
min_allowed_size = 10
max_allowed_size = 10000
mp = registered_imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max")
stain = mp.reduce(
{Axes.ROUND},
func="mean",
level_method=Levels.SCALE_BY_IMAGE)
nuclei_mp_scaled = nuclei.reduce(
{Axes.ROUND, Axes.CH, Axes.ZPLANE},
func="max",
level_method=Levels.SCALE_BY_IMAGE)
binarized_nuclei = Binarize.ThresholdBinarize(dapi_thresh).run(nuclei_mp_scaled)
labeled_masks = Filter.MinDistanceLabel(min_dist, 1).run(binarized_nuclei)
watershed_markers = Filter.AreaFilter(min_area=min_allowed_size, max_area=max_allowed_size).run(labeled_masks)
thresholded_stain = Binarize.ThresholdBinarize(stain_thresh).run(stain)
markers_and_stain = Merge.SimpleMerge().run([thresholded_stain, watershed_markers])
watershed_mask = Filter.Reduce(
"logical_or",
lambda shape: np.zeros(shape=shape, dtype=np.bool)
).run(markers_and_stain)
segmenter = Segment.WatershedSegment(connectivity=np.ones((1, 3, 3), dtype=np.bool))
masks = segmenter.run(
stain,
watershed_markers,
watershed_mask,
)
import matplotlib.pyplot as plt
from showit import image
plt.figure(figsize=(10, 10))
plt.subplot(321)
nuclei_numpy = nuclei_mp_scaled._squeezed_numpy(Axes.ROUND, Axes.CH, Axes.ZPLANE)
image(nuclei_numpy, ax=plt.gca(), size=20, bar=True)
plt.title('Nuclei')
plt.subplot(322)
image(
stain._squeezed_numpy(Axes.ROUND, Axes.CH, Axes.ZPLANE),
ax=plt.gca(), size=20, bar=True)
plt.title('Stain')
plt.subplot(323)
image(
binarized_nuclei.uncropped_mask(0).squeeze(Axes.ZPLANE.value).values,
bar=False,
ax=plt.gca(),
)
plt.title('Nuclei Thresholded')
plt.subplot(324)
image(
watershed_mask.to_label_image().xarray.squeeze(Axes.ZPLANE.value).values,
bar=False,
ax=plt.gca(),
)
plt.title('Watershed Mask')
plt.subplot(325)
image(
watershed_markers.to_label_image().xarray.squeeze(Axes.ZPLANE.value).values,
size=20,
cmap=plt.cm.nipy_spectral,
ax=plt.gca(),
)
plt.title('Found: {} cells'.format(len(watershed_markers)))
plt.subplot(326)
image(
masks.to_label_image().xarray.squeeze(Axes.ZPLANE.value).values,
size=20,
cmap=plt.cm.nipy_spectral,
ax=plt.gca(),
)
plt.title('Segmented Cells')
plt
# EPY: END code
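# Illustrative sketch (not a cell from the original notebook): the seeded-watershed
# idea with plain scipy/scikit-image -- threshold the nuclei to obtain seeds, then
# grow them over the cell-stain mask. All arrays are synthetic stand-ins.
import numpy as np
from scipy import ndimage as ndi
from skimage.segmentation import watershed as sk_watershed

toy_stain_mask = np.zeros((64, 64), dtype=bool)
toy_stain_mask[10:50, 10:50] = True               # footprint of the "cells"
toy_nuclei_mask = np.zeros_like(toy_stain_mask)
toy_nuclei_mask[18:22, 18:22] = True              # seed 1
toy_nuclei_mask[38:42, 38:42] = True              # seed 2

toy_markers, _ = ndi.label(toy_nuclei_mask)       # labelled seeds
toy_distance = ndi.distance_transform_edt(toy_stain_mask)
toy_labels = sk_watershed(-toy_distance, toy_markers, mask=toy_stain_mask)  # two "cells"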
# EPY: START markdown
#Now that cells have been segmented, we can assign spots to cells in order to create a cell x gene count matrix
# EPY: END markdown
# EPY: START code
from starfish.spots import AssignTargets
from starfish import ExpressionMatrix
al = AssignTargets.Label()
labeled = al.run(masks, decoded)
cg = labeled.to_expression_matrix()
cg
# EPY: END code
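# Illustrative sketch (not a cell from the original notebook): assigning decoded
# spots to segmented cells by looking up each spot's (y, x) position in the label
# image, then counting genes per cell. The data are made up; the real pipeline uses
# AssignTargets.Label and to_expression_matrix() as above.
import numpy as np
import pandas as pd

toy_label_image = np.zeros((100, 100), dtype=int)
toy_label_image[10:50, 10:50] = 1                 # cell 1
toy_label_image[60:90, 60:90] = 2                 # cell 2

spot_table = pd.DataFrame({
    'y': [20, 30, 70, 80, 5],
    'x': [20, 40, 70, 85, 5],
    'target': ['HER2', 'VIM', 'VIM', 'HER2', 'VIM'],
})
spot_table['cell_id'] = toy_label_image[spot_table['y'], spot_table['x']]
toy_cell_by_gene = pd.crosstab(spot_table['cell_id'], spot_table['target'])
# rows: cell ids (0 = background), columns: genes, values: counts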
# EPY: START markdown
### Compare to results from paper
# EPY: END markdown
# EPY: START markdown
#This FOV was selected to make sure that we can visualize the tumor/stroma boundary; below, this boundary is shown by pseudo-coloring HER2 (tumor) and vimentin (VIM, stroma). This distribution matches the one described in the original paper.
# EPY: END markdown
# EPY: START code
from skimage.color import rgb2gray
GENE1 = 'HER2'
GENE2 = 'VIM'
rgb = np.zeros(registered_imgs.tile_shape + (3,))
nuclei_numpy = nuclei.reduce({Axes.ROUND, Axes.CH, Axes.ZPLANE}, func="max")._squeezed_numpy(Axes.ROUND, Axes.CH, Axes.ZPLANE)
rgb[:,:,0] = nuclei_numpy
dots_numpy = dots.reduce({Axes.ROUND, Axes.CH, Axes.ZPLANE}, func="max")._squeezed_numpy(Axes.ROUND, Axes.CH, Axes.ZPLANE)
rgb[:,:,1] = dots_numpy
do = rgb2gray(rgb)
do = do/(do.max())
plt.imshow(do,cmap='gray')
plt.axis('off');
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
is_gene1 = decoded.where(decoded[Features.AXIS][Features.TARGET] == GENE1, drop=True)
is_gene2 = decoded.where(decoded[Features.AXIS][Features.TARGET] == GENE2, drop=True)
plt.plot(is_gene1.x, is_gene1.y, 'or', markersize=3)
plt.plot(is_gene2.x, is_gene2.y, 'ob', markersize=3)
plt.title(f'Red: {GENE1}, Blue: {GENE2}');
# EPY: END code
| 38.682779
| 643
| 0.757888
|
a933188e69d4d39029422f9d527706adcbd78f9f
| 361
|
py
|
Python
|
sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/__init__.py
|
conniey/azure-sdk-for-python
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/__init__.py
|
conniey/azure-sdk-for-python
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
[
"MIT"
] | null | null | null |
sdk/schemaregistry/azure-schemaregistry/azure/schemaregistry/__init__.py
|
conniey/azure-sdk-for-python
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# ------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from ._version import VERSION
__version__ = VERSION
| 40.111111
| 75
| 0.459834
|
4b5ee738464976d35f4bd59dcc56491308ce5fa9
| 22,723
|
py
|
Python
|
services/director-v2/tests/unit/with_dbs/test_modules_comp_scheduler_dask_scheduler.py
|
mguidon/osparc-simcore
|
77e64777728f20a5b21362372aefa0e0db5072cd
|
[
"MIT"
] | null | null | null |
services/director-v2/tests/unit/with_dbs/test_modules_comp_scheduler_dask_scheduler.py
|
mguidon/osparc-simcore
|
77e64777728f20a5b21362372aefa0e0db5072cd
|
[
"MIT"
] | 29
|
2018-11-13T09:39:29.000Z
|
2022-03-22T10:11:32.000Z
|
services/director-v2/tests/unit/with_dbs/test_modules_comp_scheduler_dask_scheduler.py
|
mguidon/osparc-simcore
|
77e64777728f20a5b21362372aefa0e0db5072cd
|
[
"MIT"
] | null | null | null |
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
# pylint:disable=no-value-for-parameter
# pylint:disable=protected-access
# pylint:disable=too-many-arguments
# pylint:disable=no-name-in-module
from typing import Any, Callable, Dict, Iterator, cast
from unittest import mock
import aiopg
import pytest
from _helpers import PublishedProject # type: ignore
from _helpers import assert_comp_run_state # type: ignore
from _helpers import assert_comp_tasks_state # type: ignore
from _helpers import manually_run_comp_scheduler # type: ignore
from _helpers import set_comp_task_state # type: ignore
from _pytest.monkeypatch import MonkeyPatch
from dask.distributed import LocalCluster, SpecCluster
from dask_task_models_library.container_tasks.events import TaskStateEvent
from dask_task_models_library.container_tasks.io import TaskOutputData
from fastapi.applications import FastAPI
from models_library.projects import ProjectAtDB
from models_library.projects_state import RunningState
from pydantic import PositiveInt
from pytest_mock.plugin import MockerFixture
from simcore_postgres_database.models.comp_pipeline import StateType
from simcore_postgres_database.models.comp_runs import comp_runs
from simcore_postgres_database.models.comp_tasks import NodeClass
from simcore_service_director_v2.core.application import init_app
from simcore_service_director_v2.core.errors import (
ComputationalBackendNotConnectedError,
ConfigurationError,
PipelineNotFoundError,
)
from simcore_service_director_v2.core.settings import AppSettings
from simcore_service_director_v2.models.domains.comp_pipelines import CompPipelineAtDB
from simcore_service_director_v2.models.domains.comp_runs import CompRunsAtDB
from simcore_service_director_v2.modules.comp_scheduler import background_task
from simcore_service_director_v2.modules.comp_scheduler.base_scheduler import (
BaseCompScheduler,
)
from simcore_service_director_v2.modules.comp_scheduler.dask_scheduler import (
DaskScheduler,
)
from simcore_service_director_v2.utils.dask import generate_dask_job_id
from simcore_service_director_v2.utils.scheduler import COMPLETED_STATES
from starlette.testclient import TestClient
pytest_simcore_core_services_selection = ["postgres"]
pytest_simcore_ops_services_selection = ["adminer"]
@pytest.fixture()
def mocked_rabbit_mq_client(mocker: MockerFixture):
mocker.patch(
"simcore_service_director_v2.core.application.rabbitmq.RabbitMQClient",
autospec=True,
)
@pytest.fixture
def minimal_dask_scheduler_config(
mock_env: None,
postgres_host_config: Dict[str, str],
monkeypatch: MonkeyPatch,
mocked_rabbit_mq_client: None,
) -> None:
"""set a minimal configuration for testing the dask connection only"""
monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SIDECAR_ENABLED", "false")
monkeypatch.setenv("DIRECTOR_V0_ENABLED", "0")
monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "1")
monkeypatch.setenv("DIRECTOR_V2_CELERY_ENABLED", "0")
monkeypatch.setenv("DIRECTOR_V2_CELERY_SCHEDULER_ENABLED", "0")
monkeypatch.setenv("DIRECTOR_V2_DASK_CLIENT_ENABLED", "1")
monkeypatch.setenv("DIRECTOR_V2_DASK_SCHEDULER_ENABLED", "1")
@pytest.fixture
def scheduler(
minimal_dask_scheduler_config: None,
aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore
dask_spec_local_cluster: SpecCluster,
minimal_app: FastAPI,
) -> BaseCompScheduler:
assert minimal_app.state.scheduler is not None
return minimal_app.state.scheduler
@pytest.fixture
def mocked_dask_client_send_task(mocker: MockerFixture) -> mock.MagicMock:
mocked_dask_client_send_task = mocker.patch(
"simcore_service_director_v2.modules.comp_scheduler.dask_scheduler.DaskClient.send_computation_tasks"
)
return mocked_dask_client_send_task
@pytest.fixture
def mocked_node_ports(mocker: MockerFixture):
mocker.patch(
"simcore_service_director_v2.modules.comp_scheduler.dask_scheduler.parse_output_data",
return_value=None,
)
@pytest.fixture
def mocked_clean_task_output_fct(mocker: MockerFixture) -> mock.MagicMock:
return mocker.patch(
"simcore_service_director_v2.modules.comp_scheduler.dask_scheduler.clean_task_output_and_log_files_if_invalid",
return_value=None,
)
@pytest.fixture
def mocked_scheduler_task(monkeypatch: MonkeyPatch) -> None:
async def mocked_scheduler_task(app: FastAPI) -> None:
return None
monkeypatch.setattr(background_task, "scheduler_task", mocked_scheduler_task)
async def test_scheduler_gracefully_starts_and_stops(
minimal_dask_scheduler_config: None,
aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore
dask_local_cluster: LocalCluster,
minimal_app: FastAPI,
):
# check it started correctly
assert minimal_app.state.scheduler_task is not None
@pytest.mark.parametrize(
"missing_dependency",
[
"DIRECTOR_V2_POSTGRES_ENABLED",
"DIRECTOR_V2_DASK_CLIENT_ENABLED",
],
)
def test_scheduler_raises_exception_for_missing_dependencies(
minimal_dask_scheduler_config: None,
aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore
dask_local_cluster: LocalCluster,
monkeypatch: MonkeyPatch,
missing_dependency: str,
):
# disable the dependency
monkeypatch.setenv(missing_dependency, "0")
# create the client
settings = AppSettings.create_from_envs()
app = init_app(settings)
with pytest.raises(ConfigurationError):
with TestClient(app, raise_server_exceptions=True) as _:
pass
async def test_empty_pipeline_is_not_scheduled(
scheduler: BaseCompScheduler,
minimal_app: FastAPI,
user_id: PositiveInt,
project: Callable[..., ProjectAtDB],
pipeline: Callable[..., CompPipelineAtDB],
aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore
mocked_scheduler_task: None,
):
empty_project = project()
# the project is not in the comp_pipeline, therefore scheduling it should fail
with pytest.raises(PipelineNotFoundError):
await scheduler.run_new_pipeline(
user_id=user_id,
project_id=empty_project.uuid,
cluster_id=minimal_app.state.settings.DASK_SCHEDULER.DASK_DEFAULT_CLUSTER_ID,
)
# create the empty pipeline now
_empty_pipeline = pipeline(project_id=f"{empty_project.uuid}")
# creating a run with an empty pipeline is useless, check the scheduler is not kicking in
await scheduler.run_new_pipeline(
user_id=user_id,
project_id=empty_project.uuid,
cluster_id=minimal_app.state.settings.DASK_SCHEDULER.DASK_DEFAULT_CLUSTER_ID,
)
assert len(scheduler.scheduled_pipelines) == 0
assert (
scheduler.wake_up_event.is_set() == False
), "the scheduler was woken up on an empty pipeline!"
# check the database is empty
async with aiopg_engine.acquire() as conn: # type: ignore
result = await conn.scalar(
comp_runs.select().where(
(comp_runs.c.user_id == user_id)
& (comp_runs.c.project_uuid == f"{empty_project.uuid}")
) # there is only one entry
)
assert result == None
async def test_misconfigured_pipeline_is_not_scheduled(
mocked_scheduler_task: None,
scheduler: BaseCompScheduler,
minimal_app: FastAPI,
user_id: PositiveInt,
project: Callable[..., ProjectAtDB],
pipeline: Callable[..., CompPipelineAtDB],
fake_workbench_without_outputs: Dict[str, Any],
fake_workbench_adjacency: Dict[str, Any],
aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore
):
"""A pipeline which comp_tasks are missing should not be scheduled.
It shall be aborted and shown as such in the comp_runs db"""
sleepers_project = project(workbench=fake_workbench_without_outputs)
sleepers_pipeline = pipeline(
project_id=f"{sleepers_project.uuid}",
dag_adjacency_list=fake_workbench_adjacency,
)
# check the pipeline is correctly added to the scheduled pipelines
await scheduler.run_new_pipeline(
user_id=user_id,
project_id=sleepers_project.uuid,
cluster_id=minimal_app.state.settings.DASK_SCHEDULER.DASK_DEFAULT_CLUSTER_ID,
)
assert len(scheduler.scheduled_pipelines) == 1
assert (
scheduler.wake_up_event.is_set() == True
), "the scheduler was NOT woken up on the scheduled pipeline!"
for (u_id, p_id, it), params in scheduler.scheduled_pipelines.items():
assert u_id == user_id
assert p_id == sleepers_project.uuid
assert it > 0
assert params.mark_for_cancellation == False
# check the database was properly updated
async with aiopg_engine.acquire() as conn: # type: ignore
result = await conn.execute(
comp_runs.select().where(
(comp_runs.c.user_id == user_id)
& (comp_runs.c.project_uuid == f"{sleepers_project.uuid}")
) # there is only one entry
)
run_entry = CompRunsAtDB.parse_obj(await result.first())
assert run_entry.result == RunningState.PUBLISHED
# let the scheduler kick in
await manually_run_comp_scheduler(scheduler)
# check the scheduled pipelines is again empty since it's misconfigured
assert len(scheduler.scheduled_pipelines) == 0
# check the database entry is correctly updated
async with aiopg_engine.acquire() as conn: # type: ignore
result = await conn.execute(
comp_runs.select().where(
(comp_runs.c.user_id == user_id)
& (comp_runs.c.project_uuid == f"{sleepers_project.uuid}")
) # there is only one entry
)
run_entry = CompRunsAtDB.parse_obj(await result.first())
assert run_entry.result == RunningState.ABORTED
async def test_proper_pipeline_is_scheduled(
scheduler: BaseCompScheduler,
minimal_app: FastAPI,
user_id: PositiveInt,
aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore
mocked_dask_client_send_task: mock.MagicMock,
published_project: PublishedProject,
mocked_scheduler_task: None,
):
# This call starts the scheduling of a pipeline
await scheduler.run_new_pipeline(
user_id=user_id,
project_id=published_project.project.uuid,
cluster_id=minimal_app.state.settings.DASK_SCHEDULER.DASK_DEFAULT_CLUSTER_ID,
)
assert len(scheduler.scheduled_pipelines) == 1, "the pipeline is not scheduled!"
assert (
scheduler.wake_up_event.is_set() == True
), "the scheduler was NOT woken up on the scheduled pipeline!"
for (u_id, p_id, it), params in scheduler.scheduled_pipelines.items():
assert u_id == user_id
assert p_id == published_project.project.uuid
assert it > 0
assert params.mark_for_cancellation == False
# check the database is correctly updated, the run is published
await assert_comp_run_state(
aiopg_engine,
user_id,
published_project.project.uuid,
exp_state=RunningState.PUBLISHED,
)
published_tasks = [
published_project.tasks[1],
published_project.tasks[3],
]
# trigger the scheduler
await manually_run_comp_scheduler(scheduler)
# the tasks are set to pending, so they are ready to be taken, and the dask client is triggered
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[p.node_id for p in published_tasks],
exp_state=RunningState.PENDING,
)
# the other tasks are published
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[p.node_id for p in published_project.tasks if p not in published_tasks],
exp_state=RunningState.PUBLISHED,
)
mocked_dask_client_send_task.assert_has_calls(
calls=[
mock.call(
user_id=user_id,
project_id=published_project.project.uuid,
cluster_id=minimal_app.state.settings.DASK_SCHEDULER.DASK_DEFAULT_CLUSTER_ID,
tasks={f"{p.node_id}": p.image},
callback=cast(DaskScheduler, scheduler)._on_task_completed,
)
for p in published_tasks
],
any_order=True,
)
mocked_dask_client_send_task.reset_mock()
# trigger the scheduler
await manually_run_comp_scheduler(scheduler)
# let the scheduler kick in, it should switch the run state to PENDING, to reflect the task states
await assert_comp_run_state(
aiopg_engine,
user_id,
published_project.project.uuid,
exp_state=RunningState.PENDING,
)
# no change here
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[p.node_id for p in published_tasks],
exp_state=RunningState.PENDING,
)
mocked_dask_client_send_task.assert_not_called()
# change 1 task to RUNNING
running_task_id = published_tasks[0].node_id
await set_comp_task_state(
aiopg_engine,
node_id=f"{running_task_id}",
state=StateType.RUNNING,
)
# trigger the scheduler, comp_run is now STARTED, as is the task
await manually_run_comp_scheduler(scheduler)
await assert_comp_run_state(
aiopg_engine, user_id, published_project.project.uuid, RunningState.STARTED
)
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[running_task_id],
exp_state=RunningState.STARTED,
)
mocked_dask_client_send_task.assert_not_called()
# change the task to SUCCESS
await set_comp_task_state(
aiopg_engine,
node_id=f"{running_task_id}",
state=StateType.SUCCESS,
)
# trigger the scheduler, the run state is still STARTED, the task is completed
await manually_run_comp_scheduler(scheduler)
await assert_comp_run_state(
aiopg_engine, user_id, published_project.project.uuid, RunningState.STARTED
)
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[running_task_id],
exp_state=RunningState.SUCCESS,
)
next_published_task = published_project.tasks[2]
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[next_published_task.node_id],
exp_state=RunningState.PENDING,
)
mocked_dask_client_send_task.assert_called_once_with(
user_id=user_id,
project_id=published_project.project.uuid,
cluster_id=minimal_app.state.settings.DASK_SCHEDULER.DASK_DEFAULT_CLUSTER_ID,
tasks={
f"{next_published_task.node_id}": next_published_task.image,
},
callback=cast(DaskScheduler, scheduler)._on_task_completed,
)
mocked_dask_client_send_task.reset_mock()
# change 1 task to RUNNING
await set_comp_task_state(
aiopg_engine,
node_id=f"{next_published_task.node_id}",
state=StateType.RUNNING,
)
# trigger the scheduler, run state should keep to STARTED, task should be as well
await manually_run_comp_scheduler(scheduler)
await assert_comp_run_state(
aiopg_engine, user_id, published_project.project.uuid, RunningState.STARTED
)
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[next_published_task.node_id],
exp_state=RunningState.STARTED,
)
mocked_dask_client_send_task.assert_not_called()
# now change the task to FAILED
await set_comp_task_state(
aiopg_engine,
node_id=f"{next_published_task.node_id}",
state=StateType.FAILED,
)
# trigger the scheduler, it should keep to STARTED state until it finishes
await manually_run_comp_scheduler(scheduler)
await assert_comp_run_state(
aiopg_engine, user_id, published_project.project.uuid, RunningState.STARTED
)
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[next_published_task.node_id],
exp_state=RunningState.FAILED,
)
mocked_dask_client_send_task.assert_not_called()
# now change the other task to SUCCESS
other_task = published_tasks[1]
await set_comp_task_state(
aiopg_engine,
node_id=f"{other_task.node_id}",
state=StateType.SUCCESS,
)
# trigger the scheduler, it should switch to FAILED, as we are done
await manually_run_comp_scheduler(scheduler)
await assert_comp_run_state(
aiopg_engine, user_id, published_project.project.uuid, RunningState.FAILED
)
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[other_task.node_id],
exp_state=RunningState.SUCCESS,
)
mocked_dask_client_send_task.assert_not_called()
# the scheduled pipeline shall be removed
assert scheduler.scheduled_pipelines == {}
async def test_handling_of_disconnected_dask_scheduler(
mocked_scheduler_task: None,
dask_spec_local_cluster: SpecCluster,
scheduler: BaseCompScheduler,
minimal_app: FastAPI,
user_id: PositiveInt,
aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore
mocker: MockerFixture,
published_project: PublishedProject,
):
# this will create a non-connected backend issue that will trigger re-connection
mocked_dask_client_send_task = mocker.patch(
"simcore_service_director_v2.modules.comp_scheduler.dask_scheduler.DaskClient.send_computation_tasks",
side_effect=ComputationalBackendNotConnectedError(
msg="faked disconnected backend"
),
)
# mocked_delete_client_fct = mocker.patch(
# "simcore_service_director_v2.modules.comp_scheduler.dask_scheduler.DaskClient.delete",
# autospec=True,
# )
# check the pipeline is correctly added to the scheduled pipelines
await scheduler.run_new_pipeline(
user_id=user_id,
project_id=published_project.project.uuid,
cluster_id=minimal_app.state.settings.DASK_SCHEDULER.DASK_DEFAULT_CLUSTER_ID,
)
with pytest.raises(ComputationalBackendNotConnectedError):
await manually_run_comp_scheduler(scheduler)
# since there is no cluster, there is no dask-scheduler,
# the tasks shall all still be in PUBLISHED state now
await assert_comp_run_state(
aiopg_engine, user_id, published_project.project.uuid, RunningState.PUBLISHED
)
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[t.node_id for t in published_project.tasks],
exp_state=RunningState.PUBLISHED,
)
# the exception raised should trigger calls to reconnect the client; we do it manually here
old_dask_client = cast(DaskScheduler, scheduler).dask_client
await scheduler.reconnect_backend()
# this will delete and re-create the dask client
new_dask_client = cast(DaskScheduler, scheduler).dask_client
assert old_dask_client is not new_dask_client
# now try to abort the tasks since we are wondering what is happening, this should auto-trigger the scheduler
await scheduler.stop_pipeline(
user_id=user_id, project_id=published_project.project.uuid
)
# we ensure the scheduler was run
await manually_run_comp_scheduler(scheduler)
# after this step the tasks are marked as ABORTED
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[
t.node_id
for t in published_project.tasks
if t.node_class == NodeClass.COMPUTATIONAL
],
exp_state=RunningState.ABORTED,
)
# then we have another scheduler run
await manually_run_comp_scheduler(scheduler)
# now the run should be ABORTED
await assert_comp_run_state(
aiopg_engine, user_id, published_project.project.uuid, RunningState.ABORTED
)
@pytest.mark.parametrize("state", COMPLETED_STATES)
async def test_completed_task_properly_updates_state(
scheduler: BaseCompScheduler,
minimal_app: FastAPI,
user_id: PositiveInt,
aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore
published_project: PublishedProject,
mocked_node_ports: None,
mocked_clean_task_output_fct: mock.MagicMock,
state: RunningState,
mocked_scheduler_task: None,
):
# we do have a published project where the comp services are in PUBLISHED state
# here we will artificially call the completion handler in the scheduler
dask_scheduler = cast(DaskScheduler, scheduler)
job_id = generate_dask_job_id(
"simcore/service/comp/pytest/fake",
"12.34.55",
user_id,
published_project.project.uuid,
published_project.tasks[0].node_id,
)
state_event = TaskStateEvent(
job_id=job_id,
msg=TaskOutputData.parse_obj({"output_1": "some fake data"}).json(),
state=state,
)
await dask_scheduler._on_task_completed(state_event)
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[published_project.tasks[0].node_id],
exp_state=state,
)
@pytest.mark.parametrize("state", [RunningState.ABORTED, RunningState.FAILED])
async def test_failed_or_aborted_task_cleans_output_files(
scheduler: BaseCompScheduler,
minimal_app: FastAPI,
user_id: PositiveInt,
aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore
mocked_dask_client_send_task: mock.MagicMock,
published_project: PublishedProject,
state: RunningState,
mocked_clean_task_output_fct: mock.MagicMock,
mocked_scheduler_task: None,
):
# we do have a published project where the comp services are in PUBLISHED state
# here we will artificially call the completion handler in the scheduler
dask_scheduler = cast(DaskScheduler, scheduler)
job_id = generate_dask_job_id(
"simcore/service/comp/pytest/fake",
"12.34.55",
user_id,
published_project.project.uuid,
published_project.tasks[0].node_id,
)
state_event = TaskStateEvent(
job_id=job_id,
msg=TaskOutputData.parse_obj({"output_1": "some fake data"}).json(),
state=state,
)
await dask_scheduler._on_task_completed(state_event)
await assert_comp_tasks_state(
aiopg_engine,
published_project.project.uuid,
[published_project.tasks[0].node_id],
exp_state=state,
)
mocked_clean_task_output_fct.assert_called_once()
| 37.558678
| 119
| 0.727545
|
d41f91d80fc61cf2f337b280da24bae254309672
| 994
|
py
|
Python
|
souspi/tests/unit_poc/temp_test.py
|
jrheling/souspi
|
d4f35ad868172aeba4d68287ab3d101dd05b48e2
|
[
"Apache-2.0"
] | null | null | null |
souspi/tests/unit_poc/temp_test.py
|
jrheling/souspi
|
d4f35ad868172aeba4d68287ab3d101dd05b48e2
|
[
"Apache-2.0"
] | 26
|
2015-01-24T13:44:04.000Z
|
2016-07-14T11:15:39.000Z
|
souspi/tests/unit_poc/temp_test.py
|
jrheling/souspi
|
d4f35ad868172aeba4d68287ab3d101dd05b48e2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# trivial test driver to show operation of DS18B20 sensor on raspi
#
# wiring:
#
# blue / black lead from DS18B20 to GND
# yellow lead (data) from DS18B20 to BCM port #4 on RPi
# - also have resistor (~4.7k-10k) to 3.3v
# red lead from DS18B20 to 3.3V
# software prereq
# python package w1thermsensor installed
# references
# https://github.com/timofurrer/w1thermsensor
# https://learn.adafruit.com/adafruits-raspberry-pi-lesson-11-ds18b20-temperature-sensing
import sys
from w1thermsensor import W1ThermSensor
try:
sensor = W1ThermSensor()
except W1ThermSensor.NoSensorFoundError as e:
print "Unable to find sensor."
print "(remember, w1-* modules must be loaded as root before this will work!)"
sys.exit(1)
temperature_in_fahrenheit = sensor.get_temperature(W1ThermSensor.DEGREES_F)
print "it is %.2f degrees celsius" % sensor.get_temperature(W1ThermSensor.DEGREES_C)
print "it is %.2f degrees fahrenheit" % temperature_in_fahrenheit
| 30.121212
| 89
| 0.749497
|
9ad9fea0df06b390266e8ae36abcc8a232e5bdc8
| 387
|
py
|
Python
|
tests/test_swagger_jmx.py
|
QAInsights/swaggerjmx
|
29308c8b2cdcf33819a9681aa669ab57cfbc88c7
|
[
"MIT"
] | 1
|
2021-08-20T08:04:31.000Z
|
2021-08-20T08:04:31.000Z
|
tests/test_swagger_jmx.py
|
QAInsights/swaggerjmx
|
29308c8b2cdcf33819a9681aa669ab57cfbc88c7
|
[
"MIT"
] | null | null | null |
tests/test_swagger_jmx.py
|
QAInsights/swaggerjmx
|
29308c8b2cdcf33819a9681aa669ab57cfbc88c7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from swaggerjmx.convert import conversion
from swaggerjmx.settings import Settings as ST
path = os.path.join(os.getcwd(), 'test.json')
ST.swagger_url_json_path = '/home/travis/build/Pactortester/swaggerjmx/tests/test.json'
# ST.swagger_url_json_path = 'test.json'
# ST.swagger_url = 'http://00000:6003/v2/api-docs'
ST.report_path = 'jmx'
conversion()
| 29.769231
| 87
| 0.749354
|
e013fb43f02b12c0c4d9bf417b14c194a42dbf9d
| 6,184
|
py
|
Python
|
robogen/rgkit/backup bots/kamikaze112213.py
|
andrewgailey/robogen
|
7e96cfa26d2e6dc383c5d205816ddd98f8f100d7
|
[
"Unlicense"
] | null | null | null |
robogen/rgkit/backup bots/kamikaze112213.py
|
andrewgailey/robogen
|
7e96cfa26d2e6dc383c5d205816ddd98f8f100d7
|
[
"Unlicense"
] | null | null | null |
robogen/rgkit/backup bots/kamikaze112213.py
|
andrewgailey/robogen
|
7e96cfa26d2e6dc383c5d205816ddd98f8f100d7
|
[
"Unlicense"
] | null | null | null |
# kamikaze112213 by hephaestus
# http://robotgame.org/viewrobot/5830
import rg
import operator
class Robot:
def act(self, game):
adjacent_robots = self.get_adjacent_robots(game)
adjacent_friendlies = self.get_adjacent_robots(game, operator.__eq__)
adjacent_enemies = self.get_adjacent_robots(game, operator.__ne__)
all_enemies = self.get_all_robots(game, operator.__ne__)
# "The value of the key parameter should be a function that takes
# a single argument and returns a key to use for sorting purposes."
def query(bot_dict, sorting_function, offset=0):
organized = sorted(bot_dict.items(), key=sorting_function)
# returns a list of tuples, [(key, value),... ]
return organized
def get_weakest_enemy(offset=0):
return query(all_enemies, lambda t: t[1].hp)[offset][1]
def get_weakest_adjacent_enemy(offset=0):
return query(adjacent_enemies, lambda t: t[1].hp)[offset][1]
# first_enemy_location = get_first_enemy_location()
weakest_enemy = get_weakest_enemy()
target_enemy = weakest_enemy
if len(adjacent_enemies) > 0:
weakest_adjacent_enemy = get_weakest_adjacent_enemy()
target_enemy = weakest_adjacent_enemy
# move toward the center, if moving there would not put you in range of 2 robots
target_pos = rg.toward(self.location, weakest_enemy.location)
# figure out if any friendly robots would also want to move to our target
adjacent_to_target_friendlies = self.get_adjacent_robots_to(target_pos, game, operator.__eq__)
# if there are enemies around, attack them
# also consider suiciding when it will guarantee a kill, meaning enemy < 15 hp
suicide_threshold = 3 # 3 is better than 4 with 83% confidence, 7-42, 10-34 vs 3-43, 7-38
# 4 is [55, 30, 15] against 3
def has_suicide_priority():
adjacent_allies_to_target_enemy = self.get_adjacent_robots(game, operator.__eq__)
weakest_allies_next_to_adjacent_target_enemy = query(adjacent_allies_to_target_enemy, lambda t: t[1].hp)
return self.location == weakest_allies_next_to_adjacent_target_enemy[0][0]
if len(adjacent_enemies) > 0 and len(adjacent_enemies) < suicide_threshold:
# following line is better by 102-20-17 over just self.hp < 10
# inspired by peterm's stupid 2.6 bot
# assuming all adjacent enemies attacked me, if I would die
# i should instead suicide
if self.hp < (10*len(adjacent_enemies)):
return ['suicide']
# IDEA: if i could kill the enemy with 1 suicide instead of two attacks
# NOTE: if multiple allies are going for this target, i'll actually lose too many bots
# bad idea, 0-20 against self
# if weakest_adjacent_enemy.hp < 15 and weakest_adjacent_enemy.hp > 8 and has_suicide_priority():
# return ['suicide']
# if you could kill 2+ bots by suidiciding, do it
# should also avoid over-killing robots
return ['attack', weakest_adjacent_enemy.location]
elif len(adjacent_enemies) >= suicide_threshold:
return ['suicide']
#not using this priority method because it breaks on the server for some reason
def byroboidhas_priority(): # if i'm a newer bot, I have priority
for loc,bot in adjacent_to_target_friendlies.items():
their_target_pos = rg.toward(loc, weakest_enemy.location)
# check if bots would collide
if their_target_pos == target_pos:
if self.robot_id > bot.robot_id:
return False
return True
def has_priority(): # if i'm more bottom or more to the right, i'll take priority
for loc,bot in adjacent_to_target_friendlies.items():
their_target_pos = rg.toward(loc, weakest_enemy.location)
# check if bots would collide
if their_target_pos == target_pos:
if self.location[0] < loc[0] or self.location[1] < loc[1]:
#don't move then, do something else
return False
return True
if self.location != target_pos and has_priority():
if 'obstacle' not in rg.loc_types(target_pos):
adjacent_to_target_enemies = self.get_adjacent_robots_to(target_pos, game, operator.__ne__)
# if len(adjacent_to_target_enemies) <= 1 or len(adjacent_to_target_enemies) >= 3:
return ['move', target_pos]
#if we couldn't decide to do anything else, just guard
return self.guard()
def toward(curr, dest):
if curr == dest:
return curr
x0, y0 = curr
x, y = dest
x_diff, y_diff = x - x0, y - y0
if abs(x_diff) < abs(y_diff):
return (x0, y0 + y_diff / abs(y_diff))
elif abs(x_diff) == abs(y_diff):
# BROKEN FIX
return (0, 0)
else:
return (x0 + x_diff / abs(x_diff), y0)
def guard(self):
return ['guard']
def get_all_robots(self, game, player_comparator=None):
def generate():
for loc,bot in game.get('robots').items():
if player_comparator == None or player_comparator(self.player_id, bot.player_id):
yield (loc, bot)
return dict(generate())
def get_adjacent_robots_to(self, some_location, game, player_comparator=None):
def generate():
for loc,bot in game.get('robots').items():
if rg.wdist(loc, some_location) <= 1:
if player_comparator == None or player_comparator(self.player_id, bot.player_id):
yield (loc, bot)
return dict(generate())
def get_adjacent_robots(self, game, player_comparator=None):
return self.get_adjacent_robots_to(self.location, game, player_comparator)
| 44.171429
| 116
| 0.619825
|
3735a3a8f0c659f6b7a3e47ade5c1d09eb477d15
| 7,464
|
py
|
Python
|
tests/test_io.py
|
mabudz/pyam
|
ac64c7194c36b796ae473cae47c2917d3848e5bb
|
[
"Apache-2.0"
] | 2
|
2017-12-07T06:17:00.000Z
|
2018-03-05T09:09:42.000Z
|
tests/test_io.py
|
shaohuizhang/pyam
|
2dffec07ce86b2f3fb8133ac369fa3c172589064
|
[
"Apache-2.0"
] | 29
|
2017-12-06T21:52:53.000Z
|
2018-03-05T10:41:46.000Z
|
tests/test_io.py
|
shaohuizhang/pyam
|
2dffec07ce86b2f3fb8133ac369fa3c172589064
|
[
"Apache-2.0"
] | 8
|
2017-12-07T09:07:49.000Z
|
2018-02-26T07:55:16.000Z
|
from pathlib import Path
import pandas as pd
import numpy as np
import pytest
from pyam import IamDataFrame, read_datapackage
from pyam.utils import META_IDX
from pyam.testing import assert_iamframe_equal
from .conftest import TEST_DATA_DIR, META_DF
FILTER_ARGS = dict(scenario="scen_a")
def test_data_none():
# initializing with 'data=None' raises an error
match = "IamDataFrame constructor not properly called!"
with pytest.raises(ValueError, match=match):
IamDataFrame(None)
def test_unknown_type():
# initializing with unsupported argument type raises an error
match = "IamDataFrame constructor not properly called!"
with pytest.raises(ValueError, match=match):
IamDataFrame(True)
def test_not_a_file():
# initializing with a file-like that's not a file raises an error
match = "No such file: 'foo.csv'"
with pytest.raises(FileNotFoundError, match=match):
IamDataFrame("foo.csv")
def test_io_list():
# initializing with a list raises an error
match = r"Initializing from list is not supported,*."
with pytest.raises(ValueError, match=match):
IamDataFrame([1, 2])
def test_io_csv(test_df, tmpdir):
# write to csv
file = tmpdir / "testing_io_write_read.csv"
test_df.to_csv(file)
# read from csv and assert that `data` tables are equal
import_df = IamDataFrame(file)
pd.testing.assert_frame_equal(test_df.data, import_df.data)
@pytest.mark.parametrize(
"meta_args", [[{}, {}], [dict(include_meta="foo"), dict(meta_sheet_name="foo")]]
)
def test_io_xlsx(test_df, meta_args, tmpdir):
# write to xlsx (direct file name and ExcelWriter, see #300)
file = tmpdir / "testing_io_write_read.xlsx"
for f in [file, pd.ExcelWriter(file)]:
test_df.to_excel(f, **meta_args[0])
if isinstance(f, pd.ExcelWriter):
f.close()
# read from xlsx
import_df = IamDataFrame(file, **meta_args[1])
# assert that IamDataFrame instances are equal
assert_iamframe_equal(test_df, import_df)
@pytest.mark.parametrize(
"sheets, sheetname",
[
[["data1", "data2"], dict(sheet_name="data*")],
[["data1", "foo"], dict(sheet_name=["data*", "foo"])],
],
)
def test_io_xlsx_multiple_data_sheets(test_df, sheets, sheetname, tmpdir):
# write data to separate sheets in excel file
file = tmpdir / "testing_io_write_read.xlsx"
xl = pd.ExcelWriter(file)
for i, (model, scenario) in enumerate(test_df.index):
test_df.filter(scenario=scenario).to_excel(xl, sheet_name=sheets[i])
test_df.export_meta(xl)
xl.close()
# read from xlsx
import_df = IamDataFrame(file, **sheetname)
# assert that IamDataFrame instances are equal
assert_iamframe_equal(test_df, import_df)
def test_init_df_with_na_unit(test_pd_df, tmpdir):
# missing values in the unit column are replaced by an empty string
test_pd_df.loc[1, "unit"] = np.nan
df = IamDataFrame(test_pd_df)
assert df.unit == ["", "EJ/yr"]
# writing to file and importing as pandas returns `nan`, not empty string
file = tmpdir / "na_unit.csv"
df.to_csv(file)
df_csv = pd.read_csv(file)
assert np.isnan(df_csv.loc[1, "Unit"])
IamDataFrame(file) # reading from file as IamDataFrame works
file = tmpdir / "na_unit.xlsx"
df.to_excel(file)
df_excel = pd.read_excel(file, engine="openpyxl")
assert np.isnan(df_excel.loc[1, "Unit"])
IamDataFrame(file) # reading from file as IamDataFrame works
def test_init_df_with_na_column_raises(test_pd_df, tmpdir):
# reading from file with a "corrupted" column raises expected error
match = "Empty cells in `data` \(columns: 'unnamed: 7'\):"
with pytest.raises(ValueError, match=match):
IamDataFrame(TEST_DATA_DIR / "na_column.xlsx")
@pytest.mark.parametrize(
"sheet_name, init_args, rename",
[
("meta", {}, False),
("meta", dict(sheet_name="meta"), False),
("foo", dict(sheet_name="foo"), False),
("foo", dict(sheet_name="foo"), True),
],
)
def test_load_meta_xlsx(test_pd_df, sheet_name, init_args, rename, tmpdir):
"""Test loading meta from an Excel file"""
# downselect meta
meta = META_DF.iloc[0:1] if rename else META_DF
# initialize a new IamDataFrame directly from data and meta
exp = IamDataFrame(test_pd_df, meta=meta)
# write meta to file (without an exclude col)
file = tmpdir / "testing_io_meta.xlsx"
meta.reset_index().to_excel(file, sheet_name=sheet_name, index=False)
# initialize a new IamDataFrame and load meta from file
obs = IamDataFrame(test_pd_df)
obs.load_meta(file)
assert_iamframe_equal(obs, exp)
@pytest.mark.parametrize("rename", [True, False])
def test_load_meta_csv(test_pd_df, rename, tmpdir):
"""Test loading meta from an csv file"""
meta = META_DF.iloc[0:1] if rename else META_DF
# initialize a new IamDataFrame directly from data and meta
exp = IamDataFrame(test_pd_df, meta=meta)
# write meta to file (without an exclude col)
file = tmpdir / "testing_io_meta.csv"
meta.reset_index().to_csv(file, index=False)
# initialize a new IamDataFrame and load meta from file
obs = IamDataFrame(test_pd_df)
obs.load_meta(file)
assert_iamframe_equal(obs, exp)
def test_load_meta_wrong_index(test_df_year, tmpdir):
"""Loading meta without (at least) index cols as headers raises an error"""
# write meta frame with wrong index to file, then load to the IamDataFrame
file = tmpdir / "testing_meta_empty.xlsx"
pd.DataFrame(columns=["model", "foo"]).to_excel(file, index=False)
match = ".* \(sheet meta\) missing required index columns \['scenario'\]\!"
with pytest.raises(ValueError, match=match):
test_df_year.load_meta(file)
def test_load_meta_empty_rows(test_df_year, tmpdir):
"""Loading empty meta table (columns but no rows) from xlsx file"""
exp = test_df_year.copy() # loading empty file has no effect
# write empty meta frame to file, then load to the IamDataFrame
file = tmpdir / "testing_meta_empty.xlsx"
pd.DataFrame(columns=META_IDX).to_excel(file, index=False)
test_df_year.load_meta(file)
assert_iamframe_equal(test_df_year, exp)
def test_load_meta_empty(test_pd_df):
"""Initializing from xlsx where 'meta' has no rows and non-empty invisible header"""
obs = IamDataFrame(TEST_DATA_DIR / "empty_meta_sheet.xlsx")
exp = IamDataFrame(test_pd_df)
assert_iamframe_equal(obs, exp)
def test_load_ssp_database_downloaded_file(test_pd_df):
exp = IamDataFrame(test_pd_df).filter(**FILTER_ARGS).as_pandas()
file = TEST_DATA_DIR / "test_SSP_database_raw_download.xlsx"
obs_df = IamDataFrame(file)
pd.testing.assert_frame_equal(obs_df.as_pandas(), exp)
def test_load_rcp_database_downloaded_file(test_pd_df):
exp = IamDataFrame(test_pd_df).filter(**FILTER_ARGS).as_pandas()
file = TEST_DATA_DIR / "test_RCP_database_raw_download.xlsx"
obs_df = IamDataFrame(file)
pd.testing.assert_frame_equal(obs_df.as_pandas(), exp)
def test_io_datapackage(test_df, tmpdir):
# add column to `meta` and write to datapackage
file = Path(tmpdir) / "foo.zip"
test_df.set_meta(["a", "b"], "string")
test_df.to_datapackage(file)
# read from csv assert that IamDataFrame instances are equal
import_df = read_datapackage(file)
assert_iamframe_equal(test_df, import_df)
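# A hedged sketch (not part of the original suite): it spells out one concrete
# combination of the `include_meta` / `meta_sheet_name` round-trip that
# `test_io_xlsx` above exercises parametrically; the `test_df` and `tmpdir`
# fixtures are assumed to behave as in the other tests.
def test_io_xlsx_named_meta_sheet_sketch(test_df, tmpdir):
    # write to xlsx, storing `meta` in a custom sheet instead of the default "meta"
    file = tmpdir / "testing_io_named_meta.xlsx"
    test_df.to_excel(file, include_meta="my_meta")
    # read it back by pointing the constructor at that sheet
    import_df = IamDataFrame(file, meta_sheet_name="my_meta")
    assert_iamframe_equal(test_df, import_df)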
| 33.621622
| 88
| 0.704984
|
10413af077b8d64c5d74324bd462260c5791f740
| 214,633
|
py
|
Python
|
Mi Lenguaje/MyLanguage/lib/python3.8/site-packages/mypy/checkexpr.py
|
DataEngel/Creando-mi-primer-lenguaje-de-programaci-n-
|
92434f89c62b6cec0114441c669952450ba21b79
|
[
"MIT"
] | 6
|
2020-04-10T14:36:25.000Z
|
2021-04-25T13:11:32.000Z
|
Mi Lenguaje/MyLanguage/lib/python3.8/site-packages/mypy/checkexpr.py
|
DataEngel/Creando-mi-primer-lenguaje-de-programaci-n-
|
92434f89c62b6cec0114441c669952450ba21b79
|
[
"MIT"
] | null | null | null |
Mi Lenguaje/MyLanguage/lib/python3.8/site-packages/mypy/checkexpr.py
|
DataEngel/Creando-mi-primer-lenguaje-de-programaci-n-
|
92434f89c62b6cec0114441c669952450ba21b79
|
[
"MIT"
] | null | null | null |
"""Expression type checker. This file is conceptually part of TypeChecker."""
from collections import OrderedDict
from contextlib import contextmanager
import itertools
from typing import (
Any, cast, Dict, Set, List, Tuple, Callable, Union, Optional, Sequence, Iterator
)
from typing_extensions import ClassVar, Final, overload
from mypy.errors import report_internal_error
from mypy.typeanal import (
has_any_from_unimported_type, check_for_explicit_any, set_any_tvars, expand_type_alias,
make_optional_type,
)
from mypy.types import (
Type, AnyType, CallableType, Overloaded, NoneType, TypeVarDef,
TupleType, TypedDictType, Instance, TypeVarType, ErasedType, UnionType,
PartialType, DeletedType, UninhabitedType, TypeType, TypeOfAny, LiteralType, LiteralValue,
is_named_instance, FunctionLike,
StarType, is_optional, remove_optional, is_generic_instance, get_proper_type, ProperType,
get_proper_types, flatten_nested_unions
)
from mypy.nodes import (
NameExpr, RefExpr, Var, FuncDef, OverloadedFuncDef, TypeInfo, CallExpr,
MemberExpr, IntExpr, StrExpr, BytesExpr, UnicodeExpr, FloatExpr,
OpExpr, UnaryExpr, IndexExpr, CastExpr, RevealExpr, TypeApplication, ListExpr,
TupleExpr, DictExpr, LambdaExpr, SuperExpr, SliceExpr, Context, Expression,
ListComprehension, GeneratorExpr, SetExpr, MypyFile, Decorator,
ConditionalExpr, ComparisonExpr, TempNode, SetComprehension, AssignmentExpr,
DictionaryComprehension, ComplexExpr, EllipsisExpr, StarExpr, AwaitExpr, YieldExpr,
YieldFromExpr, TypedDictExpr, PromoteExpr, NewTypeExpr, NamedTupleExpr, TypeVarExpr,
TypeAliasExpr, BackquoteExpr, EnumCallExpr, TypeAlias, SymbolNode, PlaceholderNode,
ARG_POS, ARG_OPT, ARG_NAMED, ARG_STAR, ARG_STAR2, LITERAL_TYPE, REVEAL_TYPE,
)
from mypy.literals import literal
from mypy import nodes
import mypy.checker
from mypy import types
from mypy.sametypes import is_same_type
from mypy.erasetype import replace_meta_vars, erase_type, remove_instance_last_known_values
from mypy.maptype import map_instance_to_supertype
from mypy.messages import MessageBuilder
from mypy import message_registry
from mypy.infer import infer_type_arguments, infer_function_type_arguments
from mypy import join
from mypy.meet import narrow_declared_type, is_overlapping_types
from mypy.subtypes import is_subtype, is_proper_subtype, is_equivalent, non_method_protocol_members
from mypy import applytype
from mypy import erasetype
from mypy.checkmember import analyze_member_access, type_object_type
from mypy.argmap import ArgTypeExpander, map_actuals_to_formals, map_formals_to_actuals
from mypy.checkstrformat import StringFormatterChecker
from mypy.expandtype import expand_type, expand_type_by_instance, freshen_function_type_vars
from mypy.util import split_module_names
from mypy.typevars import fill_typevars
from mypy.visitor import ExpressionVisitor
from mypy.plugin import Plugin, MethodContext, MethodSigContext, FunctionContext
from mypy.typeops import (
tuple_fallback, make_simplified_union, true_only, false_only, erase_to_union_or_bound,
function_type, callable_type, try_getting_str_literals, custom_special_method,
is_literal_type_like,
)
import mypy.errorcodes as codes
# Type of callback user for checking individual function arguments. See
# check_args() below for details.
ArgChecker = Callable[[Type,
Type,
int,
Type,
int,
int,
CallableType,
Context,
Context,
MessageBuilder],
None]
# Maximum nesting level for union math in overloads; setting this to large values
# may cause performance issues. The reason is that although the union math algorithm
# we use nicely captures most corner cases, its worst-case complexity is exponential,
# see https://github.com/python/mypy/pull/5255#discussion_r196896335 for discussion.
MAX_UNIONS = 5 # type: Final
# Types considered safe for comparisons with --strict-equality due to known behaviour of __eq__.
# NOTE: All these types are subtypes of AbstractSet.
OVERLAPPING_TYPES_WHITELIST = ['builtins.set', 'builtins.frozenset',
'typing.KeysView', 'typing.ItemsView'] # type: Final
class TooManyUnions(Exception):
"""Indicates that we need to stop splitting unions in an attempt
to match an overload in order to save performance.
"""
def extract_refexpr_names(expr: RefExpr) -> Set[str]:
"""Recursively extracts all module references from a reference expression.
Note that currently, the only two subclasses of RefExpr are NameExpr and
MemberExpr."""
output = set() # type: Set[str]
while isinstance(expr.node, MypyFile) or expr.fullname is not None:
if isinstance(expr.node, MypyFile) and expr.fullname is not None:
# If it's None, something's wrong (perhaps due to an
# import cycle or a suppressed error). For now we just
# skip it.
output.add(expr.fullname)
if isinstance(expr, NameExpr):
is_suppressed_import = isinstance(expr.node, Var) and expr.node.is_suppressed_import
if isinstance(expr.node, TypeInfo):
# Reference to a class or a nested class
output.update(split_module_names(expr.node.module_name))
elif expr.fullname is not None and '.' in expr.fullname and not is_suppressed_import:
# Everything else (that is not a silenced import within a class)
output.add(expr.fullname.rsplit('.', 1)[0])
break
elif isinstance(expr, MemberExpr):
if isinstance(expr.expr, RefExpr):
expr = expr.expr
else:
break
else:
raise AssertionError("Unknown RefExpr subclass: {}".format(type(expr)))
return output
class Finished(Exception):
"""Raised if we can terminate overload argument check early (no match)."""
class ExpressionChecker(ExpressionVisitor[Type]):
"""Expression type checker.
This class works closely together with checker.TypeChecker.
"""
# Some services are provided by a TypeChecker instance.
chk = None # type: mypy.checker.TypeChecker
# This is shared with TypeChecker, but stored also here for convenience.
msg = None # type: MessageBuilder
# Type context for type inference
type_context = None # type: List[Optional[Type]]
strfrm_checker = None # type: StringFormatterChecker
plugin = None # type: Plugin
def __init__(self,
chk: 'mypy.checker.TypeChecker',
msg: MessageBuilder,
plugin: Plugin) -> None:
"""Construct an expression type checker."""
self.chk = chk
self.msg = msg
self.plugin = plugin
self.type_context = [None]
# Temporary overrides for expression types. This is currently
# used by the union math in overloads.
# TODO: refactor this to use a pattern similar to one in
# multiassign_from_union, or maybe even combine the two?
self.type_overrides = {} # type: Dict[Expression, Type]
self.strfrm_checker = StringFormatterChecker(self, self.chk, self.msg)
def visit_name_expr(self, e: NameExpr) -> Type:
"""Type check a name expression.
It can be of any kind: local, member or global.
"""
self.chk.module_refs.update(extract_refexpr_names(e))
result = self.analyze_ref_expr(e)
return self.narrow_type_from_binder(e, result)
def analyze_ref_expr(self, e: RefExpr, lvalue: bool = False) -> Type:
result = None # type: Optional[Type]
node = e.node
if isinstance(e, NameExpr) and e.is_special_form:
# A special form definition, nothing to check here.
return AnyType(TypeOfAny.special_form)
if isinstance(node, Var):
# Variable reference.
result = self.analyze_var_ref(node, e)
if isinstance(result, PartialType):
result = self.chk.handle_partial_var_type(result, lvalue, node, e)
elif isinstance(node, FuncDef):
# Reference to a global function.
result = function_type(node, self.named_type('builtins.function'))
elif isinstance(node, OverloadedFuncDef) and node.type is not None:
# node.type is None when there are multiple definitions of a function
# and it's decorated by something that is not typing.overload
# TODO: use a dummy Overloaded instead of AnyType in this case
# like we do in mypy.types.function_type()?
result = node.type
elif isinstance(node, TypeInfo):
# Reference to a type object.
result = type_object_type(node, self.named_type)
if (isinstance(result, CallableType) and
isinstance(result.ret_type, Instance)): # type: ignore
# We need to set correct line and column
# TODO: always do this in type_object_type by passing the original context
result.ret_type.line = e.line
result.ret_type.column = e.column
if isinstance(get_proper_type(self.type_context[-1]), TypeType):
# This is the type in a Type[] expression, so substitute type
# variables with Any.
result = erasetype.erase_typevars(result)
elif isinstance(node, MypyFile):
# Reference to a module object.
try:
result = self.named_type('types.ModuleType')
except KeyError:
# In test cases 'types' may not be available.
# Fall back to a dummy 'object' type instead to
# avoid a crash.
result = self.named_type('builtins.object')
elif isinstance(node, Decorator):
result = self.analyze_var_ref(node.var, e)
elif isinstance(node, TypeAlias):
# Something that refers to a type alias appears in runtime context.
# Note that we suppress bogus errors for alias redefinitions,
# they are already reported in semanal.py.
result = self.alias_type_in_runtime_context(node, node.no_args, e,
alias_definition=e.is_alias_rvalue
or lvalue)
else:
if isinstance(node, PlaceholderNode):
assert False, 'PlaceholderNode %r leaked to checker' % node.fullname
# Unknown reference; use any type implicitly to avoid
# generating extra type errors.
result = AnyType(TypeOfAny.from_error)
assert result is not None
return result
def analyze_var_ref(self, var: Var, context: Context) -> Type:
if var.type:
var_type = get_proper_type(var.type)
if isinstance(var_type, Instance):
if self.is_literal_context() and var_type.last_known_value is not None:
return var_type.last_known_value
if var.name in {'True', 'False'}:
return self.infer_literal_expr_type(var.name == 'True', 'builtins.bool')
return var.type
else:
if not var.is_ready and self.chk.in_checked_function():
self.chk.handle_cannot_determine_type(var.name, context)
# Implicit 'Any' type.
return AnyType(TypeOfAny.special_form)
def visit_call_expr(self, e: CallExpr, allow_none_return: bool = False) -> Type:
"""Type check a call expression."""
if e.analyzed:
if isinstance(e.analyzed, NamedTupleExpr) and not e.analyzed.is_typed:
# Type check the arguments, but ignore the results. This relies
# on the typeshed stubs to type check the arguments.
self.visit_call_expr_inner(e)
# It's really a special form that only looks like a call.
return self.accept(e.analyzed, self.type_context[-1])
return self.visit_call_expr_inner(e, allow_none_return=allow_none_return)
def visit_call_expr_inner(self, e: CallExpr, allow_none_return: bool = False) -> Type:
if isinstance(e.callee, RefExpr) and isinstance(e.callee.node, TypeInfo) and \
e.callee.node.typeddict_type is not None:
# Use named fallback for better error messages.
typeddict_type = e.callee.node.typeddict_type.copy_modified(
fallback=Instance(e.callee.node, []))
return self.check_typeddict_call(typeddict_type, e.arg_kinds, e.arg_names, e.args, e)
if (isinstance(e.callee, NameExpr) and e.callee.name in ('isinstance', 'issubclass')
and len(e.args) == 2):
for typ in mypy.checker.flatten(e.args[1]):
node = None
if isinstance(typ, NameExpr):
try:
node = self.chk.lookup_qualified(typ.name)
except KeyError:
# Undefined names should already be reported in semantic analysis.
pass
if is_expr_literal_type(typ):
self.msg.cannot_use_function_with_type(e.callee.name, "Literal", e)
continue
if (node and isinstance(node.node, TypeAlias)
and isinstance(get_proper_type(node.node.target), AnyType)):
self.msg.cannot_use_function_with_type(e.callee.name, "Any", e)
continue
if ((isinstance(typ, IndexExpr)
and isinstance(typ.analyzed, (TypeApplication, TypeAliasExpr)))
or (isinstance(typ, NameExpr) and node and
isinstance(node.node, TypeAlias) and not node.node.no_args)):
self.msg.type_arguments_not_allowed(e)
if isinstance(typ, RefExpr) and isinstance(typ.node, TypeInfo):
if typ.node.typeddict_type:
self.msg.cannot_use_function_with_type(e.callee.name, "TypedDict", e)
elif typ.node.is_newtype:
self.msg.cannot_use_function_with_type(e.callee.name, "NewType", e)
self.try_infer_partial_type(e)
type_context = None
if isinstance(e.callee, LambdaExpr):
formal_to_actual = map_actuals_to_formals(
e.arg_kinds, e.arg_names,
e.callee.arg_kinds, e.callee.arg_names,
lambda i: self.accept(e.args[i]))
arg_types = [join.join_type_list([self.accept(e.args[j]) for j in formal_to_actual[i]])
for i in range(len(e.callee.arg_kinds))]
type_context = CallableType(arg_types, e.callee.arg_kinds, e.callee.arg_names,
ret_type=self.object_type(),
fallback=self.named_type('builtins.function'))
callee_type = get_proper_type(self.accept(e.callee, type_context, always_allow_any=True))
if (self.chk.options.disallow_untyped_calls and
self.chk.in_checked_function() and
isinstance(callee_type, CallableType)
and callee_type.implicit):
return self.msg.untyped_function_call(callee_type, e)
# Figure out the full name of the callee for plugin lookup.
object_type = None
member = None
fullname = None
if isinstance(e.callee, RefExpr):
# There are two special cases where plugins might act:
# * A "static" reference/alias to a class or function;
# get_function_hook() will be invoked for these.
fullname = e.callee.fullname
if isinstance(e.callee.node, TypeAlias):
target = get_proper_type(e.callee.node.target)
if isinstance(target, Instance):
fullname = target.type.fullname
# * Call to a method on object that has a full name (see
# method_fullname() for details on supported objects);
# get_method_hook() and get_method_signature_hook() will
# be invoked for these.
if (fullname is None
and isinstance(e.callee, MemberExpr)
and e.callee.expr in self.chk.type_map):
member = e.callee.name
object_type = self.chk.type_map[e.callee.expr]
ret_type = self.check_call_expr_with_callee_type(callee_type, e, fullname,
object_type, member)
if isinstance(e.callee, RefExpr) and len(e.args) == 2:
if e.callee.fullname in ('builtins.isinstance', 'builtins.issubclass'):
self.check_runtime_protocol_test(e)
if e.callee.fullname == 'builtins.issubclass':
self.check_protocol_issubclass(e)
if isinstance(e.callee, MemberExpr) and e.callee.name == 'format':
self.check_str_format_call(e)
ret_type = get_proper_type(ret_type)
if isinstance(ret_type, UninhabitedType) and not ret_type.ambiguous:
self.chk.binder.unreachable()
# Warn on calls to functions that always return None. The check
# of ret_type is both a common-case optimization and prevents reporting
# the error in dynamic functions (where it will be Any).
if (not allow_none_return and isinstance(ret_type, NoneType)
and self.always_returns_none(e.callee)):
self.chk.msg.does_not_return_value(callee_type, e)
return AnyType(TypeOfAny.from_error)
return ret_type
def check_str_format_call(self, e: CallExpr) -> None:
"""More precise type checking for str.format() calls on literals."""
assert isinstance(e.callee, MemberExpr)
format_value = None
if isinstance(e.callee.expr, (StrExpr, UnicodeExpr)):
format_value = e.callee.expr.value
elif e.callee.expr in self.chk.type_map:
base_typ = try_getting_literal(self.chk.type_map[e.callee.expr])
if isinstance(base_typ, LiteralType) and isinstance(base_typ.value, str):
format_value = base_typ.value
if format_value is not None:
self.strfrm_checker.check_str_format_call(e, format_value)
def method_fullname(self, object_type: Type, method_name: str) -> Optional[str]:
"""Convert a method name to a fully qualified name, based on the type of the object that
it is invoked on. Return `None` if the name of `object_type` cannot be determined.
"""
object_type = get_proper_type(object_type)
if isinstance(object_type, CallableType) and object_type.is_type_obj():
# For class method calls, object_type is a callable representing the class object.
# We "unwrap" it to a regular type, as the class/instance method difference doesn't
# affect the fully qualified name.
object_type = get_proper_type(object_type.ret_type)
elif isinstance(object_type, TypeType):
object_type = object_type.item
type_name = None
if isinstance(object_type, Instance):
type_name = object_type.type.fullname
elif isinstance(object_type, (TypedDictType, LiteralType)):
info = object_type.fallback.type.get_containing_type_info(method_name)
type_name = info.fullname if info is not None else None
elif isinstance(object_type, TupleType):
type_name = tuple_fallback(object_type).type.fullname
if type_name is not None:
return '{}.{}'.format(type_name, method_name)
else:
return None
def always_returns_none(self, node: Expression) -> bool:
"""Check if `node` refers to something explicitly annotated as only returning None."""
if isinstance(node, RefExpr):
if self.defn_returns_none(node.node):
return True
if isinstance(node, MemberExpr) and node.node is None: # instance or class attribute
typ = get_proper_type(self.chk.type_map.get(node.expr))
if isinstance(typ, Instance):
info = typ.type
elif isinstance(typ, CallableType) and typ.is_type_obj():
ret_type = get_proper_type(typ.ret_type)
if isinstance(ret_type, Instance):
info = ret_type.type
else:
return False
else:
return False
sym = info.get(node.name)
if sym and self.defn_returns_none(sym.node):
return True
return False
def defn_returns_none(self, defn: Optional[SymbolNode]) -> bool:
"""Check if `defn` can _only_ return None."""
if isinstance(defn, FuncDef):
return (isinstance(defn.type, CallableType) and
isinstance(get_proper_type(defn.type.ret_type), NoneType))
if isinstance(defn, OverloadedFuncDef):
return all(self.defn_returns_none(item) for item in defn.items)
if isinstance(defn, Var):
typ = get_proper_type(defn.type)
if (not defn.is_inferred and isinstance(typ, CallableType) and
isinstance(get_proper_type(typ.ret_type), NoneType)):
return True
if isinstance(typ, Instance):
sym = typ.type.get('__call__')
if sym and self.defn_returns_none(sym.node):
return True
return False
def check_runtime_protocol_test(self, e: CallExpr) -> None:
for expr in mypy.checker.flatten(e.args[1]):
tp = get_proper_type(self.chk.type_map[expr])
if (isinstance(tp, CallableType) and tp.is_type_obj() and
tp.type_object().is_protocol and
not tp.type_object().runtime_protocol):
self.chk.fail(message_registry.RUNTIME_PROTOCOL_EXPECTED, e)
def check_protocol_issubclass(self, e: CallExpr) -> None:
for expr in mypy.checker.flatten(e.args[1]):
tp = get_proper_type(self.chk.type_map[expr])
if (isinstance(tp, CallableType) and tp.is_type_obj() and
tp.type_object().is_protocol):
attr_members = non_method_protocol_members(tp.type_object())
if attr_members:
self.chk.msg.report_non_method_protocol(tp.type_object(),
attr_members, e)
def check_typeddict_call(self, callee: TypedDictType,
arg_kinds: List[int],
arg_names: Sequence[Optional[str]],
args: List[Expression],
context: Context) -> Type:
if len(args) >= 1 and all([ak == ARG_NAMED for ak in arg_kinds]):
# ex: Point(x=42, y=1337)
assert all(arg_name is not None for arg_name in arg_names)
item_names = cast(List[str], arg_names)
item_args = args
return self.check_typeddict_call_with_kwargs(
callee, OrderedDict(zip(item_names, item_args)), context)
if len(args) == 1 and arg_kinds[0] == ARG_POS:
unique_arg = args[0]
if isinstance(unique_arg, DictExpr):
# ex: Point({'x': 42, 'y': 1337})
return self.check_typeddict_call_with_dict(callee, unique_arg, context)
if isinstance(unique_arg, CallExpr) and isinstance(unique_arg.analyzed, DictExpr):
# ex: Point(dict(x=42, y=1337))
return self.check_typeddict_call_with_dict(callee, unique_arg.analyzed, context)
if len(args) == 0:
# ex: EmptyDict()
return self.check_typeddict_call_with_kwargs(
callee, OrderedDict(), context)
self.chk.fail(message_registry.INVALID_TYPEDDICT_ARGS, context)
return AnyType(TypeOfAny.from_error)
def validate_typeddict_kwargs(
self, kwargs: DictExpr) -> 'Optional[OrderedDict[str, Expression]]':
item_args = [item[1] for item in kwargs.items]
item_names = []  # type: List[str]
for item_name_expr, item_arg in kwargs.items:
literal_value = None
if item_name_expr:
key_type = self.accept(item_name_expr)
values = try_getting_str_literals(item_name_expr, key_type)
if values and len(values) == 1:
literal_value = values[0]
if literal_value is None:
key_context = item_name_expr or item_arg
self.chk.fail(message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL,
key_context)
return None
else:
item_names.append(literal_value)
return OrderedDict(zip(item_names, item_args))
def match_typeddict_call_with_dict(self, callee: TypedDictType,
kwargs: DictExpr,
context: Context) -> bool:
validated_kwargs = self.validate_typeddict_kwargs(kwargs=kwargs)
if validated_kwargs is not None:
return (callee.required_keys <= set(validated_kwargs.keys())
<= set(callee.items.keys()))
else:
return False
def check_typeddict_call_with_dict(self, callee: TypedDictType,
kwargs: DictExpr,
context: Context) -> Type:
validated_kwargs = self.validate_typeddict_kwargs(kwargs=kwargs)
if validated_kwargs is not None:
return self.check_typeddict_call_with_kwargs(
callee,
kwargs=validated_kwargs,
context=context)
else:
return AnyType(TypeOfAny.from_error)
def check_typeddict_call_with_kwargs(self, callee: TypedDictType,
kwargs: 'OrderedDict[str, Expression]',
context: Context) -> Type:
if not (callee.required_keys <= set(kwargs.keys()) <= set(callee.items.keys())):
expected_keys = [key for key in callee.items.keys()
if key in callee.required_keys or key in kwargs.keys()]
actual_keys = kwargs.keys()
self.msg.unexpected_typeddict_keys(
callee,
expected_keys=expected_keys,
actual_keys=list(actual_keys),
context=context)
return AnyType(TypeOfAny.from_error)
for (item_name, item_expected_type) in callee.items.items():
if item_name in kwargs:
item_value = kwargs[item_name]
self.chk.check_simple_assignment(
lvalue_type=item_expected_type, rvalue=item_value, context=item_value,
msg=message_registry.INCOMPATIBLE_TYPES,
lvalue_name='TypedDict item "{}"'.format(item_name),
rvalue_name='expression',
code=codes.TYPEDDICT_ITEM)
return callee
def get_partial_self_var(self, expr: MemberExpr) -> Optional[Var]:
"""Get variable node for a partial self attribute.
If the expression is not a self attribute, or attribute is not variable,
or variable is not partial, return None.
"""
if not (isinstance(expr.expr, NameExpr) and
isinstance(expr.expr.node, Var) and expr.expr.node.is_self):
# Not a self.attr expression.
return None
info = self.chk.scope.enclosing_class()
if not info or expr.name not in info.names:
# Don't mess with partial types in superclasses.
return None
sym = info.names[expr.name]
if isinstance(sym.node, Var) and isinstance(sym.node.type, PartialType):
return sym.node
return None
# Types and methods that can be used to infer partial types.
item_args = {'builtins.list': ['append'],
'builtins.set': ['add', 'discard'],
} # type: ClassVar[Dict[str, List[str]]]
container_args = {'builtins.list': {'extend': ['builtins.list']},
'builtins.dict': {'update': ['builtins.dict']},
'collections.OrderedDict': {'update': ['builtins.dict']},
'builtins.set': {'update': ['builtins.set', 'builtins.list']},
} # type: ClassVar[Dict[str, Dict[str, List[str]]]]
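# Illustrative example (comment only) of how these tables drive inference: in
#     x = []        # 'x' gets a partial list type with an unknown item type
#     x.append(1)   # 'append' is listed in item_args['builtins.list'],
#                   # so 'x' is refined to List[int]
# and, analogously, a method listed in container_args (e.g. dict.update called
# with a Dict[str, int] argument) lets a partial dict type copy the argument's
# type arguments.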
def try_infer_partial_type(self, e: CallExpr) -> None:
"""Try to make partial type precise from a call."""
if not isinstance(e.callee, MemberExpr):
return
callee = e.callee
if isinstance(callee.expr, RefExpr):
# Call a method with a RefExpr callee, such as 'x.method(...)'.
ret = self.get_partial_var(callee.expr)
if ret is None:
return
var, partial_types = ret
typ = self.try_infer_partial_value_type_from_call(e, callee.name, var)
if typ is not None:
var.type = typ
del partial_types[var]
elif isinstance(callee.expr, IndexExpr) and isinstance(callee.expr.base, RefExpr):
# Call 'x[y].method(...)'; may infer type of 'x' if it's a partial defaultdict.
if callee.expr.analyzed is not None:
return # A special form
base = callee.expr.base
index = callee.expr.index
ret = self.get_partial_var(base)
if ret is None:
return
var, partial_types = ret
partial_type = get_partial_instance_type(var.type)
if partial_type is None or partial_type.value_type is None:
return
value_type = self.try_infer_partial_value_type_from_call(e, callee.name, var)
if value_type is not None:
# Infer key type.
key_type = self.accept(index)
if mypy.checker.is_valid_inferred_type(key_type):
# Store inferred partial type.
assert partial_type.type is not None
typename = partial_type.type.fullname
var.type = self.chk.named_generic_type(typename,
[key_type, value_type])
del partial_types[var]
def get_partial_var(self, ref: RefExpr) -> Optional[Tuple[Var, Dict[Var, Context]]]:
var = ref.node
if var is None and isinstance(ref, MemberExpr):
var = self.get_partial_self_var(ref)
if not isinstance(var, Var):
return None
partial_types = self.chk.find_partial_types(var)
if partial_types is None:
return None
return var, partial_types
def try_infer_partial_value_type_from_call(
self,
e: CallExpr,
methodname: str,
var: Var) -> Optional[Instance]:
"""Try to make partial type precise from a call such as 'x.append(y)'."""
if self.chk.current_node_deferred:
return None
partial_type = get_partial_instance_type(var.type)
if partial_type is None:
return None
if partial_type.value_type:
typename = partial_type.value_type.type.fullname
else:
assert partial_type.type is not None
typename = partial_type.type.fullname
# Sometimes we can infer a full type for a partial List, Dict or Set type.
# TODO: Don't infer argument expression twice.
if (typename in self.item_args and methodname in self.item_args[typename]
and e.arg_kinds == [ARG_POS]):
item_type = self.accept(e.args[0])
if mypy.checker.is_valid_inferred_type(item_type):
return self.chk.named_generic_type(typename, [item_type])
elif (typename in self.container_args
and methodname in self.container_args[typename]
and e.arg_kinds == [ARG_POS]):
arg_type = get_proper_type(self.accept(e.args[0]))
if isinstance(arg_type, Instance):
arg_typename = arg_type.type.fullname
if arg_typename in self.container_args[typename][methodname]:
if all(mypy.checker.is_valid_inferred_type(item_type)
for item_type in arg_type.args):
return self.chk.named_generic_type(typename,
list(arg_type.args))
elif isinstance(arg_type, AnyType):
return self.chk.named_type(typename)
return None
def apply_function_plugin(self,
callee: CallableType,
arg_kinds: List[int],
arg_types: List[Type],
arg_names: Optional[Sequence[Optional[str]]],
formal_to_actual: List[List[int]],
args: List[Expression],
fullname: str,
object_type: Optional[Type],
context: Context) -> Type:
"""Use special case logic to infer the return type of a specific named function/method.
Caller must ensure that a plugin hook exists. There are two different cases:
- If object_type is None, the caller must ensure that a function hook exists
for fullname.
- If object_type is not None, the caller must ensure that a method hook exists
for fullname.
Return the inferred return type.
"""
num_formals = len(callee.arg_types)
formal_arg_types = [[] for _ in range(num_formals)] # type: List[List[Type]]
formal_arg_exprs = [[] for _ in range(num_formals)] # type: List[List[Expression]]
formal_arg_names = [[] for _ in range(num_formals)] # type: List[List[Optional[str]]]
formal_arg_kinds = [[] for _ in range(num_formals)] # type: List[List[int]]
for formal, actuals in enumerate(formal_to_actual):
for actual in actuals:
formal_arg_types[formal].append(arg_types[actual])
formal_arg_exprs[formal].append(args[actual])
if arg_names:
formal_arg_names[formal].append(arg_names[actual])
formal_arg_kinds[formal].append(arg_kinds[actual])
if object_type is None:
# Apply function plugin
callback = self.plugin.get_function_hook(fullname)
assert callback is not None # Assume that caller ensures this
return callback(
FunctionContext(formal_arg_types, formal_arg_kinds,
callee.arg_names, formal_arg_names,
callee.ret_type, formal_arg_exprs, context, self.chk))
else:
# Apply method plugin
method_callback = self.plugin.get_method_hook(fullname)
assert method_callback is not None # Assume that caller ensures this
object_type = get_proper_type(object_type)
return method_callback(
MethodContext(object_type, formal_arg_types, formal_arg_kinds,
callee.arg_names, formal_arg_names,
callee.ret_type, formal_arg_exprs, context, self.chk))
def apply_method_signature_hook(
self, callee: FunctionLike, args: List[Expression],
arg_kinds: List[int], context: Context,
arg_names: Optional[Sequence[Optional[str]]], object_type: Type,
signature_hook: Callable[[MethodSigContext], CallableType]) -> FunctionLike:
"""Apply a plugin hook that may infer a more precise signature for a method."""
if isinstance(callee, CallableType):
num_formals = len(callee.arg_kinds)
formal_to_actual = map_actuals_to_formals(
arg_kinds, arg_names,
callee.arg_kinds, callee.arg_names,
lambda i: self.accept(args[i]))
formal_arg_exprs = [[] for _ in range(num_formals)] # type: List[List[Expression]]
for formal, actuals in enumerate(formal_to_actual):
for actual in actuals:
formal_arg_exprs[formal].append(args[actual])
object_type = get_proper_type(object_type)
return signature_hook(
MethodSigContext(object_type, formal_arg_exprs, callee, context, self.chk))
else:
assert isinstance(callee, Overloaded)
items = []
for item in callee.items():
adjusted = self.apply_method_signature_hook(
item, args, arg_kinds, context, arg_names, object_type, signature_hook)
assert isinstance(adjusted, CallableType)
items.append(adjusted)
return Overloaded(items)
def transform_callee_type(
self, callable_name: Optional[str], callee: Type, args: List[Expression],
arg_kinds: List[int], context: Context,
arg_names: Optional[Sequence[Optional[str]]] = None,
object_type: Optional[Type] = None) -> Type:
"""Attempt to determine a more accurate signature for a method call.
This is done by looking up and applying a method signature hook (if one exists for the
given method name).
If no matching method signature hook is found, callee is returned unmodified. The same
happens if the arguments refer to a non-method callable (this is allowed so that the code
calling transform_callee_type needs to perform fewer boilerplate checks).
Note: this method is *not* called automatically as part of check_call, because in some
cases check_call is called multiple times while checking a single call (for example when
dealing with overloads). Instead, this method needs to be called explicitly
(if appropriate) before the signature is passed to check_call.
"""
callee = get_proper_type(callee)
if (callable_name is not None
and object_type is not None
and isinstance(callee, FunctionLike)):
signature_hook = self.plugin.get_method_signature_hook(callable_name)
if signature_hook:
return self.apply_method_signature_hook(
callee, args, arg_kinds, context, arg_names, object_type, signature_hook)
return callee
def check_call_expr_with_callee_type(self,
callee_type: Type,
e: CallExpr,
callable_name: Optional[str],
object_type: Optional[Type],
member: Optional[str] = None) -> Type:
"""Type check call expression.
The callee_type should be used as the type of callee expression. In particular,
in case of a union type this can be a particular item of the union, so that we can
apply plugin hooks to each item.
The 'member', 'callable_name' and 'object_type' are only used to call plugin hooks.
If 'callable_name' is None but 'member' is not None (member call), try constructing
'callable_name' using 'object_type' (the base type on which the method is called),
for example 'typing.Mapping.get'.
"""
if callable_name is None and member is not None:
assert object_type is not None
callable_name = self.method_fullname(object_type, member)
object_type = get_proper_type(object_type)
if callable_name:
# Try to refine the call signature using plugin hooks before checking the call.
callee_type = self.transform_callee_type(
callable_name, callee_type, e.args, e.arg_kinds, e, e.arg_names, object_type)
# Unions are special-cased to allow plugins to act on each item in the union.
elif member is not None and isinstance(object_type, UnionType):
return self.check_union_call_expr(e, object_type, member)
return self.check_call(callee_type, e.args, e.arg_kinds, e,
e.arg_names, callable_node=e.callee,
callable_name=callable_name,
object_type=object_type)[0]
def check_union_call_expr(self, e: CallExpr, object_type: UnionType, member: str) -> Type:
""""Type check calling a member expression where the base type is a union."""
res = [] # type: List[Type]
for typ in object_type.relevant_items():
# Member access errors are already reported when visiting the member expression.
self.msg.disable_errors()
item = analyze_member_access(member, typ, e, False, False, False,
self.msg, original_type=object_type, chk=self.chk,
in_literal_context=self.is_literal_context(),
self_type=typ)
self.msg.enable_errors()
narrowed = self.narrow_type_from_binder(e.callee, item, skip_non_overlapping=True)
if narrowed is None:
continue
callable_name = self.method_fullname(typ, member)
item_object_type = typ if callable_name else None
res.append(self.check_call_expr_with_callee_type(narrowed, e, callable_name,
item_object_type))
return make_simplified_union(res)
def check_call(self,
callee: Type,
args: List[Expression],
arg_kinds: List[int],
context: Context,
arg_names: Optional[Sequence[Optional[str]]] = None,
callable_node: Optional[Expression] = None,
arg_messages: Optional[MessageBuilder] = None,
callable_name: Optional[str] = None,
object_type: Optional[Type] = None) -> Tuple[Type, Type]:
"""Type check a call.
Also infer type arguments if the callee is a generic function.
Return (result type, inferred callee type).
Arguments:
callee: type of the called value
args: actual argument expressions
arg_kinds: contains nodes.ARG_* constant for each argument in args
describing whether the argument is positional, *arg, etc.
arg_names: names of arguments (optional)
callable_node: associate the inferred callable type to this node,
if specified
arg_messages: TODO
callable_name: Fully-qualified name of the function/method to call,
or None if unavailable (examples: 'builtins.open', 'typing.Mapping.get')
object_type: If callable_name refers to a method, the type of the object
on which the method is being called
"""
arg_messages = arg_messages or self.msg
callee = get_proper_type(callee)
if isinstance(callee, CallableType):
return self.check_callable_call(callee, args, arg_kinds, context, arg_names,
callable_node, arg_messages, callable_name,
object_type)
elif isinstance(callee, Overloaded):
return self.check_overload_call(callee, args, arg_kinds, arg_names, callable_name,
object_type, context, arg_messages)
elif isinstance(callee, AnyType) or not self.chk.in_checked_function():
return self.check_any_type_call(args, callee)
elif isinstance(callee, UnionType):
return self.check_union_call(callee, args, arg_kinds, arg_names, context, arg_messages)
elif isinstance(callee, Instance):
call_function = analyze_member_access('__call__', callee, context, is_lvalue=False,
is_super=False, is_operator=True, msg=self.msg,
original_type=callee, chk=self.chk,
in_literal_context=self.is_literal_context())
callable_name = callee.type.fullname + ".__call__"
# Apply method signature hook, if one exists
call_function = self.transform_callee_type(
callable_name, call_function, args, arg_kinds, context, arg_names, callee)
result = self.check_call(call_function, args, arg_kinds, context, arg_names,
callable_node, arg_messages, callable_name, callee)
if callable_node:
# check_call() stored "call_function" as the type, which is incorrect.
# Override the type.
self.chk.store_type(callable_node, callee)
return result
elif isinstance(callee, TypeVarType):
return self.check_call(callee.upper_bound, args, arg_kinds, context, arg_names,
callable_node, arg_messages)
elif isinstance(callee, TypeType):
item = self.analyze_type_type_callee(callee.item, context)
return self.check_call(item, args, arg_kinds, context, arg_names,
callable_node, arg_messages)
elif isinstance(callee, TupleType):
return self.check_call(tuple_fallback(callee), args, arg_kinds, context,
arg_names, callable_node, arg_messages, callable_name,
object_type)
else:
return self.msg.not_callable(callee, context), AnyType(TypeOfAny.from_error)
def check_callable_call(self,
callee: CallableType,
args: List[Expression],
arg_kinds: List[int],
context: Context,
arg_names: Optional[Sequence[Optional[str]]],
callable_node: Optional[Expression],
arg_messages: MessageBuilder,
callable_name: Optional[str],
object_type: Optional[Type]) -> Tuple[Type, Type]:
"""Type check a call that targets a callable value.
See the docstring of check_call for more information.
"""
if callable_name is None and callee.name:
callable_name = callee.name
ret_type = get_proper_type(callee.ret_type)
if callee.is_type_obj() and isinstance(ret_type, Instance):
callable_name = ret_type.type.fullname
if (isinstance(callable_node, RefExpr)
and callable_node.fullname in ('enum.Enum', 'enum.IntEnum',
'enum.Flag', 'enum.IntFlag')):
# An Enum() call that failed SemanticAnalyzerPass2.check_enum_call().
return callee.ret_type, callee
if (callee.is_type_obj() and callee.type_object().is_abstract
# Exception for Type[...]
and not callee.from_type_type
and not callee.type_object().fallback_to_any):
type = callee.type_object()
self.msg.cannot_instantiate_abstract_class(
callee.type_object().name, type.abstract_attributes,
context)
elif (callee.is_type_obj() and callee.type_object().is_protocol
# Exception for Type[...]
and not callee.from_type_type):
self.chk.fail(message_registry.CANNOT_INSTANTIATE_PROTOCOL
.format(callee.type_object().name), context)
formal_to_actual = map_actuals_to_formals(
arg_kinds, arg_names,
callee.arg_kinds, callee.arg_names,
lambda i: self.accept(args[i]))
if callee.is_generic():
callee = freshen_function_type_vars(callee)
callee = self.infer_function_type_arguments_using_context(
callee, context)
callee = self.infer_function_type_arguments(
callee, args, arg_kinds, formal_to_actual, context)
arg_types = self.infer_arg_types_in_context(
callee, args, arg_kinds, formal_to_actual)
self.check_argument_count(callee, arg_types, arg_kinds,
arg_names, formal_to_actual, context, self.msg)
self.check_argument_types(arg_types, arg_kinds, args, callee, formal_to_actual, context,
messages=arg_messages)
if (callee.is_type_obj() and (len(arg_types) == 1)
and is_equivalent(callee.ret_type, self.named_type('builtins.type'))):
callee = callee.copy_modified(ret_type=TypeType.make_normalized(arg_types[0]))
if callable_node:
# Store the inferred callable type.
self.chk.store_type(callable_node, callee)
if (callable_name
and ((object_type is None and self.plugin.get_function_hook(callable_name))
or (object_type is not None
and self.plugin.get_method_hook(callable_name)))):
new_ret_type = self.apply_function_plugin(
callee, arg_kinds, arg_types, arg_names, formal_to_actual, args,
callable_name, object_type, context)
callee = callee.copy_modified(ret_type=new_ret_type)
return callee.ret_type, callee
def analyze_type_type_callee(self, item: ProperType, context: Context) -> Type:
"""Analyze the callee X in X(...) where X is Type[item].
Return a Y that we can pass to check_call(Y, ...).
"""
if isinstance(item, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=item)
if isinstance(item, Instance):
res = type_object_type(item.type, self.named_type)
if isinstance(res, CallableType):
res = res.copy_modified(from_type_type=True)
expanded = get_proper_type(expand_type_by_instance(res, item))
if isinstance(expanded, CallableType):
# Callee of the form Type[...] should never be generic, only
# proper class objects can be.
expanded = expanded.copy_modified(variables=[])
return expanded
if isinstance(item, UnionType):
return UnionType([self.analyze_type_type_callee(get_proper_type(tp), context)
for tp in item.relevant_items()], item.line)
if isinstance(item, TypeVarType):
# Pretend we're calling the typevar's upper bound,
# i.e. its constructor (a poor approximation for reality,
# but better than AnyType...), but replace the return type
# with typevar.
callee = self.analyze_type_type_callee(get_proper_type(item.upper_bound), context)
callee = get_proper_type(callee)
if isinstance(callee, CallableType):
callee = callee.copy_modified(ret_type=item)
elif isinstance(callee, Overloaded):
callee = Overloaded([c.copy_modified(ret_type=item)
for c in callee.items()])
return callee
# We support Type of namedtuples but not of tuples in general
if (isinstance(item, TupleType)
and tuple_fallback(item).type.fullname != 'builtins.tuple'):
return self.analyze_type_type_callee(tuple_fallback(item), context)
self.msg.unsupported_type_type(item, context)
return AnyType(TypeOfAny.from_error)
def infer_arg_types_in_empty_context(self, args: List[Expression]) -> List[Type]:
"""Infer argument expression types in an empty context.
In short, we basically recurse on each argument without considering
in what context the argument was called.
"""
res = [] # type: List[Type]
for arg in args:
arg_type = self.accept(arg)
if has_erased_component(arg_type):
res.append(NoneType())
else:
res.append(arg_type)
return res
def infer_arg_types_in_context(
self, callee: CallableType, args: List[Expression], arg_kinds: List[int],
formal_to_actual: List[List[int]]) -> List[Type]:
"""Infer argument expression types using a callable type as context.
For example, if callee argument 2 has type List[int], infer the
argument expression with List[int] type context.
Returns the inferred types of *actual arguments*.
"""
res = [None] * len(args) # type: List[Optional[Type]]
for i, actuals in enumerate(formal_to_actual):
for ai in actuals:
if arg_kinds[ai] not in (nodes.ARG_STAR, nodes.ARG_STAR2):
res[ai] = self.accept(args[ai], callee.arg_types[i])
# Fill in the rest of the argument types.
for i, t in enumerate(res):
if not t:
res[i] = self.accept(args[i])
assert all(tp is not None for tp in res)
return cast(List[Type], res)
def infer_function_type_arguments_using_context(
self, callable: CallableType, error_context: Context) -> CallableType:
"""Unify callable return type to type context to infer type vars.
For example, if the return type is set[t] where 't' is a type variable
of callable, and if the context is set[int], return callable modified
by substituting 't' with 'int'.
"""
ctx = self.type_context[-1]
if not ctx:
return callable
# The return type may have references to type metavariables that
# we are inferring right now. We must consider them as indeterminate
# and they are not potential results; thus we replace them with the
# special ErasedType type. On the other hand, class type variables are
# valid results.
erased_ctx = replace_meta_vars(ctx, ErasedType())
ret_type = callable.ret_type
if is_optional(ret_type) and is_optional(ctx):
# If both the context and the return type are optional, unwrap the optional,
# since in 99% cases this is what a user expects. In other words, we replace
# Optional[T] <: Optional[int]
# with
# T <: int
# while the former would infer T <: Optional[int].
ret_type = remove_optional(ret_type)
erased_ctx = remove_optional(erased_ctx)
#
# TODO: Instead of this hack and the one below, we need to use outer and
# inner contexts at the same time. This is however not easy because of two
# reasons:
# * We need to support constraints like [1 <: 2, 2 <: X], i.e. with variables
# on both sides. (This is not too hard.)
# * We need to update all the inference "infrastructure", so that all
# variables in an expression are inferred at the same time.
# (And this is hard, also we need to be careful with lambdas that require
# two passes.)
if isinstance(ret_type, TypeVarType):
# Another special case: the return type is a type variable. If it's unrestricted,
# we could infer a too general type for the type variable if we use context,
# and this could result in confusing and spurious type errors elsewhere.
#
# So we give up and just use function arguments for type inference, with just two
# exceptions:
#
# 1. If the context is a generic instance type, actually use it as context, as
# this *seems* to usually be the reasonable thing to do.
#
# See also github issues #462 and #360.
#
# 2. If the context is some literal type, we want to "propagate" that information
# down so that we infer a more precise type for literal expressions. For example,
# the expression `3` normally has an inferred type of `builtins.int`: but if it's
# in a literal context like below, we want it to infer `Literal[3]` instead.
#
# def expects_literal(x: Literal[3]) -> None: pass
# def identity(x: T) -> T: return x
#
# expects_literal(identity(3)) # Should type-check
if not is_generic_instance(ctx) and not is_literal_type_like(ctx):
return callable.copy_modified()
args = infer_type_arguments(callable.type_var_ids(), ret_type, erased_ctx)
# Only substitute non-Uninhabited and non-erased types.
new_args = [] # type: List[Optional[Type]]
for arg in args:
if has_uninhabited_component(arg) or has_erased_component(arg):
new_args.append(None)
else:
new_args.append(arg)
# Don't show errors after we have only used the outer context for inference.
# We will use argument context to infer more variables.
return self.apply_generic_arguments(callable, new_args, error_context,
skip_unsatisfied=True)
def infer_function_type_arguments(self, callee_type: CallableType,
args: List[Expression],
arg_kinds: List[int],
formal_to_actual: List[List[int]],
context: Context) -> CallableType:
"""Infer the type arguments for a generic callee type.
Infer based on the types of arguments.
Return a derived callable type that has the arguments applied.
"""
if self.chk.in_checked_function():
# Disable type errors during type inference. There may be errors
# due to partial available context information at this time, but
# these errors can be safely ignored as the arguments will be
# inferred again later.
self.msg.disable_errors()
arg_types = self.infer_arg_types_in_context(
callee_type, args, arg_kinds, formal_to_actual)
self.msg.enable_errors()
arg_pass_nums = self.get_arg_infer_passes(
callee_type.arg_types, formal_to_actual, len(args))
pass1_args = [] # type: List[Optional[Type]]
for i, arg in enumerate(arg_types):
if arg_pass_nums[i] > 1:
pass1_args.append(None)
else:
pass1_args.append(arg)
inferred_args = infer_function_type_arguments(
callee_type, pass1_args, arg_kinds, formal_to_actual,
strict=self.chk.in_checked_function())
if 2 in arg_pass_nums:
# Second pass of type inference.
(callee_type,
inferred_args) = self.infer_function_type_arguments_pass2(
callee_type, args, arg_kinds, formal_to_actual,
inferred_args, context)
if callee_type.special_sig == 'dict' and len(inferred_args) == 2 and (
ARG_NAMED in arg_kinds or ARG_STAR2 in arg_kinds):
# HACK: Infer str key type for dict(...) with keyword args. The type system
# can't represent this so we special case it, as this is a pretty common
# thing. This doesn't quite work with all possible subclasses of dict
# if they shuffle type variables around, as we assume that there is a 1-1
# correspondence with dict type variables. This is a marginal issue and
# a little tricky to fix so it's left unfixed for now.
first_arg = get_proper_type(inferred_args[0])
if isinstance(first_arg, (NoneType, UninhabitedType)):
inferred_args[0] = self.named_type('builtins.str')
elif not first_arg or not is_subtype(self.named_type('builtins.str'), first_arg):
self.msg.fail(message_registry.KEYWORD_ARGUMENT_REQUIRES_STR_KEY_TYPE,
context)
else:
# In dynamically typed functions use implicit 'Any' types for
# type variables.
inferred_args = [AnyType(TypeOfAny.unannotated)] * len(callee_type.variables)
return self.apply_inferred_arguments(callee_type, inferred_args,
context)
def infer_function_type_arguments_pass2(
self, callee_type: CallableType,
args: List[Expression],
arg_kinds: List[int],
formal_to_actual: List[List[int]],
old_inferred_args: Sequence[Optional[Type]],
context: Context) -> Tuple[CallableType, List[Optional[Type]]]:
"""Perform second pass of generic function type argument inference.
The second pass is needed for arguments with types such as Callable[[T], S],
where both T and S are type variables, when the actual argument is a
lambda with inferred types. The idea is to infer the type variable T
in the first pass (based on the types of other arguments). This lets
us infer the argument and return type of the lambda expression and
thus also the type variable S in this second pass.
Return (the callee with type vars applied, inferred actual arg types).
"""
# None or erased types in inferred types mean that there was not enough
# information to infer the argument. Replace them with None values so
# that they are not applied yet below.
inferred_args = list(old_inferred_args)
for i, arg in enumerate(get_proper_types(inferred_args)):
if isinstance(arg, (NoneType, UninhabitedType)) or has_erased_component(arg):
inferred_args[i] = None
callee_type = self.apply_generic_arguments(callee_type, inferred_args, context)
arg_types = self.infer_arg_types_in_context(
callee_type, args, arg_kinds, formal_to_actual)
inferred_args = infer_function_type_arguments(
callee_type, arg_types, arg_kinds, formal_to_actual)
return callee_type, inferred_args
def get_arg_infer_passes(self, arg_types: List[Type],
formal_to_actual: List[List[int]],
num_actuals: int) -> List[int]:
"""Return pass numbers for args for two-pass argument type inference.
For each actual, the pass number is either 1 (first pass) or 2 (second
pass).
Two-pass argument type inference primarily lets us infer types of
lambdas more effectively.
"""
res = [1] * num_actuals
for i, arg in enumerate(arg_types):
if arg.accept(ArgInferSecondPassQuery()):
for j in formal_to_actual[i]:
res[j] = 2
return res
def apply_inferred_arguments(self, callee_type: CallableType,
inferred_args: Sequence[Optional[Type]],
context: Context) -> CallableType:
"""Apply inferred values of type arguments to a generic function.
Inferred_args contains the values of function type arguments.
"""
# Report error if some of the variables could not be solved. In that
# case assume that all variables have type Any to avoid extra
# bogus error messages.
for i, inferred_type in enumerate(inferred_args):
if not inferred_type or has_erased_component(inferred_type):
# Could not infer a non-trivial type for a type variable.
self.msg.could_not_infer_type_arguments(
callee_type, i + 1, context)
inferred_args = [AnyType(TypeOfAny.from_error)] * len(inferred_args)
# Apply the inferred types to the function type. In this case the
# return type must be CallableType, since we give the right number of type
# arguments.
return self.apply_generic_arguments(callee_type, inferred_args, context)
def check_argument_count(self,
callee: CallableType,
actual_types: List[Type],
actual_kinds: List[int],
actual_names: Optional[Sequence[Optional[str]]],
formal_to_actual: List[List[int]],
context: Optional[Context],
messages: Optional[MessageBuilder]) -> bool:
"""Check that there is a value for all required arguments to a function.
Also check that there are no duplicate values for arguments. Report found errors
using 'messages' if it's not None. If 'messages' is given, 'context' must also be given.
Return False if there were any errors. Otherwise return True
"""
if messages:
assert context, "Internal error: messages given without context"
elif context is None:
# Avoid "is None" checks
context = TempNode(AnyType(TypeOfAny.special_form))
# TODO(jukka): We could return as soon as we find an error if messages is None.
# Collect list of all actual arguments matched to formal arguments.
all_actuals = [] # type: List[int]
for actuals in formal_to_actual:
all_actuals.extend(actuals)
ok, is_unexpected_arg_error = self.check_for_extra_actual_arguments(
callee, actual_types, actual_kinds, actual_names, all_actuals, context, messages)
# Check for too many or few values for formals.
for i, kind in enumerate(callee.arg_kinds):
if kind == nodes.ARG_POS and (not formal_to_actual[i] and
not is_unexpected_arg_error):
# No actual for a mandatory positional formal.
if messages:
messages.too_few_arguments(callee, context, actual_names)
ok = False
elif kind == nodes.ARG_NAMED and (not formal_to_actual[i] and
not is_unexpected_arg_error):
# No actual for a mandatory named formal
if messages:
argname = callee.arg_names[i] or "?"
messages.missing_named_argument(callee, context, argname)
ok = False
elif kind in [nodes.ARG_POS, nodes.ARG_OPT,
nodes.ARG_NAMED, nodes.ARG_NAMED_OPT] and is_duplicate_mapping(
formal_to_actual[i], actual_kinds):
if (self.chk.in_checked_function() or
isinstance(get_proper_type(actual_types[formal_to_actual[i][0]]),
TupleType)):
if messages:
messages.duplicate_argument_value(callee, i, context)
ok = False
elif (kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT) and formal_to_actual[i] and
actual_kinds[formal_to_actual[i][0]] not in [nodes.ARG_NAMED, nodes.ARG_STAR2]):
# Positional argument when expecting a keyword argument.
if messages:
messages.too_many_positional_arguments(callee, context)
ok = False
return ok
def check_for_extra_actual_arguments(self,
callee: CallableType,
actual_types: List[Type],
actual_kinds: List[int],
actual_names: Optional[Sequence[Optional[str]]],
all_actuals: List[int],
context: Context,
messages: Optional[MessageBuilder]) -> Tuple[bool, bool]:
"""Check for extra actual arguments.
Return tuple (was everything ok,
was there an extra keyword argument error [used to avoid duplicate errors]).
"""
is_unexpected_arg_error = False # Keep track of errors to avoid duplicate errors
ok = True # False if we've found any error
for i, kind in enumerate(actual_kinds):
if i not in all_actuals and (
kind != nodes.ARG_STAR or
                    # We accept iterables other than tuple (including Any) as star
                    # arguments because they could be empty, resulting in no arguments.
is_non_empty_tuple(actual_types[i])):
# Extra actual: not matched by a formal argument.
ok = False
if kind != nodes.ARG_NAMED:
if messages:
messages.too_many_arguments(callee, context)
else:
if messages:
assert actual_names, "Internal error: named kinds without names given"
act_name = actual_names[i]
assert act_name is not None
act_type = actual_types[i]
messages.unexpected_keyword_argument(callee, act_name, act_type, context)
is_unexpected_arg_error = True
elif ((kind == nodes.ARG_STAR and nodes.ARG_STAR not in callee.arg_kinds)
or kind == nodes.ARG_STAR2):
actual_type = get_proper_type(actual_types[i])
if isinstance(actual_type, (TupleType, TypedDictType)):
if all_actuals.count(i) < len(actual_type.items):
# Too many tuple/dict items as some did not match.
if messages:
if (kind != nodes.ARG_STAR2
or not isinstance(actual_type, TypedDictType)):
messages.too_many_arguments(callee, context)
else:
messages.too_many_arguments_from_typed_dict(callee, actual_type,
context)
is_unexpected_arg_error = True
ok = False
# *args/**kwargs can be applied even if the function takes a fixed
# number of positional arguments. This may succeed at runtime.
return ok, is_unexpected_arg_error
def check_argument_types(self,
arg_types: List[Type],
arg_kinds: List[int],
args: List[Expression],
callee: CallableType,
formal_to_actual: List[List[int]],
context: Context,
messages: Optional[MessageBuilder] = None,
check_arg: Optional[ArgChecker] = None) -> None:
"""Check argument types against a callable type.
Report errors if the argument types are not compatible.
"""
messages = messages or self.msg
check_arg = check_arg or self.check_arg
# Keep track of consumed tuple *arg items.
mapper = ArgTypeExpander()
for i, actuals in enumerate(formal_to_actual):
for actual in actuals:
actual_type = arg_types[actual]
if actual_type is None:
continue # Some kind of error was already reported.
actual_kind = arg_kinds[actual]
# Check that a *arg is valid as varargs.
if (actual_kind == nodes.ARG_STAR and
not self.is_valid_var_arg(actual_type)):
messages.invalid_var_arg(actual_type, context)
if (actual_kind == nodes.ARG_STAR2 and
not self.is_valid_keyword_var_arg(actual_type)):
is_mapping = is_subtype(actual_type, self.chk.named_type('typing.Mapping'))
messages.invalid_keyword_var_arg(actual_type, is_mapping, context)
expanded_actual = mapper.expand_actual_type(
actual_type, actual_kind,
callee.arg_names[i], callee.arg_kinds[i])
check_arg(expanded_actual, actual_type, arg_kinds[actual],
callee.arg_types[i],
actual + 1, i + 1, callee, args[actual], context, messages)
def check_arg(self,
caller_type: Type,
original_caller_type: Type,
caller_kind: int,
callee_type: Type,
n: int,
m: int,
callee: CallableType,
context: Context,
outer_context: Context,
messages: MessageBuilder) -> None:
"""Check the type of a single argument in a call."""
caller_type = get_proper_type(caller_type)
original_caller_type = get_proper_type(original_caller_type)
callee_type = get_proper_type(callee_type)
if isinstance(caller_type, DeletedType):
messages.deleted_as_rvalue(caller_type, context)
        # Only a non-abstract, non-protocol class can be given where Type[...] is expected...
elif (isinstance(caller_type, CallableType) and isinstance(callee_type, TypeType) and
caller_type.is_type_obj() and
(caller_type.type_object().is_abstract or caller_type.type_object().is_protocol) and
isinstance(callee_type.item, Instance) and
(callee_type.item.type.is_abstract or callee_type.item.type.is_protocol)):
self.msg.concrete_only_call(callee_type, context)
elif not is_subtype(caller_type, callee_type):
if self.chk.should_suppress_optional_error([caller_type, callee_type]):
return
code = messages.incompatible_argument(n,
m,
callee,
original_caller_type,
caller_kind,
context=context,
outer_context=outer_context)
messages.incompatible_argument_note(original_caller_type, callee_type, context,
code=code)
def check_overload_call(self,
callee: Overloaded,
args: List[Expression],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]],
callable_name: Optional[str],
object_type: Optional[Type],
context: Context,
arg_messages: MessageBuilder) -> Tuple[Type, Type]:
"""Checks a call to an overloaded function."""
arg_types = self.infer_arg_types_in_empty_context(args)
# Step 1: Filter call targets to remove ones where the argument counts don't match
plausible_targets = self.plausible_overload_call_targets(arg_types, arg_kinds,
arg_names, callee)
# Step 2: If the arguments contain a union, we try performing union math first,
# instead of picking the first matching overload.
# This is because picking the first overload often ends up being too greedy:
# for example, when we have a fallback alternative that accepts an unrestricted
# typevar. See https://github.com/python/mypy/issues/4063 for related discussion.
erased_targets = None # type: Optional[List[CallableType]]
unioned_result = None # type: Optional[Tuple[Type, Type]]
union_interrupted = False # did we try all union combinations?
if any(self.real_union(arg) for arg in arg_types):
unioned_errors = arg_messages.clean_copy()
try:
unioned_return = self.union_overload_result(plausible_targets, args,
arg_types, arg_kinds, arg_names,
callable_name, object_type,
context,
arg_messages=unioned_errors)
except TooManyUnions:
union_interrupted = True
else:
# Record if we succeeded. Next we need to see if maybe normal procedure
# gives a narrower type.
if unioned_return:
# TODO: fix signature of zip() in typeshed.
returns, inferred_types = cast(Any, zip)(*unioned_return)
# Note that we use `combine_function_signatures` instead of just returning
# a union of inferred callables because for example a call
# Union[int -> int, str -> str](Union[int, str]) is invalid and
# we don't want to introduce internal inconsistencies.
unioned_result = (make_simplified_union(list(returns),
context.line,
context.column),
self.combine_function_signatures(inferred_types))
# Step 3: We try checking each branch one-by-one.
inferred_result = self.infer_overload_return_type(plausible_targets, args, arg_types,
arg_kinds, arg_names, callable_name,
object_type, context, arg_messages)
        # If any of the checks succeed, stop early.
if inferred_result is not None and unioned_result is not None:
# Both unioned and direct checks succeeded, choose the more precise type.
if (is_subtype(inferred_result[0], unioned_result[0]) and
not isinstance(get_proper_type(inferred_result[0]), AnyType)):
return inferred_result
return unioned_result
elif unioned_result is not None:
return unioned_result
elif inferred_result is not None:
return inferred_result
# Step 4: Failure. At this point, we know there is no match. We fall back to trying
# to find a somewhat plausible overload target using the erased types
# so we can produce a nice error message.
#
# For example, suppose the user passes a value of type 'List[str]' into an
# overload with signatures f(x: int) -> int and f(x: List[int]) -> List[int].
#
# Neither alternative matches, but we can guess the user probably wants the
# second one.
erased_targets = self.overload_erased_call_targets(plausible_targets, arg_types,
arg_kinds, arg_names, args, context)
        # Step 5: We try to infer a second-best alternative if possible. If not, fall back
# to using 'Any'.
if len(erased_targets) > 0:
# Pick the first plausible erased target as the fallback
# TODO: Adjust the error message here to make it clear there was no match.
# In order to do this, we need to find a clean way of associating
# a note with whatever error message 'self.check_call' will generate.
# In particular, the note's line and column numbers need to be the same
# as the error's.
target = erased_targets[0] # type: Type
else:
# There was no plausible match: give up
target = AnyType(TypeOfAny.from_error)
if not self.chk.should_suppress_optional_error(arg_types):
if not is_operator_method(callable_name):
code = None
else:
code = codes.OPERATOR
arg_messages.no_variant_matches_arguments(
plausible_targets, callee, arg_types, context, code=code)
result = self.check_call(target, args, arg_kinds, context, arg_names,
arg_messages=arg_messages,
callable_name=callable_name,
object_type=object_type)
if union_interrupted:
self.chk.fail("Not all union combinations were tried"
" because there are too many unions", context)
return result
def plausible_overload_call_targets(self,
arg_types: List[Type],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]],
overload: Overloaded) -> List[CallableType]:
"""Returns all overload call targets that having matching argument counts.
If the given args contains a star-arg (*arg or **kwarg argument), this method
will ensure all star-arg overloads appear at the start of the list, instead
of their usual location.
The only exception is if the starred argument is something like a Tuple or a
NamedTuple, which has a definitive "shape". If so, we don't move the corresponding
alternative to the front since we can infer a more precise match using the original
order."""
def has_shape(typ: Type) -> bool:
typ = get_proper_type(typ)
return (isinstance(typ, TupleType) or isinstance(typ, TypedDictType)
or (isinstance(typ, Instance) and typ.type.is_named_tuple))
matches = [] # type: List[CallableType]
star_matches = [] # type: List[CallableType]
args_have_var_arg = False
args_have_kw_arg = False
for kind, typ in zip(arg_kinds, arg_types):
if kind == ARG_STAR and not has_shape(typ):
args_have_var_arg = True
if kind == ARG_STAR2 and not has_shape(typ):
args_have_kw_arg = True
for typ in overload.items():
formal_to_actual = map_actuals_to_formals(arg_kinds, arg_names,
typ.arg_kinds, typ.arg_names,
lambda i: arg_types[i])
if self.check_argument_count(typ, arg_types, arg_kinds, arg_names,
formal_to_actual, None, None):
if args_have_var_arg and typ.is_var_arg:
star_matches.append(typ)
elif args_have_kw_arg and typ.is_kw_arg:
star_matches.append(typ)
else:
matches.append(typ)
return star_matches + matches
def infer_overload_return_type(self,
plausible_targets: List[CallableType],
args: List[Expression],
arg_types: List[Type],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]],
callable_name: Optional[str],
object_type: Optional[Type],
context: Context,
arg_messages: Optional[MessageBuilder] = None,
) -> Optional[Tuple[Type, Type]]:
"""Attempts to find the first matching callable from the given list.
If a match is found, returns a tuple containing the result type and the inferred
callee type. (This tuple is meant to be eventually returned by check_call.)
If multiple targets match due to ambiguous Any parameters, returns (AnyType, AnyType).
If no targets match, returns None.
Assumes all of the given targets have argument counts compatible with the caller.
"""
arg_messages = self.msg if arg_messages is None else arg_messages
matches = [] # type: List[CallableType]
return_types = [] # type: List[Type]
inferred_types = [] # type: List[Type]
args_contain_any = any(map(has_any_type, arg_types))
for typ in plausible_targets:
overload_messages = self.msg.clean_copy()
prev_messages = self.msg
assert self.msg is self.chk.msg
self.msg = overload_messages
self.chk.msg = overload_messages
try:
# Passing `overload_messages` as the `arg_messages` parameter doesn't
# seem to reliably catch all possible errors.
# TODO: Figure out why
ret_type, infer_type = self.check_call(
callee=typ,
args=args,
arg_kinds=arg_kinds,
arg_names=arg_names,
context=context,
arg_messages=overload_messages,
callable_name=callable_name,
object_type=object_type)
finally:
self.chk.msg = prev_messages
self.msg = prev_messages
is_match = not overload_messages.is_errors()
if is_match:
# Return early if possible; otherwise record info so we can
# check for ambiguity due to 'Any' below.
if not args_contain_any:
return ret_type, infer_type
matches.append(typ)
return_types.append(ret_type)
inferred_types.append(infer_type)
if len(matches) == 0:
# No match was found
return None
elif any_causes_overload_ambiguity(matches, return_types, arg_types, arg_kinds, arg_names):
# An argument of type or containing the type 'Any' caused ambiguity.
# We try returning a precise type if we can. If not, we give up and just return 'Any'.
if all_same_types(return_types):
return return_types[0], inferred_types[0]
elif all_same_types([erase_type(typ) for typ in return_types]):
return erase_type(return_types[0]), erase_type(inferred_types[0])
else:
return self.check_call(callee=AnyType(TypeOfAny.special_form),
args=args,
arg_kinds=arg_kinds,
arg_names=arg_names,
context=context,
arg_messages=arg_messages,
callable_name=callable_name,
object_type=object_type)
else:
# Success! No ambiguity; return the first match.
return return_types[0], inferred_types[0]
def overload_erased_call_targets(self,
plausible_targets: List[CallableType],
arg_types: List[Type],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]],
args: List[Expression],
context: Context) -> List[CallableType]:
"""Returns a list of all targets that match the caller after erasing types.
Assumes all of the given targets have argument counts compatible with the caller.
"""
matches = [] # type: List[CallableType]
for typ in plausible_targets:
if self.erased_signature_similarity(arg_types, arg_kinds, arg_names, args, typ,
context):
matches.append(typ)
return matches
def union_overload_result(self,
plausible_targets: List[CallableType],
args: List[Expression],
arg_types: List[Type],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]],
callable_name: Optional[str],
object_type: Optional[Type],
context: Context,
arg_messages: Optional[MessageBuilder] = None,
level: int = 0
) -> Optional[List[Tuple[Type, Type]]]:
"""Accepts a list of overload signatures and attempts to match calls by destructuring
the first union.
        Return a list of (<return type>, <inferred variant type>) if the call succeeds for
        every item of the destructured union. Returns None if there is no match.
"""
# Step 1: If we are already too deep, then stop immediately. Otherwise mypy might
        # hang for a long time because of a weird overload call. The caller will get
# the exception and generate an appropriate note message, if needed.
if level >= MAX_UNIONS:
raise TooManyUnions
# Step 2: Find position of the first union in arguments. Return the normal inferred
# type if no more unions left.
for idx, typ in enumerate(arg_types):
if self.real_union(typ):
break
else:
# No unions in args, just fall back to normal inference
with self.type_overrides_set(args, arg_types):
res = self.infer_overload_return_type(plausible_targets, args, arg_types,
arg_kinds, arg_names, callable_name,
object_type, context, arg_messages)
if res is not None:
return [res]
return None
# Step 3: Try a direct match before splitting to avoid unnecessary union splits
# and save performance.
with self.type_overrides_set(args, arg_types):
direct = self.infer_overload_return_type(plausible_targets, args, arg_types,
arg_kinds, arg_names, callable_name,
object_type, context, arg_messages)
if direct is not None and not isinstance(get_proper_type(direct[0]),
(UnionType, AnyType)):
            # We only return non-unions early, to avoid a greedy match.
return [direct]
# Step 4: Split the first remaining union type in arguments into items and
# try to match each item individually (recursive).
first_union = get_proper_type(arg_types[idx])
assert isinstance(first_union, UnionType)
res_items = []
for item in first_union.relevant_items():
new_arg_types = arg_types.copy()
new_arg_types[idx] = item
sub_result = self.union_overload_result(plausible_targets, args, new_arg_types,
arg_kinds, arg_names, callable_name,
object_type, context, arg_messages,
level + 1)
if sub_result is not None:
res_items.extend(sub_result)
else:
                # Some item doesn't match; return early.
return None
# Step 5: If splitting succeeded, then filter out duplicate items before returning.
seen = set() # type: Set[Tuple[Type, Type]]
result = []
for pair in res_items:
if pair not in seen:
seen.add(pair)
result.append(pair)
return result
def real_union(self, typ: Type) -> bool:
typ = get_proper_type(typ)
return isinstance(typ, UnionType) and len(typ.relevant_items()) > 1
@contextmanager
def type_overrides_set(self, exprs: Sequence[Expression],
overrides: Sequence[Type]) -> Iterator[None]:
"""Set _temporary_ type overrides for given expressions."""
assert len(exprs) == len(overrides)
for expr, typ in zip(exprs, overrides):
self.type_overrides[expr] = typ
try:
yield
finally:
for expr in exprs:
del self.type_overrides[expr]
def combine_function_signatures(self, types: Sequence[Type]) -> Union[AnyType, CallableType]:
"""Accepts a list of function signatures and attempts to combine them together into a
new CallableType consisting of the union of all of the given arguments and return types.
If there is at least one non-callable type, return Any (this can happen if there is
an ambiguity because of Any in arguments).
"""
assert types, "Trying to merge no callables"
types = get_proper_types(types)
if not all(isinstance(c, CallableType) for c in types):
return AnyType(TypeOfAny.special_form)
callables = cast(Sequence[CallableType], types)
if len(callables) == 1:
return callables[0]
# Note: we are assuming here that if a user uses some TypeVar 'T' in
# two different functions, they meant for that TypeVar to mean the
# same thing.
#
# This function will make sure that all instances of that TypeVar 'T'
# refer to the same underlying TypeVarType and TypeVarDef objects to
# simplify the union-ing logic below.
#
# (If the user did *not* mean for 'T' to be consistently bound to the
# same type in their overloads, well, their code is probably too
# confusing and ought to be re-written anyways.)
callables, variables = merge_typevars_in_callables_by_name(callables)
new_args = [[] for _ in range(len(callables[0].arg_types))] # type: List[List[Type]]
new_kinds = list(callables[0].arg_kinds)
new_returns = [] # type: List[Type]
too_complex = False
for target in callables:
# We fall back to Callable[..., Union[<returns>]] if the functions do not have
# the exact same signature. The only exception is if one arg is optional and
# the other is positional: in that case, we continue unioning (and expect a
# positional arg).
# TODO: Enhance the merging logic to handle a wider variety of signatures.
if len(new_kinds) != len(target.arg_kinds):
too_complex = True
break
for i, (new_kind, target_kind) in enumerate(zip(new_kinds, target.arg_kinds)):
if new_kind == target_kind:
continue
elif new_kind in (ARG_POS, ARG_OPT) and target_kind in (ARG_POS, ARG_OPT):
new_kinds[i] = ARG_POS
else:
too_complex = True
break
if too_complex:
break # outer loop
for i, arg in enumerate(target.arg_types):
new_args[i].append(arg)
new_returns.append(target.ret_type)
union_return = make_simplified_union(new_returns)
if too_complex:
any = AnyType(TypeOfAny.special_form)
return callables[0].copy_modified(
arg_types=[any, any],
arg_kinds=[ARG_STAR, ARG_STAR2],
arg_names=[None, None],
ret_type=union_return,
variables=variables,
implicit=True)
final_args = []
for args_list in new_args:
new_type = make_simplified_union(args_list)
final_args.append(new_type)
return callables[0].copy_modified(
arg_types=final_args,
arg_kinds=new_kinds,
ret_type=union_return,
variables=variables,
implicit=True)
def erased_signature_similarity(self,
arg_types: List[Type],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]],
args: List[Expression],
callee: CallableType,
context: Context) -> bool:
"""Determine whether arguments could match the signature at runtime, after
erasing types."""
formal_to_actual = map_actuals_to_formals(arg_kinds,
arg_names,
callee.arg_kinds,
callee.arg_names,
lambda i: arg_types[i])
if not self.check_argument_count(callee, arg_types, arg_kinds, arg_names,
formal_to_actual, None, None):
# Too few or many arguments -> no match.
return False
def check_arg(caller_type: Type,
                      original_caller_type: Type,
caller_kind: int,
callee_type: Type,
n: int,
m: int,
callee: CallableType,
context: Context,
outer_context: Context,
messages: MessageBuilder) -> None:
if not arg_approximate_similarity(caller_type, callee_type):
# No match -- exit early since none of the remaining work can change
# the result.
raise Finished
try:
self.check_argument_types(arg_types, arg_kinds, args, callee,
formal_to_actual, context=context, check_arg=check_arg)
return True
except Finished:
return False
def apply_generic_arguments(self, callable: CallableType, types: Sequence[Optional[Type]],
context: Context, skip_unsatisfied: bool = False) -> CallableType:
"""Simple wrapper around mypy.applytype.apply_generic_arguments."""
return applytype.apply_generic_arguments(callable, types,
self.msg.incompatible_typevar_value, context,
skip_unsatisfied=skip_unsatisfied)
def check_any_type_call(self, args: List[Expression], callee: Type) -> Tuple[Type, Type]:
self.infer_arg_types_in_empty_context(args)
callee = get_proper_type(callee)
if isinstance(callee, AnyType):
return (AnyType(TypeOfAny.from_another_any, source_any=callee),
AnyType(TypeOfAny.from_another_any, source_any=callee))
else:
return AnyType(TypeOfAny.special_form), AnyType(TypeOfAny.special_form)
def check_union_call(self,
callee: UnionType,
args: List[Expression],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]],
context: Context,
arg_messages: MessageBuilder) -> Tuple[Type, Type]:
self.msg.disable_type_names += 1
results = [self.check_call(subtype, args, arg_kinds, context, arg_names,
arg_messages=arg_messages)
for subtype in callee.relevant_items()]
self.msg.disable_type_names -= 1
return (make_simplified_union([res[0] for res in results]),
callee)
def visit_member_expr(self, e: MemberExpr, is_lvalue: bool = False) -> Type:
"""Visit member expression (of form e.id)."""
self.chk.module_refs.update(extract_refexpr_names(e))
result = self.analyze_ordinary_member_access(e, is_lvalue)
return self.narrow_type_from_binder(e, result)
def analyze_ordinary_member_access(self, e: MemberExpr,
is_lvalue: bool) -> Type:
"""Analyse member expression or member lvalue."""
if e.kind is not None:
# This is a reference to a module attribute.
return self.analyze_ref_expr(e)
else:
# This is a reference to a non-module attribute.
original_type = self.accept(e.expr)
base = e.expr
module_symbol_table = None
if isinstance(base, RefExpr) and isinstance(base.node, MypyFile):
module_symbol_table = base.node.names
member_type = analyze_member_access(
e.name, original_type, e, is_lvalue, False, False,
self.msg, original_type=original_type, chk=self.chk,
in_literal_context=self.is_literal_context(),
module_symbol_table=module_symbol_table)
return member_type
def analyze_external_member_access(self, member: str, base_type: Type,
context: Context) -> Type:
"""Analyse member access that is external, i.e. it cannot
refer to private definitions. Return the result type.
"""
# TODO remove; no private definitions in mypy
return analyze_member_access(member, base_type, context, False, False, False,
self.msg, original_type=base_type, chk=self.chk,
in_literal_context=self.is_literal_context())
def is_literal_context(self) -> bool:
return is_literal_type_like(self.type_context[-1])
def infer_literal_expr_type(self, value: LiteralValue, fallback_name: str) -> Type:
"""Analyzes the given literal expression and determines if we should be
inferring an Instance type, a Literal[...] type, or an Instance that
remembers the original literal. We...
1. ...Infer a normal Instance in most circumstances.
2. ...Infer a Literal[...] if we're in a literal context. For example, if we
were analyzing the "3" in "foo(3)" where "foo" has a signature of
"def foo(Literal[3]) -> None", we'd want to infer that the "3" has a
type of Literal[3] instead of Instance.
3. ...Infer an Instance that remembers the original Literal if we're declaring
a Final variable with an inferred type -- for example, "bar" in "bar: Final = 3"
would be assigned an Instance that remembers it originated from a '3'. See
the comments in Instance's constructor for more details.
"""
typ = self.named_type(fallback_name)
if self.is_literal_context():
return LiteralType(value=value, fallback=typ)
else:
return typ.copy_modified(last_known_value=LiteralType(
value=value,
fallback=typ,
line=typ.line,
column=typ.column,
))
def concat_tuples(self, left: TupleType, right: TupleType) -> TupleType:
"""Concatenate two fixed length tuples."""
return TupleType(items=left.items + right.items,
fallback=self.named_type('builtins.tuple'))
def visit_int_expr(self, e: IntExpr) -> Type:
"""Type check an integer literal (trivial)."""
return self.infer_literal_expr_type(e.value, 'builtins.int')
def visit_str_expr(self, e: StrExpr) -> Type:
"""Type check a string literal (trivial)."""
return self.infer_literal_expr_type(e.value, 'builtins.str')
def visit_bytes_expr(self, e: BytesExpr) -> Type:
"""Type check a bytes literal (trivial)."""
return self.infer_literal_expr_type(e.value, 'builtins.bytes')
def visit_unicode_expr(self, e: UnicodeExpr) -> Type:
"""Type check a unicode literal (trivial)."""
return self.infer_literal_expr_type(e.value, 'builtins.unicode')
def visit_float_expr(self, e: FloatExpr) -> Type:
"""Type check a float literal (trivial)."""
return self.named_type('builtins.float')
def visit_complex_expr(self, e: ComplexExpr) -> Type:
"""Type check a complex literal."""
return self.named_type('builtins.complex')
def visit_ellipsis(self, e: EllipsisExpr) -> Type:
"""Type check '...'."""
if self.chk.options.python_version[0] >= 3:
return self.named_type('builtins.ellipsis')
else:
# '...' is not valid in normal Python 2 code, but it can
# be used in stubs. The parser makes sure that we only
# get this far if we are in a stub, and we can safely
# return 'object' as ellipsis is special cased elsewhere.
# The builtins.ellipsis type does not exist in Python 2.
return self.named_type('builtins.object')
def visit_op_expr(self, e: OpExpr) -> Type:
"""Type check a binary operator expression."""
if e.op == 'and' or e.op == 'or':
return self.check_boolean_op(e, e)
if e.op == '*' and isinstance(e.left, ListExpr):
# Expressions of form [...] * e get special type inference.
return self.check_list_multiply(e)
if e.op == '%':
pyversion = self.chk.options.python_version
if pyversion[0] == 3:
if isinstance(e.left, BytesExpr) and pyversion[1] >= 5:
return self.strfrm_checker.check_str_interpolation(e.left, e.right)
if isinstance(e.left, StrExpr):
return self.strfrm_checker.check_str_interpolation(e.left, e.right)
elif pyversion[0] <= 2:
if isinstance(e.left, (StrExpr, BytesExpr, UnicodeExpr)):
return self.strfrm_checker.check_str_interpolation(e.left, e.right)
left_type = self.accept(e.left)
proper_left_type = get_proper_type(left_type)
if isinstance(proper_left_type, TupleType) and e.op == '+':
left_add_method = proper_left_type.partial_fallback.type.get('__add__')
if left_add_method and left_add_method.fullname == 'builtins.tuple.__add__':
proper_right_type = get_proper_type(self.accept(e.right))
if isinstance(proper_right_type, TupleType):
right_radd_method = proper_right_type.partial_fallback.type.get('__radd__')
if right_radd_method is None:
return self.concat_tuples(proper_left_type, proper_right_type)
if e.op in nodes.op_methods:
method = self.get_operator_method(e.op)
result, method_type = self.check_op(method, left_type, e.right, e,
allow_reverse=True)
e.method_type = method_type
return result
else:
raise RuntimeError('Unknown operator {}'.format(e.op))
def visit_comparison_expr(self, e: ComparisonExpr) -> Type:
"""Type check a comparison expression.
        Comparison expressions are type checked consecutive-pair-wise.
        That is, 'a < b > c == d' is checked as 'a < b and b > c and c == d'.
"""
result = None # type: Optional[Type]
sub_result = None # type: Optional[Type]
# Check each consecutive operand pair and their operator
for left, right, operator in zip(e.operands, e.operands[1:], e.operators):
left_type = self.accept(left)
method_type = None # type: Optional[mypy.types.Type]
if operator == 'in' or operator == 'not in':
# If the right operand has partial type, look it up without triggering
# a "Need type annotation ..." message, as it would be noise.
right_type = self.find_partial_type_ref_fast_path(right)
if right_type is None:
right_type = self.accept(right) # Validate the right operand
# Keep track of whether we get type check errors (these won't be reported, they
# are just to verify whether something is valid typing wise).
local_errors = self.msg.copy()
local_errors.disable_count = 0
_, method_type = self.check_method_call_by_name(
'__contains__', right_type, [left], [ARG_POS], e, local_errors)
sub_result = self.bool_type()
# Container item type for strict type overlap checks. Note: we need to only
# check for nominal type, because a usual "Unsupported operands for in"
# will be reported for types incompatible with __contains__().
# See testCustomContainsCheckStrictEquality for an example.
cont_type = self.chk.analyze_container_item_type(right_type)
if isinstance(right_type, PartialType):
# We don't really know if this is an error or not, so just shut up.
pass
elif (local_errors.is_errors() and
# is_valid_var_arg is True for any Iterable
self.is_valid_var_arg(right_type)):
_, itertype = self.chk.analyze_iterable_item_type(right)
method_type = CallableType(
[left_type],
[nodes.ARG_POS],
[None],
self.bool_type(),
self.named_type('builtins.function'))
if not is_subtype(left_type, itertype):
self.msg.unsupported_operand_types('in', left_type, right_type, e)
# Only show dangerous overlap if there are no other errors.
elif (not local_errors.is_errors() and cont_type and
self.dangerous_comparison(left_type, cont_type,
original_container=right_type)):
self.msg.dangerous_comparison(left_type, cont_type, 'container', e)
else:
self.msg.add_errors(local_errors)
elif operator in nodes.op_methods:
method = self.get_operator_method(operator)
err_count = self.msg.errors.total_errors()
sub_result, method_type = self.check_op(method, left_type, right, e,
allow_reverse=True)
# Only show dangerous overlap if there are no other errors. See
# testCustomEqCheckStrictEquality for an example.
if self.msg.errors.total_errors() == err_count and operator in ('==', '!='):
right_type = self.accept(right)
# We suppress the error if there is a custom __eq__() method on either
# side. User defined (or even standard library) classes can define this
# to return True for comparisons between non-overlapping types.
if (not custom_special_method(left_type, '__eq__') and
not custom_special_method(right_type, '__eq__')):
# Also flag non-overlapping literals in situations like:
# x: Literal['a', 'b']
# if x == 'c':
# ...
left_type = try_getting_literal(left_type)
right_type = try_getting_literal(right_type)
if self.dangerous_comparison(left_type, right_type):
self.msg.dangerous_comparison(left_type, right_type, 'equality', e)
elif operator == 'is' or operator == 'is not':
right_type = self.accept(right) # validate the right operand
sub_result = self.bool_type()
left_type = try_getting_literal(left_type)
right_type = try_getting_literal(right_type)
if self.dangerous_comparison(left_type, right_type):
self.msg.dangerous_comparison(left_type, right_type, 'identity', e)
method_type = None
else:
raise RuntimeError('Unknown comparison operator {}'.format(operator))
e.method_types.append(method_type)
# Determine type of boolean-and of result and sub_result
if result is None:
result = sub_result
else:
result = join.join_types(result, sub_result)
assert result is not None
return result
def find_partial_type_ref_fast_path(self, expr: Expression) -> Optional[Type]:
"""If expression has a partial generic type, return it without additional checks.
In particular, this does not generate an error about a missing annotation.
Otherwise, return None.
"""
if not isinstance(expr, RefExpr):
return None
if isinstance(expr.node, Var):
result = self.analyze_var_ref(expr.node, expr)
if isinstance(result, PartialType) and result.type is not None:
self.chk.store_type(expr, self.chk.fixup_partial_type(result))
return result
return None
def dangerous_comparison(self, left: Type, right: Type,
original_container: Optional[Type] = None) -> bool:
"""Check for dangerous non-overlapping comparisons like 42 == 'no'.
The original_container is the original container type for 'in' checks
(and None for equality checks).
Rules:
* X and None are overlapping even in strict-optional mode. This is to allow
'assert x is not None' for x defined as 'x = None # type: str' in class body
            (otherwise mypy itself would have a couple dozen errors because of this).
            * Optional[X] and Optional[Y] are non-overlapping if X and Y are
            non-overlapping; although technically None is an overlap, it is most
            likely an error.
* Any overlaps with everything, i.e. always safe.
* Special case: b'abc' in b'cde' is safe.
"""
if not self.chk.options.strict_equality:
return False
left, right = get_proper_types((left, right))
if self.chk.binder.is_unreachable_warning_suppressed():
# We are inside a function that contains type variables with value restrictions in
# its signature. In this case we just suppress all strict-equality checks to avoid
# false positives for code like:
#
# T = TypeVar('T', str, int)
# def f(x: T) -> T:
# if x == 0:
# ...
# return x
#
            # TODO: find a way of disabling the check only for types resulting from the expansion.
return False
if isinstance(left, NoneType) or isinstance(right, NoneType):
return False
if isinstance(left, UnionType) and isinstance(right, UnionType):
left = remove_optional(left)
right = remove_optional(right)
left, right = get_proper_types((left, right))
py2 = self.chk.options.python_version < (3, 0)
if (original_container and has_bytes_component(original_container, py2) and
has_bytes_component(left, py2)):
# We need to special case bytes and bytearray, because 97 in b'abc', b'a' in b'abc',
# b'a' in bytearray(b'abc') etc. all return True (and we want to show the error only
# if the check can _never_ be True).
return False
if isinstance(left, Instance) and isinstance(right, Instance):
# Special case some builtin implementations of AbstractSet.
if (left.type.fullname in OVERLAPPING_TYPES_WHITELIST and
right.type.fullname in OVERLAPPING_TYPES_WHITELIST):
abstract_set = self.chk.lookup_typeinfo('typing.AbstractSet')
left = map_instance_to_supertype(left, abstract_set)
right = map_instance_to_supertype(right, abstract_set)
return not is_overlapping_types(left.args[0], right.args[0])
if isinstance(left, LiteralType) and isinstance(right, LiteralType):
if isinstance(left.value, bool) and isinstance(right.value, bool):
# Comparing different booleans is not dangerous.
return False
return not is_overlapping_types(left, right, ignore_promotions=False)
def get_operator_method(self, op: str) -> str:
if op == '/' and self.chk.options.python_version[0] == 2:
# TODO also check for "from __future__ import division"
return '__div__'
else:
return nodes.op_methods[op]
def check_method_call_by_name(self,
method: str,
base_type: Type,
args: List[Expression],
arg_kinds: List[int],
context: Context,
local_errors: Optional[MessageBuilder] = None,
original_type: Optional[Type] = None
) -> Tuple[Type, Type]:
"""Type check a call to a named method on an object.
Return tuple (result type, inferred method type). The 'original_type'
is used for error messages.
"""
local_errors = local_errors or self.msg
original_type = original_type or base_type
# Unions are special-cased to allow plugins to act on each element of the union.
base_type = get_proper_type(base_type)
if isinstance(base_type, UnionType):
return self.check_union_method_call_by_name(method, base_type,
args, arg_kinds,
context, local_errors, original_type)
method_type = analyze_member_access(method, base_type, context, False, False, True,
local_errors, original_type=original_type,
chk=self.chk,
in_literal_context=self.is_literal_context())
return self.check_method_call(
method, base_type, method_type, args, arg_kinds, context, local_errors)
def check_union_method_call_by_name(self,
method: str,
base_type: UnionType,
args: List[Expression],
arg_kinds: List[int],
context: Context,
local_errors: MessageBuilder,
original_type: Optional[Type] = None
) -> Tuple[Type, Type]:
"""Type check a call to a named method on an object with union type.
This essentially checks the call using check_method_call_by_name() for each
union item and unions the result. We do this to allow plugins to act on
individual union items.
"""
res = [] # type: List[Type]
meth_res = [] # type: List[Type]
for typ in base_type.relevant_items():
# Format error messages consistently with
# mypy.checkmember.analyze_union_member_access().
local_errors.disable_type_names += 1
item, meth_item = self.check_method_call_by_name(method, typ, args, arg_kinds,
context, local_errors,
original_type)
local_errors.disable_type_names -= 1
res.append(item)
meth_res.append(meth_item)
return make_simplified_union(res), make_simplified_union(meth_res)
def check_method_call(self,
method_name: str,
base_type: Type,
method_type: Type,
args: List[Expression],
arg_kinds: List[int],
context: Context,
local_errors: Optional[MessageBuilder] = None) -> Tuple[Type, Type]:
"""Type check a call to a method with the given name and type on an object.
Return tuple (result type, inferred method type).
"""
callable_name = self.method_fullname(base_type, method_name)
object_type = base_type if callable_name is not None else None
# Try to refine the method signature using plugin hooks before checking the call.
method_type = self.transform_callee_type(
callable_name, method_type, args, arg_kinds, context, object_type=object_type)
return self.check_call(method_type, args, arg_kinds,
context, arg_messages=local_errors,
callable_name=callable_name, object_type=object_type)
def check_op_reversible(self,
op_name: str,
left_type: Type,
left_expr: Expression,
right_type: Type,
right_expr: Expression,
context: Context,
msg: MessageBuilder) -> Tuple[Type, Type]:
def make_local_errors() -> MessageBuilder:
"""Creates a new MessageBuilder object."""
local_errors = msg.clean_copy()
local_errors.disable_count = 0
return local_errors
def lookup_operator(op_name: str, base_type: Type) -> Optional[Type]:
"""Looks up the given operator and returns the corresponding type,
if it exists."""
# This check is an important performance optimization,
# even though it is mostly a subset of
# analyze_member_access.
# TODO: Find a way to remove this call without performance implications.
if not self.has_member(base_type, op_name):
return None
local_errors = make_local_errors()
member = analyze_member_access(
name=op_name,
typ=base_type,
is_lvalue=False,
is_super=False,
is_operator=True,
original_type=base_type,
context=context,
msg=local_errors,
chk=self.chk,
in_literal_context=self.is_literal_context()
)
if local_errors.is_errors():
return None
else:
return member
def lookup_definer(typ: Instance, attr_name: str) -> Optional[str]:
"""Returns the name of the class that contains the actual definition of attr_name.
            So if class A defines foo and class B subclasses A, running
            'lookup_definer(B, "foo")' would return the full name of A.
However, if B were to override and redefine foo, that method call would
return the full name of B instead.
If the attr name is not present in the given class or its MRO, returns None.
"""
for cls in typ.type.mro:
if cls.names.get(attr_name):
return cls.fullname
return None
left_type = get_proper_type(left_type)
right_type = get_proper_type(right_type)
        # If either the LHS or the RHS is Any, we can't really conclude anything
# about the operation since the Any type may or may not define an
# __op__ or __rop__ method. So, we punt and return Any instead.
if isinstance(left_type, AnyType):
any_type = AnyType(TypeOfAny.from_another_any, source_any=left_type)
return any_type, any_type
if isinstance(right_type, AnyType):
any_type = AnyType(TypeOfAny.from_another_any, source_any=right_type)
return any_type, any_type
# STEP 1:
# We start by getting the __op__ and __rop__ methods, if they exist.
rev_op_name = self.get_reverse_op_method(op_name)
left_op = lookup_operator(op_name, left_type)
right_op = lookup_operator(rev_op_name, right_type)
# STEP 2a:
# We figure out in which order Python will call the operator methods. As it
# turns out, it's not as simple as just trying to call __op__ first and
# __rop__ second.
#
# We store the determined order inside the 'variants_raw' variable,
# which records tuples containing the method, base type, and the argument.
bias_right = is_proper_subtype(right_type, left_type)
if op_name in nodes.op_methods_that_shortcut and is_same_type(left_type, right_type):
# When we do "A() + A()", for example, Python will only call the __add__ method,
# never the __radd__ method.
#
# This is the case even if the __add__ method is completely missing and the __radd__
# method is defined.
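            # As a concrete illustration, '1 + 2' only ever consults int.__add__;
            # int.__radd__ is not tried.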
variants_raw = [
(left_op, left_type, right_expr)
]
elif (is_subtype(right_type, left_type)
and isinstance(left_type, Instance)
and isinstance(right_type, Instance)
and lookup_definer(left_type, op_name) != lookup_definer(right_type, rev_op_name)):
# When we do "A() + B()" where B is a subclass of B, we'll actually try calling
# B's __radd__ method first, but ONLY if B explicitly defines or overrides the
# __radd__ method.
#
# This mechanism lets subclasses "refine" the expected outcome of the operation, even
# if they're located on the RHS.
variants_raw = [
(right_op, right_type, left_expr),
(left_op, left_type, right_expr),
]
else:
# In all other cases, we do the usual thing and call __add__ first and
# __radd__ second when doing "A() + B()".
variants_raw = [
(left_op, left_type, right_expr),
(right_op, right_type, left_expr),
]
# STEP 2b:
# When running Python 2, we might also try calling the __cmp__ method.
is_python_2 = self.chk.options.python_version[0] == 2
if is_python_2 and op_name in nodes.ops_falling_back_to_cmp:
cmp_method = nodes.comparison_fallback_method
left_cmp_op = lookup_operator(cmp_method, left_type)
right_cmp_op = lookup_operator(cmp_method, right_type)
if bias_right:
variants_raw.append((right_cmp_op, right_type, left_expr))
variants_raw.append((left_cmp_op, left_type, right_expr))
else:
variants_raw.append((left_cmp_op, left_type, right_expr))
variants_raw.append((right_cmp_op, right_type, left_expr))
# STEP 3:
# We now filter out all non-existent operators. The 'variants' list contains
# all operator methods that are actually present, in the order that Python
# attempts to invoke them.
variants = [(op, obj, arg) for (op, obj, arg) in variants_raw if op is not None]
# STEP 4:
# We now try invoking each one. If an operation succeeds, end early and return
# the corresponding result. Otherwise, return the result and errors associated
# with the first entry.
errors = []
results = []
for method, obj, arg in variants:
local_errors = make_local_errors()
result = self.check_method_call(
op_name, obj, method, [arg], [ARG_POS], context, local_errors)
if local_errors.is_errors():
errors.append(local_errors)
results.append(result)
else:
return result
        # We have finished invoking the operators above without an early return. Therefore,
        # we check whether either the LHS or the RHS is an Instance that falls back to Any;
        # if so, we also return Any.
if ((isinstance(left_type, Instance) and left_type.type.fallback_to_any) or
(isinstance(right_type, Instance) and right_type.type.fallback_to_any)):
any_type = AnyType(TypeOfAny.special_form)
return any_type, any_type
# STEP 4b:
        # Sometimes, the variants list is empty. In that case, we fall back to attempting to
# call the __op__ method (even though it's missing).
if not variants:
local_errors = make_local_errors()
result = self.check_method_call_by_name(
op_name, left_type, [right_expr], [ARG_POS], context, local_errors)
if local_errors.is_errors():
errors.append(local_errors)
results.append(result)
else:
# In theory, we should never enter this case, but it seems
# we sometimes do, when dealing with Type[...]? E.g. see
# check-classes.testTypeTypeComparisonWorks.
#
# This is probably related to the TODO in lookup_operator(...)
# up above.
#
# TODO: Remove this extra case
return result
msg.add_errors(errors[0])
if len(results) == 1:
return results[0]
else:
error_any = AnyType(TypeOfAny.from_error)
result = error_any, error_any
return result
def check_op(self, method: str, base_type: Type,
arg: Expression, context: Context,
allow_reverse: bool = False) -> Tuple[Type, Type]:
"""Type check a binary operation which maps to a method call.
Return tuple (result type, inferred operator method type).
"""
if allow_reverse:
left_variants = [base_type]
base_type = get_proper_type(base_type)
if isinstance(base_type, UnionType):
left_variants = [item for item in
flatten_nested_unions(base_type.relevant_items(),
handle_type_alias_type=True)]
right_type = self.accept(arg)
# Step 1: We first try leaving the right arguments alone and destructure
# just the left ones. (Mypy can sometimes perform some more precise inference
# if we leave the right operands a union -- see testOperatorWithEmptyListAndSum.)
msg = self.msg.clean_copy()
msg.disable_count = 0
all_results = []
all_inferred = []
for left_possible_type in left_variants:
result, inferred = self.check_op_reversible(
op_name=method,
left_type=left_possible_type,
left_expr=TempNode(left_possible_type, context=context),
right_type=right_type,
right_expr=arg,
context=context,
msg=msg)
all_results.append(result)
all_inferred.append(inferred)
if not msg.is_errors():
results_final = make_simplified_union(all_results)
inferred_final = make_simplified_union(all_inferred)
return results_final, inferred_final
# Step 2: If that fails, we try again but also destructure the right argument.
# This is also necessary to make certain edge cases work -- see
# testOperatorDoubleUnionInterwovenUnionAdd, for example.
# Note: We want to pass in the original 'arg' for 'left_expr' and 'right_expr'
# whenever possible so that plugins and similar things can introspect on the original
# node if possible.
#
# We don't do the same for the base expression because it could lead to weird
# type inference errors -- e.g. see 'testOperatorDoubleUnionSum'.
# TODO: Can we use `type_overrides_set()` here?
right_variants = [(right_type, arg)]
right_type = get_proper_type(right_type)
if isinstance(right_type, UnionType):
right_variants = [(item, TempNode(item, context=context))
for item in flatten_nested_unions(right_type.relevant_items(),
handle_type_alias_type=True)]
msg = self.msg.clean_copy()
msg.disable_count = 0
all_results = []
all_inferred = []
for left_possible_type in left_variants:
for right_possible_type, right_expr in right_variants:
result, inferred = self.check_op_reversible(
op_name=method,
left_type=left_possible_type,
left_expr=TempNode(left_possible_type, context=context),
right_type=right_possible_type,
right_expr=right_expr,
context=context,
msg=msg)
all_results.append(result)
all_inferred.append(inferred)
if msg.is_errors():
self.msg.add_errors(msg)
if len(left_variants) >= 2 and len(right_variants) >= 2:
self.msg.warn_both_operands_are_from_unions(context)
elif len(left_variants) >= 2:
self.msg.warn_operand_was_from_union("Left", base_type, context=right_expr)
elif len(right_variants) >= 2:
self.msg.warn_operand_was_from_union("Right", right_type, context=right_expr)
# See the comment in 'check_overload_call' for more details on why
            # we call 'combine_function_signatures' instead of just unioning the inferred
# callable types.
results_final = make_simplified_union(all_results)
inferred_final = self.combine_function_signatures(all_inferred)
return results_final, inferred_final
else:
return self.check_method_call_by_name(
method=method,
base_type=base_type,
args=[arg],
arg_kinds=[ARG_POS],
context=context,
local_errors=self.msg,
)
def get_reverse_op_method(self, method: str) -> str:
if method == '__div__' and self.chk.options.python_version[0] == 2:
return '__rdiv__'
else:
return nodes.reverse_op_methods[method]
def check_boolean_op(self, e: OpExpr, context: Context) -> Type:
"""Type check a boolean operation ('and' or 'or')."""
# A boolean operation can evaluate to either of the operands.
        # We use the current type context to guide the type inference of
# the left operand. We also use the left operand type to guide the type
# inference of the right operand so that expressions such as
# '[1] or []' are inferred correctly.
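        # For example (a rough sketch), in
        #
        #     x: List[int] = [1] or []
        #
        # the left operand's List[int] type is used as context for the empty list
        # on the right, so the whole expression is List[int] rather than a list of
        # <nothing>.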
ctx = self.type_context[-1]
left_type = self.accept(e.left, ctx)
assert e.op in ('and', 'or') # Checked by visit_op_expr
if e.op == 'and':
right_map, left_map = self.chk.find_isinstance_check(e.left)
restricted_left_type = false_only(left_type)
result_is_left = not left_type.can_be_true
elif e.op == 'or':
left_map, right_map = self.chk.find_isinstance_check(e.left)
restricted_left_type = true_only(left_type)
result_is_left = not left_type.can_be_false
# If right_map is None then we know mypy considers the right branch
# to be unreachable and therefore any errors found in the right branch
# should be suppressed.
#
# Note that we perform these checks *before* we take into account
# the analysis from the semanal phase below. We assume that nodes
# marked as unreachable during semantic analysis were done so intentionally.
# So, we shouldn't report an error.
if self.chk.options.warn_unreachable:
if left_map is None:
self.msg.redundant_left_operand(e.op, e.left)
if right_map is None:
self.msg.redundant_right_operand(e.op, e.right)
if e.right_unreachable:
right_map = None
elif e.right_always:
left_map = None
if right_map is None:
self.msg.disable_errors()
try:
right_type = self.analyze_cond_branch(right_map, e.right, left_type)
finally:
if right_map is None:
self.msg.enable_errors()
if right_map is None:
# The boolean expression is statically known to be the left value
assert left_map is not None # find_isinstance_check guarantees this
return left_type
if left_map is None:
# The boolean expression is statically known to be the right value
assert right_map is not None # find_isinstance_check guarantees this
return right_type
if isinstance(restricted_left_type, UninhabitedType):
# The left operand can never be the result
return right_type
elif result_is_left:
# The left operand is always the result
return left_type
else:
return make_simplified_union([restricted_left_type, right_type])
def check_list_multiply(self, e: OpExpr) -> Type:
"""Type check an expression of form '[...] * e'.
Type inference is special-cased for this common construct.
"""
right_type = self.accept(e.right)
if is_subtype(right_type, self.named_type('builtins.int')):
# Special case: [...] * <int value>. Use the type context of the
# OpExpr, since the multiplication does not affect the type.
left_type = self.accept(e.left, type_context=self.type_context[-1])
else:
left_type = self.accept(e.left)
result, method_type = self.check_op('__mul__', left_type, e.right, e)
e.method_type = method_type
return result
def visit_assignment_expr(self, e: AssignmentExpr) -> Type:
value = self.accept(e.value)
self.chk.check_assignment(e.target, e.value)
self.chk.check_final(e)
self.find_partial_type_ref_fast_path(e.target)
return value
def visit_unary_expr(self, e: UnaryExpr) -> Type:
"""Type check an unary operation ('not', '-', '+' or '~')."""
operand_type = self.accept(e.expr)
op = e.op
if op == 'not':
result = self.bool_type() # type: Type
else:
method = nodes.unary_op_methods[op]
result, method_type = self.check_method_call_by_name(method, operand_type, [], [], e)
e.method_type = method_type
return result
def visit_index_expr(self, e: IndexExpr) -> Type:
"""Type check an index expression (base[index]).
It may also represent type application.
"""
result = self.visit_index_expr_helper(e)
result = get_proper_type(self.narrow_type_from_binder(e, result))
if (self.is_literal_context() and isinstance(result, Instance)
and result.last_known_value is not None):
result = result.last_known_value
return result
def visit_index_expr_helper(self, e: IndexExpr) -> Type:
if e.analyzed:
# It's actually a type application.
return self.accept(e.analyzed)
left_type = self.accept(e.base)
return self.visit_index_with_type(left_type, e)
def visit_index_with_type(self, left_type: Type, e: IndexExpr,
original_type: Optional[ProperType] = None) -> Type:
"""Analyze type of an index expression for a given type of base expression.
The 'original_type' is used for error messages (currently used for union types).
"""
index = e.index
left_type = get_proper_type(left_type)
# Visit the index, just to make sure we have a type for it available
self.accept(index)
if isinstance(left_type, UnionType):
original_type = original_type or left_type
return make_simplified_union([self.visit_index_with_type(typ, e,
original_type)
for typ in left_type.relevant_items()])
elif isinstance(left_type, TupleType) and self.chk.in_checked_function():
# Special case for tuples. They return a more specific type when
# indexed by an integer literal.
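            # For example (illustrative only), for 't: Tuple[int, str]' the
            # expression 't[0]' has type 'int' and 't[1]' has type 'str', while
            # 't[n]' with a non-literal 'n: int' falls back to Union[int, str].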
if isinstance(index, SliceExpr):
return self.visit_tuple_slice_helper(left_type, index)
ns = self.try_getting_int_literals(index)
if ns is not None:
out = []
for n in ns:
if n < 0:
n += len(left_type.items)
if 0 <= n < len(left_type.items):
out.append(left_type.items[n])
else:
self.chk.fail(message_registry.TUPLE_INDEX_OUT_OF_RANGE, e)
return AnyType(TypeOfAny.from_error)
return make_simplified_union(out)
else:
return self.nonliteral_tuple_index_helper(left_type, index)
elif isinstance(left_type, TypedDictType):
return self.visit_typeddict_index_expr(left_type, e.index)
elif (isinstance(left_type, CallableType)
and left_type.is_type_obj() and left_type.type_object().is_enum):
return self.visit_enum_index_expr(left_type.type_object(), e.index, e)
else:
result, method_type = self.check_method_call_by_name(
'__getitem__', left_type, [e.index], [ARG_POS], e,
original_type=original_type)
e.method_type = method_type
return result
def visit_tuple_slice_helper(self, left_type: TupleType, slic: SliceExpr) -> Type:
begin = [None] # type: Sequence[Optional[int]]
end = [None] # type: Sequence[Optional[int]]
stride = [None] # type: Sequence[Optional[int]]
if slic.begin_index:
begin_raw = self.try_getting_int_literals(slic.begin_index)
if begin_raw is None:
return self.nonliteral_tuple_index_helper(left_type, slic)
begin = begin_raw
if slic.end_index:
end_raw = self.try_getting_int_literals(slic.end_index)
if end_raw is None:
return self.nonliteral_tuple_index_helper(left_type, slic)
end = end_raw
if slic.stride:
stride_raw = self.try_getting_int_literals(slic.stride)
if stride_raw is None:
return self.nonliteral_tuple_index_helper(left_type, slic)
stride = stride_raw
items = [] # type: List[Type]
for b, e, s in itertools.product(begin, end, stride):
items.append(left_type.slice(b, e, s))
return make_simplified_union(items)
def try_getting_int_literals(self, index: Expression) -> Optional[List[int]]:
"""If the given expression or type corresponds to an int literal
or a union of int literals, returns a list of the underlying ints.
Otherwise, returns None.
Specifically, this function is guaranteed to return a list with
        one or more ints if one of the following is true:
        1. 'expr' is an IntExpr or a UnaryExpr backed by an IntExpr
2. 'typ' is a LiteralType containing an int
3. 'typ' is a UnionType containing only LiteralType of ints
"""
if isinstance(index, IntExpr):
return [index.value]
elif isinstance(index, UnaryExpr):
if index.op == '-':
operand = index.expr
if isinstance(operand, IntExpr):
return [-1 * operand.value]
typ = get_proper_type(self.accept(index))
if isinstance(typ, Instance) and typ.last_known_value is not None:
typ = typ.last_known_value
if isinstance(typ, LiteralType) and isinstance(typ.value, int):
return [typ.value]
if isinstance(typ, UnionType):
out = []
for item in get_proper_types(typ.items):
if isinstance(item, LiteralType) and isinstance(item.value, int):
out.append(item.value)
else:
return None
return out
return None
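
    # Roughly: the literal 3 yields [3], the expression -2 yields [-2], a value of
    # type Literal[0, 1] yields [0, 1], and a plain int (or anything else) yields None.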
def nonliteral_tuple_index_helper(self, left_type: TupleType, index: Expression) -> Type:
index_type = self.accept(index)
expected_type = UnionType.make_union([self.named_type('builtins.int'),
self.named_type('builtins.slice')])
if not self.chk.check_subtype(index_type, expected_type, index,
message_registry.INVALID_TUPLE_INDEX_TYPE,
'actual type', 'expected type'):
return AnyType(TypeOfAny.from_error)
else:
union = make_simplified_union(left_type.items)
if isinstance(index, SliceExpr):
return self.chk.named_generic_type('builtins.tuple', [union])
else:
return union
def visit_typeddict_index_expr(self, td_type: TypedDictType, index: Expression) -> Type:
if isinstance(index, (StrExpr, UnicodeExpr)):
key_names = [index.value]
else:
typ = get_proper_type(self.accept(index))
if isinstance(typ, UnionType):
key_types = list(typ.items) # type: List[Type]
else:
key_types = [typ]
key_names = []
for key_type in get_proper_types(key_types):
if isinstance(key_type, Instance) and key_type.last_known_value is not None:
key_type = key_type.last_known_value
if (isinstance(key_type, LiteralType)
and isinstance(key_type.value, str)
and key_type.fallback.type.fullname != 'builtins.bytes'):
key_names.append(key_type.value)
else:
self.msg.typeddict_key_must_be_string_literal(td_type, index)
return AnyType(TypeOfAny.from_error)
value_types = []
for key_name in key_names:
value_type = td_type.items.get(key_name)
if value_type is None:
self.msg.typeddict_key_not_found(td_type, key_name, index)
return AnyType(TypeOfAny.from_error)
else:
value_types.append(value_type)
return make_simplified_union(value_types)
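
    # Illustrative sketch (hypothetical TypedDict):
    #
    #     class Point(TypedDict):
    #         x: int
    #         y: str
    #     p: Point
    #     p['x']      # -> int
    #     p[k]        # with k: Literal['x', 'y'] -> Union[int, str]
    #     p['z']      # -> error: key not found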
def visit_enum_index_expr(self, enum_type: TypeInfo, index: Expression,
context: Context) -> Type:
string_type = self.named_type('builtins.str') # type: Type
if self.chk.options.python_version[0] < 3:
string_type = UnionType.make_union([string_type,
self.named_type('builtins.unicode')])
self.chk.check_subtype(self.accept(index), string_type, context,
"Enum index should be a string", "actual index type")
return Instance(enum_type, [])
def visit_cast_expr(self, expr: CastExpr) -> Type:
"""Type check a cast expression."""
source_type = self.accept(expr.expr, type_context=AnyType(TypeOfAny.special_form),
allow_none_return=True, always_allow_any=True)
target_type = expr.type
options = self.chk.options
if (options.warn_redundant_casts and not isinstance(get_proper_type(target_type), AnyType)
and is_same_type(source_type, target_type)):
self.msg.redundant_cast(target_type, expr)
if options.disallow_any_unimported and has_any_from_unimported_type(target_type):
self.msg.unimported_type_becomes_any("Target type of cast", target_type, expr)
check_for_explicit_any(target_type, self.chk.options, self.chk.is_typeshed_stub, self.msg,
context=expr)
return target_type
def visit_reveal_expr(self, expr: RevealExpr) -> Type:
"""Type check a reveal_type expression."""
if expr.kind == REVEAL_TYPE:
assert expr.expr is not None
revealed_type = self.accept(expr.expr, type_context=self.type_context[-1])
if not self.chk.current_node_deferred:
self.msg.reveal_type(revealed_type, expr.expr)
if not self.chk.in_checked_function():
self.msg.note("'reveal_type' always outputs 'Any' in unchecked functions",
expr.expr)
return revealed_type
else:
# REVEAL_LOCALS
if not self.chk.current_node_deferred:
# the RevealExpr contains a local_nodes attribute,
# calculated at semantic analysis time. Use it to pull out the
# corresponding subset of variables in self.chk.type_map
names_to_types = {
var_node.name: var_node.type for var_node in expr.local_nodes
} if expr.local_nodes is not None else {}
self.msg.reveal_locals(names_to_types, expr)
return NoneType()
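
    # Rough example of both kinds of reveal expression:
    #
    #     x = [1, 2]
    #     reveal_type(x)    # note: revealed type is roughly 'builtins.list[builtins.int]'
    #     reveal_locals()   # note: lists the inferred types of the local variables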
def visit_type_application(self, tapp: TypeApplication) -> Type:
"""Type check a type application (expr[type, ...]).
There are two different options here, depending on whether expr refers
to a type alias or directly to a generic class. In the first case we need
to use a dedicated function typeanal.expand_type_aliases. This
is due to the fact that currently type aliases machinery uses
unbound type variables, while normal generics use bound ones;
see TypeAlias docstring for more details.
"""
if isinstance(tapp.expr, RefExpr) and isinstance(tapp.expr.node, TypeAlias):
# Subscription of a (generic) alias in runtime context, expand the alias.
item = expand_type_alias(tapp.expr.node, tapp.types, self.chk.fail,
tapp.expr.node.no_args, tapp)
item = get_proper_type(item)
if isinstance(item, Instance):
tp = type_object_type(item.type, self.named_type)
return self.apply_type_arguments_to_callable(tp, item.args, tapp)
else:
self.chk.fail(message_registry.ONLY_CLASS_APPLICATION, tapp)
return AnyType(TypeOfAny.from_error)
# Type application of a normal generic class in runtime context.
# This is typically used as `x = G[int]()`.
tp = get_proper_type(self.accept(tapp.expr))
if isinstance(tp, (CallableType, Overloaded)):
if not tp.is_type_obj():
self.chk.fail(message_registry.ONLY_CLASS_APPLICATION, tapp)
return self.apply_type_arguments_to_callable(tp, tapp.types, tapp)
if isinstance(tp, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=tp)
return AnyType(TypeOfAny.special_form)
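
    # Sketch of the two cases handled above (hypothetical names):
    #
    #     class Box(Generic[T]): ...
    #     BoxAlias = Box
    #     Box[int]()        # type application on the generic class itself
    #     BoxAlias[int]()   # type application on an alias; the alias is expanded first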
def visit_type_alias_expr(self, alias: TypeAliasExpr) -> Type:
"""Right hand side of a type alias definition.
It has the same type as if the alias itself was used in a runtime context.
For example, here:
A = reveal_type(List[T])
reveal_type(A)
both `reveal_type` instances will reveal the same type `def (...) -> builtins.list[Any]`.
Note that type variables are implicitly substituted with `Any`.
"""
return self.alias_type_in_runtime_context(alias.node, alias.no_args,
alias, alias_definition=True)
def alias_type_in_runtime_context(self, alias: TypeAlias,
no_args: bool, ctx: Context,
*,
alias_definition: bool = False) -> Type:
"""Get type of a type alias (could be generic) in a runtime expression.
Note that this function can be called only if the alias appears _not_
as a target of type application, which is treated separately in the
visit_type_application method. Some examples where this method is called are
casts and instantiation:
class LongName(Generic[T]): ...
A = LongName[int]
x = A()
y = cast(A, ...)
"""
if isinstance(alias.target, Instance) and alias.target.invalid: # type: ignore
# An invalid alias, error already has been reported
return AnyType(TypeOfAny.from_error)
# If this is a generic alias, we set all variables to `Any`.
# For example:
# A = List[Tuple[T, T]]
# x = A() <- same as List[Tuple[Any, Any]], see PEP 484.
item = get_proper_type(set_any_tvars(alias, ctx.line, ctx.column))
if isinstance(item, Instance):
# Normally we get a callable type (or overloaded) with .is_type_obj() true
# representing the class's constructor
tp = type_object_type(item.type, self.named_type)
if no_args:
return tp
return self.apply_type_arguments_to_callable(tp, item.args, ctx)
elif (isinstance(item, TupleType) and
# Tuple[str, int]() fails at runtime, only named tuples and subclasses work.
tuple_fallback(item).type.fullname != 'builtins.tuple'):
return type_object_type(tuple_fallback(item).type, self.named_type)
elif isinstance(item, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=item)
else:
if alias_definition:
return AnyType(TypeOfAny.special_form)
# This type is invalid in most runtime contexts, give it an 'object' type.
return self.named_type('builtins.object')
def apply_type_arguments_to_callable(self, tp: Type, args: List[Type], ctx: Context) -> Type:
"""Apply type arguments to a generic callable type coming from a type object.
This will first perform type arguments count checks, report the
error as needed, and return the correct kind of Any. As a special
case this returns Any for non-callable types, because if type object type
is not callable, then an error should be already reported.
"""
tp = get_proper_type(tp)
if isinstance(tp, CallableType):
if len(tp.variables) != len(args):
self.msg.incompatible_type_application(len(tp.variables),
len(args), ctx)
return AnyType(TypeOfAny.from_error)
return self.apply_generic_arguments(tp, args, ctx)
if isinstance(tp, Overloaded):
for it in tp.items():
if len(it.variables) != len(args):
self.msg.incompatible_type_application(len(it.variables),
len(args), ctx)
return AnyType(TypeOfAny.from_error)
return Overloaded([self.apply_generic_arguments(it, args, ctx)
for it in tp.items()])
return AnyType(TypeOfAny.special_form)
def visit_list_expr(self, e: ListExpr) -> Type:
"""Type check a list expression [...]."""
return self.check_lst_expr(e.items, 'builtins.list', '<list>', e)
def visit_set_expr(self, e: SetExpr) -> Type:
return self.check_lst_expr(e.items, 'builtins.set', '<set>', e)
def check_lst_expr(self, items: List[Expression], fullname: str,
tag: str, context: Context) -> Type:
# Translate into type checking a generic function call.
# Used for list and set expressions, as well as for tuples
# containing star expressions that don't refer to a
# Tuple. (Note: "lst" stands for list-set-tuple. :-)
tvdef = TypeVarDef('T', 'T', -1, [], self.object_type())
tv = TypeVarType(tvdef)
constructor = CallableType(
[tv],
[nodes.ARG_STAR],
[None],
self.chk.named_generic_type(fullname, [tv]),
self.named_type('builtins.function'),
name=tag,
variables=[tvdef])
out = self.check_call(constructor,
[(i.expr if isinstance(i, StarExpr) else i)
for i in items],
[(nodes.ARG_STAR if isinstance(i, StarExpr) else nodes.ARG_POS)
for i in items],
context)[0]
return remove_instance_last_known_values(out)
def visit_tuple_expr(self, e: TupleExpr) -> Type:
"""Type check a tuple expression."""
# Try to determine type context for type inference.
type_context = get_proper_type(self.type_context[-1])
type_context_items = None
if isinstance(type_context, UnionType):
tuples_in_context = [t for t in get_proper_types(type_context.items)
if (isinstance(t, TupleType) and len(t.items) == len(e.items)) or
is_named_instance(t, 'builtins.tuple')]
if len(tuples_in_context) == 1:
type_context = tuples_in_context[0]
else:
# There are either no relevant tuples in the Union, or there is
# more than one. Either way, we can't decide on a context.
pass
if isinstance(type_context, TupleType):
type_context_items = type_context.items
elif type_context and is_named_instance(type_context, 'builtins.tuple'):
assert isinstance(type_context, Instance)
if type_context.args:
type_context_items = [type_context.args[0]] * len(e.items)
# NOTE: it's possible for the context to have a different
# number of items than e. In that case we use those context
# items that match a position in e, and we'll worry about type
# mismatches later.
# Infer item types. Give up if there's a star expression
# that's not a Tuple.
items = [] # type: List[Type]
j = 0 # Index into type_context_items; irrelevant if type_context_items is none
for i in range(len(e.items)):
item = e.items[i]
if isinstance(item, StarExpr):
# Special handling for star expressions.
# TODO: If there's a context, and item.expr is a
# TupleExpr, flatten it, so we can benefit from the
# context? Counterargument: Why would anyone write
# (1, *(2, 3)) instead of (1, 2, 3) except in a test?
tt = self.accept(item.expr)
tt = get_proper_type(tt)
if isinstance(tt, TupleType):
items.extend(tt.items)
j += len(tt.items)
else:
# A star expression that's not a Tuple.
# Treat the whole thing as a variable-length tuple.
return self.check_lst_expr(e.items, 'builtins.tuple', '<tuple>', e)
else:
if not type_context_items or j >= len(type_context_items):
tt = self.accept(item)
else:
tt = self.accept(item, type_context_items[j])
j += 1
items.append(tt)
# This is a partial fallback item type. A precise type will be calculated on demand.
fallback_item = AnyType(TypeOfAny.special_form)
return TupleType(items, self.chk.named_generic_type('builtins.tuple', [fallback_item]))
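
    # Rough examples of the star-expression handling above:
    #
    #     pair = (1, 'x')      # Tuple[int, str]
    #     (0, *pair)           # star over a fixed-length tuple -> Tuple[int, int, str]
    #     (0, *[1, 2])         # star over a list -> the whole expression becomes a
    #                          # variable-length tuple of ints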
def visit_dict_expr(self, e: DictExpr) -> Type:
"""Type check a dict expression.
Translate it into a call to dict(), with provisions for **expr.
"""
        # If the dict literal doesn't match a TypedDict, check_typeddict_call_with_dict
        # reports an error but returns the TypedDict type that matches the literal it
        # found. That would cause a second error when that TypedDict type is returned
        # upstream, so to avoid it we always return the TypedDict type that was requested.
typeddict_context = self.find_typeddict_context(self.type_context[-1], e)
if typeddict_context:
self.check_typeddict_call_with_dict(
callee=typeddict_context,
kwargs=e,
context=e
)
return typeddict_context.copy_modified()
# Collect function arguments, watching out for **expr.
args = [] # type: List[Expression] # Regular "key: value"
stargs = [] # type: List[Expression] # For "**expr"
for key, value in e.items:
if key is None:
stargs.append(value)
else:
tup = TupleExpr([key, value])
if key.line >= 0:
tup.line = key.line
tup.column = key.column
else:
tup.line = value.line
tup.column = value.column
args.append(tup)
# Define type variables (used in constructors below).
ktdef = TypeVarDef('KT', 'KT', -1, [], self.object_type())
vtdef = TypeVarDef('VT', 'VT', -2, [], self.object_type())
kt = TypeVarType(ktdef)
vt = TypeVarType(vtdef)
rv = None
# Call dict(*args), unless it's empty and stargs is not.
if args or not stargs:
# The callable type represents a function like this:
#
# def <unnamed>(*v: Tuple[kt, vt]) -> Dict[kt, vt]: ...
constructor = CallableType(
[TupleType([kt, vt], self.named_type('builtins.tuple'))],
[nodes.ARG_STAR],
[None],
self.chk.named_generic_type('builtins.dict', [kt, vt]),
self.named_type('builtins.function'),
name='<dict>',
variables=[ktdef, vtdef])
rv = self.check_call(constructor, args, [nodes.ARG_POS] * len(args), e)[0]
else:
# dict(...) will be called below.
pass
# Call rv.update(arg) for each arg in **stargs,
# except if rv isn't set yet, then set rv = dict(arg).
if stargs:
for arg in stargs:
if rv is None:
constructor = CallableType(
[self.chk.named_generic_type('typing.Mapping', [kt, vt])],
[nodes.ARG_POS],
[None],
self.chk.named_generic_type('builtins.dict', [kt, vt]),
self.named_type('builtins.function'),
name='<list>',
variables=[ktdef, vtdef])
rv = self.check_call(constructor, [arg], [nodes.ARG_POS], arg)[0]
else:
self.check_method_call_by_name('update', rv, [arg], [nodes.ARG_POS], arg)
assert rv is not None
return rv
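
    # Conceptually (a rough sketch with hypothetical variables), a literal such as
    #
    #     {'a': 1, **extra}
    #
    # is checked like a dict(...) call over the ('a', 1) pair followed by an
    # .update(extra) call, unless the expression matches a TypedDict expected in
    # the surrounding type context.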
def find_typeddict_context(self, context: Optional[Type],
dict_expr: DictExpr) -> Optional[TypedDictType]:
context = get_proper_type(context)
if isinstance(context, TypedDictType):
return context
elif isinstance(context, UnionType):
items = []
for item in context.items:
item_context = self.find_typeddict_context(item, dict_expr)
if (item_context is not None
and self.match_typeddict_call_with_dict(
item_context, dict_expr, dict_expr)):
items.append(item_context)
if len(items) == 1:
# Only one union item is valid TypedDict for the given dict_expr, so use the
# context as it's unambiguous.
return items[0]
if len(items) > 1:
self.msg.typeddict_context_ambiguous(items, dict_expr)
# No TypedDict type in context.
return None
def visit_lambda_expr(self, e: LambdaExpr) -> Type:
"""Type check lambda expression."""
self.chk.check_default_args(e, body_is_trivial=False)
inferred_type, type_override = self.infer_lambda_type_using_context(e)
if not inferred_type:
self.chk.return_types.append(AnyType(TypeOfAny.special_form))
# Type check everything in the body except for the final return
# statement (it can contain tuple unpacking before return).
with self.chk.scope.push_function(e):
for stmt in e.body.body[:-1]:
stmt.accept(self.chk)
# Only type check the return expression, not the return statement.
# This is important as otherwise the following statements would be
# considered unreachable. There's no useful type context.
ret_type = self.accept(e.expr(), allow_none_return=True)
fallback = self.named_type('builtins.function')
self.chk.return_types.pop()
return callable_type(e, fallback, ret_type)
else:
# Type context available.
self.chk.return_types.append(inferred_type.ret_type)
self.chk.check_func_item(e, type_override=type_override)
if e.expr() not in self.chk.type_map:
# TODO: return expression must be accepted before exiting function scope.
self.accept(e.expr(), allow_none_return=True)
ret_type = self.chk.type_map[e.expr()]
self.chk.return_types.pop()
return replace_callable_return_type(inferred_type, ret_type)
def infer_lambda_type_using_context(self, e: LambdaExpr) -> Tuple[Optional[CallableType],
Optional[CallableType]]:
"""Try to infer lambda expression type using context.
Return None if could not infer type.
The second item in the return type is the type_override parameter for check_func_item.
"""
# TODO also accept 'Any' context
ctx = get_proper_type(self.type_context[-1])
if isinstance(ctx, UnionType):
callables = [t for t in get_proper_types(ctx.relevant_items())
if isinstance(t, CallableType)]
if len(callables) == 1:
ctx = callables[0]
if not ctx or not isinstance(ctx, CallableType):
return None, None
# The context may have function type variables in it. We replace them
# since these are the type variables we are ultimately trying to infer;
# they must be considered as indeterminate. We use ErasedType since it
# does not affect type inference results (it is for purposes like this
# only).
callable_ctx = get_proper_type(replace_meta_vars(ctx, ErasedType()))
assert isinstance(callable_ctx, CallableType)
arg_kinds = [arg.kind for arg in e.arguments]
if callable_ctx.is_ellipsis_args:
# Fill in Any arguments to match the arguments of the lambda.
callable_ctx = callable_ctx.copy_modified(
is_ellipsis_args=False,
arg_types=[AnyType(TypeOfAny.special_form)] * len(arg_kinds),
arg_kinds=arg_kinds,
arg_names=[None] * len(arg_kinds)
)
if ARG_STAR in arg_kinds or ARG_STAR2 in arg_kinds:
# TODO treat this case appropriately
return callable_ctx, None
if callable_ctx.arg_kinds != arg_kinds:
# Incompatible context; cannot use it to infer types.
self.chk.fail(message_registry.CANNOT_INFER_LAMBDA_TYPE, e)
return None, None
return callable_ctx, callable_ctx
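
    # Rough example of context-based lambda inference:
    #
    #     f: Callable[[int], int] = lambda x: x + 1   # x is inferred as int from the context
    #     g = lambda x: x + 1                         # no callable context; the argument
    #                                                 # type is not inferred (implicit Any)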
def visit_super_expr(self, e: SuperExpr) -> Type:
"""Type check a super expression (non-lvalue)."""
# We have an expression like super(T, var).member
# First compute the types of T and var
types = self._super_arg_types(e)
if isinstance(types, tuple):
type_type, instance_type = types
else:
return types
# Now get the MRO
type_info = type_info_from_type(type_type)
if type_info is None:
self.chk.fail(message_registry.UNSUPPORTED_ARG_1_FOR_SUPER, e)
return AnyType(TypeOfAny.from_error)
instance_info = type_info_from_type(instance_type)
if instance_info is None:
self.chk.fail(message_registry.UNSUPPORTED_ARG_2_FOR_SUPER, e)
return AnyType(TypeOfAny.from_error)
mro = instance_info.mro
# The base is the first MRO entry *after* type_info that has a member
# with the right name
try:
index = mro.index(type_info)
except ValueError:
self.chk.fail(message_registry.SUPER_ARG_2_NOT_INSTANCE_OF_ARG_1, e)
return AnyType(TypeOfAny.from_error)
for base in mro[index+1:]:
if e.name in base.names or base == mro[-1]:
if e.info and e.info.fallback_to_any and base == mro[-1]:
# There's an undefined base class, and we're at the end of the
# chain. That's not an error.
return AnyType(TypeOfAny.special_form)
return analyze_member_access(name=e.name,
typ=instance_type,
is_lvalue=False,
is_super=True,
is_operator=False,
original_type=instance_type,
override_info=base,
context=e,
msg=self.msg,
chk=self.chk,
in_literal_context=self.is_literal_context())
assert False, 'unreachable'
def _super_arg_types(self, e: SuperExpr) -> Union[Type, Tuple[Type, Type]]:
"""
Computes the types of the type and instance expressions in super(T, instance), or the
implicit ones for zero-argument super() expressions. Returns a single type for the whole
super expression when possible (for errors, anys), otherwise the pair of computed types.
"""
if not self.chk.in_checked_function():
return AnyType(TypeOfAny.unannotated)
elif len(e.call.args) == 0:
if self.chk.options.python_version[0] == 2:
self.chk.fail(message_registry.TOO_FEW_ARGS_FOR_SUPER, e, code=codes.CALL_ARG)
return AnyType(TypeOfAny.from_error)
elif not e.info:
# This has already been reported by the semantic analyzer.
return AnyType(TypeOfAny.from_error)
elif self.chk.scope.active_class():
self.chk.fail(message_registry.SUPER_OUTSIDE_OF_METHOD_NOT_SUPPORTED, e)
return AnyType(TypeOfAny.from_error)
# Zero-argument super() is like super(<current class>, <self>)
current_type = fill_typevars(e.info)
type_type = TypeType(current_type) # type: ProperType
# Use the type of the self argument, in case it was annotated
method = self.chk.scope.top_function()
assert method is not None
if method.arguments:
instance_type = method.arguments[0].variable.type or current_type # type: Type
else:
self.chk.fail(message_registry.SUPER_ENCLOSING_POSITIONAL_ARGS_REQUIRED, e)
return AnyType(TypeOfAny.from_error)
elif ARG_STAR in e.call.arg_kinds:
self.chk.fail(message_registry.SUPER_VARARGS_NOT_SUPPORTED, e)
return AnyType(TypeOfAny.from_error)
elif set(e.call.arg_kinds) != {ARG_POS}:
self.chk.fail(message_registry.SUPER_POSITIONAL_ARGS_REQUIRED, e)
return AnyType(TypeOfAny.from_error)
elif len(e.call.args) == 1:
self.chk.fail(message_registry.SUPER_WITH_SINGLE_ARG_NOT_SUPPORTED, e)
return AnyType(TypeOfAny.from_error)
elif len(e.call.args) == 2:
type_type = get_proper_type(self.accept(e.call.args[0]))
instance_type = self.accept(e.call.args[1])
else:
self.chk.fail(message_registry.TOO_MANY_ARGS_FOR_SUPER, e)
return AnyType(TypeOfAny.from_error)
# Imprecisely assume that the type is the current class
if isinstance(type_type, AnyType):
if e.info:
type_type = TypeType(fill_typevars(e.info))
else:
return AnyType(TypeOfAny.from_another_any, source_any=type_type)
elif isinstance(type_type, TypeType):
type_item = type_type.item
if isinstance(type_item, AnyType):
if e.info:
type_type = TypeType(fill_typevars(e.info))
else:
return AnyType(TypeOfAny.from_another_any, source_any=type_item)
if (not isinstance(type_type, TypeType)
and not (isinstance(type_type, FunctionLike) and type_type.is_type_obj())):
self.msg.first_argument_for_super_must_be_type(type_type, e)
return AnyType(TypeOfAny.from_error)
# Imprecisely assume that the instance is of the current class
instance_type = get_proper_type(instance_type)
if isinstance(instance_type, AnyType):
if e.info:
instance_type = fill_typevars(e.info)
else:
return AnyType(TypeOfAny.from_another_any, source_any=instance_type)
elif isinstance(instance_type, TypeType):
instance_item = instance_type.item
if isinstance(instance_item, AnyType):
if e.info:
instance_type = TypeType(fill_typevars(e.info))
else:
return AnyType(TypeOfAny.from_another_any, source_any=instance_item)
return type_type, instance_type
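
    # Illustrative sketch (hypothetical classes):
    #
    #     class A:
    #         def f(self) -> int: ...
    #     class B(A):
    #         def f(self) -> int:
    #             return super().f()   # zero-argument form, like super(B, self);
    #                                  # the member is looked up starting at the next
    #                                  # class after B in the MRO, here A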
def visit_slice_expr(self, e: SliceExpr) -> Type:
expected = make_optional_type(self.named_type('builtins.int'))
for index in [e.begin_index, e.end_index, e.stride]:
if index:
t = self.accept(index)
self.chk.check_subtype(t, expected,
index, message_registry.INVALID_SLICE_INDEX)
return self.named_type('builtins.slice')
def visit_list_comprehension(self, e: ListComprehension) -> Type:
return self.check_generator_or_comprehension(
e.generator, 'builtins.list', '<list-comprehension>')
def visit_set_comprehension(self, e: SetComprehension) -> Type:
return self.check_generator_or_comprehension(
e.generator, 'builtins.set', '<set-comprehension>')
def visit_generator_expr(self, e: GeneratorExpr) -> Type:
# If any of the comprehensions use async for, the expression will return an async generator
# object
if any(e.is_async):
typ = 'typing.AsyncGenerator'
# received type is always None in async generator expressions
additional_args = [NoneType()] # type: List[Type]
else:
typ = 'typing.Generator'
# received type and returned type are None
additional_args = [NoneType(), NoneType()]
return self.check_generator_or_comprehension(e, typ, '<generator>',
additional_args=additional_args)
def check_generator_or_comprehension(self, gen: GeneratorExpr,
type_name: str,
id_for_messages: str,
additional_args: Optional[List[Type]] = None) -> Type:
"""Type check a generator expression or a list comprehension."""
additional_args = additional_args or []
with self.chk.binder.frame_context(can_skip=True, fall_through=0):
self.check_for_comp(gen)
# Infer the type of the list comprehension by using a synthetic generic
# callable type.
tvdef = TypeVarDef('T', 'T', -1, [], self.object_type())
tv_list = [TypeVarType(tvdef)] # type: List[Type]
constructor = CallableType(
tv_list,
[nodes.ARG_POS],
[None],
self.chk.named_generic_type(type_name, tv_list + additional_args),
self.chk.named_type('builtins.function'),
name=id_for_messages,
variables=[tvdef])
return self.check_call(constructor,
[gen.left_expr], [nodes.ARG_POS], gen)[0]
def visit_dictionary_comprehension(self, e: DictionaryComprehension) -> Type:
"""Type check a dictionary comprehension."""
with self.chk.binder.frame_context(can_skip=True, fall_through=0):
self.check_for_comp(e)
        # Infer the type of the dictionary comprehension by using a synthetic generic
# callable type.
ktdef = TypeVarDef('KT', 'KT', -1, [], self.object_type())
vtdef = TypeVarDef('VT', 'VT', -2, [], self.object_type())
kt = TypeVarType(ktdef)
vt = TypeVarType(vtdef)
constructor = CallableType(
[kt, vt],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
self.chk.named_generic_type('builtins.dict', [kt, vt]),
self.chk.named_type('builtins.function'),
name='<dictionary-comprehension>',
variables=[ktdef, vtdef])
return self.check_call(constructor,
[e.key, e.value], [nodes.ARG_POS, nodes.ARG_POS], e)[0]
def check_for_comp(self, e: Union[GeneratorExpr, DictionaryComprehension]) -> None:
"""Check the for_comp part of comprehensions. That is the part from 'for':
... for x in y if z
Note: This adds the type information derived from the condlists to the current binder.
"""
for index, sequence, conditions, is_async in zip(e.indices, e.sequences,
e.condlists, e.is_async):
if is_async:
_, sequence_type = self.chk.analyze_async_iterable_item_type(sequence)
else:
_, sequence_type = self.chk.analyze_iterable_item_type(sequence)
self.chk.analyze_index_variables(index, sequence_type, True, e)
for condition in conditions:
self.accept(condition)
# values are only part of the comprehension when all conditions are true
true_map, false_map = self.chk.find_isinstance_check(condition)
if true_map:
for var, type in true_map.items():
self.chk.binder.put(var, type)
if self.chk.options.warn_unreachable:
if true_map is None:
self.msg.redundant_condition_in_comprehension(False, condition)
elif false_map is None:
self.msg.redundant_condition_in_comprehension(True, condition)
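
    # Rough example of the narrowing performed above:
    #
    #     xs: List[Optional[int]]
    #     [x + 1 for x in xs if x is not None]   # inside the result expression, x is int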
def visit_conditional_expr(self, e: ConditionalExpr, allow_none_return: bool = False) -> Type:
self.accept(e.cond)
ctx = self.type_context[-1]
# Gain type information from isinstance if it is there
# but only for the current expression
if_map, else_map = self.chk.find_isinstance_check(e.cond)
if self.chk.options.warn_unreachable:
if if_map is None:
self.msg.redundant_condition_in_if(False, e.cond)
elif else_map is None:
self.msg.redundant_condition_in_if(True, e.cond)
if_type = self.analyze_cond_branch(if_map, e.if_expr, context=ctx,
allow_none_return=allow_none_return)
# Analyze the right branch using full type context and store the type
full_context_else_type = self.analyze_cond_branch(else_map, e.else_expr, context=ctx,
allow_none_return=allow_none_return)
if not mypy.checker.is_valid_inferred_type(if_type):
# Analyze the right branch disregarding the left branch.
else_type = full_context_else_type
# If it would make a difference, re-analyze the left
# branch using the right branch's type as context.
if ctx is None or not is_equivalent(else_type, ctx):
# TODO: If it's possible that the previous analysis of
# the left branch produced errors that are avoided
# using this context, suppress those errors.
if_type = self.analyze_cond_branch(if_map, e.if_expr, context=else_type,
allow_none_return=allow_none_return)
else:
# Analyze the right branch in the context of the left
# branch's type.
else_type = self.analyze_cond_branch(else_map, e.else_expr, context=if_type,
allow_none_return=allow_none_return)
# Only create a union type if the type context is a union, to be mostly
# compatible with older mypy versions where we always did a join.
#
# TODO: Always create a union or at least in more cases?
if isinstance(get_proper_type(self.type_context[-1]), UnionType):
res = make_simplified_union([if_type, full_context_else_type])
else:
res = join.join_types(if_type, else_type)
return res
def analyze_cond_branch(self, map: Optional[Dict[Expression, Type]],
node: Expression, context: Optional[Type],
allow_none_return: bool = False) -> Type:
with self.chk.binder.frame_context(can_skip=True, fall_through=0):
if map is None:
# We still need to type check node, in case we want to
# process it for isinstance checks later
self.accept(node, type_context=context, allow_none_return=allow_none_return)
return UninhabitedType()
self.chk.push_type_map(map)
return self.accept(node, type_context=context, allow_none_return=allow_none_return)
def visit_backquote_expr(self, e: BackquoteExpr) -> Type:
self.accept(e.expr)
return self.named_type('builtins.str')
#
# Helpers
#
def accept(self,
node: Expression,
type_context: Optional[Type] = None,
allow_none_return: bool = False,
always_allow_any: bool = False,
) -> Type:
"""Type check a node in the given type context. If allow_none_return
is True and this expression is a call, allow it to return None. This
applies only to this expression and not any subexpressions.
"""
if node in self.type_overrides:
return self.type_overrides[node]
self.type_context.append(type_context)
try:
if allow_none_return and isinstance(node, CallExpr):
typ = self.visit_call_expr(node, allow_none_return=True)
elif allow_none_return and isinstance(node, YieldFromExpr):
typ = self.visit_yield_from_expr(node, allow_none_return=True)
elif allow_none_return and isinstance(node, ConditionalExpr):
typ = self.visit_conditional_expr(node, allow_none_return=True)
else:
typ = node.accept(self)
except Exception as err:
report_internal_error(err, self.chk.errors.file,
node.line, self.chk.errors, self.chk.options)
self.type_context.pop()
assert typ is not None
self.chk.store_type(node, typ)
if (self.chk.options.disallow_any_expr and
not always_allow_any and
not self.chk.is_stub and
self.chk.in_checked_function() and
has_any_type(typ) and not self.chk.current_node_deferred):
self.msg.disallowed_any_type(typ, node)
if not self.chk.in_checked_function() or self.chk.current_node_deferred:
return AnyType(TypeOfAny.unannotated)
else:
return typ
def named_type(self, name: str) -> Instance:
"""Return an instance type with type given by the name and no type
arguments. Alias for TypeChecker.named_type.
"""
return self.chk.named_type(name)
def is_valid_var_arg(self, typ: Type) -> bool:
"""Is a type valid as a *args argument?"""
typ = get_proper_type(typ)
return (isinstance(typ, TupleType) or
is_subtype(typ, self.chk.named_generic_type('typing.Iterable',
[AnyType(TypeOfAny.special_form)])) or
isinstance(typ, AnyType))
def is_valid_keyword_var_arg(self, typ: Type) -> bool:
"""Is a type valid as a **kwargs argument?"""
if self.chk.options.python_version[0] >= 3:
return is_subtype(typ, self.chk.named_generic_type(
'typing.Mapping', [self.named_type('builtins.str'),
AnyType(TypeOfAny.special_form)]))
else:
return (
is_subtype(typ, self.chk.named_generic_type(
'typing.Mapping',
[self.named_type('builtins.str'),
AnyType(TypeOfAny.special_form)]))
or
is_subtype(typ, self.chk.named_generic_type(
'typing.Mapping',
[self.named_type('builtins.unicode'),
AnyType(TypeOfAny.special_form)])))
def has_member(self, typ: Type, member: str) -> bool:
"""Does type have member with the given name?"""
# TODO: refactor this to use checkmember.analyze_member_access, otherwise
# these two should be carefully kept in sync.
# This is much faster than analyze_member_access, though, and so using
# it first as a filter is important for performance.
typ = get_proper_type(typ)
if isinstance(typ, TypeVarType):
typ = get_proper_type(typ.upper_bound)
if isinstance(typ, TupleType):
typ = tuple_fallback(typ)
if isinstance(typ, LiteralType):
typ = typ.fallback
if isinstance(typ, Instance):
return typ.type.has_readable_member(member)
if isinstance(typ, CallableType) and typ.is_type_obj():
return typ.fallback.type.has_readable_member(member)
elif isinstance(typ, AnyType):
return True
elif isinstance(typ, UnionType):
result = all(self.has_member(x, member) for x in typ.relevant_items())
return result
elif isinstance(typ, TypeType):
# Type[Union[X, ...]] is always normalized to Union[Type[X], ...],
# so we don't need to care about unions here.
item = typ.item
if isinstance(item, TypeVarType):
item = get_proper_type(item.upper_bound)
if isinstance(item, TupleType):
item = tuple_fallback(item)
if isinstance(item, Instance) and item.type.metaclass_type is not None:
return self.has_member(item.type.metaclass_type, member)
if isinstance(item, AnyType):
return True
return False
else:
return False
def not_ready_callback(self, name: str, context: Context) -> None:
"""Called when we can't infer the type of a variable because it's not ready yet.
Either defer type checking of the enclosing function to the next
pass or report an error.
"""
self.chk.handle_cannot_determine_type(name, context)
def visit_yield_expr(self, e: YieldExpr) -> Type:
return_type = self.chk.return_types[-1]
expected_item_type = self.chk.get_generator_yield_type(return_type, False)
if e.expr is None:
if (not isinstance(get_proper_type(expected_item_type), (NoneType, AnyType))
and self.chk.in_checked_function()):
self.chk.fail(message_registry.YIELD_VALUE_EXPECTED, e)
else:
actual_item_type = self.accept(e.expr, expected_item_type)
self.chk.check_subtype(actual_item_type, expected_item_type, e,
message_registry.INCOMPATIBLE_TYPES_IN_YIELD,
'actual type', 'expected type')
return self.chk.get_generator_receive_type(return_type, False)
def visit_await_expr(self, e: AwaitExpr) -> Type:
expected_type = self.type_context[-1]
if expected_type is not None:
expected_type = self.chk.named_generic_type('typing.Awaitable', [expected_type])
actual_type = get_proper_type(self.accept(e.expr, expected_type))
if isinstance(actual_type, AnyType):
return AnyType(TypeOfAny.from_another_any, source_any=actual_type)
return self.check_awaitable_expr(actual_type, e,
message_registry.INCOMPATIBLE_TYPES_IN_AWAIT)
def check_awaitable_expr(self, t: Type, ctx: Context, msg: str) -> Type:
"""Check the argument to `await` and extract the type of value.
Also used by `async for` and `async with`.
"""
if not self.chk.check_subtype(t, self.named_type('typing.Awaitable'), ctx,
msg, 'actual type', 'expected type'):
return AnyType(TypeOfAny.special_form)
else:
generator = self.check_method_call_by_name('__await__', t, [], [], ctx)[0]
return self.chk.get_generator_return_type(generator, False)
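
    # Sketch of how awaiting works out in practice (hypothetical coroutine):
    #
    #     async def fetch() -> int: ...
    #     async def caller() -> None:
    #         x = await fetch()   # x: int, taken from the return type of the
    #                             # generator produced by __await__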
def visit_yield_from_expr(self, e: YieldFromExpr, allow_none_return: bool = False) -> Type:
# NOTE: Whether `yield from` accepts an `async def` decorated
# with `@types.coroutine` (or `@asyncio.coroutine`) depends on
# whether the generator containing the `yield from` is itself
# thus decorated. But it accepts a generator regardless of
# how it's decorated.
return_type = self.chk.return_types[-1]
# TODO: What should the context for the sub-expression be?
# If the containing function has type Generator[X, Y, ...],
# the context should be Generator[X, Y, T], where T is the
# context of the 'yield from' itself (but it isn't known).
subexpr_type = get_proper_type(self.accept(e.expr))
# Check that the expr is an instance of Iterable and get the type of the iterator produced
# by __iter__.
if isinstance(subexpr_type, AnyType):
iter_type = AnyType(TypeOfAny.from_another_any, source_any=subexpr_type) # type: Type
elif self.chk.type_is_iterable(subexpr_type):
if is_async_def(subexpr_type) and not has_coroutine_decorator(return_type):
self.chk.msg.yield_from_invalid_operand_type(subexpr_type, e)
any_type = AnyType(TypeOfAny.special_form)
generic_generator_type = self.chk.named_generic_type('typing.Generator',
[any_type, any_type, any_type])
iter_type, _ = self.check_method_call_by_name(
'__iter__', subexpr_type, [], [], context=generic_generator_type)
else:
if not (is_async_def(subexpr_type) and has_coroutine_decorator(return_type)):
self.chk.msg.yield_from_invalid_operand_type(subexpr_type, e)
iter_type = AnyType(TypeOfAny.from_error)
else:
iter_type = self.check_awaitable_expr(
subexpr_type, e, message_registry.INCOMPATIBLE_TYPES_IN_YIELD_FROM)
# Check that the iterator's item type matches the type yielded by the Generator function
# containing this `yield from` expression.
expected_item_type = self.chk.get_generator_yield_type(return_type, False)
actual_item_type = self.chk.get_generator_yield_type(iter_type, False)
self.chk.check_subtype(actual_item_type, expected_item_type, e,
message_registry.INCOMPATIBLE_TYPES_IN_YIELD_FROM,
'actual type', 'expected type')
# Determine the type of the entire yield from expression.
iter_type = get_proper_type(iter_type)
if (isinstance(iter_type, Instance) and
iter_type.type.fullname == 'typing.Generator'):
expr_type = self.chk.get_generator_return_type(iter_type, False)
else:
# Non-Generators don't return anything from `yield from` expressions.
# However special-case Any (which might be produced by an error).
actual_item_type = get_proper_type(actual_item_type)
if isinstance(actual_item_type, AnyType):
expr_type = AnyType(TypeOfAny.from_another_any, source_any=actual_item_type)
else:
# Treat `Iterator[X]` as a shorthand for `Generator[X, None, Any]`.
expr_type = NoneType()
if not allow_none_return and isinstance(get_proper_type(expr_type), NoneType):
self.chk.msg.does_not_return_value(None, e)
return expr_type
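
    # Rough example of the Generator special case above:
    #
    #     def inner() -> Generator[int, None, str]: ...
    #     def outer() -> Generator[int, None, None]:
    #         result = yield from inner()   # result: str, the Generator's return type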
def visit_temp_node(self, e: TempNode) -> Type:
return e.type
def visit_type_var_expr(self, e: TypeVarExpr) -> Type:
return AnyType(TypeOfAny.special_form)
def visit_newtype_expr(self, e: NewTypeExpr) -> Type:
return AnyType(TypeOfAny.special_form)
def visit_namedtuple_expr(self, e: NamedTupleExpr) -> Type:
tuple_type = e.info.tuple_type
if tuple_type:
if (self.chk.options.disallow_any_unimported and
has_any_from_unimported_type(tuple_type)):
self.msg.unimported_type_becomes_any("NamedTuple type", tuple_type, e)
check_for_explicit_any(tuple_type, self.chk.options, self.chk.is_typeshed_stub,
self.msg, context=e)
return AnyType(TypeOfAny.special_form)
def visit_enum_call_expr(self, e: EnumCallExpr) -> Type:
for name, value in zip(e.items, e.values):
if value is not None:
typ = self.accept(value)
if not isinstance(get_proper_type(typ), AnyType):
var = e.info.names[name].node
if isinstance(var, Var):
# Inline TypeChecker.set_inferred_type(),
# without the lvalue. (This doesn't really do
# much, since the value attribute is defined
# to have type Any in the typeshed stub.)
var.type = typ
var.is_inferred = True
return AnyType(TypeOfAny.special_form)
def visit_typeddict_expr(self, e: TypedDictExpr) -> Type:
return AnyType(TypeOfAny.special_form)
def visit__promote_expr(self, e: PromoteExpr) -> Type:
return e.type
def visit_star_expr(self, e: StarExpr) -> StarType:
return StarType(self.accept(e.expr))
def object_type(self) -> Instance:
"""Return instance type 'object'."""
return self.named_type('builtins.object')
def bool_type(self) -> Instance:
"""Return instance type 'bool'."""
return self.named_type('builtins.bool')
@overload
def narrow_type_from_binder(self, expr: Expression, known_type: Type) -> Type: ...
@overload
def narrow_type_from_binder(self, expr: Expression, known_type: Type,
skip_non_overlapping: bool) -> Optional[Type]: ...
def narrow_type_from_binder(self, expr: Expression, known_type: Type,
skip_non_overlapping: bool = False) -> Optional[Type]:
"""Narrow down a known type of expression using information in conditional type binder.
If 'skip_non_overlapping' is True, return None if the type and restriction are
non-overlapping.
"""
if literal(expr) >= LITERAL_TYPE:
restriction = self.chk.binder.get(expr)
# If the current node is deferred, some variables may get Any types that they
# otherwise wouldn't have. We don't want to narrow down these since it may
# produce invalid inferred Optional[Any] types, at least.
if restriction and not (isinstance(get_proper_type(known_type), AnyType)
and self.chk.current_node_deferred):
# Note: this call should match the one in narrow_declared_type().
if (skip_non_overlapping and
not is_overlapping_types(known_type, restriction,
prohibit_none_typevar_overlap=True)):
return None
return narrow_declared_type(known_type, restriction)
return known_type
def has_any_type(t: Type) -> bool:
"""Whether t contains an Any type"""
return t.accept(HasAnyType())
class HasAnyType(types.TypeQuery[bool]):
def __init__(self) -> None:
super().__init__(any)
def visit_any(self, t: AnyType) -> bool:
return t.type_of_any != TypeOfAny.special_form # special forms are not real Any types
def has_coroutine_decorator(t: Type) -> bool:
"""Whether t came from a function decorated with `@coroutine`."""
t = get_proper_type(t)
return isinstance(t, Instance) and t.type.fullname == 'typing.AwaitableGenerator'
def is_async_def(t: Type) -> bool:
"""Whether t came from a function defined using `async def`."""
# In check_func_def(), when we see a function decorated with
# `@typing.coroutine` or `@async.coroutine`, we change the
# return type to typing.AwaitableGenerator[...], so that its
# type is compatible with either Generator or Awaitable.
# But for the check here we need to know whether the original
# function (before decoration) was an `async def`. The
# AwaitableGenerator type conveniently preserves the original
# type as its 4th parameter (3rd when using 0-origin indexing
# :-), so that we can recover that information here.
# (We really need to see whether the original, undecorated
# function was an `async def`, which is orthogonal to its
# decorations.)
t = get_proper_type(t)
if (isinstance(t, Instance)
and t.type.fullname == 'typing.AwaitableGenerator'
and len(t.args) >= 4):
t = get_proper_type(t.args[3])
return isinstance(t, Instance) and t.type.fullname == 'typing.Coroutine'
def is_non_empty_tuple(t: Type) -> bool:
t = get_proper_type(t)
return isinstance(t, TupleType) and bool(t.items)
def is_duplicate_mapping(mapping: List[int], actual_kinds: List[int]) -> bool:
# Multiple actuals can map to the same formal only if they both come from
# varargs (*args and **kwargs); in this case at runtime it is possible that
# there are no duplicates. We need to allow this, as the convention
# f(..., *args, **kwargs) is common enough.
return len(mapping) > 1 and not (
len(mapping) == 2 and
actual_kinds[mapping[0]] == nodes.ARG_STAR and
actual_kinds[mapping[1]] == nodes.ARG_STAR2)
def replace_callable_return_type(c: CallableType, new_ret_type: Type) -> CallableType:
"""Return a copy of a callable type with a different return type."""
return c.copy_modified(ret_type=new_ret_type)
class ArgInferSecondPassQuery(types.TypeQuery[bool]):
"""Query whether an argument type should be inferred in the second pass.
The result is True if the type has a type variable in a callable return
type anywhere. For example, the result for Callable[[], T] is True if t is
a type variable.
"""
def __init__(self) -> None:
super().__init__(any)
def visit_callable_type(self, t: CallableType) -> bool:
return self.query_types(t.arg_types) or t.accept(HasTypeVarQuery())
class HasTypeVarQuery(types.TypeQuery[bool]):
"""Visitor for querying whether a type has a type variable component."""
def __init__(self) -> None:
super().__init__(any)
def visit_type_var(self, t: TypeVarType) -> bool:
return True
def has_erased_component(t: Optional[Type]) -> bool:
return t is not None and t.accept(HasErasedComponentsQuery())
class HasErasedComponentsQuery(types.TypeQuery[bool]):
"""Visitor for querying whether a type has an erased component."""
def __init__(self) -> None:
super().__init__(any)
def visit_erased_type(self, t: ErasedType) -> bool:
return True
def has_uninhabited_component(t: Optional[Type]) -> bool:
return t is not None and t.accept(HasUninhabitedComponentsQuery())
class HasUninhabitedComponentsQuery(types.TypeQuery[bool]):
"""Visitor for querying whether a type has an UninhabitedType component."""
def __init__(self) -> None:
super().__init__(any)
def visit_uninhabited_type(self, t: UninhabitedType) -> bool:
return True
def arg_approximate_similarity(actual: Type, formal: Type) -> bool:
"""Return if caller argument (actual) is roughly compatible with signature arg (formal).
This function is deliberately loose and will report two types are similar
as long as their "shapes" are plausibly the same.
This is useful when we're doing error reporting: for example, if we're trying
to select an overload alternative and there's no exact match, we can use
this function to help us identify which alternative the user might have
*meant* to match.
"""
actual = get_proper_type(actual)
formal = get_proper_type(formal)
# Erase typevars: we'll consider them all to have the same "shape".
if isinstance(actual, TypeVarType):
actual = erase_to_union_or_bound(actual)
if isinstance(formal, TypeVarType):
formal = erase_to_union_or_bound(formal)
# Callable or Type[...]-ish types
def is_typetype_like(typ: ProperType) -> bool:
return (isinstance(typ, TypeType)
or (isinstance(typ, FunctionLike) and typ.is_type_obj())
or (isinstance(typ, Instance) and typ.type.fullname == "builtins.type"))
if isinstance(formal, CallableType):
if isinstance(actual, (CallableType, Overloaded, TypeType)):
return True
if is_typetype_like(actual) and is_typetype_like(formal):
return True
# Unions
if isinstance(actual, UnionType):
return any(arg_approximate_similarity(item, formal) for item in actual.relevant_items())
if isinstance(formal, UnionType):
return any(arg_approximate_similarity(actual, item) for item in formal.relevant_items())
# TypedDicts
if isinstance(actual, TypedDictType):
if isinstance(formal, TypedDictType):
return True
return arg_approximate_similarity(actual.fallback, formal)
# Instances
# For instances, we mostly defer to the existing is_subtype check.
if isinstance(formal, Instance):
if isinstance(actual, CallableType):
actual = actual.fallback
if isinstance(actual, Overloaded):
actual = actual.items()[0].fallback
if isinstance(actual, TupleType):
actual = tuple_fallback(actual)
if isinstance(actual, Instance) and formal.type in actual.type.mro:
# Try performing a quick check as an optimization
return True
# Fall back to a standard subtype check for the remaining kinds of type.
return is_subtype(erasetype.erase_type(actual), erasetype.erase_type(formal))
def any_causes_overload_ambiguity(items: List[CallableType],
return_types: List[Type],
arg_types: List[Type],
arg_kinds: List[int],
arg_names: Optional[Sequence[Optional[str]]]) -> bool:
"""May an argument containing 'Any' cause ambiguous result type on call to overloaded function?
Note that this sometimes returns True even if there is no ambiguity, since a correct
implementation would be complex (and the call would be imprecisely typed due to Any
types anyway).
Args:
        items: Overload items matching the actual arguments
        return_types: Return types of the corresponding overload items
arg_types: Actual argument types
arg_kinds: Actual argument kinds
arg_names: Actual argument names
"""
if all_same_types(return_types):
return False
actual_to_formal = [
map_formals_to_actuals(
arg_kinds, arg_names, item.arg_kinds, item.arg_names, lambda i: arg_types[i])
for item in items
]
for arg_idx, arg_type in enumerate(arg_types):
if has_any_type(arg_type):
matching_formals_unfiltered = [(item_idx, lookup[arg_idx])
for item_idx, lookup in enumerate(actual_to_formal)
if lookup[arg_idx]]
matching_returns = []
matching_formals = []
for item_idx, formals in matching_formals_unfiltered:
matched_callable = items[item_idx]
matching_returns.append(matched_callable.ret_type)
# Note: if an actual maps to multiple formals of differing types within
# a single callable, then we know at least one of those formals must be
                # a different type than the formal(s) in some other callable.
# So it's safe to just append everything to the same list.
for formal in formals:
matching_formals.append(matched_callable.arg_types[formal])
if not all_same_types(matching_formals) and not all_same_types(matching_returns):
# Any maps to multiple different types, and the return types of these items differ.
return True
return False
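
# Rough example of the ambiguity this detects (hypothetical overloads):
#
#     @overload
#     def f(x: int) -> int: ...
#     @overload
#     def f(x: str) -> str: ...
#     a: Any
#     f(a)    # both items match through the Any argument and their return types
#             # differ, so the call is given an ambiguous (Any) result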
def all_same_types(types: List[Type]) -> bool:
if len(types) == 0:
return True
return all(is_same_type(t, types[0]) for t in types[1:])
def merge_typevars_in_callables_by_name(
callables: Sequence[CallableType]) -> Tuple[List[CallableType], List[TypeVarDef]]:
"""Takes all the typevars present in the callables and 'combines' the ones with the same name.
For example, suppose we have two callables with signatures "f(x: T, y: S) -> T" and
"f(x: List[Tuple[T, S]]) -> Tuple[T, S]". Both callables use typevars named "T" and
"S", but we treat them as distinct, unrelated typevars. (E.g. they could both have
distinct ids.)
    If we pass in both callables into this function, it returns a list containing two
new callables that are identical in signature, but use the same underlying TypeVarDef
and TypeVarType objects for T and S.
This is useful if we want to take the output lists and "merge" them into one callable
in some way -- for example, when unioning together overloads.
Returns both the new list of callables and a list of all distinct TypeVarDef objects used.
"""
output = [] # type: List[CallableType]
unique_typevars = {} # type: Dict[str, TypeVarType]
variables = [] # type: List[TypeVarDef]
for target in callables:
if target.is_generic():
target = freshen_function_type_vars(target)
rename = {} # Dict[TypeVarId, TypeVar]
for tvdef in target.variables:
name = tvdef.fullname
if name not in unique_typevars:
unique_typevars[name] = TypeVarType(tvdef)
variables.append(tvdef)
rename[tvdef.id] = unique_typevars[name]
target = cast(CallableType, expand_type(target, rename))
output.append(target)
return output, variables
def try_getting_literal(typ: Type) -> ProperType:
"""If possible, get a more precise literal type for a given type."""
typ = get_proper_type(typ)
if isinstance(typ, Instance) and typ.last_known_value is not None:
return typ.last_known_value
return typ
def is_expr_literal_type(node: Expression) -> bool:
"""Returns 'true' if the given node is a Literal"""
valid = ('typing.Literal', 'typing_extensions.Literal')
if isinstance(node, IndexExpr):
base = node.base
return isinstance(base, RefExpr) and base.fullname in valid
if isinstance(node, NameExpr):
underlying = node.node
return isinstance(underlying, TypeAlias) and isinstance(get_proper_type(underlying.target),
LiteralType)
return False
def has_bytes_component(typ: Type, py2: bool = False) -> bool:
"""Is this one of builtin byte types, or a union that contains it?"""
typ = get_proper_type(typ)
if py2:
byte_types = {'builtins.str', 'builtins.bytearray'}
else:
byte_types = {'builtins.bytes', 'builtins.bytearray'}
if isinstance(typ, UnionType):
return any(has_bytes_component(t) for t in typ.items)
if isinstance(typ, Instance) and typ.type.fullname in byte_types:
return True
return False
def type_info_from_type(typ: Type) -> Optional[TypeInfo]:
"""Gets the TypeInfo for a type, indirecting through things like type variables and tuples."""
typ = get_proper_type(typ)
if isinstance(typ, FunctionLike) and typ.is_type_obj():
return typ.type_object()
if isinstance(typ, TypeType):
typ = typ.item
if isinstance(typ, TypeVarType):
typ = get_proper_type(typ.upper_bound)
if isinstance(typ, TupleType):
typ = tuple_fallback(typ)
if isinstance(typ, Instance):
return typ.type
# A complicated type. Too tricky, give up.
# TODO: Do something more clever here.
return None
def is_operator_method(fullname: Optional[str]) -> bool:
if fullname is None:
return False
short_name = fullname.split('.')[-1]
return (
short_name in nodes.op_methods.values() or
short_name in nodes.reverse_op_methods.values() or
short_name in nodes.unary_op_methods.values())
def get_partial_instance_type(t: Optional[Type]) -> Optional[PartialType]:
if t is None or not isinstance(t, PartialType) or t.type is None:
return None
return t
avg_line_length: 49.036555 | max_line_length: 99 | alphanum_fraction: 0.591577

hexsha: 4f3c10a39331f5b35babea089ba49dcc80c627a1 | size: 22910 | ext: py | lang: Python
max_stars_repo_path: ML_AI/PyTorch/Enet_PyTorch.py | max_stars_repo_name: KeerthanaPravallika/OpenOctober | max_stars_repo_head_hexsha: e93c120c90ce6c298b7052a2f7759560a2a2761c | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 32 | max_stars_repo_stars_event_min_datetime: 2020-10-17T09:58:41.000Z | max_stars_repo_stars_event_max_datetime: 2021-10-13T04:43:35.000Z
max_issues_repo_path: ML_AI/PyTorch/Enet_PyTorch.py | max_issues_repo_name: KeerthanaPravallika/OpenOctober | max_issues_repo_head_hexsha: e93c120c90ce6c298b7052a2f7759560a2a2761c | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 380 | max_issues_repo_issues_event_min_datetime: 2020-10-18T15:35:49.000Z | max_issues_repo_issues_event_max_datetime: 2021-12-25T05:03:50.000Z
max_forks_repo_path: ML_AI/PyTorch/Enet_PyTorch.py | max_forks_repo_name: KeerthanaPravallika/OpenOctober | max_forks_repo_head_hexsha: e93c120c90ce6c298b7052a2f7759560a2a2761c | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 68 | max_forks_repo_forks_event_min_datetime: 2020-10-17T17:29:54.000Z | max_forks_repo_forks_event_max_datetime: 2021-10-13T04:43:35.000Z
content:
import torch.nn as nn
import torch
class InitialBlock(nn.Module):
"""The initial block is composed of two branches:
1. a main branch which performs a regular convolution with stride 2;
2. an extension branch which performs max-pooling.
Doing both operations in parallel and concatenating their results
allows for efficient downsampling and expansion. The main branch
outputs 13 feature maps while the extension branch outputs 3, for a
total of 16 feature maps after concatenation.
Keyword arguments:
- in_channels (int): the number of input channels.
    - out_channels (int): the number of output channels.
    (The main-branch convolution uses a fixed 3x3 kernel with stride 2 and
    padding 1; kernel size and padding are not constructor arguments.)
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
bias=False,
relu=True):
super().__init__()
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
# Main branch - As stated above the number of output channels for this
# branch is the total minus 3, since the remaining channels come from
# the extension branch
self.main_branch = nn.Conv2d(
in_channels,
out_channels - 3,
kernel_size=3,
stride=2,
padding=1,
bias=bias)
# Extension branch
self.ext_branch = nn.MaxPool2d(3, stride=2, padding=1)
# Initialize batch normalization to be used after concatenation
self.batch_norm = nn.BatchNorm2d(out_channels)
# PReLU layer to apply after concatenating the branches
self.out_activation = activation()
def forward(self, x):
main = self.main_branch(x)
ext = self.ext_branch(x)
# Concatenate branches
out = torch.cat((main, ext), 1)
# Apply batch normalization
out = self.batch_norm(out)
return self.out_activation(out)
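
# Rough usage sketch (shapes assume a typical 3-channel input; not part of the model code):
#
#     block = InitialBlock(3, 16)
#     y = block(torch.randn(1, 3, 512, 512))   # y.shape == (1, 16, 256, 256)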
class RegularBottleneck(nn.Module):
"""Regular bottlenecks are the main building block of ENet.
Main branch:
1. Shortcut connection.
Extension branch:
1. 1x1 convolution which decreases the number of channels by
``internal_ratio``, also called a projection;
2. regular, dilated or asymmetric convolution;
3. 1x1 convolution which increases the number of channels back to
``channels``, also called an expansion;
4. dropout as a regularizer.
Keyword arguments:
- channels (int): the number of input and output channels.
- internal_ratio (int, optional): a scale factor applied to
``channels`` used to compute the number of
channels after the projection. eg. given ``channels`` equal to 128 and
internal_ratio equal to 2 the number of channels after the projection
is 64. Default: 4.
- kernel_size (int, optional): the kernel size of the filters used in
the convolution layer described above in item 2 of the extension
branch. Default: 3.
- padding (int, optional): zero-padding added to both sides of the
input. Default: 0.
- dilation (int, optional): spacing between kernel elements for the
convolution described in item 2 of the extension branch. Default: 1.
    - asymmetric (bool, optional): flags if the convolution described in
item 2 of the extension branch is asymmetric or not. Default: False.
- dropout_prob (float, optional): probability of an element to be
zeroed. Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
channels,
internal_ratio=4,
kernel_size=3,
padding=0,
dilation=1,
asymmetric=False,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
        # Check if the internal_ratio parameter is within the expected range
# [1, channels]
if internal_ratio <= 1 or internal_ratio > channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}."
.format(channels, internal_ratio))
internal_channels = channels // internal_ratio
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
# Main branch - shortcut connection
# Extension branch - 1x1 convolution, followed by a regular, dilated or
# asymmetric convolution, followed by another 1x1 convolution, and,
# finally, a regularizer (spatial dropout). Number of channels is constant.
# 1x1 projection convolution
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
channels,
internal_channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
# If the convolution is asymmetric we split the main convolution in
# two. Eg. for a 5x5 asymmetric convolution we have two convolution:
# the first is 5x1 and the second is 1x5.
if asymmetric:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(kernel_size, 1),
stride=1,
padding=(padding, 0),
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation(),
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(1, kernel_size),
stride=1,
padding=(0, padding),
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
else:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels,
channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(channels), activation())
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after adding the branches
self.out_activation = activation()
def forward(self, x):
# Main branch shortcut
main = x
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = main + ext
return self.out_activation(out)
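# A small sketch (hypothetical helper): a regular bottleneck preserves the
# tensor shape whether it is configured as a plain, dilated, or asymmetric
# block, which is what allows ENet to stack them freely in the encoder.
def _regular_bottleneck_shape_demo():
    x = torch.randn(1, 64, 32, 32)
    plain = RegularBottleneck(64, padding=1)
    dilated = RegularBottleneck(64, dilation=2, padding=2)
    asym = RegularBottleneck(64, kernel_size=5, padding=2, asymmetric=True)
    for block in (plain, dilated, asym):
        assert block(x).shape == x.shape
    return x.shape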
class DownsamplingBottleneck(nn.Module):
"""Downsampling bottlenecks further downsample the feature map size.
Main branch:
1. max pooling with stride 2; indices are saved to be used for
unpooling later.
Extension branch:
1. 2x2 convolution with stride 2 that decreases the number of channels
by ``internal_ratio``, also called a projection;
2. regular convolution (by default, 3x3);
3. 1x1 convolution which increases the number of channels to
``out_channels``, also called an expansion;
4. dropout as a regularizer.
Keyword arguments:
- in_channels (int): the number of input channels.
- out_channels (int): the number of output channels.
- internal_ratio (int, optional): a scale factor applied to ``channels``
used to compute the number of channels after the projection. eg. given
``channels`` equal to 128 and internal_ratio equal to 2 the number of
channels after the projection is 64. Default: 4.
- return_indices (bool, optional): if ``True``, will return the max
indices along with the outputs. Useful when unpooling later.
- dropout_prob (float, optional): probability of an element to be
zeroed. Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if
``True``. Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
internal_ratio=4,
return_indices=False,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
# Store parameters that are needed later
self.return_indices = return_indices
        # Check if the internal_ratio parameter is within the expected range
# [1, channels]
if internal_ratio <= 1 or internal_ratio > in_channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}. "
.format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
# Main branch - max pooling followed by feature map (channels) padding
self.main_max1 = nn.MaxPool2d(
2,
stride=2,
return_indices=return_indices)
        # Extension branch - 2x2 projection convolution with stride 2, followed
        # by a regular 3x3 convolution, followed by a 1x1 expansion convolution.
        # The number of channels grows from in_channels to out_channels.
# 2x2 projection convolution with stride 2
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
in_channels,
internal_channels,
kernel_size=2,
stride=2,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
# Convolution
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=3,
stride=1,
padding=1,
bias=bias), nn.BatchNorm2d(internal_channels), activation())
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels,
out_channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(out_channels), activation())
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_activation = activation()
def forward(self, x):
# Main branch shortcut
if self.return_indices:
main, max_indices = self.main_max1(x)
        else:
            main = self.main_max1(x)
            max_indices = None
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Main branch channel padding
n, ch_ext, h, w = ext.size()
ch_main = main.size()[1]
padding = torch.zeros(n, ch_ext - ch_main, h, w)
# Before concatenating, check if main is on the CPU or GPU and
# convert padding accordingly
if main.is_cuda:
padding = padding.cuda()
# Concatenate
main = torch.cat((main, padding), 1)
# Add main and extension branches
out = main + ext
return self.out_activation(out), max_indices
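# A small sketch (hypothetical helper): the downsampling bottleneck halves the
# spatial size, widens the channels, and returns the max-pooling indices that
# the matching upsampling bottleneck needs later for unpooling.
def _downsampling_bottleneck_demo():
    down = DownsamplingBottleneck(16, 64, return_indices=True)
    x = torch.randn(1, 16, 32, 32)
    y, indices = down(x)
    assert y.shape == (1, 64, 16, 16)        # channels 16 -> 64, spatial / 2
    assert indices.shape == (1, 16, 16, 16)  # indices follow the pooled main branch
    return y.shape, indices.shape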
class UpsamplingBottleneck(nn.Module):
"""The upsampling bottlenecks upsample the feature map resolution using max
pooling indices stored from the corresponding downsampling bottleneck.
Main branch:
1. 1x1 convolution with stride 1 that decreases the number of channels by
``internal_ratio``, also called a projection;
2. max unpool layer using the max pool indices from the corresponding
downsampling max pool layer.
Extension branch:
1. 1x1 convolution with stride 1 that decreases the number of channels by
``internal_ratio``, also called a projection;
2. transposed convolution (by default, 3x3);
3. 1x1 convolution which increases the number of channels to
``out_channels``, also called an expansion;
4. dropout as a regularizer.
Keyword arguments:
- in_channels (int): the number of input channels.
- out_channels (int): the number of output channels.
- internal_ratio (int, optional): a scale factor applied to ``in_channels``
used to compute the number of channels after the projection. eg. given
``in_channels`` equal to 128 and ``internal_ratio`` equal to 2 the number
of channels after the projection is 64. Default: 4.
- dropout_prob (float, optional): probability of an element to be zeroed.
Default: 0 (no dropout).
- bias (bool, optional): Adds a learnable bias to the output if ``True``.
Default: False.
- relu (bool, optional): When ``True`` ReLU is used as the activation
function; otherwise, PReLU is used. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
internal_ratio=4,
dropout_prob=0,
bias=False,
relu=True):
super().__init__()
        # Check if the internal_ratio parameter is within the expected range
# [1, channels]
if internal_ratio <= 1 or internal_ratio > in_channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}. "
.format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if relu:
activation = nn.ReLU
else:
activation = nn.PReLU
        # Main branch - 1x1 convolution followed by max unpooling using the
        # indices stored by the corresponding downsampling bottleneck
self.main_conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels))
# Remember that the stride is the same as the kernel_size, just like
# the max pooling layers
self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)
        # Extension branch - 1x1 projection convolution, followed by a
        # transposed convolution, followed by a 1x1 expansion convolution.
        # The number of channels shrinks from in_channels to out_channels.
# 1x1 projection convolution with stride 1
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
in_channels, internal_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(internal_channels), activation())
# Transposed convolution
self.ext_tconv1 = nn.ConvTranspose2d(
internal_channels,
internal_channels,
kernel_size=2,
stride=2,
bias=bias)
self.ext_tconv1_bnorm = nn.BatchNorm2d(internal_channels)
self.ext_tconv1_activation = activation()
# 1x1 expansion convolution
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels), activation())
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_activation = activation()
def forward(self, x, max_indices, output_size):
# Main branch shortcut
main = self.main_conv1(x)
main = self.main_unpool1(
main, max_indices, output_size=output_size)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_tconv1(ext, output_size=output_size)
ext = self.ext_tconv1_bnorm(ext)
ext = self.ext_tconv1_activation(ext)
ext = self.ext_conv2(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = main + ext
return self.out_activation(out)
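# A paired sketch (hypothetical helper): the upsampling bottleneck consumes the
# max-pooling indices produced by a matching DownsamplingBottleneck and restores
# the original spatial resolution.
def _up_down_pairing_demo():
    x = torch.randn(1, 16, 32, 32)
    down = DownsamplingBottleneck(16, 64, return_indices=True)
    up = UpsamplingBottleneck(64, 16)
    y, indices = down(x)
    restored = up(y, indices, output_size=x.size())
    assert restored.shape == x.shape
    return restored.shape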
class ENet(nn.Module):
"""Generate the ENet model.
Keyword arguments:
- num_classes (int): the number of classes to segment.
- encoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the encoder blocks/layers; otherwise, PReLU
is used. Default: False.
- decoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the decoder blocks/layers; otherwise, PReLU
is used. Default: True.
"""
def __init__(self, num_classes, encoder_relu=False, decoder_relu=True):
super().__init__()
self.initial_block = InitialBlock(3, 16, relu=encoder_relu)
# Stage 1 - Encoder
self.downsample1_0 = DownsamplingBottleneck(
16,
64,
return_indices=True,
dropout_prob=0.01,
relu=encoder_relu)
self.regular1_1 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_2 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_3 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_4 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
# Stage 2 - Encoder
self.downsample2_0 = DownsamplingBottleneck(
64,
128,
return_indices=True,
dropout_prob=0.1,
relu=encoder_relu)
self.regular2_1 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_2 = RegularBottleneck(
128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_3 = RegularBottleneck(
128,
kernel_size=5,
padding=2,
asymmetric=True,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_4 = RegularBottleneck(
128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular2_5 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_6 = RegularBottleneck(
128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_7 = RegularBottleneck(
128,
kernel_size=5,
asymmetric=True,
padding=2,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_8 = RegularBottleneck(
128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 3 - Encoder
self.regular3_0 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_1 = RegularBottleneck(
128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_2 = RegularBottleneck(
128,
kernel_size=5,
padding=2,
asymmetric=True,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated3_3 = RegularBottleneck(
128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular3_4 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_5 = RegularBottleneck(
128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_6 = RegularBottleneck(
128,
kernel_size=5,
asymmetric=True,
padding=2,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated3_7 = RegularBottleneck(
128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 4 - Decoder
self.upsample4_0 = UpsamplingBottleneck(
128, 64, dropout_prob=0.1, relu=decoder_relu)
self.regular4_1 = RegularBottleneck(
64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular4_2 = RegularBottleneck(
64, padding=1, dropout_prob=0.1, relu=decoder_relu)
# Stage 5 - Decoder
self.upsample5_0 = UpsamplingBottleneck(
64, 16, dropout_prob=0.1, relu=decoder_relu)
self.regular5_1 = RegularBottleneck(
16, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.transposed_conv = nn.ConvTranspose2d(
16,
num_classes,
kernel_size=3,
stride=2,
padding=1,
bias=False)
def forward(self, x):
# Initial block
input_size = x.size()
x = self.initial_block(x)
# Stage 1 - Encoder
stage1_input_size = x.size()
x, max_indices1_0 = self.downsample1_0(x)
x = self.regular1_1(x)
x = self.regular1_2(x)
x = self.regular1_3(x)
x = self.regular1_4(x)
# Stage 2 - Encoder
stage2_input_size = x.size()
x, max_indices2_0 = self.downsample2_0(x)
x = self.regular2_1(x)
x = self.dilated2_2(x)
x = self.asymmetric2_3(x)
x = self.dilated2_4(x)
x = self.regular2_5(x)
x = self.dilated2_6(x)
x = self.asymmetric2_7(x)
x = self.dilated2_8(x)
# Stage 3 - Encoder
x = self.regular3_0(x)
x = self.dilated3_1(x)
x = self.asymmetric3_2(x)
x = self.dilated3_3(x)
x = self.regular3_4(x)
x = self.dilated3_5(x)
x = self.asymmetric3_6(x)
x = self.dilated3_7(x)
# Stage 4 - Decoder
x = self.upsample4_0(x, max_indices2_0, output_size=stage2_input_size)
x = self.regular4_1(x)
x = self.regular4_2(x)
# Stage 5 - Decoder
x = self.upsample5_0(x, max_indices1_0, output_size=stage1_input_size)
x = self.regular5_1(x)
x = self.transposed_conv(x, output_size=input_size)
return x
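# A minimal end-to-end usage sketch (the values here, e.g. 12 classes and a
# 360x480 input, are illustrative assumptions): builds the network and checks
# that the output logits come back at the input resolution.
if __name__ == "__main__":
    model = ENet(num_classes=12)
    dummy = torch.randn(2, 3, 360, 480)  # NCHW batch of RGB images
    logits = model(dummy)
    print(logits.shape)                  # expected: torch.Size([2, 12, 360, 480])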
| 36.307448
| 83
| 0.604889
|
39daf4ff16e74cc6352422464704f6ace218c2f7
| 188
|
py
|
Python
|
mt/opencv/__init__.py
|
inteplus/opencvmt
|
29a049f23f6bff8c3399cbadc527c9a6583737ec
|
[
"MIT"
] | null | null | null |
mt/opencv/__init__.py
|
inteplus/opencvmt
|
29a049f23f6bff8c3399cbadc527c9a6583737ec
|
[
"MIT"
] | null | null | null |
mt/opencv/__init__.py
|
inteplus/opencvmt
|
29a049f23f6bff8c3399cbadc527c9a6583737ec
|
[
"MIT"
] | null | null | null |
from mt.base import logger
try:
import cv2
except ImportError:
logger.error("IMPORT: OpenCV for Python is not detected. Please install a version of OpenCV for Python.")
raise
| 23.5
| 109
| 0.739362
|
e268b6fe54ce941f8fa8ffdaf76596ff4424b290
| 1,695
|
py
|
Python
|
migrations/versions/abcaeda711f9_.py
|
sroy8091/flask-app
|
e606574aa067edd77ebb3208af7bb9e57f2604fb
|
[
"MIT"
] | null | null | null |
migrations/versions/abcaeda711f9_.py
|
sroy8091/flask-app
|
e606574aa067edd77ebb3208af7bb9e57f2604fb
|
[
"MIT"
] | null | null | null |
migrations/versions/abcaeda711f9_.py
|
sroy8091/flask-app
|
e606574aa067edd77ebb3208af7bb9e57f2604fb
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: abcaeda711f9
Revises: b1fc06bece22
Create Date: 2022-01-17 15:48:03.404712
"""
# revision identifiers, used by Alembic.
import json
import requests
from elasticsearch import helpers
from sqlalchemy import MetaData, Table
import config
from models import ES
revision = 'abcaeda711f9'
down_revision = 'b1fc06bece22'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
data = json.loads(requests.get('https://api.jsonbin.io/b/61e47db70f639830851d4bfe').content)
meta = MetaData(bind=op.get_bind())
    meta.reflect(only=('movies', 'genres'))
movies_table = Table('movies', meta)
genres_table = Table('genres', meta)
    op.bulk_insert(movies_table, [{"name": movie['name'], "director": movie['director'], "imdb_score": movie['imdb_score'], "popularity": movie['popularity'], "genre": [genre.id for genre in genres_table.select().where(genres_table.c.name == movie['genre']).execute().fetchall()]} for movie in data])
actions = [
{
"_index": config.ES_INDEX,
"_type": config.ES_DOC_TYPE,
"_id": j+1,
"_source": {
"name": data[j]['name'],
"director": data[j]['director'],
"imdb_score": data[j]['imdb_score'],
"popularity": data[j]['popularity'],
"genre": data[j]['genre']
}
}
for j in range(0, len(data))
]
helpers.bulk(ES, actions)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| 28.728814
| 299
| 0.615929
|
06127dc43d526f62659f60bc62eb04cdbe3aeb89
| 102
|
py
|
Python
|
iunctus/cli/new.py
|
jeertmans/iunctus
|
b94ed9b36cb819d383e9946c45bac150a3e8df12
|
[
"MIT"
] | null | null | null |
iunctus/cli/new.py
|
jeertmans/iunctus
|
b94ed9b36cb819d383e9946c45bac150a3e8df12
|
[
"MIT"
] | null | null | null |
iunctus/cli/new.py
|
jeertmans/iunctus
|
b94ed9b36cb819d383e9946c45bac150a3e8df12
|
[
"MIT"
] | null | null | null |
import click
@click.command()
def new():
"""
Create a new iunctus project.
"""
pass
| 10.2
| 33
| 0.558824
|
724e3b904fb19209033887fec1e647694b3c716f
| 6,394
|
py
|
Python
|
src/oci/network_load_balancer/models/update_backend_set_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/network_load_balancer/models/update_backend_set_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/network_load_balancer/models/update_backend_set_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateBackendSetDetails(object):
"""
The configuration details for updating a load balancer backend set.
For more information about backend set configuration, see
`Managing Backend Sets`__.
**Caution:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
__ https://docs.cloud.oracle.com/Content/Balance/Tasks/managingbackendsets.htm
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateBackendSetDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param policy:
The value to assign to the policy property of this UpdateBackendSetDetails.
:type policy: str
:param is_preserve_source:
The value to assign to the is_preserve_source property of this UpdateBackendSetDetails.
:type is_preserve_source: bool
:param backends:
The value to assign to the backends property of this UpdateBackendSetDetails.
:type backends: list[oci.network_load_balancer.models.BackendDetails]
:param health_checker:
The value to assign to the health_checker property of this UpdateBackendSetDetails.
:type health_checker: oci.network_load_balancer.models.HealthCheckerDetails
"""
self.swagger_types = {
'policy': 'str',
'is_preserve_source': 'bool',
'backends': 'list[BackendDetails]',
'health_checker': 'HealthCheckerDetails'
}
self.attribute_map = {
'policy': 'policy',
'is_preserve_source': 'isPreserveSource',
'backends': 'backends',
'health_checker': 'healthChecker'
}
self._policy = None
self._is_preserve_source = None
self._backends = None
self._health_checker = None
@property
def policy(self):
"""
Gets the policy of this UpdateBackendSetDetails.
The network load balancer policy for the backend set. To get a list of available policies, use the
:func:`list_network_load_balancers_policies` operation.
Example: `FIVE_TUPLE`
:return: The policy of this UpdateBackendSetDetails.
:rtype: str
"""
return self._policy
@policy.setter
def policy(self, policy):
"""
Sets the policy of this UpdateBackendSetDetails.
The network load balancer policy for the backend set. To get a list of available policies, use the
:func:`list_network_load_balancers_policies` operation.
Example: `FIVE_TUPLE`
:param policy: The policy of this UpdateBackendSetDetails.
:type: str
"""
self._policy = policy
@property
def is_preserve_source(self):
"""
Gets the is_preserve_source of this UpdateBackendSetDetails.
If this parameter is enabled, then the network load balancer preserves the source IP of the packet when it is forwarded to backends.
Backends see the original source IP. If the isPreserveSourceDestination parameter is enabled for the network load balancer resource, then this parameter cannot be disabled.
The value is true by default.
:return: The is_preserve_source of this UpdateBackendSetDetails.
:rtype: bool
"""
return self._is_preserve_source
@is_preserve_source.setter
def is_preserve_source(self, is_preserve_source):
"""
Sets the is_preserve_source of this UpdateBackendSetDetails.
If this parameter is enabled, then the network load balancer preserves the source IP of the packet when it is forwarded to backends.
Backends see the original source IP. If the isPreserveSourceDestination parameter is enabled for the network load balancer resource, then this parameter cannot be disabled.
The value is true by default.
:param is_preserve_source: The is_preserve_source of this UpdateBackendSetDetails.
:type: bool
"""
self._is_preserve_source = is_preserve_source
@property
def backends(self):
"""
Gets the backends of this UpdateBackendSetDetails.
An array of backends associated with the backend set.
:return: The backends of this UpdateBackendSetDetails.
:rtype: list[oci.network_load_balancer.models.BackendDetails]
"""
return self._backends
@backends.setter
def backends(self, backends):
"""
Sets the backends of this UpdateBackendSetDetails.
An array of backends associated with the backend set.
:param backends: The backends of this UpdateBackendSetDetails.
:type: list[oci.network_load_balancer.models.BackendDetails]
"""
self._backends = backends
@property
def health_checker(self):
"""
Gets the health_checker of this UpdateBackendSetDetails.
:return: The health_checker of this UpdateBackendSetDetails.
:rtype: oci.network_load_balancer.models.HealthCheckerDetails
"""
return self._health_checker
@health_checker.setter
def health_checker(self, health_checker):
"""
Sets the health_checker of this UpdateBackendSetDetails.
:param health_checker: The health_checker of this UpdateBackendSetDetails.
:type: oci.network_load_balancer.models.HealthCheckerDetails
"""
self._health_checker = health_checker
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
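# A brief usage sketch (illustrative values only): the class is a plain data
# holder, so updating a backend set amounts to populating these properties and
# handing the object to the network load balancer client's backend-set update
# operation.
if __name__ == "__main__":
    details = UpdateBackendSetDetails(
        policy="FIVE_TUPLE",
        is_preserve_source=True,
        backends=[])  # list of oci.network_load_balancer.models.BackendDetails
    print(details)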
| 36.329545
| 245
| 0.686894
|
d4353dbda697bf8e34fdd32d9a7a9506155e0bba
| 580
|
py
|
Python
|
deque.py
|
Nikhilxavier/Queue
|
ddb1a0be124d84b91a3872b09f1edc25af59aabf
|
[
"BSD-3-Clause"
] | null | null | null |
deque.py
|
Nikhilxavier/Queue
|
ddb1a0be124d84b91a3872b09f1edc25af59aabf
|
[
"BSD-3-Clause"
] | null | null | null |
deque.py
|
Nikhilxavier/Queue
|
ddb1a0be124d84b91a3872b09f1edc25af59aabf
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Implementation of a Deque.
"""
# Author: Nikhil Xavier <nikhilxavier@yahoo.com>
# License: BSD 3 clause
class Deque:
"""Deque class with inbuilt functions."""
def __init__(self):
self.items = []
def add_front(self, item):
self.items.insert(0, item)
def add_rear(self, item):
self.items.append(item)
def remove_front(self):
return self.items.pop(0)
def remove_rear(self):
return self.items.pop()
def is_empty(self):
return self.items == []
def size(self):
return len(self.items)
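# A short usage sketch (illustrative only): because items can be removed from
# both ends, a deque makes the classic palindrome check straightforward.
def is_palindrome(text):
    """Return True if `text` reads the same forwards and backwards."""
    d = Deque()
    for ch in text:
        d.add_rear(ch)
    while d.size() > 1:
        if d.remove_front() != d.remove_rear():
            return False
    return True


if __name__ == "__main__":
    print(is_palindrome("radar"))   # True
    print(is_palindrome("python"))  # False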
| 18.125
| 48
| 0.606897
|
8e21e8cf6b3f24ccae275e3c01232e19409d9c71
| 391
|
py
|
Python
|
app/testAPI/wsgi.py
|
thenils/testing-pipeline-api
|
2743c828cd988df04736c8c8da4a88ec3a389ebe
|
[
"MIT"
] | null | null | null |
app/testAPI/wsgi.py
|
thenils/testing-pipeline-api
|
2743c828cd988df04736c8c8da4a88ec3a389ebe
|
[
"MIT"
] | null | null | null |
app/testAPI/wsgi.py
|
thenils/testing-pipeline-api
|
2743c828cd988df04736c8c8da4a88ec3a389ebe
|
[
"MIT"
] | null | null | null |
"""
WSGI config for testAPI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testAPI.settings')
application = get_wsgi_application()
| 23
| 78
| 0.785166
|
4bc480c63e848d7f1aae5035b34f70fd00c0af08
| 296
|
py
|
Python
|
mangopaysdk/types/bankaccountdetailsiban.py
|
bearstech/mangopay2-python-sdk
|
c01ff0bd55c0b2d6e53a81097d028fb1fa28fb1e
|
[
"MIT"
] | null | null | null |
mangopaysdk/types/bankaccountdetailsiban.py
|
bearstech/mangopay2-python-sdk
|
c01ff0bd55c0b2d6e53a81097d028fb1fa28fb1e
|
[
"MIT"
] | null | null | null |
mangopaysdk/types/bankaccountdetailsiban.py
|
bearstech/mangopay2-python-sdk
|
c01ff0bd55c0b2d6e53a81097d028fb1fa28fb1e
|
[
"MIT"
] | 1
|
2017-09-22T13:29:53.000Z
|
2017-09-22T13:29:53.000Z
|
from mangopaysdk.types.bankaccountdetails import BankAccountDetails
class BankAccountDetailsIBAN(BankAccountDetails):
"""IBAN bank account type for BankAccount entity."""
def __init__(self):
self.IBAN = None
"""IBAN number"""
self.BIC = None
"""BIC"""
| 22.769231
| 67
| 0.665541
|
f9305966bc2d56abefd1c24ba4a808f79733ca6b
| 706
|
py
|
Python
|
drivers/sensor/ucube.py
|
ruoranluomu/AliOS-Things
|
d0f3431bcacac5b61645e9beb231a0a53be8078b
|
[
"Apache-2.0"
] | 6
|
2019-06-24T08:30:42.000Z
|
2021-12-26T21:15:27.000Z
|
drivers/sensor/ucube.py
|
ewfweftwer/AliOS-Things
|
26a5c1a2d6b1771590f5d302f0b2e7fe2fcf843e
|
[
"Apache-2.0"
] | 1
|
2019-04-02T10:03:10.000Z
|
2019-04-02T10:03:10.000Z
|
drivers/sensor/ucube.py
|
ewfweftwer/AliOS-Things
|
26a5c1a2d6b1771590f5d302f0b2e7fe2fcf843e
|
[
"Apache-2.0"
] | 9
|
2019-07-12T02:50:10.000Z
|
2021-08-20T17:24:36.000Z
|
src =Split('''
hal/sensor_hal.c
hal/sensor_drv_api.c
drv/drv_temp_humi_baro_bosch_bme280.c
drv/drv_acc_bosch_bma253.c
drv/drv_baro_bosch_bmp280.c
drv/drv_acc_gyro_st_lsm6dsl.c
drv/drv_baro_st_lps22hb.c
drv/drv_acc_mir3_da217.c
drv/drv_als_ps_liteon_ltr553.c
drv/drv_temp_humi_sensirion_shtc1.c
    drv/drv_temp_humi_st_hts221.c
    drv/drv_mag_st_lis3mdl.c
drv/drv_mag_temp_memsic_mmc3680kj.c
''')
component =aos_component('sensor', src)
global_includes =Split('''
./include
''')
for i in global_includes:
component.add_global_includes(i)
global_macros =Split('''
AOS_SENSOR
''')
for i in global_macros:
component.add_global_macros(i)
| 21.393939
| 41
| 0.73796
|
4dfb4322464ef67dd2c746182fa0238e362696d4
| 20,683
|
py
|
Python
|
src/sas/sascalc/dataloader/file_reader_base_class.py
|
llimeht/sasview
|
d0c10746a2397c5021ed8bbc842ba99243a9b0ac
|
[
"BSD-3-Clause"
] | null | null | null |
src/sas/sascalc/dataloader/file_reader_base_class.py
|
llimeht/sasview
|
d0c10746a2397c5021ed8bbc842ba99243a9b0ac
|
[
"BSD-3-Clause"
] | null | null | null |
src/sas/sascalc/dataloader/file_reader_base_class.py
|
llimeht/sasview
|
d0c10746a2397c5021ed8bbc842ba99243a9b0ac
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This is the base file reader class most file readers should inherit from.
All generic functionality required for a file loader/reader is built into this
class
"""
import os
import sys
import math
import logging
from abc import abstractmethod
import numpy as np
from .loader_exceptions import NoKnownLoaderException, FileContentsException,\
DataReaderException, DefaultReaderException
from .data_info import Data1D, Data2D, DataInfo, plottable_1D, plottable_2D,\
combine_data_info_with_plottable
from sas.sascalc.data_util.nxsunit import Converter
logger = logging.getLogger(__name__)
if sys.version_info[0] < 3:
def decode(s):
return s
else:
def decode(s):
return s.decode() if isinstance(s, bytes) else s
# Data 1D fields for iterative purposes
FIELDS_1D = ('x', 'y', 'dx', 'dy', 'dxl', 'dxw')
# Data 2D fields for iterative purposes
FIELDS_2D = ('data', 'qx_data', 'qy_data', 'q_data', 'err_data',
'dqx_data', 'dqy_data', 'mask')
DEPRECATION_MESSAGE = ("\rThe extension of this file suggests the data set migh"
"t not be fully reduced. Support for the reader associat"
"ed with this file type has been removed. An attempt to "
"load the file was made, but, should it be successful, "
"SasView cannot guarantee the accuracy of the data.")
class FileReader(object):
# String to describe the type of data this reader can load
type_name = "ASCII"
# Wildcards to display
type = ["Text files (*.txt|*.TXT)"]
# List of allowed extensions
ext = ['.txt']
# Deprecated extensions
deprecated_extensions = ['.asc']
# Bypass extension check and try to load anyway
allow_all = False
# Able to import the unit converter
has_converter = True
    # Small value used in place of zero
_ZERO = 1e-16
def __init__(self):
# List of Data1D and Data2D objects to be sent back to data_loader
self.output = []
# Current plottable_(1D/2D) object being loaded in
self.current_dataset = None
# Current DataInfo object being loaded in
self.current_datainfo = None
# File path sent to reader
self.filepath = None
# Open file handle
self.f_open = None
def read(self, filepath):
"""
Basic file reader
:param filepath: The full or relative path to a file to be loaded
"""
self.filepath = filepath
if os.path.isfile(filepath):
basename, extension = os.path.splitext(os.path.basename(filepath))
self.extension = extension.lower()
# If the file type is not allowed, return nothing
if self.extension in self.ext or self.allow_all:
# Try to load the file, but raise an error if unable to.
try:
self.f_open = open(filepath, 'rb')
self.get_file_contents()
except DataReaderException as e:
self.handle_error_message(e.message)
except FileContentsException as e:
raise
except OSError as e:
# If the file cannot be opened
msg = "Unable to open file: {}\n".format(filepath)
                    msg += str(e)
self.handle_error_message(msg)
except Exception as e:
                    self.handle_error_message(str(e))
finally:
# Close the file handle if it is open
if not self.f_open.closed:
self.f_open.close()
if any(filepath.lower().endswith(ext) for ext in
self.deprecated_extensions):
self.handle_error_message(DEPRECATION_MESSAGE)
if len(self.output) > 0:
# Sort the data that's been loaded
self.convert_data_units()
self.sort_data()
else:
msg = "Unable to find file at: {}\n".format(filepath)
msg += "Please check your file path and try again."
self.handle_error_message(msg)
# Return a list of parsed entries that data_loader can manage
final_data = self.output
self.reset_state()
return final_data
def reset_state(self):
"""
Resets the class state to a base case when loading a new data file so previous
data files do not appear a second time
"""
self.current_datainfo = None
self.current_dataset = None
self.filepath = None
self.ind = None
self.output = []
def nextline(self):
"""
Returns the next line in the file as a string.
"""
#return self.f_open.readline()
return decode(self.f_open.readline())
def nextlines(self):
"""
        Yields the remaining lines of the file, one at a time, as strings.
"""
for line in self.f_open:
#yield line
yield decode(line)
def readall(self):
"""
Returns the entire file as a string.
"""
return decode(self.f_open.read())
def handle_error_message(self, msg):
"""
Generic error handler to add an error to the current datainfo to
propagate the error up the error chain.
:param msg: Error message
"""
if len(self.output) > 0:
self.output[-1].errors.append(msg)
elif isinstance(self.current_datainfo, DataInfo):
self.current_datainfo.errors.append(msg)
else:
logger.warning(msg)
raise NoKnownLoaderException(msg)
def send_to_output(self):
"""
Helper that automatically combines the info and set and then appends it
to output
"""
data_obj = combine_data_info_with_plottable(self.current_dataset,
self.current_datainfo)
self.output.append(data_obj)
def sort_data(self):
"""
Sort 1D data along the X axis for consistency
"""
for data in self.output:
if isinstance(data, Data1D):
                # Normalize the units
data.x_unit = self.format_unit(data.x_unit)
data._xunit = data.x_unit
data.y_unit = self.format_unit(data.y_unit)
data._yunit = data.y_unit
# Sort data by increasing x and remove 1st point
ind = np.lexsort((data.y, data.x))
data.x = self._reorder_1d_array(data.x, ind)
data.y = self._reorder_1d_array(data.y, ind)
if data.dx is not None:
if len(data.dx) == 0:
data.dx = None
continue
data.dx = self._reorder_1d_array(data.dx, ind)
if data.dxl is not None:
data.dxl = self._reorder_1d_array(data.dxl, ind)
if data.dxw is not None:
data.dxw = self._reorder_1d_array(data.dxw, ind)
if data.dy is not None:
if len(data.dy) == 0:
data.dy = None
continue
data.dy = self._reorder_1d_array(data.dy, ind)
if data.lam is not None:
data.lam = self._reorder_1d_array(data.lam, ind)
if data.dlam is not None:
data.dlam = self._reorder_1d_array(data.dlam, ind)
data = self._remove_nans_in_data(data)
if len(data.x) > 0:
data.xmin = np.min(data.x)
data.xmax = np.max(data.x)
data.ymin = np.min(data.y)
data.ymax = np.max(data.y)
elif isinstance(data, Data2D):
                # Normalize the units
data.Q_unit = self.format_unit(data.Q_unit)
data.I_unit = self.format_unit(data.I_unit)
data._xunit = data.Q_unit
data._yunit = data.Q_unit
data._zunit = data.I_unit
data.data = data.data.astype(np.float64)
data.qx_data = data.qx_data.astype(np.float64)
data.xmin = np.min(data.qx_data)
data.xmax = np.max(data.qx_data)
data.qy_data = data.qy_data.astype(np.float64)
data.ymin = np.min(data.qy_data)
data.ymax = np.max(data.qy_data)
data.q_data = np.sqrt(data.qx_data * data.qx_data
+ data.qy_data * data.qy_data)
if data.err_data is not None:
data.err_data = data.err_data.astype(np.float64)
if data.dqx_data is not None:
data.dqx_data = data.dqx_data.astype(np.float64)
if data.dqy_data is not None:
data.dqy_data = data.dqy_data.astype(np.float64)
if data.mask is not None:
data.mask = data.mask.astype(dtype=bool)
# If all mask elements are False, give a warning to the user
if not data.mask.any():
error = "The entire dataset is masked and may not "
error += "produce usable fits."
data.errors.append(error)
if len(data.data.shape) == 2:
n_rows, n_cols = data.data.shape
data.y_bins = data.qy_data[0::int(n_cols)]
data.x_bins = data.qx_data[:int(n_cols)]
data.data = data.data.flatten()
data = self._remove_nans_in_data(data)
if len(data.data) > 0:
data.xmin = np.min(data.qx_data)
data.xmax = np.max(data.qx_data)
data.ymin = np.min(data.qy_data)
data.ymax = np.max(data.qy_data)
@staticmethod
def _reorder_1d_array(array, ind):
"""
Reorders a 1D array based on the indices passed as ind
:param array: Array to be reordered
:param ind: Indices used to reorder array
:return: reordered array
"""
array = np.asarray(array, dtype=np.float64)
return array[ind]
@staticmethod
def _remove_nans_in_data(data):
"""
Remove data points where nan is loaded
:param data: 1D or 2D data object
:return: data with nan points removed
"""
if isinstance(data, Data1D):
fields = FIELDS_1D
elif isinstance(data, Data2D):
fields = FIELDS_2D
else:
return data
# Make array of good points - all others will be removed
good = np.isfinite(getattr(data, fields[0]))
for name in fields[1:]:
array = getattr(data, name)
if array is not None:
# Update good points only if not already changed
good &= np.isfinite(array)
if not np.all(good):
for name in fields:
array = getattr(data, name)
if array is not None:
setattr(data, name, array[good])
return data
@staticmethod
def set_default_1d_units(data):
"""
Set the x and y axes to the default 1D units
:param data: 1D data set
:return:
"""
data.xaxis(r"\rm{Q}", '1/A')
data.yaxis(r"\rm{Intensity}", "1/cm")
return data
@staticmethod
def set_default_2d_units(data):
"""
Set the x and y axes to the default 2D units
:param data: 2D data set
:return:
"""
data.xaxis("\\rm{Q_{x}}", '1/A')
data.yaxis("\\rm{Q_{y}}", '1/A')
data.zaxis("\\rm{Intensity}", "1/cm")
return data
def convert_data_units(self, default_q_unit="1/A"):
"""
        Converts all data to the SasView default units of A^{-1} for Q and
cm^{-1} for I.
:param default_q_unit: The default Q unit used by Sasview
"""
convert_q = True
new_output = []
for data in self.output:
if data.isSesans:
new_output.append(data)
continue
try:
file_x_unit = data._xunit
data_conv_x = Converter(file_x_unit)
except KeyError:
logger.info("Unrecognized Q units in data file. No data "
"conversion attempted")
convert_q = False
try:
if isinstance(data, Data1D):
if convert_q:
data.x = data_conv_x(data.x, units=default_q_unit)
data._xunit = default_q_unit
data.x_unit = default_q_unit
if data.dx is not None:
data.dx = data_conv_x(data.dx,
units=default_q_unit)
if data.dxl is not None:
data.dxl = data_conv_x(data.dxl,
units=default_q_unit)
if data.dxw is not None:
data.dxw = data_conv_x(data.dxw,
units=default_q_unit)
elif isinstance(data, Data2D):
if convert_q:
data.qx_data = data_conv_x(data.qx_data,
units=default_q_unit)
if data.dqx_data is not None:
data.dqx_data = data_conv_x(data.dqx_data,
units=default_q_unit)
try:
file_y_unit = data._yunit
data_conv_y = Converter(file_y_unit)
data.qy_data = data_conv_y(data.qy_data,
units=default_q_unit)
if data.dqy_data is not None:
data.dqy_data = data_conv_y(data.dqy_data,
units=default_q_unit)
except KeyError:
logger.info("Unrecognized Qy units in data file. No"
" data conversion attempted")
except KeyError:
message = "Unable to convert Q units from {0} to 1/A."
message.format(default_q_unit)
data.errors.append(message)
new_output.append(data)
self.output = new_output
def format_unit(self, unit=None):
"""
Format units a common way
:param unit:
:return:
"""
if unit:
split = unit.split("/")
if len(split) == 1:
return unit
elif split[0] == '1':
return "{0}^".format(split[1]) + "{-1}"
else:
return "{0}*{1}^".format(split[0], split[1]) + "{-1}"
def set_all_to_none(self):
"""
Set all mutable values to None for error handling purposes
"""
self.current_dataset = None
self.current_datainfo = None
self.output = []
def data_cleanup(self):
"""
Clean up the data sets and refresh everything
:return: None
"""
self.remove_empty_q_values()
self.send_to_output() # Combine datasets with DataInfo
self.current_datainfo = DataInfo() # Reset DataInfo
def remove_empty_q_values(self):
"""
Remove any point where Q == 0
"""
if isinstance(self.current_dataset, plottable_1D):
# Booleans for resolutions
has_error_dx = self.current_dataset.dx is not None
has_error_dxl = self.current_dataset.dxl is not None
has_error_dxw = self.current_dataset.dxw is not None
has_error_dy = self.current_dataset.dy is not None
# Create arrays of zeros for non-existent resolutions
if has_error_dxw and not has_error_dxl:
array_size = self.current_dataset.dxw.size - 1
self.current_dataset.dxl = np.append(self.current_dataset.dxl,
np.zeros([array_size]))
has_error_dxl = True
elif has_error_dxl and not has_error_dxw:
array_size = self.current_dataset.dxl.size - 1
self.current_dataset.dxw = np.append(self.current_dataset.dxw,
np.zeros([array_size]))
has_error_dxw = True
elif not has_error_dxl and not has_error_dxw and not has_error_dx:
array_size = self.current_dataset.x.size - 1
self.current_dataset.dx = np.append(self.current_dataset.dx,
np.zeros([array_size]))
has_error_dx = True
if not has_error_dy:
array_size = self.current_dataset.y.size - 1
self.current_dataset.dy = np.append(self.current_dataset.dy,
np.zeros([array_size]))
has_error_dy = True
# Remove points where q = 0
x = self.current_dataset.x
self.current_dataset.x = self.current_dataset.x[x != 0]
self.current_dataset.y = self.current_dataset.y[x != 0]
if has_error_dy:
self.current_dataset.dy = self.current_dataset.dy[x != 0]
if has_error_dx:
self.current_dataset.dx = self.current_dataset.dx[x != 0]
if has_error_dxl:
self.current_dataset.dxl = self.current_dataset.dxl[x != 0]
if has_error_dxw:
self.current_dataset.dxw = self.current_dataset.dxw[x != 0]
elif isinstance(self.current_dataset, plottable_2D):
has_error_dqx = self.current_dataset.dqx_data is not None
has_error_dqy = self.current_dataset.dqy_data is not None
has_error_dy = self.current_dataset.err_data is not None
has_mask = self.current_dataset.mask is not None
x = self.current_dataset.qx_data
self.current_dataset.data = self.current_dataset.data[x != 0]
self.current_dataset.qx_data = self.current_dataset.qx_data[x != 0]
self.current_dataset.qy_data = self.current_dataset.qy_data[x != 0]
self.current_dataset.q_data = np.sqrt(
np.square(self.current_dataset.qx_data) + np.square(
self.current_dataset.qy_data))
if has_error_dy:
self.current_dataset.err_data = self.current_dataset.err_data[
x != 0]
if has_error_dqx:
self.current_dataset.dqx_data = self.current_dataset.dqx_data[
x != 0]
if has_error_dqy:
self.current_dataset.dqy_data = self.current_dataset.dqy_data[
x != 0]
if has_mask:
self.current_dataset.mask = self.current_dataset.mask[x != 0]
def reset_data_list(self, no_lines=0):
"""
Reset the plottable_1D object
"""
# Initialize data sets with arrays the maximum possible size
x = np.zeros(no_lines)
y = np.zeros(no_lines)
dx = np.zeros(no_lines)
dy = np.zeros(no_lines)
self.current_dataset = plottable_1D(x, y, dx, dy)
@staticmethod
def splitline(line):
"""
Splits a line into pieces based on common delimiters
:param line: A single line of text
:return: list of values
"""
# Initial try for CSV (split on ,)
toks = line.split(',')
# Now try SCSV (split on ;)
if len(toks) < 2:
toks = line.split(';')
# Now go for whitespace
if len(toks) < 2:
toks = line.split()
return toks
@abstractmethod
def get_file_contents(self):
"""
Reader specific class to access the contents of the file
All reader classes that inherit from FileReader must implement
"""
pass
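# A minimal illustrative subclass (hypothetical, not a reader shipped with
# SasView): it sketches the contract that ``get_file_contents`` imposes, using
# only the helpers defined above. Real readers are considerably more involved.
class TwoColumnReader(FileReader):
    type_name = "Two-column ASCII"
    type = ["Text files (*.txt|*.TXT)"]
    ext = ['.txt']

    def get_file_contents(self):
        x_vals = []
        y_vals = []
        self.current_datainfo = DataInfo()
        for line in self.nextlines():
            toks = self.splitline(line.strip())
            if len(toks) < 2:
                continue
            try:
                x_vals.append(float(toks[0]))
                y_vals.append(float(toks[1]))
            except ValueError:
                # Skip headers and malformed lines
                continue
        if not x_vals:
            raise FileContentsException("No parsable two-column data found.")
        self.current_dataset = plottable_1D(np.array(x_vals), np.array(y_vals))
        self.current_dataset = self.set_default_1d_units(self.current_dataset)
        self.send_to_output()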
| 40.634578
| 86
| 0.530339
|
32e221569c667e42504c29a9915fa44ee773a516
| 10,585
|
py
|
Python
|
elekto/controllers/elections.py
|
elekto-io/elekto
|
1052397d207bce351e9becb28df1561e19c82108
|
[
"Apache-2.0"
] | 17
|
2021-01-19T03:32:58.000Z
|
2022-02-15T07:11:05.000Z
|
elekto/controllers/elections.py
|
elekto-io/elekto
|
1052397d207bce351e9becb28df1561e19c82108
|
[
"Apache-2.0"
] | 20
|
2021-01-07T00:16:40.000Z
|
2022-03-30T04:29:12.000Z
|
elekto/controllers/elections.py
|
elekto-io/elekto
|
1052397d207bce351e9becb28df1561e19c82108
|
[
"Apache-2.0"
] | 7
|
2021-01-06T03:51:37.000Z
|
2022-03-05T04:32:43.000Z
|
# Copyright 2020 The Elekto Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author(s): Manish Sahani <rec.manish.sahani@gmail.com>
"""
The module is responsible for handling all the election's related request.
"""
import secrets
import string
import flask as F
from werkzeug.security import generate_password_hash, check_password_hash
from elekto import constants, APP, SESSION
from elekto.models import meta
from elekto.core.election import Election as CoreElection
from elekto.models.sql import Election, Ballot, Voter, Request
from elekto.middlewares.auth import auth_guard
from elekto.middlewares.election import * # noqa
@APP.route('/app')
@auth_guard
def app():
running = meta.Election.where('status', constants.ELEC_STAT_RUNNING)
return F.render_template('views/dashboard.html', running=running)
@APP.route('/app/elections') # Election listing
@auth_guard
def elections():
status = F.request.args.get('status')
res = meta.Election.all() if status is None else meta.Election.where('status', status)
res.sort(key=lambda e: e['start_datetime'], reverse=True)
return F.render_template('views/elections/index.html',
elections=res,
status=status)
@APP.route('/app/elections/<eid>') # Particular Election
@auth_guard
def elections_single(eid):
try:
election = meta.Election(eid)
candidates = election.candidates()
voters = election.voters()
e = SESSION.query(Election).filter_by(key=eid).first()
return F.render_template('views/elections/single.html',
election=election.get(),
candidates=candidates,
voters=voters,
voted=[v.user_id for v in e.voters])
except Exception as err:
return F.render_template('errors/message.html',
title='Error While rendering the election',
message=err.args[0])
@APP.route('/app/elections/<eid>/candidates/<cid>') # Particular Candidate
@auth_guard
def elections_candidate(eid, cid):
election = meta.Election(eid)
candidate = election.candidate(cid)
return F.render_template('views/elections/candidate.html',
election=election.get(),
candidate=candidate)
@APP.route('/app/elections/<eid>/vote', methods=['GET', 'POST'])
@auth_guard
@voter_guard
def elections_voting_page(eid):
election = meta.Election(eid)
candidates = election.candidates()
voters = election.voters()
e = SESSION.query(Election).filter_by(key=eid).first()
# Redirect to thankyou page if already voted
if F.g.user.id in [v.user_id for v in e.voters]:
return F.render_template('errors/message.html',
title='You have already voted',
message='To re-cast your vote, please visit\
the election page.')
if F.request.method == 'POST':
# encrypt the voter identity
passcode = ''.join(secrets.choice(string.digits) for i in range(6))
if len(F.request.form['password']):
passcode = F.request.form['password']
voter = generate_password_hash(F.g.user.username + '+' + passcode)
for k in F.request.form.keys():
if k.split('@')[0] == 'candidate':
candidate = k.split('@')[-1]
rank = F.request.form[k]
ballot = Ballot(rank=rank, candidate=candidate, voter=voter)
e.ballots.append(ballot)
# Add user to the voted list
e.voters.append(Voter(user_id=F.g.user.id))
SESSION.commit()
return F.redirect(F.url_for('elections_confirmation_page', eid=eid))
return F.render_template('views/elections/vote.html',
election=election.get(),
candidates=candidates,
voters=voters)
@APP.route('/app/elections/<eid>/vote/edit', methods=['POST'])
@auth_guard
@voter_guard
@has_voted_condition
def elections_edit(eid):
election = meta.Election(eid)
e = SESSION.query(Election).filter_by(key=eid).first()
# encrypt the voter identity
voter = F.g.user.username + '+' + F.request.form['password']
ballots = [b for b in e.ballots if check_password_hash(b.voter, voter)]
if not len(ballots):
F.flash('Incorrect password, the password must match with the one used\
before')
return F.redirect(F.url_for('elections_single', eid=eid))
# Delete all the ballots for the user
for b in ballots:
SESSION.delete(b)
# Remove the voter from the voted list
for voter in e.voters:
if voter.user_id == F.g.user.id:
SESSION.delete(voter)
SESSION.commit()
    F.flash('The old ballot was successfully deleted, please re-cast the ballot.')
return F.redirect(F.url_for('elections_single', eid=eid))
@APP.route('/app/elections/<eid>/confirmation', methods=['GET'])
@auth_guard
def elections_confirmation_page(eid):
election = meta.Election(eid)
e = SESSION.query(Election).filter_by(key=eid).first()
if F.g.user.id in [v.user_id for v in e.voters]:
return F.render_template('views/elections/confirmation.html',
election=election.get())
return F.redirect(F.url_for('elections_single', eid=eid))
@APP.route('/app/elections/<eid>/results/') # Election's Result
@auth_guard
@has_completed_condition
def elections_results(eid):
election = meta.Election(eid)
return F.render_template('views/elections/results.html',
election=election.get())
# Exception Request form
@APP.route('/app/elections/<eid>/exception', methods=['POST', 'GET'])
@auth_guard
@exception_guard
def elections_exception(eid):
election = meta.Election(eid)
e = SESSION.query(Election).filter_by(key=eid).first()
req = SESSION.query(Request).join(Request, Election.requests).filter(
Request.user_id == F.g.user.id).first()
if req:
return F.render_template('errors/message.html',
title="You have already filled the form.",
message="please wait for the election's\
supervisor to view your request.")
if F.request.method == 'POST':
erequest = Request(user_id=F.g.user.id,
name=F.request.form['name'],
email=F.request.form['email'],
chat=F.request.form['chat'],
description=F.request.form['description'],
comments=F.request.form['comments'])
e.requests.append(erequest)
SESSION.commit()
        F.flash('Request successfully submitted.')
return F.redirect(F.url_for('elections_single', eid=eid))
return F.render_template('views/elections/exception.html',
election=election.get())
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# #
# /!/ Election officer section \!\ #
# #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
@APP.route('/app/elections/<eid>/admin/') # Admin page for the election
@auth_guard
@admin_guard
def elections_admin(eid):
election = meta.Election(eid)
e = SESSION.query(Election).filter_by(key=eid).first()
return F.render_template('views/elections/admin.html',
election=election.get(),
e=e)
@APP.route('/app/elections/<eid>/admin/exception/<rid>', methods=['GET', 'POST'])
@auth_guard # Admin page for the reviewing exception
@admin_guard
def elections_admin_review(eid, rid):
election = meta.Election(eid)
e = SESSION.query(Election).filter_by(key=eid).first()
req = SESSION.query(Request).join(
Request, Election.requests).filter(Request.id == rid).first()
if F.request.method == 'POST':
        req.reviewed = not req.reviewed
SESSION.commit()
return F.render_template('views/elections/admin_exception.html',
election=election.get(),
req=req,
e=e)
@APP.route('/app/elections/<eid>/admin/results') # Admin page for the election
@auth_guard
@admin_guard
@has_completed_condition
def elections_admin_results(eid):
election = meta.Election(eid)
candidates = election.candidates()
e = SESSION.query(Election).filter_by(key=eid).first()
result = CoreElection.build(candidates, e.ballots).schulze()
return F.render_template('views/elections/admin_result.html',
election=election.get(),
result=result)
@APP.route('/app/elections/<eid>/admin/download') # download ballots as csv
@auth_guard
@admin_guard
@has_completed_condition
def elections_admin_download(eid):
election = meta.Election(eid)
candidates = election.candidates()
e = SESSION.query(Election).filter_by(key=eid).first()
# Generate a csv
ballots = CoreElection.build(candidates, e.ballots).ballots
candidates = {c['key']: '' for c in candidates}
csv = ','.join(list(candidates.keys())) + '\n'
for b in ballots.keys():
for c in candidates.keys():
candidates[c] = 'No opinion'
for c, rank in ballots[b]:
candidates[c] = rank
csv += ','.join([str(candidates[c]) for c in candidates.keys()]) + '\n'
return F.Response(csv,
mimetype="text/csv",
headers={"Content-disposition": "attachment; filename=ballots.csv"})
| 36.626298
| 90
| 0.602078
|
8cb49b68eddee7310d5a630d8be10af305b5f683
| 4,294
|
py
|
Python
|
scripts/.config/scripts/i3-wk-switch/i3-wk-switch.py
|
gmartinezramirez/.dotfiles
|
2e23909256cac0689615d2c61050dc8430413303
|
[
"Apache-2.0"
] | 1
|
2018-04-14T01:09:50.000Z
|
2018-04-14T01:09:50.000Z
|
scripts/.config/scripts/i3-wk-switch/i3-wk-switch.py
|
gmartinezramirez/.dotfiles
|
2e23909256cac0689615d2c61050dc8430413303
|
[
"Apache-2.0"
] | null | null | null |
scripts/.config/scripts/i3-wk-switch/i3-wk-switch.py
|
gmartinezramirez/.dotfiles
|
2e23909256cac0689615d2c61050dc8430413303
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
"""Emulates xmonad's workspace switching behavior in i3"""
# pylint: disable=no-member
import logging
import sys
from pprint import pformat
import i3
import time
LOG = logging.getLogger()
def setup_logger(level):
"""Initializes logger with debug level"""
LOG.setLevel(logging.DEBUG)
channel = logging.FileHandler("/tmp/i3-wk-switcher.log")
channel.setLevel(level)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
channel.setFormatter(formatter)
LOG.addHandler(channel)
def get_focused_workspace():
"""Get workspace that is currently focused"""
actives = [wk for wk in i3.get_workspaces() if wk['focused']]
assert len(actives) == 1
return actives[0]
def get_active_outputs():
"""Returns outputs (monitors) that are active"""
return [outp for outp in i3.get_outputs() if outp['active']]
def get_workspace(num):
"""Returns workspace with num or None of it does not exist"""
want_workspace_cands = [wk for wk in i3.get_workspaces()
if wk['num'] == num]
assert len(want_workspace_cands) in [0, 1]
if len(want_workspace_cands) == 0:
return None
else:
return want_workspace_cands[0]
def switch_workspace(num):
"""Switches to workspace number"""
i3.workspace('number %d' % num)
def swap_visible_workspaces(wk_a, wk_b):
"""Swaps two workspaces that are visible"""
switch_workspace(wk_a['num'])
i3.command('move', 'workspace to output ' + wk_b['output'])
switch_workspace(wk_b['num'])
i3.command('move', 'workspace to output ' + wk_a['output'])
def change_workspace(num):
"""
Switches to workspace num like xmonad.
Always sets focused output to workspace num. If the workspace is on
another output, then the workspaces are "shifted" among the outputs.
"""
# Allow for string or int type for argument
num = int(num)
focused_workspace = get_focused_workspace()
original_output = focused_workspace['output']
LOG.debug('Switching to workspace:{} on output:{}, display: {}:'.format(
num, focused_workspace['output'], pformat(focused_workspace, indent=2)))
# Check if already on workspace
if int(focused_workspace['num']) == num:
LOG.debug('Already on correct workspace')
return
# Get workspace we want to switch to
want_workspace = get_workspace(num)
if want_workspace is None:
        LOG.debug('Workspace does not exist yet; switching so that i3 creates it')
switch_workspace(num)
return
LOG.debug('Want workspace:\n' + pformat(want_workspace, indent=2))
# Save workspace originally showing on want_workspace's output
other_output = [outp for outp in get_active_outputs()
if outp['name'] == want_workspace['output']][0]
LOG.debug('Other_output=%s', pformat(other_output, indent=2))
other_workspace = [wk for wk in i3.get_workspaces()
if wk['name'] == other_output['current_workspace']][0]
LOG.debug('Other workspace:\n' + pformat(other_workspace, indent=2))
# Check if wanted workspace is on focused output
if focused_workspace['output'] == want_workspace['output']:
LOG.debug('Wanted workspace already on focused output, '
'switching as normal')
switch_workspace(num)
return
# Check if wanted workspace is on other output
if not want_workspace['visible']:
LOG.debug('Workspace to switch to is hidden')
# Switch to workspace on other output
switch_workspace(num)
LOG.debug('Wanted workspace is on other output')
# Wanted workspace is visible, so swap workspaces
swap_visible_workspaces(want_workspace, focused_workspace)
# Focus other_workspace
switch_workspace(other_workspace['num'])
# Focus on wanted workspace
time.sleep(.15)
LOG.debug('Setting focus to {}'.format(original_output))
i3.command('focus', 'output', original_output)
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'Usage: %s WORKSPACE_NUM' % sys.argv[0]
sys.exit(1)
setup_logger(logging.DEBUG)
try:
change_workspace(sys.argv[1])
except Exception:
        LOG.exception('An error occurred')
| 30.671429
| 88
| 0.670703
|
325cff8e52ae4421a4599a74a66f4ac2a6d9f153
| 332
|
py
|
Python
|
app/__init__.py
|
florinior12/SmartPlayer
|
c88b891bc377ee5f1c3554ca9870b68bc0ff47cd
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
florinior12/SmartPlayer
|
c88b891bc377ee5f1c3554ca9870b68bc0ff47cd
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
florinior12/SmartPlayer
|
c88b891bc377ee5f1c3554ca9870b68bc0ff47cd
|
[
"MIT"
] | null | null | null |
#Import Flask
from flask import Flask
#Import SQLAlchemy
from flask_sqlalchemy import SQLAlchemy
#Create the app object
app = Flask(__name__)
#Load configurations from config file
app.config.from_object('config')
#load database from our app
db = SQLAlchemy(app)
app.secret_key = app.config['SECRET_KEY']
from app import routes
| 18.444444
| 41
| 0.792169
|
881488c23a2fb60c458178dac014dd725c716dd5
| 434
|
py
|
Python
|
worker_templates/python_empty/src/python_empty.py
|
larshesel/mzbench
|
f8da05dd2dfc06a87bc3870095e360ee184de4e2
|
[
"BSD-3-Clause"
] | 127
|
2017-11-28T22:38:16.000Z
|
2022-02-24T11:17:03.000Z
|
worker_templates/python_empty/src/python_empty.py
|
velimir/mzbench
|
753d2e26fea92e0010c7e23378e93cb4ce982f20
|
[
"BSD-3-Clause"
] | 42
|
2017-11-27T16:56:44.000Z
|
2021-08-13T08:37:14.000Z
|
worker_templates/python_empty/src/python_empty.py
|
velimir/mzbench
|
753d2e26fea92e0010c7e23378e93cb4ce982f20
|
[
"BSD-3-Clause"
] | 27
|
2017-11-23T14:54:51.000Z
|
2021-11-22T19:03:41.000Z
|
import random
import mzbench
def initial_state():
pass
def metrics():
return [
[
('print', 'counter'),
('print_2', 'counter')
],
('dummy', 'histogram')
]
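# Note: the inner list groups 'print' and 'print_2' into one metric group
# (presumably rendered together by mzbench), while 'dummy' is reported as a
# separate histogram metric.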
def my_print(msg):
mzbench.notify(('print', 'counter'), 1)
mzbench.notify(('print_2', 'counter'), 2)
print "{0}".format(msg)
mzbench.notify(('dummy', 'histogram'), random.uniform(0, 1000000000)/7)
| 17.36
| 75
| 0.543779
|
1c2b5cb3fc61452aed2ffa3b1df033d244d6a253
| 2,232
|
py
|
Python
|
WaveBlocksND/GradientLinearCombinationHAWP.py
|
raoulbq/WaveBlocksND
|
225b5dd9b1af1998bd40b5f6467ee959292b6a83
|
[
"BSD-3-Clause"
] | 3
|
2016-09-01T21:13:54.000Z
|
2020-03-23T15:45:32.000Z
|
WaveBlocksND/GradientLinearCombinationHAWP.py
|
raoulbq/WaveBlocksND
|
225b5dd9b1af1998bd40b5f6467ee959292b6a83
|
[
"BSD-3-Clause"
] | null | null | null |
WaveBlocksND/GradientLinearCombinationHAWP.py
|
raoulbq/WaveBlocksND
|
225b5dd9b1af1998bd40b5f6467ee959292b6a83
|
[
"BSD-3-Clause"
] | 6
|
2016-03-16T15:22:01.000Z
|
2021-03-13T14:06:54.000Z
|
"""The WaveBlocks Project
Compute the action of the gradient operator applied to a
linear combination of Hagedorn wavepackets.
@author: R. Bourquin
@copyright: Copyright (C) 2013, 2014 R. Bourquin
@license: Modified BSD License
"""
from numpy import squeeze
from WaveBlocksND.Gradient import Gradient
from WaveBlocksND.GradientHAWP import GradientHAWP
from WaveBlocksND.LinearCombinationOfHAWPs import LinearCombinationOfHAWPs
__all__ = ["GradientLinearCombinationHAWP"]
class GradientLinearCombinationHAWP(Gradient):
r"""This class implements the computation of the action of the
gradient operator :math:`-i \varepsilon^2 \nabla_x` applied to
a linear combination :math:`\Upsilon` of Hagedorn wavepackets :math:`\Psi`.
"""
def __init__(self):
r"""
"""
pass
# TODO: Find a more efficient way to compute gradients
def apply_gradient(self, lincomb, component=None):
r"""Compute the effect of the gradient operator :math:`-i \varepsilon^2 \nabla_x`
on the linear combination :math:`\Upsilon` of Hagedorn wavepackets :math:`\Psi`.
:param lincomb: The linear combination :math:`\Upsilon`.
:type lincomb: A :py:class:`LinearCombinationOfHAWPs` instance.
:param component: The index :math:`i` of the component :math:`\Phi_i`.
:type component: Integer or ``None``.
:return: One linear combination :math:`\Upsilon_d` containing the gradients
for the component :math:`\partial_{x_d}` for each space dimension
component :math:`d = 1, \ldots, D`.
"""
D = lincomb.get_dimension()
N = lincomb.get_number_components()
J = lincomb.get_number_packets()
Cj = squeeze(lincomb.get_coefficients())
eps = lincomb.get_eps()
G = GradientHAWP()
new_lincombs = [LinearCombinationOfHAWPs(D, N, eps) for d in range(D)]
# Handle each wavepacket individually
for j in range(J):
packet = lincomb.get_wavepacket(j)
grads = G.apply_gradient(packet, component=component)
for d, grad in enumerate(grads):
new_lincombs[d].add_wavepacket(grad, Cj[j])
return new_lincombs
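# Usage sketch (names below are illustrative, not part of this module):
#   gradient = GradientLinearCombinationHAWP()
#   grad_lcs = gradient.apply_gradient(lincomb)
#   # grad_lcs[d] holds the linear combination for the d-th partial derivative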
| 34.875
| 89
| 0.672939
|
ce71e754c607a9d1a5b16ca68246ec6b1e629154
| 11,876
|
py
|
Python
|
src/RNAsketch-1.5/build/scripts-3.7/design-energyshift.py
|
lrsoenksen/CL_RNA_SynthBio
|
08b67ac5b0b262c4a79217dfe1d7457cd42791ab
|
[
"MIT"
] | 5
|
2020-10-14T11:34:17.000Z
|
2021-11-14T16:26:18.000Z
|
src/RNAsketch-1.5/build/scripts-3.7/design-energyshift.py
|
lrsoenksen/CL_RNA_SynthBio
|
08b67ac5b0b262c4a79217dfe1d7457cd42791ab
|
[
"MIT"
] | null | null | null |
src/RNAsketch-1.5/build/scripts-3.7/design-energyshift.py
|
lrsoenksen/CL_RNA_SynthBio
|
08b67ac5b0b262c4a79217dfe1d7457cd42791ab
|
[
"MIT"
] | 2
|
2020-12-27T05:26:05.000Z
|
2022-02-14T03:09:30.000Z
|
#!/anaconda3/bin/python
from __future__ import print_function
try:
from RNAsketch import *
except ImportError as e:
print(e)
exit(1)
import RNAblueprint as rbp
import argparse
import sys
import os
import time
from collections import Counter
from scipy import stats
def main():
    parser = argparse.ArgumentParser(description='Design a multi-stable riboswitch using Boltzmann sampling.')
parser.add_argument("-f", "--file", type = str, default=None, help='Read file in *.inp format')
parser.add_argument("-i", "--input", default=False, action='store_true', help='Read custom structures and sequence constraints from stdin')
parser.add_argument("-q", "--package", type=str, default='vrna', help='Chose the calculation package: hotknots, pkiss, nupack, or vrna/ViennaRNA (default: vrna)')
parser.add_argument("-j", "--objective", type=str, default='1', help='Chose the objective function: 1 for abs differences and 2 for squared (default: 1)')
parser.add_argument("-T", "--temperature", type=float, default=37.0, help='Temperature of the energy calculations.')
parser.add_argument("-n", "--number", type=int, default=1000, help='Number of designs to generate')
parser.add_argument("-m", "--model", type=str, default='stacking', help='Model for getting a new sequence: uniform, nussinov, basepairs, stacking')
parser.add_argument("-e", "--energies", type=str, default='', help='Target Energies for design. String of comma separated float values.')
parser.add_argument("-s", "--stop", type=int, default=0, help='Stop optimization run of unpaired bases if no better solution is aquired after (stop) trials. 0 is no local optimization.')
parser.add_argument("-c", "--csv", default=False, action='store_true', help='Write output as semi-colon csv file to stdout')
parser.add_argument("-k", "--kill", type=int, default=0, help='Timeout value of graph construction in seconds. (default: infinite)')
parser.add_argument("-p", "--progress", default=False, action='store_true', help='Show progress of optimization')
parser.add_argument("-d", "--debug", default=False, action='store_true', help='Show debug information of library')
args = parser.parse_args()
if args.debug:
print("# Options: number={0:d}, model={1:}, stop={2:d}, package={3:}, temperature={4:}".format(args.number, args.model, args.stop, args.package, args.temperature))
rbp.initialize_library(args.debug, args.kill)
# define structures
structures = []
constraint = ''
start_sequence = ''
if (args.input):
data = ''
for line in sys.stdin:
data = data + '\n' + line
(structures, constraint, start_sequence) = read_input(data)
elif (args.file is not None):
if args.debug:
print("# Input File: {0:}".format(args.file))
(structures, constraint, start_sequence) = read_inp_file(args.file)
else:
structures = ['((((....))))....((((....))))........',
'........((((....((((....))))....))))',
'((((((((....))))((((....))))....))))']
constraint = 'NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN'
# try to construct dependency graph, catch errors and timeouts
construction_time = 0.0
sample_time = 0.0
# remove lonely pairs
structures = [RNAStructure(s).removeLonelyPairs() for s in structures]
# general DG values
if args.debug:
print("# " + "\n# ".join(structures) + "\n# " + constraint)
design = get_Design(structures, start_sequence, args.package, args.temperature)
# print header for csv file
if (args.csv):
print(";".join(["stop",
"model",
"score",
"num_mutations",
"construction_time",
"sample_time",
design.write_csv_header()]))
# read target energies
target_energies = {}
if args.energies:
for i, w in enumerate(args.energies.split(',')):
target_energies[i] = -1*float(w)
else:
exit(1)
if args.debug:
print("# Turner Target Energies are: ", target_energies)
# get energy offsets
slope, intercept = getEnergyOffsets(structures, args)
# correct target energies with offsets
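    # getEnergyOffsets fits turner ~ slope * simple + intercept per structure;
    # inverting that relation converts the Turner-model targets into targets
    # expressed in the simple sampling model.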
for t in range(0, len(structures)):
target_energies[t] = (target_energies[t] - intercept[t]) / slope[t]
if args.debug:
print("# Simple Target Energies are: ", target_energies)
nstr = len(structures)
wastefactor = 20
sampler = RPSampler(structures, model=args.model, weights=([1.0] * nstr), gcweight=1.0, temperature=args.temperature, stacksize=(wastefactor*args.number))
AdmissibleSample = Sample(sampler, nstr, target_energies, target_GC=0.5, number=args.number, args=args)
for a in AdmissibleSample:
design = get_Design(structures, a['seq'], args.package, args.temperature)
#out = '$;'
#for i in range(0, design.number_of_structures):
# out = ';'.join([out, str(a['energies'][i]), str(intercept[i]+slope[i]*a['energies'][i]), str(design.eos[str(i)])])
#print(out)
#print('$ simple model: ', a['energies'], ' viennaRNA: ', design.eos)
objective = calculate_objective
        if (args.objective == '2'):
objective = squared_objective
# if args.stop is not 0, we want to optimize unpaired positions in the structure
if (args.stop):
score, num, sample_time = local_optimization(design, objective, args)
else:
score = objective(design)
num = 0
# output sequence
if (args.csv):
print(args.stop,
"\"" + args.model + "\"",
score,
num, # number of sequences sampled
construction_time,
sample_time, # sample time until now
design.write_csv(), sep=";")
else:
print(design.write_out(score))
def getEnergyOffsets(structures, args):
sampler = RPSampler(structures, model=args.model, temperature=args.temperature, stacksize=1000, StopConstruct=True, debug=args.debug)
    # get new sequence
newsample, energies = sampler.dump_new_stack()
nstr = len(structures)
simple = np.zeros( (len(newsample), nstr) )
turner = np.zeros( (len(newsample), nstr) )
# iterate over sample
for i, s in enumerate(newsample):
# get design object
design = get_Design(structures, s, args.package, args.temperature)
# iterate over structures
for t in range(0, nstr):
# calculate offset between turner eos and simple model eos
#print(structures[t], s, design.eos[str(t)], energies[i][t])
simple[i,t] = energies[i][t]
turner[i,t] = design.eos[str(t)]
#print(i, t, energies[i][t], design.eos[str(t)], structures[t], s)
#turner = np.where(turner > 1000, np.nan, turner)
# get linear regression
slope = {}
intercept = {}
r_value = {}
for t in range(0, nstr):
#varx=simple[:,t] # > varx[mask]
#vary=turner[:,t] # > vary[mask]
#mask = ~np.isnan(varx) & ~np.isnan(vary)
slope[t], intercept[t], r_value[t], p_value, std_err = stats.linregress(simple[:,t],turner[:,t])
if args.debug:
plotRegression(simple, turner, slope, intercept, r_value)
print('# Slopes, intercepts and r-value are: ', slope, intercept, r_value)
return slope, intercept
def plotRegression(simple, turner, slope, intercept, r_value):
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import matplotlib.cm as cm
# make histogram plot
params = {'legend.fontsize': 'x-large',
'figure.figsize': (7, 5),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
#fig, axes = plt.subplots(3, 1, sharex=False, sharey=False)
fig = plt.figure()
plt.xlabel("Energy of Structure in Simple Model [kcal/mol]")
plt.ylabel("Energy of Structure in Turner Model [kcal/mol]")
fig.legend().set_visible(False)
color_i = 0.0
for t in slope.keys():
plt.plot(simple[:,t], turner[:,t], '.', color=cm.viridis(color_i), label=str(t), alpha=0.5)
plt.plot(simple[:,t], slope[t] * simple[:,t]+ intercept[t], '-', color=cm.viridis(color_i))
fig.text(0.16, 0.81-color_i/7, "$R^{2}$ = "+"{0:.3f}".format(r_value[t]), color=cm.viridis(color_i))
# increment color
color_i += 0.3
#plt.legend(loc='upper left')
fig.savefig('regression.svg', dpi=300)
def Sample(sampler, nstr, target_energies, target_GC, args, target_energy_eps = 0.1, target_GC_eps=0.10, maxiterations=15, number=1000):
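    # Adaptive sampling loop: repeatedly draw a stack of sequences from the
    # sampler, keep those whose simple-model energies and GC content fall within
    # the eps tolerances of the targets, and retune the per-structure weights and
    # the GC weight between rounds until `number` admissible sequences are found
    # or `maxiterations` is reached.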
# weights = [math.exp(1/((args.temperature + 273.15)*0.00198717))] * nstr
AdmissibleSample = []
# count iterations
count = 0
while count < maxiterations:
count += 1
# get new sequences
if args.debug:
print("# Weights: ", sampler.weights)
print('# GC weight: ', sampler.gcweight)
newsample, energies = sampler.dump_new_stack()
        # get average structure energies for newsample
eos = np.zeros( (len(newsample), nstr) )
GC_freq = []
for i, s in enumerate(newsample):
# count GC content
c = Counter(s)
sigma = 2.0 # one per nucleotide, laplace
GC = (c['G'] + c['C'] + sigma) / (len(s) + 2*sigma)
GC_freq.append(GC)
# add if it is eps-admissible
admissible = True
if not (1-target_GC_eps <= GC/target_GC <= 1+target_GC_eps):
admissible = False
for t in range(0, nstr):
# add to eos np array in any case
eos[i,t] = energies[i][t]
# check if eps admissible
#print(eos[i,t], eos[i,t]/target_energies[t], target_energies[t])
if not (1-target_energy_eps <= eos[i,t]/target_energies[t] <= 1+target_energy_eps):
admissible = False
if admissible:
#print('# is admissible:', eos[i,:], GC)
AdmissibleSample.append({'seq': s, 'energies': energies[i]})
# update weights
for t in range(0, nstr):
e_mean = np.mean(eos[:,t])
if args.debug:
print('# Energy mean: ', str(t), e_mean)
# exp version
sampler.weights[t] = sampler.weights[t] * (1.1**(e_mean-target_energies[t]))
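            # Multiplicative update: a mean energy above the target grows the
            # weight (and shrinks it otherwise), which presumably biases the next
            # sampled stack toward the target energy for structure t.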
# Yann old version without positive e_mean
#weights[t] = weights[t]*target_energies[t]/e_mean
# update gcweight
GC_mean = np.mean(GC_freq)
if args.debug:
print('# GC mean: ', GC_mean)
sampler.gcweight = sampler.gcweight * target_GC/GC_mean
# return if large enough
if args.debug:
print('# Found for current Target: ', len(AdmissibleSample)/float(number), '%')
if len(AdmissibleSample) >= number:
break
return AdmissibleSample
def local_optimization(design, objective, args):
# in case we want to optimize unpaired positions we can call this here
dg = rbp.DependencyGraphMT(design.structures)
start = time.clock()
(score, number_of_mutations) = adaptive_walk_fixed(dg, design, objective_function=objective, number=args.stop, mode='sample_clocal', progress=args.progress)
sample_time = time.clock() - start
return score, number_of_mutations, sample_time
def squared_objective(design, weight=0.5):
return calculate_objective_1(design) + weight * calculate_objective_2_squared(design)
if __name__ == "__main__":
main()
| 43.501832
| 190
| 0.60997
|
1f20f4d06db3ddc8eedf758807068421dff2ebb2
| 27,533
|
py
|
Python
|
grr/server/grr_response_server/gui/api_plugins/client.py
|
JiYonG-Lee/grr
|
57fef67080ac6b8fd3de3ba0adfca064d34b7689
|
[
"Apache-2.0"
] | 1
|
2020-06-25T14:25:51.000Z
|
2020-06-25T14:25:51.000Z
|
grr/server/grr_response_server/gui/api_plugins/client.py
|
JiYonG-Lee/grr
|
57fef67080ac6b8fd3de3ba0adfca064d34b7689
|
[
"Apache-2.0"
] | 3
|
2021-05-11T20:18:38.000Z
|
2022-03-02T09:33:56.000Z
|
grr/server/grr_response_server/gui/api_plugins/client.py
|
JiYonG-Lee/grr
|
57fef67080ac6b8fd3de3ba0adfca064d34b7689
|
[
"Apache-2.0"
] | 1
|
2020-06-25T14:25:54.000Z
|
2020-06-25T14:25:54.000Z
|
#!/usr/bin/env python
# Lint as: python3
"""API handlers for accessing and searching clients and managing labels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import ipaddress
import re
from urllib import parse as urlparse
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import cloud as rdf_cloud
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import collection
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
from grr_response_proto.api import client_pb2
from grr_response_server import action_registry
from grr_response_server import client_index
from grr_response_server import data_store
from grr_response_server import fleetspeak_connector
from grr_response_server import fleetspeak_utils
from grr_response_server import flow
from grr_response_server import ip_resolver
from grr_response_server import timeseries
from grr_response_server.databases import db
from grr_response_server.flows.general import discovery
from grr_response_server.gui import api_call_handler_base
from grr_response_server.gui import api_call_handler_utils
from grr_response_server.gui.api_plugins import stats as api_stats
from grr_response_server.rdfvalues import objects as rdf_objects
from fleetspeak.src.server.proto.fleetspeak_server import admin_pb2
def UpdateClientsFromFleetspeak(clients):
"""Updates ApiClient records to include info from Fleetspeak."""
if not fleetspeak_connector.CONN or not fleetspeak_connector.CONN.outgoing:
# FS not configured, or an outgoing connection is otherwise unavailable.
return
id_map = {}
for client in clients:
if client.fleetspeak_enabled:
id_map[fleetspeak_utils.GRRIDToFleetspeakID(client.client_id)] = client
if not id_map:
return
res = fleetspeak_connector.CONN.outgoing.ListClients(
admin_pb2.ListClientsRequest(client_ids=list(id_map.keys())))
for read in res.clients:
api_client = id_map[read.client_id]
api_client.last_seen_at = fleetspeak_utils.TSToRDFDatetime(
read.last_contact_time)
api_client.last_clock = fleetspeak_utils.TSToRDFDatetime(read.last_clock)
class InterrogateOperationNotFoundError(
api_call_handler_base.ResourceNotFoundError):
"""Raised when an interrogate operation could not be found."""
class ApiClientId(rdfvalue.RDFString):
"""Class encapsulating client ids."""
CLIENT_ID_RE = re.compile(r"^C\.[0-9a-fA-F]{16}$")
def __init__(self, initializer=None):
if isinstance(initializer, rdf_client.ClientURN):
initializer = initializer.Basename()
super().__init__(initializer=initializer)
# TODO(user): move this to a separate validation method when
# common RDFValues validation approach is implemented.
if self._value:
re_match = self.CLIENT_ID_RE.match(self._value)
if not re_match:
raise ValueError("Invalid client id: %s" % str(self._value))
def ToString(self):
if not self._value:
raise ValueError("Can't call ToString() on an empty client id.")
return self._value
class ApiClient(rdf_structs.RDFProtoStruct):
"""API client object."""
protobuf = client_pb2.ApiClient
rdf_deps = [
rdf_objects.ClientLabel,
ApiClientId,
rdfvalue.ByteSize,
rdf_client.ClientInformation,
rdf_client.ClientURN,
rdf_cloud.CloudInstance,
rdf_client.HardwareInfo,
rdf_client_network.Interface,
rdf_client.KnowledgeBase,
rdfvalue.RDFDatetime,
rdf_client.Uname,
rdf_client.User,
rdf_client_fs.Volume,
]
def InitFromClientObject(self, client_obj):
# TODO(amoser): Deprecate all urns.
self.urn = client_obj.client_id
self.client_id = client_obj.client_id
self.agent_info = client_obj.startup_info.client_info
self.hardware_info = client_obj.hardware_info
os_info = rdf_client.Uname()
if client_obj.os_version:
os_info.version = client_obj.os_version
if client_obj.os_release:
os_info.release = client_obj.os_release
if client_obj.kernel:
os_info.kernel = client_obj.kernel
if client_obj.arch:
os_info.machine = client_obj.arch
if client_obj.install_time:
os_info.install_date = client_obj.install_time
kb = client_obj.knowledge_base
if kb:
self.knowledge_base = kb
if kb.os:
os_info.system = kb.os
if kb.fqdn:
os_info.fqdn = kb.fqdn
# TODO(amoser): Deprecate this field in favor of the kb.
if kb.users:
self.users = sorted(kb.users, key=lambda user: user.username)
self.os_info = os_info
if client_obj.interfaces:
self.interfaces = client_obj.interfaces
if client_obj.volumes:
self.volumes = client_obj.volumes
if client_obj.cloud_instance:
self.cloud_instance = client_obj.cloud_instance
self.age = client_obj.timestamp
if client_obj.memory_size:
self.memory_size = client_obj.memory_size
if client_obj.startup_info.boot_time:
self.last_booted_at = client_obj.startup_info.boot_time
return self
def InitFromClientInfo(self, client_info):
self.InitFromClientObject(client_info.last_snapshot)
# If we have it, use the boot_time / agent info from the startup
# info which might be more recent than the interrogation
# results. At some point we should have a dedicated API for
# startup information instead of packing it into the API client
# object.
if client_info.last_startup_info.boot_time:
self.last_booted_at = client_info.last_startup_info.boot_time
if client_info.last_startup_info.client_info:
self.agent_info = client_info.last_startup_info.client_info
md = client_info.metadata
if md:
if md.first_seen:
self.first_seen_at = md.first_seen
if md.ping:
self.last_seen_at = md.ping
if md.clock:
self.last_clock = md.clock
if md.last_crash_timestamp:
self.last_crash_at = md.last_crash_timestamp
self.fleetspeak_enabled = md.fleetspeak_enabled
self.labels = client_info.labels
return self
def ObjectReference(self):
return rdf_objects.ObjectReference(
reference_type=rdf_objects.ObjectReference.Type.CLIENT,
client=rdf_objects.ClientReference(client_id=str(self.client_id)))
class ApiClientActionRequest(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiClientActionRequest
rdf_deps = [
rdf_flows.GrrMessage,
rdfvalue.RDFDatetime,
rdfvalue.RDFURN,
]
class ApiSearchClientsArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiSearchClientsArgs
class ApiSearchClientsResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiSearchClientsResult
rdf_deps = [
ApiClient,
]
class ApiSearchClientsHandler(api_call_handler_base.ApiCallHandler):
"""Renders results of a client search."""
args_type = ApiSearchClientsArgs
result_type = ApiSearchClientsResult
def Handle(self, args, token=None):
end = args.count or db.MAX_COUNT
keywords = compatibility.ShlexSplit(args.query)
api_clients = []
index = client_index.ClientIndex()
# LookupClients returns a sorted list of client ids.
clients = index.LookupClients(keywords)[args.offset:args.offset + end]
client_infos = data_store.REL_DB.MultiReadClientFullInfo(clients)
for client_info in client_infos.values():
api_clients.append(ApiClient().InitFromClientInfo(client_info))
UpdateClientsFromFleetspeak(api_clients)
return ApiSearchClientsResult(items=api_clients)
class ApiLabelsRestrictedSearchClientsHandler(
api_call_handler_base.ApiCallHandler):
"""Renders results of a client search."""
args_type = ApiSearchClientsArgs
result_type = ApiSearchClientsResult
def __init__(self, labels_whitelist=None, labels_owners_whitelist=None):
super().__init__()
self.labels_whitelist = set(labels_whitelist or [])
self.labels_owners_whitelist = set(labels_owners_whitelist or [])
def _CheckClientLabels(self, client_obj, token=None):
for label in client_obj.GetLabels():
if (label.name in self.labels_whitelist and
label.owner in self.labels_owners_whitelist):
return True
return False
def _VerifyLabels(self, labels):
for label in labels:
if (label.name in self.labels_whitelist and
label.owner in self.labels_owners_whitelist):
return True
return False
def Handle(self, args, token=None):
if args.count:
end = args.offset + args.count
# Read <count> clients ahead in case some of them fail to open / verify.
batch_size = end + args.count
else:
end = db.MAX_COUNT
batch_size = end
keywords = compatibility.ShlexSplit(args.query)
api_clients = []
index = client_index.ClientIndex()
# TODO(amoser): We could move the label verification into the
# database making this method more efficient. Label restrictions
# should be on small subsets though so this might not be worth
# it.
all_client_ids = set()
for label in self.labels_whitelist:
label_filter = ["label:" + label] + keywords
all_client_ids.update(index.LookupClients(label_filter))
index = 0
for cid_batch in collection.Batch(sorted(all_client_ids), batch_size):
client_infos = data_store.REL_DB.MultiReadClientFullInfo(cid_batch)
for _, client_info in sorted(client_infos.items()):
if not self._VerifyLabels(client_info.labels):
continue
if index >= args.offset and index < end:
api_clients.append(ApiClient().InitFromClientInfo(client_info))
index += 1
if index >= end:
UpdateClientsFromFleetspeak(api_clients)
return ApiSearchClientsResult(items=api_clients)
UpdateClientsFromFleetspeak(api_clients)
return ApiSearchClientsResult(items=api_clients)
class ApiVerifyAccessArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiVerifyAccessArgs
rdf_deps = [
ApiClientId,
]
class ApiVerifyAccessResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiVerifyAccessResult
rdf_deps = []
class ApiVerifyAccessHandler(api_call_handler_base.ApiCallHandler):
"""Dummy handler that renders empty message."""
args_type = ApiVerifyAccessArgs
result_type = ApiVerifyAccessResult
def Handle(self, args, token=None):
return ApiVerifyAccessResult()
class ApiGetClientArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiGetClientArgs
rdf_deps = [
ApiClientId,
rdfvalue.RDFDatetime,
]
class ApiGetClientHandler(api_call_handler_base.ApiCallHandler):
"""Renders summary of a given client."""
args_type = ApiGetClientArgs
result_type = ApiClient
def Handle(self, args, token=None):
client_id = str(args.client_id)
info = data_store.REL_DB.ReadClientFullInfo(client_id)
if info is None:
raise api_call_handler_base.ResourceNotFoundError()
if args.timestamp:
# Assume that a snapshot for this particular timestamp exists.
snapshots = data_store.REL_DB.ReadClientSnapshotHistory(
client_id, timerange=(args.timestamp, args.timestamp))
if snapshots:
info.last_snapshot = snapshots[0]
info.last_startup_info = snapshots[0].startup_info
api_client = ApiClient().InitFromClientInfo(info)
UpdateClientsFromFleetspeak([api_client])
return api_client
class ApiGetClientVersionsArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiGetClientVersionsArgs
rdf_deps = [
ApiClientId,
rdfvalue.RDFDatetime,
]
class ApiGetClientVersionsResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiGetClientVersionsResult
rdf_deps = [
ApiClient,
]
class ApiGetClientVersionsHandler(api_call_handler_base.ApiCallHandler):
"""Retrieves a multiple versions of a given client."""
args_type = ApiGetClientVersionsArgs
result_type = ApiGetClientVersionsResult
def Handle(self, args, token=None):
end_time = args.end or rdfvalue.RDFDatetime.Now()
start_time = args.start or end_time - rdfvalue.Duration.From(
3, rdfvalue.MINUTES)
items = []
client_id = str(args.client_id)
history = data_store.REL_DB.ReadClientSnapshotHistory(
client_id, timerange=(start_time, end_time))
labels = data_store.REL_DB.ReadClientLabels(client_id)
for client in history[::-1]:
c = ApiClient().InitFromClientObject(client)
c.labels = labels
items.append(c)
return ApiGetClientVersionsResult(items=items)
class ApiGetClientVersionTimesArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiGetClientVersionTimesArgs
rdf_deps = [
ApiClientId,
]
class ApiGetClientVersionTimesResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiGetClientVersionTimesResult
rdf_deps = [
rdfvalue.RDFDatetime,
]
class ApiGetClientVersionTimesHandler(api_call_handler_base.ApiCallHandler):
"""Retrieves a list of versions for the given client."""
args_type = ApiGetClientVersionTimesArgs
result_type = ApiGetClientVersionTimesResult
def Handle(self, args, token=None):
    # TODO(amoser): Again, this is rather inefficient; if we moved
# this call to the datastore we could make it much
# faster. However, there is a chance that this will not be
# needed anymore once we use the relational db everywhere, let's
# decide later.
client_id = str(args.client_id)
history = data_store.REL_DB.ReadClientSnapshotHistory(client_id)
times = [h.timestamp for h in history]
return ApiGetClientVersionTimesResult(times=times)
class ApiInterrogateClientArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiInterrogateClientArgs
rdf_deps = [
ApiClientId,
]
class ApiInterrogateClientResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiInterrogateClientResult
class ApiInterrogateClientHandler(api_call_handler_base.ApiCallHandler):
"""Interrogates the given client."""
args_type = ApiInterrogateClientArgs
result_type = ApiInterrogateClientResult
def Handle(self, args, token=None):
flow_id = flow.StartFlow(
flow_cls=discovery.Interrogate,
client_id=str(args.client_id),
creator=token.username)
# TODO(user): don't encode client_id inside the operation_id, but
# rather have it as a separate field.
return ApiInterrogateClientResult(operation_id=flow_id)
class ApiGetInterrogateOperationStateArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiGetInterrogateOperationStateArgs
rdf_deps = [
ApiClientId,
]
class ApiGetInterrogateOperationStateResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiGetInterrogateOperationStateResult
class ApiGetInterrogateOperationStateHandler(
api_call_handler_base.ApiCallHandler):
"""Retrieves the state of the interrogate operation."""
args_type = ApiGetInterrogateOperationStateArgs
result_type = ApiGetInterrogateOperationStateResult
def Handle(self, args, token=None):
client_id = str(args.client_id)
flow_id = str(args.operation_id)
precondition.ValidateClientId(client_id)
precondition.ValidateFlowId(flow_id)
# TODO(user): test both exception scenarios below.
try:
flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
except db.UnknownFlowError:
raise InterrogateOperationNotFoundError("Operation with id %s not found" %
args.operation_id)
expected_flow_name = compatibility.GetName(discovery.Interrogate)
if flow_obj.flow_class_name != expected_flow_name:
raise InterrogateOperationNotFoundError("Operation with id %s not found" %
args.operation_id)
complete = flow_obj.flow_state != flow_obj.FlowState.RUNNING
result = ApiGetInterrogateOperationStateResult()
if complete:
result.state = ApiGetInterrogateOperationStateResult.State.FINISHED
else:
result.state = ApiGetInterrogateOperationStateResult.State.RUNNING
return result
class ApiGetLastClientIPAddressArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiGetLastClientIPAddressArgs
rdf_deps = [
ApiClientId,
]
class ApiGetLastClientIPAddressResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiGetLastClientIPAddressResult
def _GetAddrFromFleetspeak(client_id):
res = fleetspeak_connector.CONN.outgoing.ListClients(
admin_pb2.ListClientsRequest(
client_ids=[fleetspeak_utils.GRRIDToFleetspeakID(client_id)]))
if not res.clients or not res.clients[0].last_contact_address:
return "", None
# last_contact_address typically includes a port
parsed = urlparse.urlparse("//{}".format(res.clients[0].last_contact_address))
ip_str = parsed.hostname
return ip_str, ipaddress.ip_address(ip_str)
class ApiGetLastClientIPAddressHandler(api_call_handler_base.ApiCallHandler):
"""Retrieves the last ip a client used for communication with the server."""
args_type = ApiGetLastClientIPAddressArgs
result_type = ApiGetLastClientIPAddressResult
def Handle(self, args, token=None):
client_id = str(args.client_id)
md = data_store.REL_DB.ReadClientMetadata(client_id)
if md.fleetspeak_enabled:
ip_str, ipaddr_obj = _GetAddrFromFleetspeak(client_id)
else:
try:
ipaddr_obj = md.ip.AsIPAddr()
ip_str = str(ipaddr_obj)
except ValueError:
ipaddr_obj = None
ip_str = ""
status, info = ip_resolver.IP_RESOLVER.RetrieveIPInfo(ipaddr_obj)
return ApiGetLastClientIPAddressResult(ip=ip_str, info=info, status=status)
class ApiListClientCrashesArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiListClientCrashesArgs
rdf_deps = [
ApiClientId,
]
class ApiListClientCrashesResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiListClientCrashesResult
rdf_deps = [
rdf_client.ClientCrash,
]
class ApiListClientCrashesHandler(api_call_handler_base.ApiCallHandler):
"""Returns a list of crashes for the given client."""
args_type = ApiListClientCrashesArgs
result_type = ApiListClientCrashesResult
def Handle(self, args, token=None):
crashes = data_store.REL_DB.ReadClientCrashInfoHistory(str(args.client_id))
total_count = len(crashes)
result = api_call_handler_utils.FilterList(
crashes, args.offset, count=args.count, filter_value=args.filter)
return ApiListClientCrashesResult(items=result, total_count=total_count)
class ApiAddClientsLabelsArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiAddClientsLabelsArgs
rdf_deps = [
ApiClientId,
]
class ApiAddClientsLabelsHandler(api_call_handler_base.ApiCallHandler):
"""Adds labels to a given client."""
args_type = ApiAddClientsLabelsArgs
def Handle(self, args, token=None):
for api_client_id in args.client_ids:
cid = str(api_client_id)
data_store.REL_DB.AddClientLabels(cid, token.username, args.labels)
idx = client_index.ClientIndex()
idx.AddClientLabels(cid, args.labels)
class ApiRemoveClientsLabelsArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiRemoveClientsLabelsArgs
rdf_deps = [
ApiClientId,
]
class ApiRemoveClientsLabelsHandler(api_call_handler_base.ApiCallHandler):
"""Remove labels from a given client."""
args_type = ApiRemoveClientsLabelsArgs
def RemoveClientLabels(self, client, labels_names):
"""Removes labels with given names from a given client object."""
affected_owners = set()
for label in client.GetLabels():
if label.name in labels_names and label.owner != "GRR":
affected_owners.add(label.owner)
for owner in affected_owners:
client.RemoveLabels(labels_names, owner=owner)
def Handle(self, args, token=None):
for client_id in args.client_ids:
cid = str(client_id)
data_store.REL_DB.RemoveClientLabels(cid, token.username, args.labels)
labels_to_remove = set(args.labels)
existing_labels = data_store.REL_DB.ReadClientLabels(cid)
for label in existing_labels:
labels_to_remove.discard(label.name)
if labels_to_remove:
idx = client_index.ClientIndex()
idx.RemoveClientLabels(cid, labels_to_remove)
class ApiListClientsLabelsResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiListClientsLabelsResult
rdf_deps = [
rdf_objects.ClientLabel,
]
class ApiListClientsLabelsHandler(api_call_handler_base.ApiCallHandler):
"""Lists all the available clients labels."""
result_type = ApiListClientsLabelsResult
def Handle(self, args, token=None):
labels = data_store.REL_DB.ReadAllClientLabels()
label_objects = []
for name in set(l.name for l in labels):
label_objects.append(rdf_objects.ClientLabel(name=name))
return ApiListClientsLabelsResult(
items=sorted(label_objects, key=lambda l: l.name))
class ApiListKbFieldsResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiListKbFieldsResult
class ApiListKbFieldsHandler(api_call_handler_base.ApiCallHandler):
"""Lists all the available clients knowledge base fields."""
result_type = ApiListKbFieldsResult
def Handle(self, args, token=None):
fields = rdf_client.KnowledgeBase().GetKbFieldNames()
return ApiListKbFieldsResult(items=sorted(fields))
class ApiListClientActionRequestsArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiListClientActionRequestsArgs
rdf_deps = [
ApiClientId,
]
class ApiListClientActionRequestsResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiListClientActionRequestsResult
rdf_deps = [
ApiClientActionRequest,
]
class ApiListClientActionRequestsHandler(api_call_handler_base.ApiCallHandler):
"""Lists pending client action requests."""
args_type = ApiListClientActionRequestsArgs
result_type = ApiListClientActionRequestsResult
REQUESTS_NUM_LIMIT = 1000
def Handle(self, args, token=None):
result = ApiListClientActionRequestsResult()
request_cache = {}
for r in data_store.REL_DB.ReadAllClientActionRequests(str(args.client_id)):
stub = action_registry.ACTION_STUB_BY_ID[r.action_identifier]
client_action = compatibility.GetName(stub)
request = ApiClientActionRequest(
leased_until=r.leased_until,
session_id="%s/%s" % (r.client_id, r.flow_id),
client_action=client_action)
result.items.append(request)
if not args.fetch_responses:
continue
if r.flow_id not in request_cache:
req_res = data_store.REL_DB.ReadAllFlowRequestsAndResponses(
str(args.client_id), r.flow_id)
request_cache[r.flow_id] = req_res
for req, responses in request_cache[r.flow_id]:
if req.request_id == r.request_id:
res = []
for resp_id in sorted(responses):
m = responses[resp_id].AsLegacyGrrMessage()
res.append(m)
request.responses = res
return result
class ApiGetClientLoadStatsArgs(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiGetClientLoadStatsArgs
rdf_deps = [
ApiClientId,
rdfvalue.RDFDatetime,
]
class ApiGetClientLoadStatsResult(rdf_structs.RDFProtoStruct):
protobuf = client_pb2.ApiGetClientLoadStatsResult
rdf_deps = [
api_stats.ApiStatsStoreMetricDataPoint,
]
class ApiGetClientLoadStatsHandler(api_call_handler_base.ApiCallHandler):
"""Returns client load stats data."""
args_type = ApiGetClientLoadStatsArgs
result_type = ApiGetClientLoadStatsResult
# pyformat: disable
GAUGE_METRICS = [
ApiGetClientLoadStatsArgs.Metric.CPU_PERCENT,
ApiGetClientLoadStatsArgs.Metric.MEMORY_PERCENT,
ApiGetClientLoadStatsArgs.Metric.MEMORY_RSS_SIZE,
ApiGetClientLoadStatsArgs.Metric.MEMORY_VMS_SIZE
]
# pyformat: enable
MAX_SAMPLES = 100
def Handle(self, args, token=None):
start_time = args.start
end_time = args.end
if not end_time:
end_time = rdfvalue.RDFDatetime.Now()
if not start_time:
start_time = end_time - rdfvalue.Duration.From(30, rdfvalue.MINUTES)
stat_values = data_store.REL_DB.ReadClientStats(
client_id=str(args.client_id),
min_timestamp=start_time,
max_timestamp=end_time)
points = []
for stat_value in reversed(stat_values):
if args.metric == args.Metric.CPU_PERCENT:
points.extend(
(s.cpu_percent, s.timestamp) for s in stat_value.cpu_samples)
elif args.metric == args.Metric.CPU_SYSTEM:
points.extend(
(s.system_cpu_time, s.timestamp) for s in stat_value.cpu_samples)
elif args.metric == args.Metric.CPU_USER:
points.extend(
(s.user_cpu_time, s.timestamp) for s in stat_value.cpu_samples)
elif args.metric == args.Metric.IO_READ_BYTES:
points.extend(
(s.read_bytes, s.timestamp) for s in stat_value.io_samples)
elif args.metric == args.Metric.IO_WRITE_BYTES:
points.extend(
(s.write_bytes, s.timestamp) for s in stat_value.io_samples)
elif args.metric == args.Metric.IO_READ_OPS:
points.extend(
(s.read_count, s.timestamp) for s in stat_value.io_samples)
elif args.metric == args.Metric.IO_WRITE_OPS:
points.extend(
(s.write_count, s.timestamp) for s in stat_value.io_samples)
elif args.metric == args.Metric.NETWORK_BYTES_RECEIVED:
points.append((stat_value.bytes_received, stat_value.timestamp))
elif args.metric == args.Metric.NETWORK_BYTES_SENT:
points.append((stat_value.bytes_sent, stat_value.timestamp))
elif args.metric == args.Metric.MEMORY_PERCENT:
points.append((stat_value.memory_percent, stat_value.timestamp))
elif args.metric == args.Metric.MEMORY_RSS_SIZE:
points.append((stat_value.RSS_size, stat_value.timestamp))
elif args.metric == args.Metric.MEMORY_VMS_SIZE:
points.append((stat_value.VMS_size, stat_value.timestamp))
else:
raise ValueError("Unknown metric.")
# Points collected from "cpu_samples" and "io_samples" may not be correctly
# sorted in some cases (as overlaps between different stat_values are
# possible).
points.sort(key=lambda x: x[1])
ts = timeseries.Timeseries()
ts.MultiAppend(points)
if args.metric not in self.GAUGE_METRICS:
ts.MakeIncreasing()
if len(stat_values) > self.MAX_SAMPLES:
sampling_interval = rdfvalue.Duration.From(
((end_time - start_time).ToInt(rdfvalue.SECONDS) // self.MAX_SAMPLES)
or 1, rdfvalue.SECONDS)
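      # Bucket the requested time range into roughly MAX_SAMPLES intervals of at
      # least one second each so the returned series stays bounded in size.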
if args.metric in self.GAUGE_METRICS:
mode = timeseries.NORMALIZE_MODE_GAUGE
else:
mode = timeseries.NORMALIZE_MODE_COUNTER
ts.Normalize(sampling_interval, start_time, end_time, mode=mode)
result = ApiGetClientLoadStatsResult()
for value, timestamp in ts.data:
dp = api_stats.ApiStatsStoreMetricDataPoint(
timestamp=timestamp, value=float(value))
result.data_points.append(dp)
return result
| 32.202339
| 80
| 0.743835
|
7438d0cf411d6ecea8a20bfbb737b01ed5398f7b
| 11,286
|
py
|
Python
|
microbenthos/utils/loader.py
|
achennu/microbenthos
|
c83fb60d05b5614546466601e973721640e685d4
|
[
"MIT"
] | 3
|
2018-04-23T13:51:36.000Z
|
2021-08-05T01:53:51.000Z
|
microbenthos/utils/loader.py
|
achennu/microbenthos
|
c83fb60d05b5614546466601e973721640e685d4
|
[
"MIT"
] | 5
|
2018-04-09T20:08:40.000Z
|
2018-05-04T23:08:50.000Z
|
microbenthos/utils/loader.py
|
achennu/microbenthos
|
c83fb60d05b5614546466601e973721640e685d4
|
[
"MIT"
] | 2
|
2018-04-09T20:11:50.000Z
|
2021-08-06T03:34:06.000Z
|
import logging
from collections.abc import Mapping
import cerberus
import pkg_resources
from fipy import PhysicalField
from sympy import Symbol, SympifyError, sympify
from .yaml_setup import yaml
# TODO: Allow equation with no diffusion term
physical_unit_type = cerberus.TypeDefinition('physical_unit', (PhysicalField,), ())
class MicroBenthosSchemaValidator(cerberus.Validator):
"""
    A :mod:`cerberus` validator for schema.yml in MicroBenthos
"""
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
logger.propagate = False
types_mapping = cerberus.Validator.types_mapping.copy()
types_mapping['physical_unit'] = physical_unit_type
# def __init__(self, *args, **kwargs):
# # self.logger.propagate = False
# super(MicroBenthosSchemaValidator, self).__init__(*args, **kwargs)
def _check_with_importpath(self, field, value):
"""
Validates if the value is a usable import path for an entity class
Valid examples are:
* pkg1.pkg2.mod1.class
* class_name
Invalid examples:
* .class_name
Args:
value: A string
Returns:
True if valid
"""
self.logger.debug('Validating importpath: {}'.format(value))
parts = [s.isidentifier() for s in value.split('.')]
if not all(parts):
self._error(field, "Must be a python import path")
# def _validate_type_physical_unit(self, value):
# """ Enables validation for `unit` schema attribute.
# :param value: field value.
# """
# self.logger.debug('Validating physical_unit: {}'.format(value))
# if isinstance(value, PhysicalField):
# if value.unit.name() != '1':
# return True
def _check_with_unit_name(self, field, value):
"""
Checks that the string can be used as units
"""
self.logger.debug('Validating unit_name: {}'.format(value))
try:
PhysicalField(1, value)
except TypeError:
self._error(field, 'Must be str of physical units')
def _validate_like_unit(self, unit, field, value):
"""
Test that the given value has compatible units
Args:
unit: A string useful with :class:`PhysicalField`
field:
value: An instance of a physical unit
Returns:
boolean if validated
The rule's arguments are validated against this schema:
{'type': 'string'}
"""
self.logger.debug('Validating like_unit: {} {} {}'.format(unit, field, value))
if not isinstance(value, PhysicalField):
self._error(field, 'Must be a PhysicalField, not {}'.format(type(value)))
try:
value.inUnitsOf(unit)
except:
self._error(field, 'Must be compatible with units {}'.format(unit))
def _check_with_sympify(self, field, value):
self.logger.debug(f'Checking if {value} usable with sympify')
try:
e = sympify(value)
except SympifyError:
self._error(field, "Must be str compatible with sympify")
# def _validate_type_sympifyable(self, value):
# """
# A string that can be run through sympify
# """
# self.logger.debug('Validating sympifyable: {}'.format(value))
# if not isinstance(value, (str, int, float)):
# return False
# try:
# e = sympify(value)
# self.logger.debug('Sympified: {}'.format(e))
# return True
# except:
# return False
def _check_with_sympy_symbol(self, field, value):
"""
String that can be run through sympify and only has one
variable symbol in it.
"""
self.logger.debug(f'Check if {value} is a sympy symbol')
try:
e = sympify(value)
valid = isinstance(e, Symbol)
except SympifyError:
valid = False
if not valid:
self._error(field, "Must be a single symbol in sympy")
# def _validate_type_symbolable(self, value):
# """
# String that can be run through sympify and only has one variable symbol in it.
# """
# self.logger.debug('Validating symbolable: {}'.format(value))
# try:
# e = sympify(value)
# return isinstance(e, Symbol)
# except:
# return False
def _check_with_model_path(self, field, value):
"""
Validate that the value of the field is a model store path
Value should be of type:
* domain.oxy
* env.oxy.var
* microbes.cyano.processes.oxyPS
The rule's arguments are validated against this schema:
{'type': 'string'}
"""
if '.' not in value:
self._error(field, 'Model path should be a dotted string')
parts = value.split('.')
if not all([len(p) for p in parts]):
self._error(field, 'Model path must not have empty parts')
ALLOWED_ROOTS = ('env', 'domain', 'microbes')
if parts[0] not in ALLOWED_ROOTS:
self._error(field, f'Model path root should be in {ALLOWED_ROOTS}')
if parts[0] == 'microbes':
MICROBE_SUBPARTS = ('features', 'processes')
if len(parts) < 4:
self._error(field,
                        'Microbes model path needs at least 4 path '
'parts')
if parts[2] not in MICROBE_SUBPARTS:
self._error(field,
'Microbes model path should be of type {}'.format(
MICROBE_SUBPARTS))
# def _validate_model_store(self, jnk, field, value):
# """
# Validate that the value of the field is a model store path
#
# Value should be of type:
# * domain.oxy
# * env.oxy.var
# * microbes.cyano.processes.oxyPS
#
# The rule's arguments are validated against this schema:
# {'type': 'string'}
# """
# self.logger.debug('Validating model_store={} for field {!r}: {!r}'.format(
# jnk, field, value
# ))
#
# if '.' not in value:
# self._error(field, 'Model store should be a dotted path, not {}'.format(value))
#
# parts = value.split('.')
#
# if not all([len(p) for p in parts]):
# self._error(field, 'Model store has empty path element: {}'.format(value))
#
# if parts[0] not in ('env', 'domain', 'microbes'):
# self._error(field, 'Model store root should be in (env, domain, microbes)')
#
# if parts[0] in ('domain', 'env'):
# pass
#
# elif parts[0] == 'microbes':
# mtargets = ('features', 'processes')
#
# if len(parts) < 4:
# self._error(field, 'Microbes model store needs atleast 4 path elements')
#
# if parts[2] not in mtargets:
# self._error(field, 'Microbes model store should be of type {}'.format(mtargets))
def _normalize_coerce_float(self, value):
return float(value)
def validate_yaml(stream, key = None, schema = None, schema_stream = None):
logger = logging.getLogger(__name__)
logger.info('Loading definition with yaml')
inp_dict = yaml.unsafe_load(stream)
if key:
inp_dict = inp_dict[key]
return validate_dict(inp_dict, key=key, schema=schema, schema_stream=schema_stream)
def validate_dict(inp_dict, key, schema = None, schema_stream = None):
logger = logging.getLogger(__name__)
logger.info('Loading definition from: {}'.format(inp_dict.keys()))
logger.debug('Using schema key {!r} from schema_stream={}'.format(key, schema_stream))
if schema is None:
schema = get_schema(schema_stream=schema_stream)
else:
if not isinstance(schema, Mapping):
raise TypeError('Supplied schema should be a mapping, not {!r}'.format(type(schema)))
if key:
schema = schema[key]
logger.debug('Schema with entries: {}'.format(schema.keys()))
validator = MicroBenthosSchemaValidator()
validated = validator.validated(inp_dict, schema)
if not validated:
logger.propagate = True
logger.error('Input definition not validated for schema {!r}!'.format(key))
from pprint import pformat
logger.warning(pformat(validator.errors))
for path, errmsg in _denest_errors(validator.errors, [], []):
logger.error('Error: {} :: {}'.format(path, errmsg))
raise ValueError('Definition of {!r} invalid!'.format(key))
else:
logger.info('{} definition successfully loaded: {}'.format(key, validated.keys()))
return validated
def _denest_errors(D, paths, all_items):
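    # Flatten cerberus' nested error mapping into (dotted.path, message) pairs
    # so each error can be logged on its own line.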
for k in D:
# print('descending into {}'.format(k))
v = D[k]
paths.append(k)
for item in v:
if isinstance(item, dict):
_denest_errors(item, paths, all_items)
if paths:
paths.pop(-1)
elif isinstance(item, str):
# full_path = '.'.join([str(_) for _ in paths])
# print(f'{full_path}: {item}')
all_items.append(('.'.join(str(_) for _ in paths), item))
if paths:
paths.pop(-1)
return all_items
def get_schema(schema_stream = None):
"""
Returns the inbuilt model schema
"""
# INBUILT = pkg_resources.resource_stream(__name__, 'schema.yml')
if schema_stream:
schema = yaml.unsafe_load(schema_stream)
else:
with pkg_resources.resource_stream(__name__, 'schema.yml') as INBUILT:
schema = yaml.unsafe_load(INBUILT)
return schema
def find_subclasses_recursive(baseclass, subclasses = None):
"""
Find subclasses recursively. `subclasses` should be a set into which to add the subclasses
"""
if subclasses is None:
subclasses = set()
if not isinstance(baseclass, type):
raise ValueError('Need a class, but received: {} of type {}'.format(
baseclass, type(baseclass)))
for sclass in baseclass.__subclasses__():
subclasses.add(sclass)
find_subclasses_recursive(sclass, subclasses)
return subclasses
| 32.618497
| 98
| 0.584707
|
8b5eae90bfc6ed839b7200cffb3c1eb3d075f8c6
| 9,329
|
py
|
Python
|
openaddr/util/__init__.py
|
andrewharvey/batch-machine
|
a5f6463ff7929b84c1131a3449e4796c3cdc692d
|
[
"0BSD"
] | 1
|
2021-09-02T04:13:10.000Z
|
2021-09-02T04:13:10.000Z
|
openaddr/util/__init__.py
|
andrewharvey/batch-machine
|
a5f6463ff7929b84c1131a3449e4796c3cdc692d
|
[
"0BSD"
] | 12
|
2020-10-05T17:22:34.000Z
|
2022-03-28T14:04:17.000Z
|
openaddr/util/__init__.py
|
andrewharvey/batch-machine
|
a5f6463ff7929b84c1131a3449e4796c3cdc692d
|
[
"0BSD"
] | 1
|
2021-04-19T10:53:10.000Z
|
2021-04-19T10:53:10.000Z
|
import logging; _L = logging.getLogger('openaddr.util')
from urllib.parse import urlparse, parse_qsl, urljoin
from datetime import datetime, timedelta, date
from os.path import join, basename, splitext, dirname, exists
from operator import attrgetter
from tempfile import mkstemp
from os import close, getpid
import glob
import collections
import ftplib
import httmock
import io
import zipfile
import time
import re
RESOURCE_LOG_INTERVAL = timedelta(seconds=30)
RESOURCE_LOG_FORMAT = 'Resource usage: {{ user: {user:.0f}%, system: {system:.0f}%, ' \
'memory: {memory:.0f}MB, read: {read:.0f}KB, written: {written:.0f}KB, ' \
'sent: {sent:.0f}KB, received: {received:.0f}KB, period: {period:.0f}sec, ' \
'procs: {procs:.0f} }}'
def get_version():
''' Prevent circular imports.
'''
from .. import __version__
return __version__
def prepare_db_kwargs(dsn):
'''
'''
p = urlparse(dsn)
q = dict(parse_qsl(p.query))
assert p.scheme == 'postgres'
kwargs = dict(user=p.username, password=p.password, host=p.hostname, port=p.port)
kwargs.update(dict(database=p.path.lstrip('/')))
if 'sslmode' in q:
kwargs.update(dict(sslmode=q['sslmode']))
return kwargs
def package_output(source, processed_path, website, license):
''' Write a zip archive to temp dir with processed data and optional .vrt.
'''
_, ext = splitext(processed_path)
handle, zip_path = mkstemp(prefix='util-package_output-', suffix='.zip')
close(handle)
zip_file = zipfile.ZipFile(zip_path, mode='w', compression=zipfile.ZIP_DEFLATED)
template = join(dirname(__file__), 'templates', 'README.txt')
with io.open(template, encoding='utf8') as file:
content = file.read().format(website=website, license=license, date=date.today())
zip_file.writestr('README.txt', content.encode('utf8'))
if ext == '.csv':
# Add virtual format to make CSV readable by QGIS, OGR, etc.
# More information: http://www.gdal.org/drv_vrt.html
template = join(dirname(__file__), 'templates', 'conform-result.vrt')
with io.open(template, encoding='utf8') as file:
content = file.read().format(source=basename(source))
zip_file.writestr(source + '.vrt', content.encode('utf8'))
zip_file.write(processed_path, source + ext)
zip_file.close()
return zip_path
def build_request_ftp_file_callback():
'''
'''
file = io.BytesIO()
callback = lambda bytes: file.write(bytes)
return file, callback
def request_ftp_file(url):
'''
'''
_L.info('Getting {} via FTP'.format(url))
parsed = urlparse(url)
try:
ftp = ftplib.FTP(parsed.hostname)
ftp.login(parsed.username, parsed.password)
file, callback = build_request_ftp_file_callback()
ftp.retrbinary('RETR {}'.format(parsed.path), callback)
file.seek(0)
except Exception as e:
_L.warning('Got an error from {}: {}'.format(parsed.hostname, e))
return httmock.response(400, b'', headers={'Content-Type': 'application/octet-stream'})
# Using mock response because HTTP responses are expected downstream
return httmock.response(200, file.read(), headers={'Content-Type': 'application/octet-stream'})
def s3_key_url(key):
'''
'''
base = u'https://s3.amazonaws.com'
path = join(key.bucket.name, key.name.lstrip('/'))
return urljoin(base, path)
def get_pidlist(start_pid):
''' Return a set of recursively-found child PIDs of the given start PID.
'''
children = collections.defaultdict(set)
for path in glob.glob('/proc/*/status'):
_, _, pid, _ = path.split('/', 3)
if pid in ('thread-self', 'self'):
continue
with open(path) as file:
for line in file:
if line.startswith('PPid:\t'):
ppid = line[6:].strip()
break
children[int(ppid)].add(int(pid))
parents, pids = [start_pid], set()
while parents:
parent = parents.pop(0)
pids.add(parent)
parents.extend(children[parent])
return pids
def get_cpu_times(pidlist):
''' Return Linux CPU usage times in jiffies.
See http://stackoverflow.com/questions/1420426/how-to-calculate-the-cpu-usage-of-a-process-by-pid-in-linux-from-c
'''
if not exists('/proc/stat') or not exists('/proc/self/stat'):
return None, None, None
with open('/proc/stat') as file:
stat = re.split(r'\s+', next(file).strip())
time_total = sum([int(s) for s in stat[1:]])
utime, stime = 0, 0
for pid in pidlist:
with open('/proc/{}/stat'.format(pid)) as file:
stat = next(file).strip().split(' ')
utime += int(stat[13])
stime += int(stat[14])
return time_total, utime, stime
def get_diskio_bytes(pidlist):
''' Return bytes read and written.
This will measure all bytes read in the process, and so includes
reading in shared libraries, etc; not just our productive data
processing activity.
See http://stackoverflow.com/questions/3633286/understanding-the-counters-in-proc-pid-io
'''
if not exists('/proc/self/io'):
return None, None
read_bytes, write_bytes = 0, 0
for pid in pidlist:
with open('/proc/{}/io'.format(pid)) as file:
for line in file:
bytes = re.split(r':\s+', line.strip())
if 'read_bytes' in bytes:
read_bytes += int(bytes[1])
if 'write_bytes' in bytes:
write_bytes += int(bytes[1])
return read_bytes, write_bytes
def get_network_bytes():
''' Return bytes sent and received.
TODO: This code measures network usage for the whole system.
It'll be better to do this measurement on a per-process basis later.
'''
if not exists('/proc/net/netstat'):
return None, None
sent_bytes, recv_bytes = None, None
with open('/proc/net/netstat') as file:
for line in file:
columns = line.strip().split()
if 'IpExt:' in line:
values = next(file).strip().split()
netstat = {k: int(v) for (k, v) in zip(columns[1:], values[1:])}
sent_bytes, recv_bytes = netstat['OutOctets'], netstat['InOctets']
return sent_bytes, recv_bytes
def get_memory_usage(pidlist):
''' Return Linux memory usage in megabytes.
VMRSS is of interest here too; that's resident memory size.
It will matter if a machine runs out of RAM.
See http://stackoverflow.com/questions/30869297/difference-between-memfree-and-memavailable
and http://stackoverflow.com/questions/131303/how-to-measure-actual-memory-usage-of-an-application-or-process
'''
if not exists('/proc/self/status'):
return None
megabytes = 0
for pid in pidlist:
with open('/proc/{}/status'.format(pid)) as file:
for line in file:
if 'VmSize' in line:
size = re.split(r'\s+', line.strip())
megabytes += int(size[1]) / 1024
break
return megabytes
def log_current_usage(start_time, usercpu_prev, syscpu_prev, totcpu_prev, read_prev, written_prev, sent_prev, received_prev, time_prev):
'''
'''
pidlist = get_pidlist(getpid())
totcpu_curr, usercpu_curr, syscpu_curr = get_cpu_times(pidlist)
read_curr, written_curr = get_diskio_bytes(pidlist)
sent_curr, received_curr = get_network_bytes()
time_curr = time.time()
if totcpu_prev is not None:
# Log resource usage by comparing to previous tick
megabytes_used = get_memory_usage(pidlist)
user_cpu = (usercpu_curr - usercpu_prev) / (totcpu_curr - totcpu_prev)
sys_cpu = (syscpu_curr - syscpu_prev) / (totcpu_curr - totcpu_prev)
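        # utime/stime deltas are per-process jiffies; dividing by the delta of the
        # aggregate 'cpu' line from /proc/stat expresses them as a share of total
        # system CPU time over the interval (logged as a percentage below).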
if read_curr is None or read_prev is None or written_curr is None or written_prev is None:
read = written = sent = received = 0
else:
read, written = read_curr - read_prev, written_curr - written_prev
sent, received = sent_curr - sent_prev, received_curr - received_prev
percent, K = .01, 1024
_L.info(RESOURCE_LOG_FORMAT.format(
user=user_cpu/percent, system=sys_cpu/percent, memory=megabytes_used,
read=read/K, written=written/K, sent=sent/K, received=received/K,
procs=len(pidlist), period=time_curr - time_prev
))
return usercpu_curr, syscpu_curr, totcpu_curr, read_curr, written_curr, sent_curr, received_curr, time_curr
def log_process_usage(lock):
    ''' Periodically log resource usage until the given lock can be acquired.
    The caller holds the lock while work is in progress; releasing it makes this
    loop log one final sample and return.
    '''
start_time = time.time()
next_measure = start_time
previous = (None, None, None, None, None, None, None, None)
while True:
time.sleep(.05)
if lock.acquire(False):
# Got the lock, we are done. Log one last time and get out.
log_current_usage(start_time, *previous)
return
if time.time() <= next_measure:
# Not yet time to measure and log usage.
continue
previous = log_current_usage(start_time, *previous)
next_measure += RESOURCE_LOG_INTERVAL.seconds + RESOURCE_LOG_INTERVAL.days * 86400
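# Minimal usage sketch (an illustration, not part of the original module): it
# assumes the definitions above (log_process_usage, RESOURCE_LOG_INTERVAL) and
# that `time` is already imported earlier in this module, since the code above
# uses it. The caller holds a lock for the duration of the work; releasing the
# lock tells log_process_usage to log one last time and exit.
if __name__ == "__main__":
    from threading import Lock, Thread
    work_lock = Lock()
    work_lock.acquire()                                    # held while the work runs
    logger_thread = Thread(target=log_process_usage, args=(work_lock,))
    logger_thread.start()
    time.sleep(2)                                          # stand-in for the real workload
    work_lock.release()                                    # signals the logger to stop
    logger_thread.join()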
| 34.047445
| 136
| 0.632222
|
06ff0fd7dfa4b9a68815f50e9a9a7f9b9b6ba2cc
| 3,053
|
py
|
Python
|
examples/wap/wap_data.py
|
ML-KULeuven/deepstochlog
|
4b71d1e306d9cdbbb6237947533f0facfcc62c3a
|
[
"Apache-2.0"
] | 10
|
2021-12-06T02:07:19.000Z
|
2022-03-24T11:40:10.000Z
|
examples/wap/wap_data.py
|
ML-KULeuven/deepstochlog
|
4b71d1e306d9cdbbb6237947533f0facfcc62c3a
|
[
"Apache-2.0"
] | null | null | null |
examples/wap/wap_data.py
|
ML-KULeuven/deepstochlog
|
4b71d1e306d9cdbbb6237947533f0facfcc62c3a
|
[
"Apache-2.0"
] | null | null | null |
import json
from collections.abc import Sequence
from pathlib import Path
from typing import Union
import typing
from deepstochlog.dataset import ContextualizedTermDataset
from deepstochlog.context import ContextualizedTerm, Context
from deepstochlog.term import Term, List
data_root = Path(__file__).parent / ".." / ".." / "data" / "raw" / "wap"
class WapDataset(ContextualizedTermDataset):
def __init__(
self,
split: str = "train",
size: int = None,
):
with open(data_root / "questions.json", "r") as questions_file:
all_questions: typing.Dict = json.load(questions_file)
with open(data_root / (split + ".txt"), "r") as split_file:
question_answers: typing.List[typing.Tuple[int, str]] = [
(int(float(el[0])), el[1])
for el in [s.split("\t") for s in split_file.readlines()]
]
# for i, q in enumerate(all_questions):
# assert i == q["iIndex"]
with open(data_root / (split + ".tsv")) as ids_file:
idxs = [int(idx) for idx in ids_file.readlines()]
questions = [
{
**all_questions[idx],
"tokenized_question": question_answers[i][1],
}
for i, idx in enumerate(idxs)
]
if size is None:
size = len(questions)
self.ct_term_dataset = []
for idx in range(0, size):
question = questions[idx]
example = create_term(question)
self.ct_term_dataset.append(example)
def __len__(self):
return len(self.ct_term_dataset)
def __getitem__(self, item: Union[int, slice]):
if type(item) is slice:
return (self[i] for i in range(*item.indices(len(self))))
return self.ct_term_dataset[item]
def get_number(question: str, alignment: int):
number_str = ""
while question[alignment].isdigit():
number_str += question[alignment]
alignment += 1
return int(number_str)
def get_numbers(question: str, alignments: typing.List[int]):
return tuple(get_number(question, alignment) for alignment in alignments)
sentence_token = List(Term("sentence_token"))
def create_term(question: typing.Dict) -> ContextualizedTerm:
number1, number2, number3 = get_numbers(
question["sQuestion"], question["lAlignments"]
)
correct_sequence = question["lEquations"][0]
# Remove "X=(" and ")", and then replace all ".0" from numbers
correct_sequence_fixed = correct_sequence[3:-1].replace(".0","")
return ContextualizedTerm(
context=Context(
{Term("sentence_token"): question["tokenized_question"]},
map_default_to_term=True,
),
term=Term(
"s",
Term(str(int(question["lSolutions"][0]))),
Term(str(number1)),
Term(str(number2)),
Term(str(number3)),
sentence_token,
),
meta=correct_sequence,
)
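# Illustrative example of the equation clean-up above. The equation string is a
# made-up sample in the same "X=(...)" shape as the dataset, not a value taken
# from the real questions.json.
if __name__ == "__main__":
    _sample_equation = "X=(70.0+87.0)"
    print(_sample_equation[3:-1].replace(".0", ""))  # -> 70+87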
| 30.227723
| 77
| 0.59548
|
bc72d2abd90f4f8885a80918629b2c164bb4eeb7
| 8,612
|
py
|
Python
|
backend/substrapp/tests/common.py
|
ClementGautier/substra-backend
|
e096e7c5abedc6847307353ffc3e6db28b047032
|
[
"Apache-2.0"
] | null | null | null |
backend/substrapp/tests/common.py
|
ClementGautier/substra-backend
|
e096e7c5abedc6847307353ffc3e6db28b047032
|
[
"Apache-2.0"
] | null | null | null |
backend/substrapp/tests/common.py
|
ClementGautier/substra-backend
|
e096e7c5abedc6847307353ffc3e6db28b047032
|
[
"Apache-2.0"
] | null | null | null |
from http.cookies import SimpleCookie
from io import StringIO, BytesIO
import os
import base64
from django.contrib.auth.models import User
from django.core.files.uploadedfile import InMemoryUploadedFile
from rest_framework.test import APIClient
# This helper function generates a basic authentication header from the given credentials.
# Given a username and password it returns "Basic GENERATED_TOKEN"
from users.serializers import CustomTokenObtainPairSerializer
def generate_basic_auth_header(username, password):
return 'Basic ' + base64.b64encode(f'{username}:{password}'.encode()).decode()
def generate_jwt_auth_header(jwt):
return 'JWT ' + jwt
class AuthenticatedClient(APIClient):
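    """APIClient that creates a test user on the fly and attaches its JWT,
    split between the Authorization header and a signature cookie."""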
def request(self, **kwargs):
# create user
username = 'substra'
password = 'p@$swr0d44'
user, created = User.objects.get_or_create(username=username)
if created:
user.set_password(password)
user.save()
# simulate login
serializer = CustomTokenObtainPairSerializer(data={
'username': username,
'password': password
})
serializer.is_valid()
token = serializer.validated_data
jwt = str(token)
# simulate right httpOnly cookie and Authorization jwt
jwt_auth_header = generate_jwt_auth_header('.'.join(jwt.split('.')[0:2]))
self.credentials(HTTP_AUTHORIZATION=jwt_auth_header)
self.cookies = SimpleCookie({'signature': jwt.split('.')[2]})
return super().request(**kwargs)
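# Illustrative sketch (not part of the original helpers) of the JWT split used
# above: the header and payload segments go into the Authorization header while
# the signature segment travels in the cookie. The token below is a made-up
# placeholder, not a real signed JWT.
_example_jwt = "aaaa.bbbb.cccc"
_example_auth_header = generate_jwt_auth_header('.'.join(_example_jwt.split('.')[0:2]))  # "JWT aaaa.bbbb"
_example_cookie = SimpleCookie({'signature': _example_jwt.split('.')[2]})                # signature only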
def get_temporary_text_file(contents, filename):
"""
Creates a temporary text file
:param contents: contents of the file
:param filename: name of the file
:type contents: str
:type filename: str
"""
f = StringIO()
flength = f.write(contents)
text_file = InMemoryUploadedFile(f, None, filename, 'text', flength, None)
# Setting the file to its start
text_file.seek(0)
return text_file
def get_sample_objective():
dir_path = os.path.dirname(os.path.realpath(__file__))
description_content = "Super objective"
description_filename = "description.md"
description = get_temporary_text_file(description_content, description_filename)
metrics_filename = "metrics.zip"
f = BytesIO(b'')
with open(os.path.join(dir_path,
'../../../fixtures/chunantes/objectives/objective0/metrics.zip'), 'rb') as zip_file:
flength = f.write(zip_file.read())
metrics = InMemoryUploadedFile(f, None, metrics_filename,
'application/zip', flength, None)
metrics.seek(0)
return description, description_filename, metrics, metrics_filename
def get_sample_script():
script_content = "import slidelib\n\ndef read():\n\tpass"
script_filename = "script.py"
script = get_temporary_text_file(script_content, script_filename)
return script, script_filename
def get_sample_datamanager():
description_content = "description"
description_filename = "description.md"
description = get_temporary_text_file(description_content, description_filename)
data_opener_content = "import slidelib\n\ndef read():\n\tpass"
data_opener_filename = "data_opener.py"
data_opener = get_temporary_text_file(data_opener_content, data_opener_filename)
return description, description_filename, data_opener, data_opener_filename
def get_sample_datamanager2():
description_content = "description 2"
description_filename = "description2.md"
description = get_temporary_text_file(description_content, description_filename)
data_opener_content = "import os\nimport slidelib\n\ndef read():\n\tpass"
data_opener_filename = "data_opener2.py"
data_opener = get_temporary_text_file(data_opener_content, data_opener_filename)
return description, description_filename, data_opener, data_opener_filename
def get_sample_zip_data_sample():
dir_path = os.path.dirname(os.path.realpath(__file__))
file_filename = "file.zip"
f = BytesIO(b'foo')
with open(os.path.join(dir_path, '../../../fixtures/owkin/datasamples/datasample4/0024900.zip'), 'rb') as zip_file:
flength = f.write(zip_file.read())
file = InMemoryUploadedFile(f, None, file_filename,
'application/zip', flength, None)
file.seek(0)
return file, file_filename
def get_sample_zip_data_sample_2():
dir_path = os.path.dirname(os.path.realpath(__file__))
file_filename = "file.zip"
f = BytesIO(b'foo')
with open(os.path.join(dir_path, '../../../fixtures/owkin/datasamples/test/0024901.zip'), 'rb') as zip_file:
flength = f.write(zip_file.read())
file = InMemoryUploadedFile(f, None, file_filename,
'application/zip', flength, None)
file.seek(0)
return file, file_filename
def get_sample_tar_data_sample():
dir_path = os.path.dirname(os.path.realpath(__file__))
file_filename = "file.tar.gz"
f = BytesIO()
with open(os.path.join(
dir_path, '../../../fixtures/owkin/datasamples/datasample4/0024900.tar.gz'), 'rb') as tar_file:
flength = f.write(tar_file.read())
file = InMemoryUploadedFile(f, None, file_filename, 'application/zip', flength, None)
file.seek(0)
return file, file_filename
def get_sample_algo():
dir_path = os.path.dirname(os.path.realpath(__file__))
file_filename = "file.tar.gz"
f = BytesIO()
with open(os.path.join(dir_path, '../../../fixtures/chunantes/algos/algo3/algo.tar.gz'), 'rb') as tar_file:
flength = f.write(tar_file.read())
file = InMemoryUploadedFile(f, None, file_filename, 'application/tar+gzip', flength, None)
file.seek(0)
return file, file_filename
def get_sample_algo_zip():
dir_path = os.path.dirname(os.path.realpath(__file__))
file_filename = "file.zip"
f = BytesIO()
with open(os.path.join(dir_path, '../../../fixtures/chunantes/algos/algo0/algo.zip'), 'rb') as tar_file:
flength = f.write(tar_file.read())
file = InMemoryUploadedFile(f, None, file_filename, 'application/zip', flength, None)
file.seek(0)
return file, file_filename
def get_description_algo():
dir_path = os.path.dirname(os.path.realpath(__file__))
file_filename = "file.md"
f = BytesIO()
with open(os.path.join(dir_path, '../../../fixtures/chunantes/algos/algo3/description.md'), 'rb') as desc_file:
flength = f.write(desc_file.read())
file = InMemoryUploadedFile(f, None, file_filename, 'application/text', flength, None)
file.seek(0)
return file, file_filename
def get_sample_model():
model_content = "0.1, 0.2, -1.0"
model_filename = "model.bin"
model = get_temporary_text_file(model_content, model_filename)
return model, model_filename
DEFAULT_PERMISSIONS = {
'process': {
'public': True,
'authorizedIDs': [],
}
}
def get_sample_algo_metadata():
return {
'owner': 'foo',
'permissions': DEFAULT_PERMISSIONS,
}
def get_sample_objective_metadata():
return {
'owner': 'foo',
'permissions': DEFAULT_PERMISSIONS,
}
class FakeMetrics(object):
def __init__(self, filepath='path'):
self.path = filepath
def save(self, p, f):
return
def read(self, *args, **kwargs):
return b'foo'
class FakeObjective(object):
def __init__(self, filepath='path'):
self.metrics = FakeMetrics(filepath)
class FakeOpener(object):
def __init__(self, filepath):
self.path = filepath
self.name = self.path
class FakeDataManager(object):
def __init__(self, filepath):
self.data_opener = FakeOpener(filepath)
class FakeFilterDataManager(object):
def __init__(self, count):
self.count_value = count
def count(self):
return self.count_value
class FakePath(object):
def __init__(self, filepath):
self.path = filepath
class FakeModel(object):
def __init__(self, filepath):
self.file = FakePath(filepath)
class FakeAsyncResult(object):
def __init__(self, status=None, successful=True):
if status is not None:
self.status = status
self.success = successful
self.result = {'res': 'result'}
def successful(self):
return self.success
class FakeRequest(object):
def __init__(self, status, content):
self.status_code = status
self.content = content
class FakeTask(object):
def __init__(self, task_id):
self.id = task_id
| 28.996633
| 119
| 0.680678
|
6aea88bccd2584e816f47d67938d512900944ae1
| 2,232
|
py
|
Python
|
cs28TeamProject/parasitologyTool/migrations/0043_auto_20220219_1321.py
|
Eg3-git/cs28-parasitology-tool
|
4389208ed19f7e348ca931bff48d43263451f7f0
|
[
"CC0-1.0"
] | null | null | null |
cs28TeamProject/parasitologyTool/migrations/0043_auto_20220219_1321.py
|
Eg3-git/cs28-parasitology-tool
|
4389208ed19f7e348ca931bff48d43263451f7f0
|
[
"CC0-1.0"
] | null | null | null |
cs28TeamProject/parasitologyTool/migrations/0043_auto_20220219_1321.py
|
Eg3-git/cs28-parasitology-tool
|
4389208ed19f7e348ca931bff48d43263451f7f0
|
[
"CC0-1.0"
] | null | null | null |
# Generated by Django 3.2.9 on 2022-02-19 13:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parasitologyTool', '0042_researchpost_likes'),
]
operations = [
migrations.AlterField(
model_name='article',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='comment',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='parasite',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='post',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='researchfile',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='researchimage',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='researchpost',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='userprofile',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='userprofile',
name='role',
field=models.CharField(choices=[('clinician', 'Clinician'), ('researcher', 'Researcher'), ('public', 'Public'), ('admin', 'Admin')], default='Public', max_length=50),
),
]
| 37.830508
| 178
| 0.600806
|
cac64d407a97fb8de2d1203aafef19c9e7930ffa
| 351
|
py
|
Python
|
lesson_3.6.2.py
|
aafedotov/stepik_oop
|
177370a2e809090f66b3c71c594c0582754fddc6
|
[
"MIT"
] | null | null | null |
lesson_3.6.2.py
|
aafedotov/stepik_oop
|
177370a2e809090f66b3c71c594c0582754fddc6
|
[
"MIT"
] | null | null | null |
lesson_3.6.2.py
|
aafedotov/stepik_oop
|
177370a2e809090f66b3c71c594c0582754fddc6
|
[
"MIT"
] | null | null | null |
class Quadrilateral:
def __init__(self, *args):
self.width = args[0]
self.height = args[-1]
def __bool__(self):
return self.width == self.height
def __str__(self):
if self:
            return f'Cube of size {self.width}x{self.height}'
        return f'Rectangle of size {self.width}x{self.height}'
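# Hypothetical usage sketch (not part of the lesson code): __bool__ makes an
# instance truthy only when width equals height, and __str__ uses that to pick
# which message to return.
if __name__ == "__main__":
    print(Quadrilateral(3, 3))   # Cube of size 3x3
    print(Quadrilateral(3, 7))   # Rectangle of size 3x7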
| 23.4
| 67
| 0.603989
|
0f4d533331d7426285a5f9944d6d2ade372ff749
| 391
|
py
|
Python
|
crudcbv/asgi.py
|
CleitonCandiotto/Criando-CRUD-Django
|
2ca749546be64af0a105d1337a4393d69454e00a
|
[
"MIT"
] | null | null | null |
crudcbv/asgi.py
|
CleitonCandiotto/Criando-CRUD-Django
|
2ca749546be64af0a105d1337a4393d69454e00a
|
[
"MIT"
] | null | null | null |
crudcbv/asgi.py
|
CleitonCandiotto/Criando-CRUD-Django
|
2ca749546be64af0a105d1337a4393d69454e00a
|
[
"MIT"
] | null | null | null |
"""
ASGI config for crudcbv project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crudcbv.settings')
application = get_asgi_application()
| 23
| 78
| 0.785166
|
7a73d86793f87b7421eeb500b431f1a5ced3460d
| 11,865
|
py
|
Python
|
intersight/model/storage_physical_disk_extension_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/storage_physical_disk_extension_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/storage_physical_disk_extension_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.asset_device_registration_relationship import AssetDeviceRegistrationRelationship
from intersight.model.inventory_device_info_relationship import InventoryDeviceInfoRelationship
from intersight.model.storage_controller_relationship import StorageControllerRelationship
from intersight.model.storage_physical_disk_relationship import StoragePhysicalDiskRelationship
globals()['AssetDeviceRegistrationRelationship'] = AssetDeviceRegistrationRelationship
globals()['InventoryDeviceInfoRelationship'] = InventoryDeviceInfoRelationship
globals()['StorageControllerRelationship'] = StorageControllerRelationship
globals()['StoragePhysicalDiskRelationship'] = StoragePhysicalDiskRelationship
class StoragePhysicalDiskExtensionAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'STORAGE.PHYSICALDISKEXTENSION': "storage.PhysicalDiskExtension",
},
('object_type',): {
'STORAGE.PHYSICALDISKEXTENSION': "storage.PhysicalDiskExtension",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'bootable': (str,), # noqa: E501
'disk_dn': (str,), # noqa: E501
'disk_id': (int,), # noqa: E501
'disk_state': (str,), # noqa: E501
'health': (str,), # noqa: E501
'inventory_device_info': (InventoryDeviceInfoRelationship,), # noqa: E501
'physical_disk': (StoragePhysicalDiskRelationship,), # noqa: E501
'registered_device': (AssetDeviceRegistrationRelationship,), # noqa: E501
'storage_controller': (StorageControllerRelationship,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'bootable': 'Bootable', # noqa: E501
'disk_dn': 'DiskDn', # noqa: E501
'disk_id': 'DiskId', # noqa: E501
'disk_state': 'DiskState', # noqa: E501
'health': 'Health', # noqa: E501
'inventory_device_info': 'InventoryDeviceInfo', # noqa: E501
'physical_disk': 'PhysicalDisk', # noqa: E501
'registered_device': 'RegisteredDevice', # noqa: E501
'storage_controller': 'StorageController', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""StoragePhysicalDiskExtensionAllOf - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "storage.PhysicalDiskExtension", must be one of ["storage.PhysicalDiskExtension", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "storage.PhysicalDiskExtension", must be one of ["storage.PhysicalDiskExtension", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
bootable (str): The whether disk is bootable or not.. [optional] # noqa: E501
disk_dn (str): The distinguished name of the Physical drive.. [optional] # noqa: E501
disk_id (int): The storage Enclosure slotId.. [optional] # noqa: E501
disk_state (str): The current drive state of disk.. [optional] # noqa: E501
health (str): The current drive state of disk.. [optional] # noqa: E501
inventory_device_info (InventoryDeviceInfoRelationship): [optional] # noqa: E501
physical_disk (StoragePhysicalDiskRelationship): [optional] # noqa: E501
registered_device (AssetDeviceRegistrationRelationship): [optional] # noqa: E501
storage_controller (StorageControllerRelationship): [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "storage.PhysicalDiskExtension")
object_type = kwargs.get('object_type', "storage.PhysicalDiskExtension")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.class_id = class_id
self.object_type = object_type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 53.687783
| 1,678
| 0.650737
|
dbfc4edbeb59287ed066904195c4d8cbb97d0f99
| 463
|
py
|
Python
|
Input-Output/files and file writing.py
|
rlwheelwright/PY3_StandardLib
|
0d9acc02f5ca934eab774bbdd5acc3c92eff7191
|
[
"Apache-2.0"
] | null | null | null |
Input-Output/files and file writing.py
|
rlwheelwright/PY3_StandardLib
|
0d9acc02f5ca934eab774bbdd5acc3c92eff7191
|
[
"Apache-2.0"
] | null | null | null |
Input-Output/files and file writing.py
|
rlwheelwright/PY3_StandardLib
|
0d9acc02f5ca934eab774bbdd5acc3c92eff7191
|
[
"Apache-2.0"
] | null | null | null |
# Files and File Writing
# Open a file
myFile = open("score.txt", "w")
# w ==> write
# r ==> read
# r+ ==> read and write
# a ==> append
# Show attributes and properties of that file
print("Name" + myFile.name)
print("Mode " + myFile.mode)
# Write to a file
myFile.write("GBJ: 100\nKHD: 99\nBBB: 89")
myFile.close()
# Read the file
myFile = open("score.txt", "r")
print("Reading..." + myFile.read(10))
myFile.seek(0)
print("Reading again" + myFile.read(10))
| 19.291667
| 45
| 0.645788
|
165e67386ae7add637e949082222ccbf49e1ba7e
| 28,510
|
py
|
Python
|
test/functional/wallet_bumpfee.py
|
Cminor-pools/bitcoinvg
|
d47a3cf13e06f4fe03d965826f5309e6d5706470
|
[
"MIT"
] | null | null | null |
test/functional/wallet_bumpfee.py
|
Cminor-pools/bitcoinvg
|
d47a3cf13e06f4fe03d965826f5309e6d5706470
|
[
"MIT"
] | null | null | null |
test/functional/wallet_bumpfee.py
|
Cminor-pools/bitcoinvg
|
d47a3cf13e06f4fe03d965826f5309e6d5706470
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from decimal import Decimal
import io
from test_framework.blocktools import add_witness_commitment, create_block, create_coinbase, send_to_witness
from test_framework.messages import BIP125_SEQUENCE_NUMBER, CTransaction
from test_framework.test_framework import BitcoinVGTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
hex_str_to_bytes,
)
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
# Fee rates (sat/vB)
INSUFFICIENT = 1
ECONOMICAL = 50
NORMAL = 100
HIGH = 500
TOO_HIGH = 100000
class BumpFeeTest(BitcoinVGTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[
"-walletrbf={}".format(i),
"-mintxfee=0.00002",
"-addresstype=bech32",
] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def clear_mempool(self):
# Clear mempool between subtests. The subtests may only depend on chainstate (utxos)
self.nodes[1].generate(1)
self.sync_all()
def run_test(self):
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 outputs of 0.001 bvg (100,000 satoshis each)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for _ in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
for mode in ["default", "fee_rate"]:
test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address)
self.test_invalid_parameters(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(self, rbf_node, dest_address)
test_nonrbf_bumpfee_fails(self, peer_node, dest_address)
test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address)
test_dust_to_fee(self, rbf_node, dest_address)
test_watchonly_psbt(self, peer_node, rbf_node, dest_address)
test_rebumping(self, rbf_node, dest_address)
test_rebumping_not_replaceable(self, rbf_node, dest_address)
test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address)
test_bumpfee_metadata(self, rbf_node, dest_address)
test_locked_wallet_fails(self, rbf_node, dest_address)
test_change_script_match(self, rbf_node, dest_address)
test_settxfee(self, rbf_node, dest_address)
test_maxtxfee_fails(self, rbf_node, dest_address)
# These tests wipe out a number of utxos that are expected in other tests
test_small_output_with_feerate_succeeds(self, rbf_node, dest_address)
test_no_more_inputs_fails(self, rbf_node, dest_address)
def test_invalid_parameters(self, rbf_node, peer_node, dest_address):
self.log.info('Test invalid parameters')
rbfid = spend_one_input(rbf_node, dest_address)
self.sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
for key in ["totalFee", "feeRate"]:
assert_raises_rpc_error(-3, "Unexpected key {}".format(key), rbf_node.bumpfee, rbfid, {key: NORMAL})
# Bumping to just above minrelay should fail to increase the total fee enough.
assert_raises_rpc_error(-8, "Insufficient total fee 0.00000141", rbf_node.bumpfee, rbfid, {"fee_rate": INSUFFICIENT})
self.log.info("Test invalid fee rate settings")
assert_raises_rpc_error(-8, "Insufficient total fee 0.00", rbf_node.bumpfee, rbfid, {"fee_rate": 0})
assert_raises_rpc_error(-4, "Specified or calculated fee 0.141 is too high (cannot be higher than -maxtxfee 0.10",
rbf_node.bumpfee, rbfid, {"fee_rate": TOO_HIGH})
assert_raises_rpc_error(-3, "Amount out of range", rbf_node.bumpfee, rbfid, {"fee_rate": -1})
for value in [{"foo": "bar"}, True]:
assert_raises_rpc_error(-3, "Amount is not a number or string", rbf_node.bumpfee, rbfid, {"fee_rate": value})
assert_raises_rpc_error(-3, "Invalid amount", rbf_node.bumpfee, rbfid, {"fee_rate": ""})
self.log.info("Test explicit fee rate raises RPC error if both fee_rate and conf_target are passed")
assert_raises_rpc_error(-8, "Cannot specify both conf_target and fee_rate. Please provide either a confirmation "
"target in blocks for automatic fee estimation, or an explicit fee rate.",
rbf_node.bumpfee, rbfid, {"conf_target": NORMAL, "fee_rate": NORMAL})
self.log.info("Test explicit fee rate raises RPC error if both fee_rate and estimate_mode are passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
rbf_node.bumpfee, rbfid, {"estimate_mode": "economical", "fee_rate": NORMAL})
self.log.info("Test invalid conf_target settings")
assert_raises_rpc_error(-8, "confTarget and conf_target options should not both be set",
rbf_node.bumpfee, rbfid, {"confTarget": 123, "conf_target": 456})
self.log.info("Test invalid estimate_mode settings")
for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type string for estimate_mode, got {}".format(k),
rbf_node.bumpfee, rbfid, {"estimate_mode": v})
for mode in ["foo", Decimal("3.1415"), "sat/B", "BVG/kB"]:
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
rbf_node.bumpfee, rbfid, {"estimate_mode": mode})
self.clear_mempool()
def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
self.log.info('Test simple bumpfee: {}'.format(mode))
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
self.sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
if mode == "fee_rate":
bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"fee_rate": str(NORMAL)})
bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": NORMAL})
else:
bumped_psbt = rbf_node.psbtbumpfee(rbfid)
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] > -rbftx["fee"]
assert_equal(bumped_tx["origfee"], -rbftx["fee"])
assert "psbt" not in bumped_tx
assert_equal(bumped_psbt["errors"], [])
assert bumped_psbt["fee"] > -rbftx["fee"]
assert_equal(bumped_psbt["origfee"], -rbftx["fee"])
assert "psbt" in bumped_psbt
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
self.sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
self.clear_mempool()
def test_segwit_bumpfee_succeeds(self, rbf_node, dest_address):
self.log.info('Test that segwit-sourcing bumpfee works')
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.getaddressinfo(rbf_node.getnewaddress(address_type='bech32'))
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransactionwithwallet(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
self.clear_mempool()
def test_nonrbf_bumpfee_fails(self, peer_node, dest_address):
self.log.info('Test that we cannot replace a non RBF transaction')
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
self.clear_mempool()
def test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address):
self.log.info('Test that it cannot bump fee if non-owned inputs are included')
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
fee = Decimal("0.001")
utxos = [node.listunspent(query_options={'minimumAmount': fee})[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - fee
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
self.clear_mempool()
def test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address):
self.log.info('Test that fee cannot be bumped when it has descendant')
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransactionwithwallet(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
self.clear_mempool()
def test_small_output_with_feerate_succeeds(self, rbf_node, dest_address):
self.log.info('Testing small output with feerate bump succeeds')
# Make sure additional inputs exist
rbf_node.generatetoaddress(101, rbf_node.getnewaddress())
rbfid = spend_one_input(rbf_node, dest_address)
input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
assert_equal(len(input_list), 1)
original_txin = input_list[0]
self.log.info('Keep bumping until transaction fee out-spends non-destination value')
tx_fee = 0
while True:
input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
new_item = list(input_list)[0]
assert_equal(len(input_list), 1)
assert_equal(original_txin["txid"], new_item["txid"])
assert_equal(original_txin["vout"], new_item["vout"])
rbfid_new_details = rbf_node.bumpfee(rbfid)
rbfid_new = rbfid_new_details["txid"]
raw_pool = rbf_node.getrawmempool()
assert rbfid not in raw_pool
assert rbfid_new in raw_pool
rbfid = rbfid_new
tx_fee = rbfid_new_details["fee"]
# Total value from input not going to destination
if tx_fee > Decimal('0.00050000'):
break
# input(s) have been added
final_input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
assert_greater_than(len(final_input_list), 1)
# Original input is in final set
assert [txin for txin in final_input_list
if txin["txid"] == original_txin["txid"]
and txin["vout"] == original_txin["vout"]]
rbf_node.generatetoaddress(1, rbf_node.getnewaddress())
assert_equal(rbf_node.gettransaction(rbfid)["confirmations"], 1)
self.clear_mempool()
def test_dust_to_fee(self, rbf_node, dest_address):
self.log.info('Test that bumped output that is dust is dropped to fee')
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
# The DER formatting used by BitcoinVG to serialize ECDSA signatures means that signatures can have a
# variable size of 70-72 bytes (or possibly even less), with most being 71 or 72 bytes. The signature
# in the witness is divided by 4 for the vsize, so this variance can take the weight across a 4-byte
# boundary. Thus expected transaction size (p2wpkh, 1 input, 2 outputs) is 140-141 vbytes, usually 141.
if not 140 <= fulltx["vsize"] <= 141:
raise AssertionError("Invalid tx vsize of {} (140-141 expected), full tx: {}".format(fulltx["vsize"], fulltx))
# Bump with fee_rate of 350.25 sat/vB vbytes to create dust.
# Expected fee is 141 vbytes * fee_rate 0.00350250 BVG / 1000 vbytes = 0.00049385 BVG.
# or occasionally 140 vbytes * fee_rate 0.00350250 BVG / 1000 vbytes = 0.00049035 BVG.
# Dust should be dropped to the fee, so actual bump fee is 0.00050000 BVG.
bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": 350.25})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) # change output is eliminated
assert_equal(full_bumped_tx["vout"][0]['value'], Decimal("0.00050000"))
self.clear_mempool()
def test_settxfee(self, rbf_node, dest_address):
self.log.info('Test settxfee')
assert_raises_rpc_error(-8, "txfee cannot be less than min relay tx fee", rbf_node.settxfee, Decimal('0.000005'))
assert_raises_rpc_error(-8, "txfee cannot be less than wallet min fee", rbf_node.settxfee, Decimal('0.000015'))
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
# check that settxfee respects -maxtxfee
self.restart_node(1, ['-maxtxfee=0.000025'] + self.extra_args[1])
assert_raises_rpc_error(-8, "txfee cannot be more than wallet max tx fee", rbf_node.settxfee, Decimal('0.00003'))
self.restart_node(1, self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
self.connect_nodes(1, 0)
self.clear_mempool()
def test_maxtxfee_fails(self, rbf_node, dest_address):
self.log.info('Test that bumpfee fails when it hits -maxtxfee')
# size of bumped transaction (p2wpkh, 1 input, 2 outputs): 141 vbytes
# expected bump fee of 141 vbytes * 0.00200000 BVG / 1000 vbytes = 0.00002820 BVG
# which exceeds maxtxfee and is expected to raise
self.restart_node(1, ['-maxtxfee=0.000025'] + self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Unable to create transaction. Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)", rbf_node.bumpfee, rbfid)
self.restart_node(1, self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
self.connect_nodes(1, 0)
self.clear_mempool()
def test_watchonly_psbt(self, peer_node, rbf_node, dest_address):
self.log.info('Test that PSBT is returned for bumpfee in watchonly wallets')
priv_rec_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0/*)#rweraev0"
pub_rec_desc = rbf_node.getdescriptorinfo(priv_rec_desc)["descriptor"]
priv_change_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/*)#j6uzqvuh"
pub_change_desc = rbf_node.getdescriptorinfo(priv_change_desc)["descriptor"]
# Create a wallet with private keys that can sign PSBTs
rbf_node.createwallet(wallet_name="signer", disable_private_keys=False, blank=True)
signer = rbf_node.get_wallet_rpc("signer")
assert signer.getwalletinfo()['private_keys_enabled']
reqs = [{
"desc": priv_rec_desc,
"timestamp": 0,
"range": [0,1],
"internal": False,
"keypool": False # Keys can only be imported to the keypool when private keys are disabled
},
{
"desc": priv_change_desc,
"timestamp": 0,
"range": [0, 0],
"internal": True,
"keypool": False
}]
if self.options.descriptors:
result = signer.importdescriptors(reqs)
else:
result = signer.importmulti(reqs)
assert_equal(result, [{'success': True}, {'success': True}])
# Create another wallet with just the public keys, which creates PSBTs
rbf_node.createwallet(wallet_name="watcher", disable_private_keys=True, blank=True)
watcher = rbf_node.get_wallet_rpc("watcher")
assert not watcher.getwalletinfo()['private_keys_enabled']
reqs = [{
"desc": pub_rec_desc,
"timestamp": 0,
"range": [0, 10],
"internal": False,
"keypool": True,
"watchonly": True,
"active": True,
}, {
"desc": pub_change_desc,
"timestamp": 0,
"range": [0, 10],
"internal": True,
"keypool": True,
"watchonly": True,
"active": True,
}]
if self.options.descriptors:
result = watcher.importdescriptors(reqs)
else:
result = watcher.importmulti(reqs)
assert_equal(result, [{'success': True}, {'success': True}])
funding_address1 = watcher.getnewaddress(address_type='bech32')
funding_address2 = watcher.getnewaddress(address_type='bech32')
peer_node.sendmany("", {funding_address1: 0.001, funding_address2: 0.001})
peer_node.generate(1)
self.sync_all()
# Create single-input PSBT for transaction to be bumped
psbt = watcher.walletcreatefundedpsbt([], {dest_address: 0.0005}, 0, {"fee_rate": 1}, True)['psbt']
psbt_signed = signer.walletprocesspsbt(psbt=psbt, sign=True, sighashtype="ALL", bip32derivs=True)
psbt_final = watcher.finalizepsbt(psbt_signed["psbt"])
original_txid = watcher.sendrawtransaction(psbt_final["hex"])
assert_equal(len(watcher.decodepsbt(psbt)["tx"]["vin"]), 1)
# Bump fee, obnoxiously high to add additional watchonly input
bumped_psbt = watcher.psbtbumpfee(original_txid, {"fee_rate": HIGH})
assert_greater_than(len(watcher.decodepsbt(bumped_psbt['psbt'])["tx"]["vin"]), 1)
assert "txid" not in bumped_psbt
assert_equal(bumped_psbt["origfee"], -watcher.gettransaction(original_txid)["fee"])
assert not watcher.finalizepsbt(bumped_psbt["psbt"])["complete"]
# Sign bumped transaction
bumped_psbt_signed = signer.walletprocesspsbt(psbt=bumped_psbt["psbt"], sign=True, sighashtype="ALL", bip32derivs=True)
bumped_psbt_final = watcher.finalizepsbt(bumped_psbt_signed["psbt"])
assert bumped_psbt_final["complete"]
# Broadcast bumped transaction
bumped_txid = watcher.sendrawtransaction(bumped_psbt_final["hex"])
assert bumped_txid in rbf_node.getrawmempool()
assert original_txid not in rbf_node.getrawmempool()
rbf_node.unloadwallet("watcher")
rbf_node.unloadwallet("signer")
self.clear_mempool()
def test_rebumping(self, rbf_node, dest_address):
self.log.info('Test that re-bumping the original tx fails, but bumping successor works')
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"fee_rate": NORMAL})
rbf_node.bumpfee(bumped["txid"], {"fee_rate": NORMAL})
self.clear_mempool()
def test_rebumping_not_replaceable(self, rbf_node, dest_address):
self.log.info('Test that re-bumping non-replaceable fails')
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"fee_rate": NORMAL})
self.clear_mempool()
def test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address):
self.log.info('Test that unconfirmed outputs from bumped txns are not spendable')
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then invalidate the block so the rbf tx will be put back in the mempool.
# This makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
# Can not abandon conflicted tx
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: rbf_node.abandontransaction(txid=bumpid))
rbf_node.invalidateblock(block.hash)
# Call abandon to make sure the wallet doesn't attempt to resubmit
# the bump tx and hope the wallet does not rebroadcast before we call.
rbf_node.abandontransaction(bumpid)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
self.clear_mempool()
def test_bumpfee_metadata(self, rbf_node, dest_address):
self.log.info('Test that bumped txn metadata persists to new txn record')
    assert rbf_node.getbalance() < 49
rbf_node.generatetoaddress(101, rbf_node.getnewaddress())
rbfid = rbf_node.sendtoaddress(dest_address, 49, "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
self.clear_mempool()
def test_locked_wallet_fails(self, rbf_node, dest_address):
self.log.info('Test that locked wallet cannot bump txn')
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
self.clear_mempool()
def test_change_script_match(self, rbf_node, dest_address):
    self.log.info('Test that the same change address is used for the replacement transaction when possible')
def get_change_address(tx):
tx_details = rbf_node.getrawtransaction(tx, 1)
txout_addresses = [txout['scriptPubKey']['addresses'][0] for txout in tx_details["vout"]]
return [address for address in txout_addresses if rbf_node.getaddressinfo(address)["ischange"]]
# Check that there is only one change output
rbfid = spend_one_input(rbf_node, dest_address)
change_addresses = get_change_address(rbfid)
assert_equal(len(change_addresses), 1)
# Now find that address in each subsequent tx, and no other change
bumped_total_tx = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL})
assert_equal(change_addresses, get_change_address(bumped_total_tx['txid']))
bumped_rate_tx = rbf_node.bumpfee(bumped_total_tx["txid"])
assert_equal(change_addresses, get_change_address(bumped_rate_tx['txid']))
self.clear_mempool()
def spend_one_input(node, dest_address, change_size=Decimal("0.00049000")):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
destinations = {dest_address: Decimal("0.00050000")}
if change_size > 0:
destinations[node.getrawchangeaddress()] = change_size
rawtx = node.createrawtransaction([tx_input], destinations)
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
add_witness_commitment(block)
block.solve()
node.submitblock(block.serialize().hex())
return block
def test_no_more_inputs_fails(self, rbf_node, dest_address):
self.log.info('Test that bumpfee fails when there are no available confirmed outputs')
# feerate rbf requires confirmed outputs when change output doesn't exist or is insufficient
rbf_node.generatetoaddress(1, dest_address)
# spend all funds, no change output
rbfid = rbf_node.sendtoaddress(rbf_node.getnewaddress(), rbf_node.getbalance(), "", "", True)
assert_raises_rpc_error(-4, "Unable to create transaction. Insufficient funds", rbf_node.bumpfee, rbfid)
self.clear_mempool()
if __name__ == "__main__":
BumpFeeTest().main()
| 48.158784
| 175
| 0.714626
|
4cbb594016d6ab1241259e66e8d9cf6b3cb87aae
| 7,398
|
py
|
Python
|
napari/layers/transforms.py
|
danielballan/napari
|
9963d6bf52971f8f240b507be206ec682487fb4a
|
[
"BSD-3-Clause"
] | null | null | null |
napari/layers/transforms.py
|
danielballan/napari
|
9963d6bf52971f8f240b507be206ec682487fb4a
|
[
"BSD-3-Clause"
] | null | null | null |
napari/layers/transforms.py
|
danielballan/napari
|
9963d6bf52971f8f240b507be206ec682487fb4a
|
[
"BSD-3-Clause"
] | null | null | null |
import toolz as tz
from typing import Sequence
import numpy as np
from ..utils.list import ListModel
class Transform:
"""Base transform class.
Defaults to the identity transform.
Parameters
----------
func : callable, Coords -> Coords
A function converting an NxD array of coordinates to NxD'.
name : string
A string name for the transform.
"""
def __init__(self, func=tz.identity, inverse=None, name=None):
self.func = func
self._inverse_func = inverse
self.name = name
if func is tz.identity:
self._inverse_func = tz.identity
def __call__(self, coords):
"""Transform input coordinates to output."""
return self.func(coords)
@property
def inverse(self) -> 'Transform':
if self._inverse_func is not None:
return Transform(self._inverse_func, self.func)
else:
raise ValueError('Inverse function was not provided.')
def compose(self, transform: 'Transform') -> 'Transform':
"""Return the composite of this transform and the provided one."""
raise ValueError('Transform composition rule not provided')
def set_slice(self, axes: Sequence[int]) -> 'Transform':
"""Return a transform subset to the visible dimensions.
Parameters
----------
axes : Sequence[int]
Axes to subset the current transform with.
Returns
-------
Transform
Resulting transform.
"""
raise NotImplementedError('Cannot subset arbitrary transforms.')
def expand_dims(self, axes: Sequence[int]) -> 'Transform':
"""Return a transform with added axes for non-visible dimensions.
Parameters
----------
axes : Sequence[int]
Location of axes to expand the current transform with. Passing a
list allows expansion to occur at specific locations and for
expand_dims to be like an inverse to the set_slice method.
Returns
-------
Transform
Resulting transform.
"""
raise NotImplementedError('Cannot subset arbitrary transforms.')
class TransformChain(ListModel, Transform):
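    """An ordered list of transforms that is itself callable, applying each
    transform in sequence to the input coordinates."""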
def __init__(self, transforms=[]):
super().__init__(
basetype=Transform,
iterable=transforms,
lookup={str: lambda q, e: q == e.name},
)
def __call__(self, coords):
return tz.pipe(coords, *self)
def __newlike__(self, iterable):
return ListModel(self._basetype, iterable, self._lookup)
@property
def inverse(self) -> 'TransformChain':
"""Return the inverse transform chain."""
return TransformChain([tf.inverse for tf in self[::-1]])
@property
def simplified(self) -> 'Transform':
"""Return the composite of the transforms inside the transform chain."""
if len(self) == 0:
return None
if len(self) == 1:
return self[0]
else:
return tz.pipe(self[0], *[tf.compose for tf in self[1:]])
def set_slice(self, axes: Sequence[int]) -> 'TransformChain':
"""Return a transform chain subset to the visible dimensions.
Parameters
----------
axes : Sequence[int]
Axes to subset the current transform chain with.
Returns
-------
TransformChain
Resulting transform chain.
"""
return TransformChain([tf.set_slice(axes) for tf in self])
def expand_dims(self, axes: Sequence[int]) -> 'Transform':
"""Return a transform chain with added axes for non-visible dimensions.
Parameters
----------
axes : Sequence[int]
Location of axes to expand the current transform with. Passing a
list allows expansion to occur at specific locations and for
expand_dims to be like an inverse to the set_slice method.
Returns
-------
TransformChain
Resulting transform chain.
"""
return TransformChain([tf.expand_dims(axes) for tf in self])
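# Minimal chaining sketch, using only names defined above (an illustrative
# aside, not part of the public API): a TransformChain pipes coordinates
# through its members in order.
_double = Transform(lambda c: c * 2, name='double')
_shift_one = Transform(lambda c: c + 1, name='shift_one')
_demo_chain = TransformChain([_double, _shift_one])
# _demo_chain(np.array([1.0, 2.0])) -> [3., 5.]  (doubled first, then shifted)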
class ScaleTranslate(Transform):
"""n-dimensional scale and translation (shift) class.
Scaling is always applied before translation.
Parameters
----------
scale : 1-D array
A 1-D array of factors to scale each axis by. Scale is broadcast to 1
in leading dimensions, so that, for example, a scale of [4, 18, 34] in
3D can be used as a scale of [1, 4, 18, 34] in 4D without modification.
An empty scale vector implies no scaling.
translate : 1-D array
A 1-D array of factors to shift each axis by. Translation is broadcast
to 0 in leading dimensions, so that, for example, a translation of
[4, 18, 34] in 3D can be used as a translation of [0, 4, 18, 34] in 4D
without modification. An empty translation vector implies no
translation.
name : string
A string name for the transform.
"""
def __init__(self, scale=(1.0,), translate=(0.0,), name=None):
super().__init__(name=name)
self.scale = np.array(scale)
self.translate = np.array(translate)
def __call__(self, coords):
coords = np.atleast_2d(coords)
scale = np.concatenate(
([1.0] * (coords.shape[1] - len(self.scale)), self.scale)
)
translate = np.concatenate(
([0.0] * (coords.shape[1] - len(self.translate)), self.translate)
)
return np.squeeze(scale * coords + translate)
@property
def inverse(self) -> 'ScaleTranslate':
"""Return the inverse transform."""
return ScaleTranslate(1 / self.scale, -1 / self.scale * self.translate)
def compose(self, transform: 'ScaleTranslate') -> 'ScaleTranslate':
"""Return the composite of this transform and the provided one."""
scale = self.scale * transform.scale
translate = self.translate + self.scale * transform.translate
return ScaleTranslate(scale, translate)
def set_slice(self, axes: Sequence[int]) -> 'ScaleTranslate':
"""Return a transform subset to the visible dimensions.
Parameters
----------
axes : Sequence[int]
Axes to subset the current transform with.
Returns
-------
Transform
Resulting transform.
"""
return ScaleTranslate(
self.scale[axes], self.translate[axes], name=self.name
)
def expand_dims(self, axes: Sequence[int]) -> 'ScaleTranslate':
"""Return a transform with added axes for non-visible dimensions.
Parameters
----------
axes : Sequence[int]
Location of axes to expand the current transform with. Passing a
list allows expansion to occur at specific locations and for
expand_dims to be like an inverse to the set_slice method.
Returns
-------
Transform
Resulting transform.
"""
n = len(axes) + len(self.scale)
not_axes = [i for i in range(n) if i not in axes]
scale = np.ones(n)
scale[not_axes] = self.scale
translate = np.zeros(n)
translate[not_axes] = self.translate
return ScaleTranslate(scale, translate, name=self.name)
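# Minimal usage sketch, using only names defined above: broadcasting lets a 3D
# ScaleTranslate act on 4D coordinates, and chaining it with its inverse
# round-trips the point. The expected values in the comments follow directly
# from the broadcasting rule in the ScaleTranslate docstring.
_st = ScaleTranslate(scale=[4, 18, 34], translate=[10, 20, 30])
_pt = np.array([[1.0, 1.0, 1.0, 1.0]])
# _st(_pt) -> [ 1., 14., 38., 64.]   (scale -> [1, 4, 18, 34], translate -> [0, 10, 20, 30])
_roundtrip = TransformChain([_st, _st.inverse])
# _roundtrip(_pt) -> [1., 1., 1., 1.]; _roundtrip.simplified composes the two
# into a single ScaleTranslate with unit scale and zero translation.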
| 32.88
| 80
| 0.603406
|
de9b5825fbf0a593209dc2e85ad58051c95e0f40
| 22,200
|
py
|
Python
|
tests/unit/backend/wmg/test_query.py
|
chanzuckerberg/dcp-prototype
|
24d2323ba5ae1482395da35ea11c42708e3a52ce
|
[
"MIT"
] | null | null | null |
tests/unit/backend/wmg/test_query.py
|
chanzuckerberg/dcp-prototype
|
24d2323ba5ae1482395da35ea11c42708e3a52ce
|
[
"MIT"
] | 105
|
2020-01-23T22:08:48.000Z
|
2020-05-07T00:04:20.000Z
|
tests/unit/backend/wmg/test_query.py
|
chanzuckerberg/dcp-prototype
|
24d2323ba5ae1482395da35ea11c42708e3a52ce
|
[
"MIT"
] | 1
|
2020-03-20T17:06:54.000Z
|
2020-03-20T17:06:54.000Z
|
import unittest
from typing import NamedTuple
from backend.wmg.api.v1 import build_dot_plot_matrix
from backend.wmg.data.query import WmgQueryCriteria, WmgQuery
from backend.wmg.data.schemas.cube_schema import cube_non_indexed_dims
from tests.unit.backend.wmg.fixtures.test_snapshot import (
create_temp_wmg_snapshot,
all_ones_expression_summary_values,
all_tens_cell_counts_values,
)
# TODO: Test build_* methods separately in test_v1.py. This package's unit tests need only test the raw results of
# WmgQuery methods
class QueryTest(unittest.TestCase):
# FIXME
@unittest.skip("failing with 'realloc(): invalid pointer'")
def test__query_with_no_genes__returns_empty_result(self):
criteria = WmgQueryCriteria(
organism_ontology_term_id="organism_ontology_term_id_0",
tissue_ontology_term_ids=["tissue_ontology_term_id_0"],
)
dim_size = 1
with create_temp_wmg_snapshot(
dim_size=dim_size, expression_summary_vals_fn=all_ones_expression_summary_values
) as snapshot:
query = WmgQuery(snapshot)
result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
expected = {
"cell_type_ontology_term_id": {},
"gene_ontology_term_id": {},
"n_cells": {},
"n_cells_cell_type": {},
"n_cells_tissue": {},
"nnz": {},
"sum": {},
"tissue_ontology_term_id": {},
}
self.assertEqual(expected, result.to_dict())
def test__query_all_indexed_dims_single_value__returns_correct_result(self):
criteria = WmgQueryCriteria(
gene_ontology_term_ids=["gene_ontology_term_id_0"],
organism_ontology_term_id="organism_ontology_term_id_1",
tissue_ontology_term_ids=["tissue_ontology_term_id_2"],
)
dim_size = 3
with create_temp_wmg_snapshot(
dim_size=dim_size,
expression_summary_vals_fn=all_ones_expression_summary_values,
cell_counts_generator_fn=all_tens_cell_counts_values,
) as snapshot:
query = WmgQuery(snapshot)
result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
# sanity check the expected value of the stats (n_cells, nnz, sum) for each data viz point; if this fails, the
# cube test fixture may have changed (e.g. TileDB Array schema) or the logic for creating the test cube fixture
# has changed
expected_cell_count_per_cell_type = dim_size ** (len(cube_non_indexed_dims) - 1)
assert expected_cell_count_per_cell_type == 729
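# Arithmetic behind the sanity check: with dim_size == 3, 729 == 3 ** 6, i.e.
# len(cube_non_indexed_dims) == 7 for this fixture. The "- 1" presumably holds
# cell_type fixed, since this count is per cell type; the expected
# n_cells_cell_type (7290) and n_cells_tissue (21870) below are then
# 10 * 3 ** 6 and 10 * 3 ** 7, the factor of 10 coming from the all-tens
# cell-count fixture.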
expected = [
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
]
self.assertEqual(
expected,
sorted(
result.to_dict("records"),
key=lambda r: (
r["gene_ontology_term_id"],
r["tissue_ontology_term_id"],
r["cell_type_ontology_term_id"],
),
),
)
def test__query_all_indexed_dims_multi_valued__returns_correct_result(self):
criteria = WmgQueryCriteria(
gene_ontology_term_ids=["gene_ontology_term_id_0", "gene_ontology_term_id_2"],
organism_ontology_term_id="organism_ontology_term_id_0",
tissue_ontology_term_ids=["tissue_ontology_term_id_1", "tissue_ontology_term_id_2"],
)
dim_size = 3
with create_temp_wmg_snapshot(
dim_size=dim_size,
expression_summary_vals_fn=all_ones_expression_summary_values,
cell_counts_generator_fn=all_tens_cell_counts_values,
) as snapshot:
query = WmgQuery(snapshot)
result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
# sanity check the expected value of the stats (n_cells, nnz, sum) for each data viz point; if this fails, the
# cube test fixture may have changed (e.g. TileDB Array schema) or the logic for creating the test cube fixture
# has changed
expected_cell_count_per_cell_type = dim_size ** (len(cube_non_indexed_dims) - 1)
assert expected_cell_count_per_cell_type == 729
expected_cell_count_per_tissue = 10 * (dim_size ** len(cube_non_indexed_dims))
assert expected_cell_count_per_tissue == 21870
expected = [
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_1",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_2",
"tissue_ontology_term_id": "tissue_ontology_term_id_2",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 729,
"n_cells_cell_type": 7290,
"n_cells_tissue": 21870,
"nnz": 729,
"sum": 729.0,
},
]
self.assertEqual(
expected,
sorted(
result.to_dict("records"),
key=lambda r: (
r["gene_ontology_term_id"],
r["tissue_ontology_term_id"],
r["cell_type_ontology_term_id"],
),
),
)
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.maxDiff = None
def test__query_non_indexed_dim_single_valued__returns_correct_result(self):
criteria = WmgQueryCriteria(
gene_ontology_term_ids=["gene_ontology_term_id_0"],
organism_ontology_term_id="organism_ontology_term_id_0",
tissue_ontology_term_ids=["tissue_ontology_term_id_0"],
dataset_ids=["dataset_id_1"], # <-- non-indexed dim, single-valued
)
dim_size = 3
with create_temp_wmg_snapshot(
dim_size=dim_size,
expression_summary_vals_fn=all_ones_expression_summary_values,
cell_counts_generator_fn=all_tens_cell_counts_values,
) as snapshot:
query = WmgQuery(snapshot)
result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
# sanity check the expected value of the stats (n_cells, nnz, sum) for each data viz point; if this fails, the
# cube test fixture may have changed (e.g. TileDB Array schema) or the logic for creating the test cube fixture
# has changed
expected_cell_count_per_cell_type = dim_size ** (len(cube_non_indexed_dims) - 2)
assert expected_cell_count_per_cell_type == 243
expected_cell_count_per_tissue = 10 * (dim_size ** (len(cube_non_indexed_dims) - 1))
assert expected_cell_count_per_tissue == 7290
expected = [
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 243,
"n_cells_cell_type": 2430,
"n_cells_tissue": 7290,
"nnz": 243,
"sum": 243.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 243,
"n_cells_cell_type": 2430,
"n_cells_tissue": 7290,
"nnz": 243,
"sum": 243.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 243,
"n_cells_cell_type": 2430,
"n_cells_tissue": 7290,
"nnz": 243,
"sum": 243.0,
},
]
self.assertEqual(
expected,
sorted(
result.to_dict("records"),
key=lambda r: (
r["gene_ontology_term_id"],
r["tissue_ontology_term_id"],
r["cell_type_ontology_term_id"],
),
),
)
def test__query_non_indexed_dim_multi_valued__returns_correct_result(self):
criteria = WmgQueryCriteria(
gene_ontology_term_ids=["gene_ontology_term_id_0"],
organism_ontology_term_id="organism_ontology_term_id_0",
tissue_ontology_term_ids=["tissue_ontology_term_id_0"],
dataset_ids=["dataset_id_1", "dataset_id_0"], # <-- non-indexed dim, multi-valued
)
dim_size = 3
with create_temp_wmg_snapshot(
dim_size=dim_size,
expression_summary_vals_fn=all_ones_expression_summary_values,
cell_counts_generator_fn=all_tens_cell_counts_values,
) as snapshot:
query = WmgQuery(snapshot)
result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
# sanity check the expected value of the stats (n_cells, nnz, sum) for each data viz point; if this fails, the
# cube test fixture may have changed (e.g. TileDB Array schema) or the logic for creating the test cube fixture
# has changed
expected_cell_count_per_cell_type = dim_size ** (len(cube_non_indexed_dims) - 2) * 2
assert expected_cell_count_per_cell_type == 486
expected_cell_count_per_tissue = 10 * (dim_size ** (len(cube_non_indexed_dims) - 1) * 2)
assert expected_cell_count_per_tissue == 14580
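# Arithmetic: relative to the unfiltered case, the "- 2" also pins the
# dataset_id dim (in addition to cell_type), and the "* 2" restores the two
# selected dataset_ids: 3 ** 5 * 2 == 486 and 10 * 3 ** 6 * 2 == 14580.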
expected = [
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 486,
"n_cells_cell_type": 4860,
"n_cells_tissue": 14580,
"nnz": 486,
"sum": 486.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 486,
"n_cells_cell_type": 4860,
"n_cells_tissue": 14580,
"nnz": 486,
"sum": 486.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 486,
"n_cells_cell_type": 4860,
"n_cells_tissue": 14580,
"nnz": 486,
"sum": 486.0,
},
]
self.assertEqual(
expected,
sorted(
result.to_dict("records"),
key=lambda r: (
r["gene_ontology_term_id"],
r["tissue_ontology_term_id"],
r["cell_type_ontology_term_id"],
),
),
)
def test__query_non_indexed_dim_single_and_multi_valued__returns_correct_result(self):
criteria = WmgQueryCriteria(
gene_ontology_term_ids=["gene_ontology_term_id_0"],
organism_ontology_term_id="organism_ontology_term_id_0",
tissue_ontology_term_ids=["tissue_ontology_term_id_0"],
ethnicity_ontology_term_ids=["ethnicity_ontology_term_id_1"], # <-- non-indexed dim, single-valued
dataset_ids=["dataset_id_1", "dataset_id_0"], # <-- non-indexed dim, multi-valued
)
dim_size = 3
with create_temp_wmg_snapshot(
dim_size=dim_size,
expression_summary_vals_fn=all_ones_expression_summary_values,
cell_counts_generator_fn=all_tens_cell_counts_values,
) as snapshot:
query = WmgQuery(snapshot)
result = build_dot_plot_matrix(query.expression_summary(criteria), query.cell_counts(criteria))
# sanity check the expected value of the stats (n_cells, nnz, sum) for each data viz point; if this fails, the
# cube test fixture may have changed (e.g. TileDB Array schema) or the logic for creating the test cube fixture
# has changed
expected_cell_count_per_cell_type = dim_size ** (len(cube_non_indexed_dims) - 3) * 1 * 2
assert expected_cell_count_per_cell_type == 162
expected_cell_count_per_tissue = 10 * (dim_size ** (len(cube_non_indexed_dims) - 2) * 1 * 2)
assert expected_cell_count_per_tissue == 4860
expected = [
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_0",
"n_cells": 162,
"n_cells_cell_type": 1620,
"n_cells_tissue": 4860,
"nnz": 162,
"sum": 162.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_1",
"n_cells": 162,
"n_cells_cell_type": 1620,
"n_cells_tissue": 4860,
"nnz": 162,
"sum": 162.0,
},
{
"gene_ontology_term_id": "gene_ontology_term_id_0",
"tissue_ontology_term_id": "tissue_ontology_term_id_0",
"cell_type_ontology_term_id": "cell_type_ontology_term_id_2",
"n_cells": 162,
"n_cells_cell_type": 1620,
"n_cells_tissue": 4860,
"nnz": 162,
"sum": 162.0,
},
]
self.assertEqual(
expected,
sorted(
result.to_dict("records"),
key=lambda r: (
r["gene_ontology_term_id"],
r["tissue_ontology_term_id"],
r["cell_type_ontology_term_id"],
),
),
)
class QueryPrimaryFilterDimensionsTest(unittest.TestCase):
def test__single_dimension__returns_all_dimension_and_terms(self):
dim_size = 3
with create_temp_wmg_snapshot(dim_size=dim_size) as snapshot:
result = WmgQuery(snapshot).list_primary_filter_dimension_term_ids("gene_ontology_term_id")
self.assertEquals(["gene_ontology_term_id_0", "gene_ontology_term_id_1", "gene_ontology_term_id_2"], result)
def test__multiple_dimensions__returns_all_dimensions_and_terms_as_tuples(self):
dim_size = 3
def exclude_one_gene_per_organism(logical_coord: NamedTuple) -> bool:
# HACK: method called during building of both "expr summary" and "cell count" cubes, but the latter does not
# include gene_ontology_term_id
if "gene_ontology_term_id" not in logical_coord._fields:
return False
return logical_coord.gene_ontology_term_id == logical_coord.organism_ontology_term_id.replace(
"organism", "gene"
)
with create_temp_wmg_snapshot(
dim_size=dim_size, exclude_logical_coord_fn=exclude_one_gene_per_organism
) as snapshot:
result = WmgQuery(snapshot).list_grouped_primary_filter_dimensions_term_ids(
"gene_ontology_term_id", "organism_ontology_term_id"
)
self.assertEquals(
{
"organism_ontology_term_id_0": ["gene_ontology_term_id_1", "gene_ontology_term_id_2"],
"organism_ontology_term_id_1": ["gene_ontology_term_id_0", "gene_ontology_term_id_2"],
"organism_ontology_term_id_2": ["gene_ontology_term_id_0", "gene_ontology_term_id_1"],
},
result,
)
| 42.205323
| 120
| 0.578514
|
b570aac2c75f5ef17950506f72322fbcf40d633f
| 1,836
|
py
|
Python
|
src/brushlib/generate.py
|
stippi/WonderBrush-v3
|
7f88c3e4fbe0bca9ef2df7a92d44789877edfe1e
|
[
"MIT"
] | 30
|
2018-11-01T18:31:04.000Z
|
2021-12-27T17:00:59.000Z
|
src/brushlib/generate.py
|
pulkomandy/WonderBrush-v3
|
7878ff8ee627de5f47519d7b55a710a544f36f12
|
[
"MIT"
] | 5
|
2018-11-01T20:01:41.000Z
|
2021-12-06T11:15:12.000Z
|
src/brushlib/generate.py
|
pulkomandy/WonderBrush-v3
|
7878ff8ee627de5f47519d7b55a710a544f36f12
|
[
"MIT"
] | 1
|
2020-05-04T10:33:13.000Z
|
2020-05-04T10:33:13.000Z
|
#!/usr/bin/env python
# brushlib - The MyPaint Brush Library
# Copyright (C) 2007-2008 Martin Renold <martinxyz@gmx.ch>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"Code generator, part of the build process."
import os, sys
import brushsettings
def writefile(filename, s):
"write generated code if changed"
s = '// DO NOT EDIT - autogenerated by ' + sys.argv[0] + '\n\n' + s
if os.path.exists(filename) and open(filename).read() == s:
print 'Checked', filename
else:
print 'Writing', filename
open(filename, 'w').write(s)
content = ''
for i in brushsettings.inputs:
content += '#define INPUT_%s %d\n' % (i.name.upper(), i.index)
content += '#define INPUT_COUNT %d\n' % len(brushsettings.inputs)
content += '\n'
for s in brushsettings.settings:
content += '#define BRUSH_%s %d\n' % (s.cname.upper(), s.index)
content += '#define BRUSH_SETTINGS_COUNT %d\n' % len(brushsettings.settings)
content += '\n'
for s in brushsettings.states:
content += '#define STATE_%s %d\n' % (s.cname.upper(), s.index)
content += '#define STATE_COUNT %d\n' % len(brushsettings.states)
writefile('brushsettings.hpp', content)
| 39.913043
| 76
| 0.715142
|
37daf25d7714d98e2b1a512eda786170b5638c7a
| 24,870
|
py
|
Python
|
pyKneeSPM/knee_spm.py
|
gattia/pyKneeSPM
|
aff17c92119382a1c7ebb124a6e13b12c41967a8
|
[
"MIT"
] | 1
|
2021-05-18T22:13:01.000Z
|
2021-05-18T22:13:01.000Z
|
pyKneeSPM/knee_spm.py
|
gattia/pyKneeSPM
|
aff17c92119382a1c7ebb124a6e13b12c41967a8
|
[
"MIT"
] | 1
|
2022-03-28T16:25:06.000Z
|
2022-03-28T16:25:06.000Z
|
pyKneeSPM/knee_spm.py
|
gattia/pyKneeSPM
|
aff17c92119382a1c7ebb124a6e13b12c41967a8
|
[
"MIT"
] | 1
|
2021-05-18T22:13:02.000Z
|
2021-05-18T22:13:02.000Z
|
import numpy as np
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy
from .vtk_functions import (read_vtk,
apply_transform,
get_icp_transform,
transfer_mesh_scalars_get_weighted_average_n_closest)
from . import test_statistics
from .clustering import Cluster
from .monte_carlo import *
import pyfocusr
class SPM(object):
def __init__(self,
):
self.dict_meshes = {}
self.participant_idx = 0
self.reference_mesh = {}
self.test_statistic_maps = {}
self.clustered_test_statistic_maps = {}
def get_test_statistic_maps(self):
return self.test_statistic_maps
def get_clustered_statistic_maps(self):
return self.clustered_test_statistic_maps
class SingleStatisticSPM(SPM):
def __init__(self,
*args,
map_name='test_map',
map_threshold=2.33,
find_ref_mesh=False,
find_ref_mesh_mode='similarity',
compute_min_cluster_size=True,
mc_cluster_method='permutation',
mc_cluster_extent_significance=0.05,
mc_point_significance=0.05,
n_monte_carlo_iterations=10000,
idx_no_data=None,
percent_participants_with_data_to_include_vertex=0.5,
registration_max_iterations=1000,
**kwargs
):
super().__init__(*args, **kwargs)
self.clustered_test_statistic_maps[map_name] = {}
self.map_name = map_name
self.map_threshold = map_threshold if type(map_threshold) in (list, tuple) else [map_threshold,]
self.find_ref_mesh = find_ref_mesh
self.find_ref_mesh_mode = find_ref_mesh_mode
self.reference_mesh = {}
self.change_values = None
self.n_participants = None
self.n_points = None
# Cluster size
self.compute_min_cluster_size = compute_min_cluster_size
self.mc_cluster_method = mc_cluster_method
self.mc_cluster_extent_significance = mc_cluster_extent_significance
self.n_monte_carlo_iterations = n_monte_carlo_iterations
# Individual voxel statistic
self.mc_point_significance = mc_point_significance
self.threshold_test_statistic = {}
self.idx_no_data = idx_no_data
self.percent_participants_with_data_to_include_vertex = percent_participants_with_data_to_include_vertex
self.added_change_values_directly = False
# Registration parameters
self.registration_max_iterations = registration_max_iterations
self.threshold_cluster_distribution = {}
self.threshold_cluster_size = {}
self.threshold_test_statistic = {}
self.sig_clusters = {}
def calc_significant_clusters(self):
# This first line is specific to the onesample test... need to extend for multi statistic tests
if self.threshold_cluster_size is None:
# The following function does not exist yet:
# self.compute_threshold_clustersize()
self.compute_mc_thresholds()
for stat_key in self.clustered_test_statistic_maps.keys():
self.sig_clusters[stat_key] = {}
for thresh_key in self.clustered_test_statistic_maps[stat_key].keys():
self.sig_clusters[stat_key][thresh_key] = {}
for clust_key in self.clustered_test_statistic_maps[stat_key][thresh_key].keys():
if self.clustered_test_statistic_maps[stat_key][thresh_key][clust_key]['area'] >= self.threshold_cluster_size[thresh_key]:
self.sig_clusters[stat_key][thresh_key][clust_key] = self.clustered_test_statistic_maps[stat_key][thresh_key][clust_key]
def get_cluster_distributions(self):
return self.threshold_cluster_distribution
def get_significant_clusters(self):
self.calc_significant_clusters()
return self.sig_clusters
def get_n_significant_clusters(self):
# This first line is specific to the onesample test... need to extend for multi statistic tests
self.calc_significant_clusters()
n_sig_clusters = {}
for stat_key in self.clustered_test_statistic_maps.keys():
n_sig_clusters[stat_key] = {}
for thresh_key in self.clustered_test_statistic_maps[stat_key].keys():
n_sig_clusters[stat_key][thresh_key] = len(self.sig_clusters[stat_key][thresh_key])
return n_sig_clusters
def get_threshold_cluster_size(self):
return self.threshold_cluster_size
def get_threshold_test_statistic(self):
return self.threshold_test_statistic
def get_n_significant_individual_points(self):
scalars = vtk_to_numpy(self.test_statistic_maps[self.map_name].GetPointData().GetArray(self.map_name))
n_sig_points = len(np.where(scalars > self.threshold_test_statistic[self.map_name])[0])
return n_sig_points
def add_map_threshold(self, threshold):
self.map_threshold.append(threshold)
def add_reference_change_mesh(self,
filepath,
id=None):
self.reference_mesh = {'filename': filepath,
'mesh': read_vtk(filepath),
'id': id
}
def add_reference_pre_mesh(self,
filepath,
id=None):
self.reference_mesh = {'filename': filepath,
'mesh': read_vtk(filepath),
'id': id
}
def add_change_filepath(self,
change_filepath,
participant_identifier=None,
reference_mesh=False):
id = participant_identifier or self.participant_idx
if id in self.dict_meshes:
self.dict_meshes[id]['change'] = {'filename': change_filepath}
else:
self.dict_meshes[id] = {'change': {'filename': change_filepath}}
if reference_mesh is True:
self.reference_mesh = {'filename': change_filepath,
'mesh': read_vtk(change_filepath),
'id': id}
self.participant_idx += 1
def add_pre_post_filepaths(self,
pre_filepath,
post_filepath,
participant_identifier=None,
reference_mesh=False):
id = participant_identifier or self.participant_idx
if id in self.dict_meshes:
self.dict_meshes[id]['pre'] = {'filename': pre_filepath}
self.dict_meshes[id]['post'] = {'filename': post_filepath}
else:
self.dict_meshes[id] = {'pre': {'filename': pre_filepath},
'post': {'filename': post_filepath}}
if reference_mesh is True:
self.reference_mesh = {'filename': pre_filepath,
'mesh': read_vtk(pre_filepath),
'id': id}
self.participant_idx += 1
def add_change_data_directly(self, change_data):
self.added_change_values_directly = True
self.change_values = change_data
def calculate_change_mesh(self):
"""
Placeholder function for a future implementation that will calculate the change mesh (if it does not already exist).
:return:
"""
def compile_data(self):
"""
Placeholder - classes that inherit must define this
:return:
"""
def compute_test_statistics(self):
"""
Placeholder - classes that inherit must define this
:return:
"""
def cluster_test_statistics(self):
for threshold in self.map_threshold:
clust = Cluster(statistic_mesh=self.test_statistic_maps[self.map_name],
statistic_threshold=threshold,
threshold_type='two_sided',
clust_names='cluster',
clust_idx=0
)
self.clustered_test_statistic_maps[self.map_name][threshold] = clust.get_clusters()
def compute_mc_thresholds(self):
"""
Placeholder - classes that inherit must define this
:return:
"""
def update(self):
if self.added_change_values_directly is False:
self.compile_data()
print('Finished Loading & Compiling all Data')
self.compute_test_statistics()
print('Finished Calculating Test Statistics')
self.cluster_test_statistics()
print('Finished Clustering')
if self.compute_min_cluster_size is True:
self.compute_mc_thresholds()
class SimpleTimeDifference(SingleStatisticSPM):
def __init__(self,
*args,
map_name='z_statistic',
**kwargs
):
kwargs['map_name'] = map_name
super().__init__(*args, **kwargs)
def compile_data(self):
# ADD LOGIC TO TEST IF CHANGE MESHES EXIST. IF THEY DO NOT, COMPUTE THEM USING FOCUSR.
if bool(self.reference_mesh) is False:
if self.find_ref_mesh is True:
# Find the template mesh ... this could take a long time.
raise Exception('Find template mesh not yet implemented')
else:
ref_key = sorted(self.dict_meshes.keys())[0]
if 'change' in self.dict_meshes[ref_key]:
self.reference_mesh = self.dict_meshes[ref_key]['change']
elif 'pre' in self.dict_meshes[ref_key]:
self.reference_mesh = self.dict_meshes[ref_key]['pre']
else:
raise Exception('No Pre or Change mesh defined. MUST have at least one of them')
self.reference_mesh['id'] = ref_key
self.n_participants = len(self.dict_meshes.keys())
self.n_points = self.reference_mesh['mesh'].GetNumberOfPoints()
self.change_values = np.zeros((self.n_participants,
self.n_points))
for participant_idx, participant_id in enumerate(self.dict_meshes.keys()):
print('Loading mesh number: {}'.format(participant_idx))
target_mesh = read_vtk(self.dict_meshes[participant_id]['change']['filename'])
transform = get_icp_transform(source=target_mesh,
target=self.reference_mesh['mesh'],
reg_mode='similarity')
target_mesh = apply_transform(source=target_mesh,
transform=transform)
reg = pyfocusr.Focusr(vtk_mesh_target=target_mesh,
vtk_mesh_source=self.reference_mesh['mesh'],
n_spectral_features=3,
n_extra_spectral=3,
get_weighted_spectral_coords=False,
list_features_to_calc=['curvature'], # 'curvatures', 'min_curvature', 'max_curvature'
rigid_reg_max_iterations=100,
non_rigid_alpha=0.01,
non_rigid_beta=50,
non_rigid_n_eigens=100,
non_rigid_max_iterations=self.registration_max_iterations,
rigid_before_non_rigid_reg=False,
projection_smooth_iterations=30,
graph_smoothing_iterations=300,
feature_smoothing_iterations=30,
include_points_as_features=False,
norm_physical_and_spectral=True,
feature_weights=np.diag([.1, .1]),
n_coords_spectral_ordering=10000,
n_coords_spectral_registration=1000,
initial_correspondence_type='kd',
final_correspondence_type='kd') # 'kd' 'hungarian'
reg.align_maps()
reg.get_source_mesh_transformed_weighted_avg()
ref_mesh_transformed_to_target = reg.weighted_avg_transformed_mesh
target_change_smoothed_on_ref = transfer_mesh_scalars_get_weighted_average_n_closest(ref_mesh_transformed_to_target,
target_mesh,
n=3)
self.change_values[participant_idx, :] = target_change_smoothed_on_ref
# Get all non finite values and assign to be zeros.
self.change_values[np.isnan(self.change_values)] = 0
self.change_values[np.isinf(self.change_values)] = 0
self.change_values[np.isneginf(self.change_values)] = 0
def compute_test_statistics(self):
n_ppl_with_data_change_per_point = np.sum(self.change_values != 0, axis=0)
self.idx_no_data = np.where(n_ppl_with_data_change_per_point <
self.percent_participants_with_data_to_include_vertex*self.change_values.shape[0])
test = test_statistics.OneSampleZTest(self.change_values,
self.reference_mesh['mesh'],
return_new_mesh=True,
idx_not_to_include=self.idx_no_data
)
test.compute_statistics_per_node()
self.test_statistic_maps[self.map_name] = test.get_statistics_mesh()
def compute_mc_thresholds(self):
for threshold in self.map_threshold:
mc_sim = MonteCarloClusterOneSampleTest(self.reference_mesh['mesh'],
self.change_values, # shape = (participants, pts, other... factors)
method=self.mc_cluster_method,
threshold=threshold,
n_iterations=self.n_monte_carlo_iterations,
idx_not_to_include=self.idx_no_data,
idx_to_include=None)
mc_sim.update()
self.threshold_cluster_distribution[threshold] = mc_sim.get_distribution_of_max_clustersizes()
self.threshold_cluster_size[threshold] = mc_sim.get_threshold_clustersize(threshold=self.mc_cluster_extent_significance)
self.threshold_test_statistic[threshold] = mc_sim.get_threshold_test_statistic(threshold=self.mc_point_significance)
class SimpleCorrelation(SingleStatisticSPM):
def __init__(self,
*args,
map_name='t_statistic',
**kwargs
):
kwargs['map_name'] = map_name
super().__init__(*args, **kwargs)
def add_change_filepath(self,
*args,
secondary_data=None,
participant_identifier=None,
**kwargs):
"""
Could potentially have just implemented secondary data to the base version? Even if superfluous?
:param args:
:param secondary_data:
:param participant_identifier:
:param kwargs:
:return:
"""
kwargs['participant_identifier'] = participant_identifier
super().add_change_filepath(*args, **kwargs)
id = participant_identifier or (self.participant_idx-1) # Previous version will have already incremented idx
if secondary_data is not None:
self.dict_meshes[id]['change']['secondary_data'] = secondary_data
def add_pre_post_filepaths(self,
*args,
pre_secondary_data=None,
post_secondary_data=None,
participant_identifier=None,
**kwargs):
"""
Could potentially have just implemented secondary data to the base version? Even if superfluous?
:param args:
:param pre_secondary_data:
:param post_secondary_data:
:param participant_identifier:
:param kwargs:
:return:
"""
kwargs['participant_identifier'] = participant_identifier
super().add_pre_post_filepaths(*args, **kwargs)
id = participant_identifier or (self.participant_idx - 1) # Previous version will have already incremented idx
if pre_secondary_data is not None:
self.dict_meshes[id]['pre']['secondary_data'] = pre_secondary_data
if post_secondary_data is not None:
self.dict_meshes[id]['post']['secondary_data'] = post_secondary_data
def compile_data(self):
# ADD LOGIC TO TEST IF CHANGE MESHES EXIST. IF THEY DO NOT, COMPUTE THEM USING FOCUSR.
if bool(self.reference_mesh) is False:
if self.find_ref_mesh is True:
# Find the template mesh ... this could take a long time.
raise Exception('Find template mesh not yet implemented')
else:
ref_key = sorted(self.dict_meshes.keys())[0]
if 'change' in self.dict_meshes[ref_key]:
self.reference_mesh = self.dict_meshes[ref_key]['change']
elif 'pre' in self.dict_meshes[ref_key]:
self.reference_mesh = self.dict_meshes[ref_key]['pre']
else:
raise Exception('No Pre or Change mesh defined. MUST have at least one of them')
self.reference_mesh['id'] = ref_key
self.n_participants = len(self.dict_meshes.keys())
self.n_points = self.reference_mesh['mesh'].GetNumberOfPoints()
self.change_values = np.zeros((self.n_participants,
self.n_points,
2))
for participant_idx, participant_id in enumerate(self.dict_meshes.keys()):
print('Loading mesh number: {}'.format(participant_idx))
target_mesh = read_vtk(self.dict_meshes[participant_id]['change']['filename'])
transform = get_icp_transform(source=target_mesh,
target=self.reference_mesh['mesh'],
reg_mode='similarity')
target_mesh = apply_transform(source=target_mesh,
transform=transform)
reg = pyfocusr.Focusr(vtk_mesh_target=target_mesh,
vtk_mesh_source=self.reference_mesh['mesh'],
n_spectral_features=3,
n_extra_spectral=3,
get_weighted_spectral_coords=False,
list_features_to_calc=['curvature'], # 'curvatures', 'min_curvature', 'max_curvature'
rigid_reg_max_iterations=100,
non_rigid_alpha=0.01,
non_rigid_beta=50,
non_rigid_n_eigens=100,
non_rigid_max_iterations=self.registration_max_iterations,
rigid_before_non_rigid_reg=False,
projection_smooth_iterations=30,
graph_smoothing_iterations=300,
feature_smoothing_iterations=30,
include_points_as_features=False,
norm_physical_and_spectral=True,
feature_weights=np.diag([.1, .1]),
n_coords_spectral_ordering=10000,
n_coords_spectral_registration=1000,
initial_correspondence_type='kd',
final_correspondence_type='kd') # 'kd' 'hungarian'
reg.align_maps()
reg.get_source_mesh_transformed_weighted_avg()
ref_mesh_transformed_to_target = reg.weighted_avg_transformed_mesh
target_change_smoothed_on_ref = transfer_mesh_scalars_get_weighted_average_n_closest(ref_mesh_transformed_to_target,
target_mesh,
n=3)
if 'secondary_data' in self.dict_meshes[participant_id]['change']:
data = self.dict_meshes[participant_id]['change']['secondary_data']
if isinstance(data, (int, float)) and not isinstance(data, bool):
self.change_values[participant_idx, :, 1] = data
elif isinstance(data, (list, np.ndarray)):
if (len(data) == 1) or (len(data) == self.n_points):
self.change_values[participant_idx, :, 1] = data
else:
raise Exception('Secondary data of type {} is the wrong length: len={}, '
'but the mesh has {} points'.format(type(data), len(data), self.n_points))
else:
raise Exception('Data is type: {}, require: int, float, list, or np.ndarray'.format(type(data)))
else:
raise Exception('No secondary data was provided when adding mesh locations. Future work will allow '
'secondary data to be stored on the mesh - this does not exist yet.')
# To the above warning. Could append data to the mesh with a known array name and then extract.
self.change_values[participant_idx, :, 0] = target_change_smoothed_on_ref
def compute_test_statistics(self):
"""
This is essentially the same as the logic in the SimpleTimeDifference class. Should look at combining
:return:
"""
# test to see points with primary outcome only (ignoring secondary for now)
n_ppl_with_data_change_per_point = np.sum(self.change_values[:, :, 0] != 0, axis=0)
self.idx_no_data = np.where(n_ppl_with_data_change_per_point <
self.percent_participants_with_data_to_include_vertex*self.n_participants)
test = test_statistics.CorrelationTTest(self.change_values,
self.reference_mesh['mesh'],
return_new_mesh=True,
idx_not_to_include=self.idx_no_data
)
test.compute_statistics_per_node()
test_mesh = test.get_statistics_mesh()
test_mesh.GetPointData().SetActiveScalars(self.map_name)
self.test_statistic_maps[self.map_name] = test_mesh
def compute_mc_thresholds(self):
"""
This is essentially the same as the logic in the SimpleTimeDifference class. Should look at combining
:return:
"""
for threshold in self.map_threshold:
mc_sim = MonteCarloClusterCorrelationTest(self.reference_mesh['mesh'],
self.change_values, # shape = (participants, pts, other... factors)
method=self.mc_cluster_method,
threshold=threshold,
n_iterations=self.n_monte_carlo_iterations,
idx_not_to_include=self.idx_no_data,
idx_to_include=None)
mc_sim.update()
self.threshold_cluster_distribution[threshold] = mc_sim.get_distribution_of_max_clustersizes()
self.threshold_cluster_size[threshold] = mc_sim.get_threshold_clustersize(threshold=self.mc_cluster_extent_significance)
self.threshold_test_statistic[threshold] = mc_sim.get_threshold_test_statistic(threshold=self.mc_point_significance)
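# Minimal driver sketch (comments only; the file names and participant IDs are
# placeholders, and the call sequence simply mirrors the public methods
# defined above):
#
#     spm = SimpleTimeDifference(map_threshold=2.33, n_monte_carlo_iterations=1000)
#     spm.add_change_filepath('subj01_change.vtk', participant_identifier='subj01',
#                             reference_mesh=True)
#     spm.add_change_filepath('subj02_change.vtk', participant_identifier='subj02')
#     spm.update()                          # compile data, one-sample z-test per node, cluster, Monte Carlo
#     sig = spm.get_significant_clusters()  # clusters exceeding the Monte Carlo extent threshold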
| 49.640719
| 144
| 0.563691
|
877563710956abe77683fa9ab3678401c98bb799
| 1,378
|
py
|
Python
|
nipy/labs/glm/tests/test_glm.py
|
bpinsard/nipy
|
d49e8292adad6619e3dac710752131b567efe90e
|
[
"BSD-3-Clause"
] | 8
|
2019-05-29T09:38:30.000Z
|
2021-01-20T03:36:59.000Z
|
nipy/labs/glm/tests/test_glm.py
|
bpinsard/nipy
|
d49e8292adad6619e3dac710752131b567efe90e
|
[
"BSD-3-Clause"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
nipy/labs/glm/tests/test_glm.py
|
bpinsard/nipy
|
d49e8292adad6619e3dac710752131b567efe90e
|
[
"BSD-3-Clause"
] | 1
|
2020-07-17T12:49:49.000Z
|
2020-07-17T12:49:49.000Z
|
#!/usr/bin/env python
from __future__ import absolute_import
import numpy as np
from ..glm import glm
from numpy.testing import assert_almost_equal, TestCase
class TestFitting(TestCase):
def make_data(self):
dimt = 100
dimx = 10
dimy = 11
dimz = 12
self.y = np.random.randn(dimt, dimx, dimy, dimz)
X = np.array([np.ones(dimt), list(range(dimt))])
self.X = X.transpose() ## the design matrix X must have dimt lines
def ols(self, axis):
y = np.rollaxis(self.y, 0, axis+1) ## time index is axis
X = self.X
m = glm(y, X, axis=axis)
m1 = glm(y, X, axis=axis, method='kalman')
b = m.beta
b1 = m1.beta
tcon = m.contrast([1,0])
tcon1 = m1.contrast([1,0])
z = tcon.zscore()
z1 = tcon1.zscore()
assert_almost_equal(b, b1)
##assert_almost_equal(v, v1, decimal=2)
##assert_almost_equal(z, z1, decimal=3)
def test_ols_axis0(self):
self.make_data()
self.ols(0)
def test_ols_axis1(self):
self.make_data()
self.ols(1)
def test_ols_axis2(self):
self.make_data()
self.ols(2)
def test_ols_axis3(self):
self.make_data()
self.ols(3)
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
| 24.607143
| 74
| 0.560958
|
fa0fbd04fca9b545dff827b7cdaa5faf246a611c
| 412
|
py
|
Python
|
String/python/leetcode434_Number_of_Segments_in_a_String.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
String/python/leetcode434_Number_of_Segments_in_a_String.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
String/python/leetcode434_Number_of_Segments_in_a_String.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
# Count the number of segments in a string, where a segment is defined to be a contiguous sequence of non-space characters.
# Please note that the string does not contain any non-printable characters.
# Example:
# Input: "Hello, my name is John"
# Output: 5
class Solution(object):
def countSegments(self, s):
"""
:type s: str
:rtype: int
"""
return len(s.split())
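# Quick check against the example in the problem statement above: str.split()
# with no arguments splits on runs of whitespace and drops leading/trailing
# whitespace, so each contiguous run of non-space characters counts as one segment.
assert Solution().countSegments("Hello, my name is John") == 5
assert Solution().countSegments("   ") == 0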
| 25.75
| 123
| 0.648058
|
e979c996b291d12d107dc787e950b991b416e02a
| 3,094
|
py
|
Python
|
kheops_client/_utils.py
|
hirsch-lab/kheops-client
|
ce40f2aa94eb12e47b3cd5417a6cc5f915c39eb1
|
[
"MIT"
] | 1
|
2021-03-19T10:28:15.000Z
|
2021-03-19T10:28:15.000Z
|
kheops_client/_utils.py
|
hirsch-lab/kheops-client
|
ce40f2aa94eb12e47b3cd5417a6cc5f915c39eb1
|
[
"MIT"
] | null | null | null |
kheops_client/_utils.py
|
hirsch-lab/kheops-client
|
ce40f2aa94eb12e47b3cd5417a6cc5f915c39eb1
|
[
"MIT"
] | null | null | null |
import time
import pandas as pd
import pydicom as dicom
from pathlib import Path
def ensure_dir(path, forced):
path = Path(path)
if not path.is_dir():
path.mkdir(parents=True, exist_ok=forced)
return path.is_dir()
def flatten(value):
return value[0] if value is not None and len(value)==1 else value
def keyword_to_tag(keyword):
tag = dicom.tag.Tag(keyword)
tag = "%04X%04X" % (tag.group, tag.element)
return tag
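# e.g. keyword_to_tag("PatientID") -> "00100020" (group 0x0010, element 0x0020)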
def extract_value(data, keyword, key="Value"):
"""
Extract a key by its name from the DICOM dict returned by DICOMWeb.
data: dictionary mapping DICOM tags to its values (= inner dict)
keyword: name of keyword to extract
key: key to extract the information from the inner dict
"""
entry = data[keyword_to_tag(keyword)]
return flatten(entry.get(key, None))
def dicomize_json_result(data, meta_only=True):
"""
data: dict
"""
if meta_only:
def handler(uri): return b""
else:
handler = None
return dicom.dataset.Dataset.from_json(
data, bulk_data_uri_handler=handler)
def dicomize_json_results(data, meta_only=True):
"""
data: list of dict
"""
ret = [dicomize_json_result(d) for d in data]
return ret
def dicoms_to_frame(dicoms, keywords=None):
"""
Convert a list of DICOM dicts (pydicom.dataset.Dataset)
into a pandas DataFrame.
Keywords can be a list of DICOM keywords or DICOM tags.
The function makes use of pydicom.dataset.Dataset.get().
By default, the following keywords are extracted:
StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID, Modality.
"""
if keywords is None:
# Default keywords to collect from dicoms.
keywords = ["StudyInstanceUID",
"SeriesInstanceUID",
"SOPInstanceUID",
"Modality"]
data = dict()
for keyword in keywords:
data[keyword] = [d.get(keyword, default=None) for d in dicoms]
df = pd.DataFrame(data, columns=keywords)
return df
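# e.g. dicoms_to_frame([ds1, ds2]) for two pydicom Datasets returns a two-row
# DataFrame with the default columns StudyInstanceUID, SeriesInstanceUID,
# SOPInstanceUID and Modality; keywords missing from a Dataset come back as None.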
def sort_frame_by_uid(df, by):
def uid_key(uid):
"""
Convert a uid into a tuple of integers.
Example: "123.456.789" --> (123,456,789)
"""
uid = uid.split(".")
try:
uid = tuple(map(int, uid))
except ValueError as e:
if not "invalid literal for int()" in e:
raise
return uid
def sort_key(series):
return series.apply(uid_key)
# This requires pandas>=1.1.0
df = df.sort_values(by=by, key=sort_key)
return df
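# e.g. sorting by "SOPInstanceUID" puts "1.2.9" before "1.2.10", whereas a plain
# lexicographic sort on the raw strings would reverse them.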
def sizeof_fmt(size, suffix="b"):
"""
"""
for unit in ["", "k", "M", "G", "T", "P", "E", "Z"]:
if abs(size) < 1024.0:
return "%3.1f%s%s" % (size, unit, suffix)
size /= 1024.0
return "%.1f%s%s" % (size, "Y", suffix)
def strip_strings(df, cols=None):
if cols is None:
cols = df.columns
for c in cols:
try:
df[c] = df[c].str.strip()
except AttributeError:
pass
return df
| 25.783333
| 74
| 0.600194
|
1571ae5ae92387dd2627dd76c40db6ffa8e704ed
| 35,058
|
py
|
Python
|
chalicelib/aws_data_api.py
|
awslabs/aws-data-api
|
81f6ad1fd89935fcec600ced2b404f37d87254fe
|
[
"Apache-2.0"
] | 90
|
2020-11-20T12:35:03.000Z
|
2021-07-29T21:20:55.000Z
|
chalicelib/aws_data_api.py
|
awslabs/aws-data-api
|
81f6ad1fd89935fcec600ced2b404f37d87254fe
|
[
"Apache-2.0"
] | 1
|
2020-11-20T13:11:09.000Z
|
2020-11-20T13:11:09.000Z
|
chalicelib/aws_data_api.py
|
awslabs/aws-data-api
|
81f6ad1fd89935fcec600ced2b404f37d87254fe
|
[
"Apache-2.0"
] | 6
|
2020-11-23T11:09:04.000Z
|
2022-03-16T14:40:52.000Z
|
from chalicelib.exceptions import *
import chalicelib.utils as utils
import chalicelib.parameters as params
from chalicelib.utils import identity_trace
import logging
from chalicelib.api_metadata import ApiMetadata
from chalicelib.gremlin_handler import GremlinHandler
import sys
import os
import urllib.parse as parser
import json
import boto3
from elasticsearch import Elasticsearch
from aws_xray_sdk.core import patch
__version__ = "0.9.0b1"
# patch boto3 with xray instrumentation if the environment is configured
if utils.strtobool(os.getenv(params.XRAY_ENABLED, 'false')) is True:
patch(['boto3'])
log = None
# non-class method to get the status of an API, as the class version represents a fully online API, while non-class
# methods can be used for APIs that are still in process
def get_api_status(api_name: str, stage: str, region: str, logger: logging.Logger = None) -> dict:
"""
Method to return the status of an API Namespace for the specified Stage in a Region.
:param api_name: The API Namespace to get the status of
:param stage: The Stage to query for the Namespace status
:param region: The AWS Region in which the Stage is provisioned
:return: dict:
Status: The Status of the API Namespace in the stage
"""
global log
if logger is None:
log = utils.setup_logging()
else:
log = logger
api_metadata_handler = ApiMetadata(region, log)
s = "Status"
return {s: api_metadata_handler.get_api_metadata(api_name=api_name, stage=stage).get(s)}
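# Illustrative call (the namespace name is a placeholder):
#   get_api_status(api_name="CustomerData", stage="dev", region="us-east-1")
#   -> {"Status": <whatever Status the API metadata records for that namespace and stage>}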
# non-class method to get all registered api endpoints
def get_registry(region: str, stage: str, logger: logging.Logger = None) -> list:
"""
Method to return the list of all API Namespaces in the Stage and Region
:param region: The AWS Region to query
:param stage: The STage to query for Namespaces
:return: list:
str: Namespace name
"""
# create an API Metadata Handler
global log
if logger is None:
logging.basicConfig()
log = logging.getLogger(params.AWS_DATA_API_NAME)
log.setLevel(logging.INFO)
else:
log = logger
api_metadata_handler = ApiMetadata(region, log)
all_apis = api_metadata_handler.get_all_apis()
return_apis = []
for a in all_apis:
if f"-{stage}" in a:
return_apis.append(a.replace(f"-{stage}", ""))
return return_apis
# method to stand up a new namespace for Data API's via an async lambda
def async_provision(api_name: str, stage: str, region: str, logger: logging.Logger, **kwargs) -> None:
"""
Method to provision a new API Namespace in the specified Stage and Region. Get Namespace status with :func:`~aws_data_api.get_api_status`.
:param api_name: The API Namespace name to create
:param stage: The Stage where the Namespace should be created
:param region: The AWS Region where the Stage is deployed
:param kwargs: Dictionary of provisioning arguments. This can include:
:return: None
"""
kwargs['ApiName'] = api_name
lambda_client = boto3.client("lambda", region_name=region)
f = f"{params.AWS_DATA_API_NAME}-{stage}-{params.PROVISIONER_NAME}"
logger.debug(f"Requesting async provision of API {api_name}")
logger.debug(kwargs)
response = lambda_client.invoke(FunctionName=f, InvocationType='Event',
Payload=json.dumps(kwargs))
if "FunctionError" in response:
if response.get("FunctionError") == "Handled":
raise DetailedException(response.get("Payload"))
else:
raise DetailedException("Unhandled Error Occurred during submission of Async Provisioning Request")
else:
return response.get("Status")
# method to load an instance of the Data API class from a metadata dict
def load_api(**kwargs):
"""
Static method to instantiate a new instance of :func:`~aws_data_api.AwsDataAPI`
:param kwargs: Dictionary of setup arguments
:return: :func:`~aws_data_api.AwsDataAPI`
"""
# remove bits from the keyword args that will bork the metadata call
utils.remove_internal_attrs(kwargs)
# now create the API instance
api = AwsDataAPI(**kwargs)
return api
# Main class implementing Data API functionality
class AwsDataAPI:
_full_config = None
_app = None
_region = None
_deployment_stage = None
_deployed_account = None
_gremlin_address = None
_gremlin_endpoint = None
_es_client = None
_search_config = None
_storage_handler = None
_catalog_database = None
_api_name = None
_table_name = None
_pk_name = None
_sts_client = None
_cwl_client = None
_log_group_name = None
_last_log_info = None
_caller_identity = None
_simple_identity = None
_logger = None
_delete_mode = None
_allow_runtime_delete_mode_change = False
_crawler_rolename = None
_table_indexes = None
_metadata_indexes = None
_schema_validation_refresh_hitcount = None
_allow_non_itemmaster_writes = None
_strict_occv = None
_dynamo_helper = None
_lambda_client = None
_cloudwatch_emitter = None
_api_metadata_handler = None
def __init__(self, **kwargs):
self._region = kwargs.get(params.REGION, os.getenv('AWS_REGION'))
self._full_config = kwargs
self._api_name = kwargs.get('ApiName')
self._table_name = kwargs.get(params.STORAGE_TABLE)
# setup instance logger
self._logger = utils.setup_logging(params.AWS_DATA_API_NAME)
global log
log = self._logger
# create the API metadata handler
self._api_metadata_handler = ApiMetadata(self._region, self._logger, kwargs.get(params.KMS_KEY_ARN))
log.debug("Instantiating new Data API Namespace")
log.debug(kwargs)
# Load class properties from any supplied metadata. These will be populated when hydrating an existing API
# namespace from DynamoDB
self._app = kwargs.get(params.APP, None)
self._deployment_stage = kwargs.get(params.STAGE)
self._pk_name = kwargs.get(params.PRIMARY_KEY, None)
self._delete_mode = kwargs.get(params.DELETE_MODE, params.DEFAULT_DELETE_MODE)
self._allow_runtime_delete_mode_change = kwargs.get(params.ALLOW_RUNTIME_DELETE_MODE_CHANGE,
params.DEFAULT_ALLOW_RUNTIME_DELETE_MODE_CHANGE)
self._crawler_rolename = kwargs.get(params.CRAWLER_ROLENAME, None)
self._table_indexes = kwargs.get(params.TABLE_INDEXES, None)
self._metadata_indexes = kwargs.get(params.METADATA_INDEXES, None)
self._schema_validation_refresh_hitcount = kwargs.get(params.SCHEMA_VALIDATION_REFRESH_HITCOUNT,
params.DEFAULT_SCHEMA_VALIDATION_REFRESH_HITCOUNT)
self._gremlin_address = kwargs.get(params.GREMLIN_ADDRESS, None)
self._allow_non_itemmaster_writes = kwargs.get(params.NON_ITEM_MASTER_WRITES_ALLOWED,
params.DEFAULT_NON_ITEM_MASTER_WRITE_ALLOWED)
self._strict_occv = kwargs.get(params.STRICT_OCCV, params.DEFAULT_STRICT_OCCV)
self._catalog_database = kwargs.get(params.CATALOG_DATABASE, params.DEFAULT_CATALOG_DATABASE)
# setup the storage handler which implements the backend data api functionality
self._storage_handler = self._get_storage_handler(table_name=self._table_name,
primary_key_attribute=self._pk_name,
region=self._region,
delete_mode=self._delete_mode,
allow_runtime_delete_mode_change=self._allow_runtime_delete_mode_change,
table_indexes=self._table_indexes,
metadata_indexes=self._metadata_indexes,
schema_validation_refresh_hitcount=self._schema_validation_refresh_hitcount,
crawler_rolename=self._crawler_rolename,
catalog_database=self._catalog_database,
allow_non_itemmaster_writes=self._allow_non_itemmaster_writes,
strict_occv=self._strict_occv,
deployed_account=kwargs.get(params.DEPLOYED_ACCOUNT, None),
handler_name=kwargs[params.STORAGE_HANDLER],
pitr_enabled=bool(kwargs.get(params.PITR_ENABLED,
params.DEFAULT_PITR_ENABLED)),
kms_key_arn=kwargs.get(params.STORAGE_CRYPTO_KEY_ARN, None))
# setup the gremlin integration if one has been provided
if self._gremlin_address is not None:
log.info(f"Binding new Gremlin Handler to address {self._gremlin_address}")
tokens = self._gremlin_address.split(":")
self._gremlin_endpoint = GremlinHandler(url=tokens[0], port=tokens[1])
if "SearchConfig" in kwargs:
self._search_config = kwargs.get("SearchConfig")
log.info(f"AWS Data API for {self._catalog_database}.{self._table_name} Online.")
# method which writes a set of object references to the Gremlin helper class
def _put_references(self, id: str, reference_doc: list):
g = self._gremlin_endpoint
if g is not None:
from_id = utils.get_arn(id, self._table_name, self._deployed_account)
ctr = 0
exceptions = []
for r in reference_doc:
if params.RESOURCE not in r:
raise InvalidArgumentsException(f"Malformed Reference: {r}. Must Contain a {params.RESOURCE}")
else:
to_id = r[params.RESOURCE]
# remove the resource and ID keys so we can use the rest of the document for extra properties
del r[params.RESOURCE]
try:
g.create_relationship(label=params.REFERENCES, from_id=from_id, to_id=to_id, extra_properties=r)
ctr += 1
except Exception as e:
exceptions.append({
"ID": to_id,
"Message": e.message
})
response = {"ReferenceCount": ctr}
if len(exceptions) > 0:
response["Exceptions"] = exceptions
return response
else:
raise UnimplementedFeatureException(NO_GREMLIN)
def _get_storage_handler(self, table_name, primary_key_attribute, region, delete_mode,
allow_runtime_delete_mode_change, table_indexes,
metadata_indexes, schema_validation_refresh_hitcount, crawler_rolename, catalog_database,
allow_non_itemmaster_writes, strict_occv, deployed_account, handler_name,
pitr_enabled=None, kms_key_arn=None):
"""
Method to load a Storage Handler class based upon the configured handler name.
:param table_name:
:param primary_key_attribute:
:param region:
:param delete_mode:
:param allow_runtime_delete_mode_change:
:param table_indexes:
:param metadata_indexes:
:param schema_validation_refresh_hitcount:
:param crawler_rolename:
:param catalog_database:
:param allow_non_itemmaster_writes:
:param strict_occv:
:param deployed_account:
:param handler_name:
:param pitr_enabled:
:param kms_key_arn:
:return:
"""
log.info(f"Creating new Data API Storage Handler from {handler_name}")
sys.path.append("chalicelib")
storage_module = __import__(handler_name)
storage_class = getattr(storage_module, "DataAPIStorageHandler")
return storage_class(table_name, primary_key_attribute,
region,
delete_mode,
allow_runtime_delete_mode_change,
table_indexes,
metadata_indexes,
schema_validation_refresh_hitcount,
crawler_rolename,
catalog_database,
allow_non_itemmaster_writes,
strict_occv,
deployed_account,
pitr_enabled,
kms_key_arn)
# simple accessor method for the pk_name attribute, which is required in some cases for API integration
def get_primary_key(self):
return self._pk_name
# access method that returns a boolean outcome based upon if the provided ID is valid
# @evented(api_operation="Check")
@identity_trace
def check(self, id):
return self._storage_handler.check(id=id)
# return a paginated list of elements from the API
# @evented(api_operation="List")
@identity_trace
def list(self, **kwargs):
return self._storage_handler.list_items(**kwargs)
# return information about storage usage for this API namespace
# @evented(api_operation="Usage")
@identity_trace
def get_usage(self):
resources = self._storage_handler.get_usage(table_name=self._table_name)
metadata = self._storage_handler.get_usage(table_name=utils.get_metaname(self._table_name))
references = None
# TODO figure out why the gremlin connection is failing
# if self._gremlin_endpoint is not None:
# references = self._gremlin_endpoint.get_usage()
usage = {
params.RESOURCE: resources,
params.METADATA: metadata
}
if references is not None:
usage[params.REFERENCES] = {"Count": references}
return usage
# run the natural language understanding integration, which attaches new Metadata to the Resource
# @evented(api_operation="Understand")
@identity_trace
def understand(self, id, storage_location=None):
fetch_id = self._validate_arn_id(id)
# validate the attribute that stores the location of the object
if storage_location is None:
storage_location = params.DEFAULT_STORAGE_LOCATION_ATTRIBUTE
# fetch the resource
item = self._storage_handler.get(id=fetch_id)
storage_loc = None
if item is None:
raise ResourceNotFoundException(f"Unable to find Resource with ID {fetch_id}")
else:
if storage_location in item[params.RESOURCE]:
storage_loc = item.get(params.RESOURCE).get(storage_location)
else:
# storage location may be in metadata
meta = self._storage_handler.get_metadata(id)
if storage_location in meta[params.METADATA]:
storage_loc = meta.get(params.METADATA).get(storage_location)
if storage_loc is None:
raise DetailedException(
f"Unable to run Metadata Resolver without a Storage Location Attribute in Item Resource or Metadata (Default {params.DEFAULT_STORAGE_LOCATION_ATTRIBUTE})")
if self._lambda_client is None:
self._lambda_client = boto3.client("lambda", region_name=self._region)
# run the understander and metadata update through an async lambda
f = f"{params.AWS_DATA_API_NAME}-{self._deployment_stage}-{params.UNDERSTANDER_NAME}"
args = {
"prefix": storage_loc,
"id": fetch_id,
"caller": self._simple_identity,
"primary_key_attribute": self._pk_name,
"ApiName": self._api_name,
"ApiStage": self._deployment_stage
}
response = self._lambda_client.invoke(FunctionName=f, InvocationType='Event', Payload=json.dumps(args))
if "FunctionError" in response:
if response.get("FunctionError") == "Handled":
raise DetailedException(response.get("Payload"))
else:
raise DetailedException("Unhandled error occurred during submission of async Understanding request")
else:
return response.get("StatusCode")
def _validate_arn_id(self, id):
# decode the ID as it forms part of the request url
decoded_id = parser.unquote(id)
log.debug(f"Validating Resource ARN {id}")
if utils.get_arn_base() in decoded_id:
# validate arn structure and then fetch by id
arn = utils.shred_arn(decoded_id)
if arn is None:
raise ResourceNotFoundException(f"Invalid ARN format {decoded_id}")
if utils.get_caller_account() != arn[params.ARN_ACCOUNT]:
raise ResourceNotFoundException("Requested resource not available from Data API Account")
if self._table_name != arn[params.ARN_TABLE]:
                raise ResourceNotFoundException(
                    f"Requested resource {arn[params.ARN_TABLE]} not available from Data API {self._table_name}")
if arn[params.ARN_REGION] != self._region:
                raise ResourceNotFoundException(f"ARN Region {arn[params.ARN_REGION]} does not match API Region {self._region}")
fetch_id = arn[params.ARN_ID]
else:
fetch_id = decoded_id
return fetch_id
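    # _validate_arn_id accepts either a URL-encoded Data API ARN - validated against the caller account,
    # table name and region before the embedded ID is extracted - or a bare primary-key value, which is
    # returned unchanged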
# get the Resource, which may include or prefer the Item Master
# @evented(api_operation="GetResource")
@identity_trace
def get(self, id, master_option, suppress_meta_fetch: bool = False, only_attributes: list = None,
not_attributes: list = None):
fetch_id = self._validate_arn_id(id)
response = {}
item = self._storage_handler.get(id=fetch_id, suppress_meta_fetch=suppress_meta_fetch,
only_attributes=only_attributes, not_attributes=not_attributes)
# set the 'Item' in the response unless master_option = prefer
if params.ITEM_MASTER_ID not in item[params.RESOURCE] or \
master_option is None or \
master_option.lower() == params.ITEM_MASTER_INCLUDE.lower():
response["Item"] = item
# extract the master if there is one, and the provided master option is 'include' or 'prefer'
# TODO Test what happens if we have very large Item Master hierarchies here
if params.ITEM_MASTER_ID in item[params.RESOURCE] and master_option is not None and master_option.lower() in [
params.ITEM_MASTER_INCLUDE.lower(),
params.ITEM_MASTER_PREFER.lower()]:
master = self._storage_handler.get(id=item[params.RESOURCE][params.ITEM_MASTER_ID])
response["Master"] = master
return response
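    # Illustrative get() response shape (contents are placeholders):
    #   {"Item": {params.RESOURCE: {...}, params.METADATA: {...}},
    #    "Master": {...}}
    # "Master" is only present when the Item carries an Item Master reference and master_option is
    # include/prefer, and "Item" is omitted when master_option is 'prefer' and an Item Master exists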
# undelete a Data API Resource that has been soft deleted (non-Tombstone)
# @evented(api_operation="Restore")
@identity_trace
def restore(self, id):
fetch_id = self._validate_arn_id(id)
return self._storage_handler.restore(id=fetch_id, caller_identity=self._simple_identity)
# get the Metadata for a Resource
# @evented(api_operation="GetMetadata")
@identity_trace
def get_metadata(self, id):
fetch_id = self._validate_arn_id(id)
return self._storage_handler.get_metadata(id=fetch_id)
# Delete a Resource and Metadata based upon the specified deletion mode of the system or in the request
# @evented(api_operation="Delete")
@identity_trace
def delete(self, id, **kwargs):
fetch_id = self._validate_arn_id(id)
return self._storage_handler.delete(id=fetch_id, caller_identity=self._simple_identity, **kwargs)
# Update a Data API Resource
# @evented(api_operation="Update")
@identity_trace
def update_item(self, id, **kwargs):
response = {}
        def _wrap_response(result_type, type_res):
            response[result_type] = {
                params.DATA_MODIFIED: type_res is not None
            }
            if type_res is not None and "Messages" in type_res:
                response["Messages"] = type_res.get("Messages")
fetch_id = self._validate_arn_id(id)
if params.REFERENCES in kwargs:
log.debug("Creating Reference Links")
_wrap_response(params.REFERENCES, self._put_references(id, kwargs.get(params.REFERENCES)))
# update the item, which may update metadata and resources
item_response = self._storage_handler.update_item(caller_identity=self._simple_identity, id=fetch_id,
**kwargs)
_wrap_response(params.METADATA, item_response.get(params.METADATA))
_wrap_response(params.RESOURCE, item_response.get(params.RESOURCE))
return response
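    # Illustrative update_item() response shape:
    #   {params.RESOURCE: {params.DATA_MODIFIED: True},
    #    params.METADATA: {params.DATA_MODIFIED: False},
    #    params.REFERENCES: {params.DATA_MODIFIED: True}}
    # the References entry appears only when reference links were supplied, and warning "Messages" are
    # surfaced at the top level when the storage handler returns them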
# Drop an entire API Namespace. This will do a backup before dropping the underlying storage tables
# @evented(api_operation="DropAPI")
@identity_trace
def drop(self, do_export=True):
# drop tables with final backup
self._storage_handler.drop_table(table_name=self._table_name, do_export=do_export)
self._storage_handler.drop_table(table_name=utils.get_metaname(self._table_name), do_export=do_export)
# delete API information
self._api_metadata_handler.delete_all_api_metadata(self._api_name, self._deployment_stage)
# Perform a search request against the Resource or Metadata, based upon provided query args
# @evented(api_operation="Find")
@identity_trace
def find(self, **kwargs):
return self._storage_handler.find(**kwargs)
def _get_es_endpoint(self):
return self._search_config.get("ElasticSearchDomain").get("ElasticSearchEndpoint")
# private lazy loader method for es client to ensure that we don't get constructor stalls if VPC connections are weird
def _get_es_client(self):
if self._es_client is None:
# setup a reference to ElasticSearch if a SearchConfig is setup
self._es_client = Elasticsearch(hosts=[self._get_es_endpoint()])
return self._es_client
# Perform a search request against the configured ES endpoint
# @evented(api_operation="Search")
@identity_trace
def search(self, search_type, **kwargs):
        if self._search_config is None:
            raise UnimplementedFeatureException("No ElasticSearch Endpoint Configured")
else:
response = {}
def _add_results(result_type):
index_name = utils.get_es_index_name(self._table_name, result_type)
                doc = utils.get_es_type_name(self._table_name, result_type)
response[result_type] = self._get_es_client().search(index=index_name, doc_type=doc,
body=kwargs.get("query"))
if search_type is not None:
# perform a search just for the specified type of data
_add_results(search_type)
else:
# perform a search across both Resource and Metadata indexes
_add_results(params.RESOURCE)
_add_results(params.METADATA)
return response
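    # Illustrative search() call - the "query" kwarg is passed verbatim as the ElasticSearch request body
    # against indexes derived from the table name, e.g. (placeholder attribute and value):
    #   api.search(search_type=params.RESOURCE, query={"query": {"match": {"<attribute>": "<value>"}}})
    # where "api" is a hypothetical instance of this class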
# Return the API's underlying storage implementations, including tables in use, Dynamo Streams that can be processed
# and references to Gremlin and ElasticSearch endpoints in use
# @evented(api_operation="Endpoints")
@identity_trace
def get_endpoints(self):
endpoints = self._storage_handler.get_streams()
if self._gremlin_address is not None:
endpoints['GraphURL'] = self._gremlin_address
if self._search_config is not None:
endpoints['Elasticsearch'] = self._get_es_endpoint()
return endpoints
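    # the returned endpoint map always contains the storage handler's stream references, and adds
    # 'GraphURL' and/or 'Elasticsearch' entries when Gremlin or a SearchConfig is configured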
# Return the JSON schema for an API Namespace
# @evented(api_operation="GetSchema")
@identity_trace
def get_schema(self, schema_type):
return self._api_metadata_handler.get_schema(api_name=self._api_name, stage=self._deployment_stage,
schema_type=schema_type)
# Create or Update a JSON Schema for the API Namespace Resources or Metadata
# @evented(api_operation="PutSchema")
@identity_trace
def put_schema(self, schema_type, schema):
return self._api_metadata_handler.put_schema(api_name=self._api_name, stage=self._deployment_stage,
schema_type=schema_type,
caller_identity=self._simple_identity, schema=schema).get(
params.DATA_MODIFIED)
# Remove the JSON Schema from the Namespace for Resources or Metadata
# @evented(api_operation="DeleteSchema")
@identity_trace
def remove_schema(self, schema_type):
if schema_type.lower() == params.RESOURCE.lower():
set_schema_type = params.CONTROL_TYPE_RESOURCE_SCHEMA
elif schema_type.lower() == params.METADATA.lower():
set_schema_type = params.CONTROL_TYPE_METADATA_SCHEMA
else:
raise InvalidArgumentsException(
f"Schema Type {schema_type} invalid. Use {params.CONTROL_TYPE_METADATA_SCHEMA} or {params.CONTROL_TYPE_RESOURCE_SCHEMA}")
return self._api_metadata_handler.delete_metadata(api_name=self._api_name, stage=self._deployment_stage,
metadata_type=set_schema_type,
caller_identity=self._simple_identity)
# Setup the Item Master for a given Resource
# @evented(api_operation="SetItemMaster")
@identity_trace
def item_master_update(self, **kwargs):
return self._storage_handler.item_master_update(caller_identity=self._simple_identity, **kwargs)
# Remote the specified Item Master for a given Resource
# @evented(api_operation="RemoveItemMaster")
@identity_trace
def item_master_delete(self, **kwargs):
item_id = kwargs.get(self._pk_name)
if item_id is None:
            raise ResourceNotFoundException(f"Request must include the Primary Key attribute {self._pk_name}")
else:
# validate that this item actually has the correct item master set
current = self._storage_handler.get(id=item_id)
assert_item_master = kwargs.get(params.ITEM_MASTER_ID)
current_master = current.get(params.RESOURCE).get(params.ITEM_MASTER_ID, None)
if current_master is None:
return True
elif current_master != assert_item_master:
                raise InvalidArgumentsException(f"Item Master {assert_item_master} does not match actual Item Master {current_master}")
else:
return self._storage_handler.remove_resource_attributes(id=item_id,
resource_attributes=[params.ITEM_MASTER_ID],
caller_identity=self._simple_identity)
# Extract the Metadata for the API itself
# @evented(api_operation="GetApiMetadata")
@identity_trace
def get_table_metadata(self, attribute_filters=None):
return self._api_metadata_handler.get_api_metadata(api_name=self._api_name, stage=self._deployment_stage,
attribute_filters=attribute_filters)
# Create or Update API Metadata
# @evented(api_operation="CreateApiMetadata")
@identity_trace
def create_table_metadata(self, caller_identity=None, **kwargs):
try:
return self._dynamo_helper.create_table_metadata(api_name=self._table_name,
caller_identity=self._simple_identity if caller_identity is None else caller_identity,
**kwargs)
except Exception as e:
raise DetailedException(e)
# Perform a search for all References in the Gremlin DB for objects that directly or indirectly reference an API Item
# @evented(api_operation="GetDownstreamReferences")
@identity_trace
def get_downstream(self, id, search_depth=1):
if self._gremlin_endpoint is not None:
if id is None:
raise InvalidArgumentsException("Must have ID to run lineage search")
else:
try:
return self._gremlin_endpoint.get_outbound(
id=utils.get_arn(id, self._table_name, self._deployed_account),
search_depth=search_depth)
except ResourceNotFoundException:
return None
except Exception as e:
raise DetailedException(e)
else:
raise UnimplementedFeatureException(params.NO_GREMLIN)
# Perform a search for all References that the provided API Item references, directly or indirectly
# @evented(api_operation="GetUpstreamReferences")
@identity_trace
def get_upstream(self, id, search_depth=1):
if self._gremlin_endpoint is not None:
if id is None:
raise InvalidArgumentsException("Must have ID to run lineage search")
else:
try:
return self._gremlin_endpoint.get_inbound(
id=utils.get_arn(id, self._table_name, self._deployed_account),
search_depth=search_depth)
except ResourceNotFoundException:
return None
except Exception as e:
raise DetailedException(e)
else:
raise UnimplementedFeatureException(params.NO_GREMLIN)
def _do_ddb_export_to_s3(self, table_name, export_path, log_path, read_pct, dpu,
kms_key_arn, setup_crawler, catalog_database=None):
if setup_crawler is True and self._crawler_rolename is None:
raise InvalidArgumentsException(
"Cannot Setup Crawler for Exported Dataset as API is not configured with a Crawler Role")
set_table_name = f"{table_name}_{utils.get_date_now()}"
export = utils.run_glue_export(table_name=set_table_name, s3_export_path=export_path,
kms_key_arn=kms_key_arn,
read_pct=read_pct, log_path=log_path, export_role=self._crawler_rolename,
dpu=dpu)
        if setup_crawler is True:
crawler = utils.create_s3_crawler(target_entity_name=set_table_name,
crawler_name=f"{table_name}-export",
crawler_rolename=self._crawler_rolename,
catalog_db=f"{self._catalog_database}-export" if catalog_database is None else catalog_database,
s3_path=export_path,
and_run=True)
if crawler is not None:
export['Crawler'] = crawler
else:
msg = "Unable to configure Export Location Crawler"
export['Errors'] = [{"Error": msg}]
raise DetailedException(message=msg, detail=export)
return export
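    # the export is run as a Glue job against a date-suffixed target name ("<table>_<date>"); when a crawler
    # is requested it is created as "<table>-export" and registers the exported data in the supplied catalog
    # database, defaulting to "<catalog_database>-export"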
# Get the status of an API Export to S3
# @evented(api_operation="GetExportStatus")
@identity_trace
def get_export_job_status(self, job_name, run_id):
return utils.get_glue_job_status(job_name=job_name, run_id=run_id)
# Get a list of all export jobs running
# @evented(api_operation="GetExportJobs")
@identity_trace
def get_running_export_jobs(self, job_name):
return utils.get_running_export_jobs(job_name=job_name)
# Start an export of API Data to S3
# @evented(api_operation="StartExport")
@identity_trace
def export_to_s3(self, **kwargs):
EXPORT_DATA = 'Data'
EXPORT_META = 'Metadata'
EXPORT_ALL = 'All'
export_path = kwargs.get(params.EXPORT_S3_PATH)
if export_path is None:
raise Exception("Cannot export without S3 Export Path")
dpu = int(kwargs.get(params.EXPORT_JOB_DPU, params.DEFAULT_EXPORT_DPU))
kms_key_arn = kwargs.get(params.KMS_KEY_ARN, None)
read_pct = int(kwargs.get(params.EXPORT_READ_PCT, 50))
log_path = kwargs.get(params.EXPORT_LOG_PATH)
export_type = kwargs.get(params.EXPORT_TYPE, EXPORT_DATA)
catalog_database = kwargs.get(params.CATALOG_DATABASE)
export_types = [EXPORT_DATA, EXPORT_META, EXPORT_ALL]
        if export_type not in export_types:
            raise InvalidArgumentsException("ExportType must be one of {0}, {1}, or {2}".format(*export_types))
        def _fix_path(path):
            # ensure the S3 path carries a trailing slash
            if not path.endswith("/"):
                path += "/"
            return path
        export_path = _fix_path(export_path)
crawl = kwargs.get(params.EXPORT_SETUP_CRAWLER, None)
out = {}
# export main data to s3 location
if export_type == EXPORT_DATA or export_type == EXPORT_ALL:
result = self._do_ddb_export_to_s3(table_name=self._table_name, export_path=export_path,
log_path=log_path,
read_pct=read_pct, dpu=dpu,
kms_key_arn=kms_key_arn, setup_crawler=crawl,
catalog_database=catalog_database)
if result is not None:
out[EXPORT_DATA] = result
# export metadata to S3
if export_type == EXPORT_META or export_type == EXPORT_ALL:
result = self._do_ddb_export_to_s3(table_name=utils.get_metaname(self._table_name),
export_path=export_path,
log_path=log_path,
read_pct=read_pct, dpu=dpu,
kms_key_arn=kms_key_arn, setup_crawler=crawl,
catalog_database=catalog_database)
if result is not None:
out[EXPORT_META] = result
return out
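    # Illustrative export_to_s3() invocation (key names come from the params constants read above; values
    # are placeholders):
    #   api.export_to_s3(**{params.EXPORT_S3_PATH: "s3://<bucket>/<prefix>",
    #                       params.EXPORT_TYPE: "All",
    #                       params.EXPORT_SETUP_CRAWLER: True})
    # where "api" is a hypothetical instance of this class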
| 44.265152
| 171
| 0.62602
|
ba036309723816995f4173198b26bd2ff14b39a0
| 1,779
|
py
|
Python
|
Torch/kerman_circuits.py
|
Shahrukh-Chishti/DiSuQ
|
2306da37b77685f06c598f59484b73cba500d94e
|
[
"Apache-2.0"
] | null | null | null |
Torch/kerman_circuits.py
|
Shahrukh-Chishti/DiSuQ
|
2306da37b77685f06c598f59484b73cba500d94e
|
[
"Apache-2.0"
] | null | null | null |
Torch/kerman_circuits.py
|
Shahrukh-Chishti/DiSuQ
|
2306da37b77685f06c598f59484b73cba500d94e
|
[
"Apache-2.0"
] | null | null | null |
import numpy,sys
from torch import tensor  # used for the flux bias passed to kermanHamiltonianJosephson in __main__
from circuit import Circuit, hamiltonianEnergy, phase
from components import J,C,L,pi,h
from numpy.linalg import det
from pyvis import network as pvnet
def transmon(basis,Ej=10,Ec=0.3):
transmon = [J(0,1,Ej)]
transmon += [C(0,1,Ec)]
transmon = Circuit(transmon,basis)
return transmon
def splitTransmon(basis):
transmon = [J(0,1,10),C(0,1,100)]
transmon += [L(1,2,.0003,'I',True)]
transmon += [J(2,0,10),C(2,0,100)]
transmon = Circuit(transmon,basis)
return transmon
def oscillatorLC(basis,El=.00031,Ec=51.6256):
oscillator = [L(0,1,El),C(0,1,Ec)]
return Circuit(oscillator,basis)
def shuntedQubit():
circuit = [J(1,2,10.0),C(1,2,100.0)]
circuit += [J(2,3,10.0),C(2,3,500.0)]
circuit += [J(3,0,10.0),C(3,0,200.0)]
circuit += [L(0,1,.0001,'I',True)]
circuit = Circuit(circuit)
return circuit
def phaseSlip(basis,inductance=[.001,.0005,.00002,.00035,.0005],capacitance=[100,30,30,30,30,40,10]):
La,Lb,Lc,Ld,Le = inductance
Ca,Cb,Cc,Cd,Ce,Cf,Cg = capacitance
circuit = [C(0,1,Ca)]
circuit += [L(1,3,La,'Ltl',True),L(1,4,Lb,'Lbl',True)]
circuit += [J(3,2,10),J(4,2,10)]
circuit += [C(3,2,Cb),C(4,2,Cc)]
circuit += [C(2,5,Cd),C(2,6,Ce)]
circuit += [J(2,5,10),J(2,6,100)]
circuit += [C(2,0,Cf)]
circuit += [L(5,7,Lc,'Ltr',True),L(6,7,Ld,'Lbr',True)]
circuit += [L(1,7,Le,'Ll',True)]
circuit += [C(7,0,Cg)]
circuit = Circuit(circuit,basis)
return circuit
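# Illustrative usage convention (mirroring the __main__ block below): circuits take a basis dict of
# per-mode truncations such as {'O':[10],'I':[0],'J':[10,10]}, and kermanHamiltonianJosephson takes a
# dict such as {'I':tensor(.25)}, which presumably sets the external flux bias on the loop closed by the
# inductor declared with the trailing True flag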
if __name__=='__main__':
circuit = shuntedQubit()
print(circuit.modeDistribution())
circuit.basis = {'O':[10],'I':[0],'J':[10,10]}
H_LC = circuit.kermanHamiltonianLC()
H_J = circuit.kermanHamiltonianJosephson({'I':tensor(.25)})
import ipdb; ipdb.set_trace()
| 30.672414
| 101
| 0.613266
|