| column | type / stats |
|---|---|
| hexsha | stringlengths 40–40 |
| size | int64 5–2.06M |
| ext | stringclasses 10 values |
| lang | stringclasses 1 value |
| max_stars_repo_path | stringlengths 3–248 |
| max_stars_repo_name | stringlengths 5–125 |
| max_stars_repo_head_hexsha | stringlengths 40–78 |
| max_stars_repo_licenses | listlengths 1–10 |
| max_stars_count | int64 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_path | stringlengths 3–248 |
| max_issues_repo_name | stringlengths 5–125 |
| max_issues_repo_head_hexsha | stringlengths 40–78 |
| max_issues_repo_licenses | listlengths 1–10 |
| max_issues_count | int64 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_path | stringlengths 3–248 |
| max_forks_repo_name | stringlengths 5–125 |
| max_forks_repo_head_hexsha | stringlengths 40–78 |
| max_forks_repo_licenses | listlengths 1–10 |
| max_forks_count | int64 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths 24–24 ⌀ |
| content | stringlengths 5–2.06M |
| avg_line_length | float64 1–1.02M |
| max_line_length | int64 3–1.03M |
| alphanum_fraction | float64 0–1 |
| count_classes | int64 0–1.6M |
| score_classes | float64 0–1 |
| count_generators | int64 0–651k |
| score_generators | float64 0–1 |
| count_decorators | int64 0–990k |
| score_decorators | float64 0–1 |
| count_async_functions | int64 0–235k |
| score_async_functions | float64 0–1 |
| count_documentation | int64 0–1.04M |
| score_documentation | float64 0–1 |

360b21f79c3d1e633d2504158f0ac62516a639e7 | 666 | py | Python | bot/welcome_leave.py | Thorappan7/loki | 26bed530997907c93914d6ac42f4a2ad62dc365c | ["BSD-3-Clause"] | null | null | null | bot/welcome_leave.py | Thorappan7/loki | 26bed530997907c93914d6ac42f4a2ad62dc365c | ["BSD-3-Clause"] | null | null | null | bot/welcome_leave.py | Thorappan7/loki | 26bed530997907c93914d6ac42f4a2ad62dc365c | ["BSD-3-Clause"] | null | null | null |
from pyrogram import Client as bot, filters, emoji
MENTION = "[{}](tg://user?id={})"
text1="hi{} {} welcome to Group Chat"
group ="jangobotz"
@bot.on_message(filters.chat(group) &filters.new_chat_members)
async def welcome(bot, message):
new_members = [u.mention for u in message.new_chat_members]
TEXT2= text1.format(emoji.SPARKLES,",".join(new_members))
await message.reply_text(TEXT2)
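# The farewell message sent below is in Malayalam; roughly: "I'm not going to be the ant in your dreams anyway... the Earth is round, we'll meet again somewhere."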
@bot.on_message(filters.command("leave") &filters.group)
def leave(bot, message):
bot.send_message(message.chat.id, "നമ്മളില്ലേ...അല്ലേലും സ്വപ്നത്തിലെ കട്ടുറുമ്പ് ആവൻ ഞാനില്ല ..ഭൂമി ഉരുണ്ടതല്ലെ എവിടേലും വെച്ച് കാണാം")
bot.leave_chat(message.chat.id )
| 33.3 | 140 | 0.683183 | 0 | 0 | 0 | 0 | 687 | 0.82177 | 195 | 0.233254 | 346 | 0.413876 |

360b7ea47f3ce200b5ccf6c834ad2ed52c42e4f9 | 2,079 | py | Python | script.deluge/resources/lib/basictypes/xmlgenerator.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | ["MIT"] | null | null | null | script.deluge/resources/lib/basictypes/xmlgenerator.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | ["MIT"] | null | null | null | script.deluge/resources/lib/basictypes/xmlgenerator.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | ["MIT"] | null | null | null |
import locale
from xml.sax import saxutils
defaultEncoding = locale.getdefaultlocale()[-1]
class Generator(saxutils.XMLGenerator):
"""Friendly generator for XML code"""
def __init__(self, out=None, encoding="utf-8"):
"""Initialise the generator
Just overrides the default encoding of the base-class
"""
super(Generator, self).__init__(out, encoding)
def startElement(self, name, attributes=None):
"""Start a new element with given attributes"""
super(Generator, self).startElement(name, self._fixAttributes(attributes))
def _fixAttributes(self, attributes=None):
"""Fix an attribute-set to be all unicode strings"""
if attributes is None:
attributes = {}
for key, value in attributes.items():
if not isinstance(value, (str, unicode)):
attributes[key] = unicode(value)
elif isinstance(value, str):
attributes[key] = value.decode(defaultEncoding)
return attributes
class Store(Generator):
"""Store a set of objects to an XML representation"""
def __init__(self, *arguments, **named):
"""Initialise the store"""
super(Store, self).__init__(*arguments, **named)
self.classMapping = {
}
self.rClassMapping = {
}
self.todo = []
self.alreadyDone = {}
def classToElementName(self, classObject):
"""Get the element name for a given object"""
name = classObject.__name__
if self.rClassMapping.has_key(name):
return self.rClassMapping.get(name)
short = name.split('.')[-1]
count = 2
while self.classMapping.has_key(short):
short = short + str(count)
count += 1
self.classMapping[short] = name
self.rClassMapping[name] = short
return short
def encodeInAttributes(self, property, client):
"""Determine whether to encode this property as an element attribute"""
def handleObject(self, object):
"""Produce code for a single object"""
| 32.484375 | 82 | 0.619529 | 1,981 | 0.952862 | 0 | 0 | 0 | 0 | 0 | 0 | 481 | 0.231361 |

360ce588463dab38c7d8f02e3de4947c05f44448 | 4,877 | py | Python | scrape.py | darenr/contemporary-art--rss-scraper | 92d66d18712e781e6e96980004a17f810568e652 | ["MIT"] | null | null | null | scrape.py | darenr/contemporary-art--rss-scraper | 92d66d18712e781e6e96980004a17f810568e652 | ["MIT"] | null | null | null | scrape.py | darenr/contemporary-art--rss-scraper | 92d66d18712e781e6e96980004a17f810568e652 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import json
import codecs
import traceback
import sys
import requests
import requests_cache
import feedparser
import collections
from bs4 import BeautifulSoup
from urlparse import urlparse, urljoin
one_day = 60 * 60 * 24
requests_cache.install_cache(
'rss_cache', backend='sqlite', expire_after=one_day)
headers = {
'User-Agent': 'Mozilla/5.0'
}
def get_entry_formatted(mime_type, value):
if mime_type.lower() == 'text/html':
soup = BeautifulSoup(value, 'html5lib')
return ''.join(line.lstrip() for line in soup.getText().splitlines(True))
else:
return value;
def parse_content(mime_type, value):
if mime_type.lower() == 'text/html':
soup = BeautifulSoup(value, 'html5lib')
# scoop up all the text
result = {
"text": ''.join(line.lstrip() for line in soup.getText().splitlines(True))
}
if soup.find('img'):
result['imgurl'] = soup.find('img')['src']
return result
else:
return value
def get_entry_value(entry, key, feed):
#
# deals with differences between feeds
#
_key = feed['fields'][key] if 'fields' in feed and key in feed['fields'] else key
if _key in entry:
return entry[_key]
else:
print ' *', 'No', _key, "field in", entry
return None
def fetch_page_and_parse(feed, url):
print ' *', 'parsing page link:', url
page = requests.get(url, headers=headers)
result = {}
if page.status_code == 200:
soup = BeautifulSoup(page.text, 'html5lib')
if 'selector' in feed:
for img in soup.select(feed['selector']):
src = img['src'] if img.has_attr('src') else None
if not src:
src = img['srcset'] if img.has_attr('srcset') else None
if src:
if src.startswith('/'):
result['imgurl'] = urljoin(feed['url'], src)
else:
result['imgurl'] = src
break
else:
# look for og_image as the default
if soup.find('meta', {"property": "og:image"}):
if 'content' in soup.find('meta', {"property": "og:image"}):
result['imgurl'] = soup.find('meta', {"property": "og:image"})['content']
return result
def validate(record):
mandatory_fields = ['imgurl', 'description', 'title', 'link']
for field in mandatory_fields:
if not (field in record and record[field]):
print ' *', 'Missing field', field
return False
return True
def process_feed(feed):
print ' *', 'processing', feed['url']
rawxml = requests.get(feed['url'], headers=headers)
d = feedparser.parse(rawxml.text)
rows = []
for entry in d['entries']:
# standard fields:
record = {
"organization": feed['organization'],
"link": get_entry_value(entry, 'link', feed),
"title": get_entry_value(entry, 'title', feed),
"date": get_entry_value(entry, 'published', feed),
"user_tags": [],
"description": "",
"imgurl": ""
}
if 'category' in entry and entry['category']:
record['user_tags'].append(get_entry_formatted("text/html", entry["category"]))
if 'summary_detail' in entry and entry['summary_detail']:
m = parse_content(entry["summary_detail"]["type"], entry["summary_detail"]["value"])
if 'text' in m:
record["description"] = m['text']
if 'imgurl' in m:
record["imgurl"] = m['imgurl']
if 'media_thumbnail' in entry and entry['media_thumbnail']:
media_thumbnail = entry['media_thumbnail'][0]
if 'url' in media_thumbnail:
record["imgurl"] = media_thumbnail['url']
if 'tags' in entry and entry['tags']:
for x in entry['tags']:
if 'term' in x:
record['user_tags'].append(x['term'])
record['user_tags'] = list(set(record['user_tags']))
if not record['imgurl']:
m = fetch_page_and_parse(feed, record['link'])
for k in m:
record[k] = m[k]
if validate(record):
#
# any that fail to validate are just ignored
#
rows.append(record)
return rows
if __name__ == "__main__":
with codecs.open('sources.json', 'rb', 'utf-8') as f:
sources = json.loads(f.read().encode('utf-8'))
try:
ingest_rows = []
for feed in sources['feeds']:
ingest_rows += process_feed(feed)
print ' *', 'scraped %d records' % (len(ingest_rows))
except Exception, e:
traceback.print_exc()
print str(e)
| 29.029762 | 96 | 0.552184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,145 | 0.234775 |

360e9e36a16342872103b6bba5218132e5fe10ac | 3,102 | py | Python | src/main/admin_api/endpoint/table_endpoint.py | lemilliard/kibo-db | 2fa1832aa6a8457b428870491aaf64e399cca4d6 | ["MIT"] | null | null | null | src/main/admin_api/endpoint/table_endpoint.py | lemilliard/kibo-db | 2fa1832aa6a8457b428870491aaf64e399cca4d6 | ["MIT"] | null | null | null | src/main/admin_api/endpoint/table_endpoint.py | lemilliard/kibo-db | 2fa1832aa6a8457b428870491aaf64e399cca4d6 | ["MIT"] | null | null | null |
from src.main.common.model import endpoint
class TableEndpoint(endpoint.Endpoint):
@classmethod
def do_get(cls, *args, **kwargs):
from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
db_system_name = kwargs.get("db_system_name")
tb_system_name = kwargs.get("tb_system_name", None)
response = None
if tb_system_name is None:
descriptor_dicts = []
descriptors = DescriptorUtils.get_tbs_descriptor(db_system_name)
for d in descriptors:
descriptor_dicts.append(d.to_dict())
response = descriptor_dicts
else:
descriptor = DescriptorUtils.get_tb_descriptor_by_system_name(db_system_name, tb_system_name)
if descriptor is not None:
response = descriptor.to_dict()
return response
@classmethod
def do_post(cls, *args, **kwargs):
from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
from src.main.admin_api.model.table import Table
db_system_name = kwargs.get("db_system_name")
response = None
body = TableEndpoint.get_body()
name = body.get("name", None)
if name is not None:
descriptor = Table.from_json(body)
if not DescriptorUtils.does_tb_descriptor_exist(db_system_name, descriptor):
descriptor.save(db_system_name)
response = descriptor.to_dict()
return response
@classmethod
def do_put(cls, *args, **kwargs):
from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
db_system_name = kwargs.get("db_system_name")
tb_system_name = kwargs.get("tb_system_name")
response = None
body = TableEndpoint.get_body()
if tb_system_name is not None:
descriptor = DescriptorUtils.get_tb_descriptor_by_system_name(db_system_name, tb_system_name)
if descriptor is not None:
name = body.get("name", None)
if name is not None:
descriptor.set_name(name)
description = body.get("description", None)
if description is not None:
descriptor.set_description(description)
fields = body.get("fields", None)
if fields is not None:
descriptor.set_fields(fields)
descriptor.save(db_system_name)
response = descriptor.to_dict()
return response
@classmethod
def do_delete(cls, *args, **kwargs):
from src.main.admin_api.utils.descriptor_utils import DescriptorUtils
db_system_name = kwargs.get("db_system_name")
tb_system_name = kwargs.get("tb_system_name")
response = None
descriptor = DescriptorUtils.get_tb_descriptor_by_system_name(db_system_name, tb_system_name)
if descriptor is not None:
response = descriptor.delete(db_system_name)
return response
| 40.815789 | 106 | 0.624758 | 3,052 | 0.983881 | 0 | 0 | 2,981 | 0.960993 | 0 | 0 | 145 | 0.046744 |

360ffa9621191899023f1d394dd125777d985f49 | 10,326 | py | Python | tools/testbed_generator.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | ["Apache-2.0"] | null | null | null | tools/testbed_generator.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | ["Apache-2.0"] | 1 | 2021-06-01T22:18:29.000Z | 2021-06-01T22:18:29.000Z | tools/testbed_generator.py | lmadhusudhanan/contrail-test | bd39ff19da06a20bd79af8c25e3cde07375577cf | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
import yaml
import json
import sys
import re
import argparse
from distutils.version import LooseVersion
from collections import defaultdict
discovery_port = '5998'
config_api_port = '8082'
analytics_api_port = '8081'
control_port = '8083'
dns_port = '8092'
agent_port = '8085'
def get_neutron_username(params):
plugin_cfgs = [v['yaml_additional_config'] for k, v in params.iteritems()
if type(v) is dict and v.has_key('yaml_additional_config')]
for plugin_cfg in plugin_cfgs:
cfg = yaml.load(plugin_cfg)
try:
return cfg['ldap_service_users']['quantum_settings']['keystone']['admin_user']
except KeyError:
try:
return cfg['quantum_settings']['keystone']['admin_user']
except KeyError:
pass
def parse_astute_6(params):
astute_dict = defaultdict(list)
for node in params['nodes']:
for host in astute_dict['hosts']:
if host['host_name'] == node['name']:
host_dict = host
break
else:
host_dict = {'data_ip': node['private_address'],
'host_name': node['name'],
'mgmt_ip': node['internal_address']
}
host_dict['role'] = set()
if 'nova' in node['role'] or 'controller' in node['role']:
host_dict['role'].add('openstack')
if 'compute' in node['role']:
host_dict['role'].add('compute')
if 'contrail' in node['role']:
host_dict['role'].add('contrail_config')
host_dict['role'].add('contrail_control')
host_dict['role'].add('contrail_db')
if host_dict['role']:
astute_dict['hosts'].append(host_dict)
astute_dict['contrail_vip'] = params['management_vip']
return astute_dict
def parse_astute_7(params):
astute_dict = defaultdict(list)
for name, node_dict in params['network_metadata']['nodes'].iteritems():
host_dict = {'data_ip': node_dict['network_roles']['neutron/mesh'],
'host_name': name,
'mgmt_ip': node_dict['network_roles']['management']
}
host_dict['role'] = set()
for role in node_dict['node_roles']:
if 'nova' in role or 'controller' in role:
host_dict['role'].add('openstack')
if 'compute' in role:
host_dict['role'].add('compute')
if 'contrail-config' in role:
host_dict['role'].add('contrail_config')
if 'contrail-control' in role:
host_dict['role'].add('contrail_control')
if 'contrail-analytics' in role and not 'contrail-analytics-db' in role:
host_dict['role'].add('contrail_analytics')
if 'contrail-db' in role:
host_dict['role'].add('contrail_db')
if host_dict['role']:
astute_dict['hosts'].append(host_dict)
astute_dict['mgmt_vip'] = params['network_metadata']['vips']['management']['ipaddr']
astute_dict['contrail_vip'] = params['network_metadata']['vips']['contrail_priv']['ipaddr']
return astute_dict
def parse_astute(filename, version):
with open(filename, 'r') as fd:
params = yaml.load(fd)
if not version:
version = '7.1' if params.has_key('network_metadata') else '6.1'
if LooseVersion(version) < LooseVersion('7'):
astute_dict = parse_astute_6(params)
else:
astute_dict = parse_astute_7(params)
astute_dict['neutron_username'] = get_neutron_username(params)
return astute_dict
def parse_openrc(filename):
openrc_dict = dict()
openrc_values = dict()
for line in open(filename, 'r').readlines():
obj = re.match("export\s+(\w+)\s*=\s*(.*)", line)
if obj:
val = obj.group(2).strip("'")
val = val.strip('"')
openrc_values.update({obj.group(1):val})
openrc_dict['admin_tenant'] = openrc_values.get('OS_TENANT_NAME', '')
openrc_dict['admin_user'] = openrc_values.get('OS_USERNAME', '')
openrc_dict['admin_password'] = openrc_values.get('OS_PASSWORD', '')
openrc_dict['region_name'] = openrc_values.get('OS_REGION_NAME', '')
url = openrc_values['OS_AUTH_URL']
obj = re.match("(?P<protocol>\w+)://(?P<ip>\S+):(?P<port>\d+)", url)
if obj:
openrc_dict['auth_ip'] = obj.group('ip')
openrc_dict['auth_port'] = obj.group('port')
openrc_dict['auth_protocol'] = obj.group('protocol')
return openrc_dict
def gen_host_name(hostname):
special_char = ['-', ':', '.']
for char in special_char:
hostname = hostname.replace(char, '_')
return 'host_'+hostname
def fixup_tb_string(tb_string, hosts):
for host in hosts:
tb_string = tb_string.replace('"'+host+'"', host)
tb_string = tb_string.replace('null', 'None')
tb_string = tb_string.replace('true', 'True')
tb_string = tb_string.replace('false', 'False')
return tb_string
def create_testbed_file(pargs, astute_dict, openrc_dict):
tb_filename = pargs.tb_filename
host_string = set()
hosts = list()
env_roledefs = defaultdict(list)
control_data = {}
env_keystone = {}
env_test = {}
env_cfgm = {}
env_password = {}
login_name = pargs.login_username
is_analytics_isolated = False
for host_dict in astute_dict['hosts']:
host_name = gen_host_name(host_dict['host_name'])
hosts.append(host_name)
host_string.add("%s = '%s@%s'" %(host_name, login_name, host_dict['mgmt_ip']))
env_roledefs['all'].append(host_name)
env_password.update({host_name : 'c0ntrail123'})
control_data.update({host_name : {'ip': host_dict['data_ip'],
'gw': None}})
if 'openstack' in host_dict['role']:
env_roledefs['openstack'].append(host_name)
if 'contrail_config' in host_dict['role']:
env_roledefs['cfgm'].append(host_name)
env_roledefs['webui'].append(host_name)
if 'contrail_analytics' in host_dict['role']:
env_roledefs['collector'].append(host_name)
is_analytics_isolated = True
if 'contrail_control' in host_dict['role']:
env_roledefs['control'].append(host_name)
if 'contrail_db' in host_dict['role']:
env_roledefs['database'].append(host_name)
if 'compute' in host_dict['role']:
env_roledefs['compute'].append(host_name)
if not is_analytics_isolated:
for host_dict in astute_dict['hosts']:
if 'contrail_config' in host_dict['role']:
host_name = gen_host_name(host_dict['host_name'])
env_roledefs['collector'].append(host_name)
for k,v in env_roledefs.iteritems():
env_roledefs[k] = list(set(v))
env_keystone.update({'keystone_ip': openrc_dict['auth_ip']})
env_keystone.update({'auth_protocol': openrc_dict['auth_protocol']})
env_keystone.update({'auth_port': openrc_dict['auth_port']})
env_keystone.update({'admin_user': openrc_dict['admin_user']})
env_keystone.update({'admin_password': openrc_dict['admin_password']})
env_keystone.update({'admin_tenant': openrc_dict['admin_tenant']})
env_keystone.update({'region_name': openrc_dict['region_name']})
env_keystone.update({'insecure': 'True'})
env_test.update({'discovery_ip': astute_dict['contrail_vip']})
env_test.update({'config_api_ip': astute_dict['contrail_vip']})
env_test.update({'analytics_api_ip': astute_dict['contrail_vip']})
env_test.update({'discovery_port': discovery_port})
env_test.update({'config_api_port': config_api_port})
env_test.update({'analytics_api_port': analytics_api_port})
env_test.update({'control_port': control_port})
env_test.update({'dns_port': dns_port})
env_test.update({'agent_port': agent_port})
env_test.update({'user_isolation': False})
env_test.update({'neutron_username': astute_dict['neutron_username']})
env_test.update({'availability_zone': pargs.availability_zone_name})
if pargs.use_ssl:
env_cfgm.update({'auth_protocol': 'https'})
env_cfgm.update({'insecure': 'True'})
tb_list = list()
tb_list.append("env.test = %s"%json.dumps(env_test, sort_keys=True, indent=4))
tb_list.append("env.keystone = %s"%json.dumps(env_keystone, sort_keys=True, indent=4))
tb_list.append("env.cfgm = %s"%json.dumps(env_cfgm, sort_keys=True, indent=4))
tb_list.append("control_data = %s"%json.dumps(control_data, sort_keys=True, indent=4))
tb_list.append("env.roledefs = %s"%json.dumps(env_roledefs, sort_keys=True, indent=4))
tb_list.append("env.openstack_admin_password = '%s'"%
env_keystone['admin_password'])
tb_list.append("env.passwords = %s"%json.dumps(env_password, sort_keys=True, indent=4))
replaced_tb_string = fixup_tb_string('\n'.join(tb_list), hosts)
tb_list = ['from fabric.api import env']
tb_list.extend(sorted(host_string))
tb_list.append(replaced_tb_string)
with open(tb_filename, 'w+') as fd:
fd.write('\n'.join(tb_list))
def parse_cli(args):
parser = argparse.ArgumentParser(description='testbed.py file generator for MOS env')
parser.add_argument('--yaml_file', required=True, help='astute.yaml file path')
parser.add_argument('--openrc_file', help='openrc file path')
parser.add_argument('--mos_version', help='mos version in use, optional')
parser.add_argument('--availability_zone_name', help='Name of the nova availability zone to use for test', default='nova')
parser.add_argument('--login_username', help='Username to use to login to the hosts (default: root)', default='root')
parser.add_argument('--tb_filename', default='testbed.py', help='Testbed output file name')
parser.add_argument('--use_ssl', action='store_true', help='Use https communication with contrail-api service')
return parser.parse_args(args)
def main(args):
pargs = parse_cli(args)
astute_dict = parse_astute(pargs.yaml_file, pargs.mos_version)
openrc_dict = parse_openrc(pargs.openrc_file)
create_testbed_file(pargs, astute_dict, openrc_dict)
if __name__ == "__main__":
main(sys.argv[1:])
| 43.56962 | 126 | 0.643521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,850 | 0.276002 |

3610620368663e7a20b5544000c84c6865a97120 | 88 | py | Python | sum of digits using recursion.py | kingRovo/PythonCodingChalenge | b62938592df10ccafec9930b69c14c778e19ad37 | ["bzip2-1.0.6"] | 1 | 2021-08-02T16:52:55.000Z | 2021-08-02T16:52:55.000Z | sum of digits using recursion.py | kingRovo/PythonCodingChalenge | b62938592df10ccafec9930b69c14c778e19ad37 | ["bzip2-1.0.6"] | null | null | null | sum of digits using recursion.py | kingRovo/PythonCodingChalenge | b62938592df10ccafec9930b69c14c778e19ad37 | ["bzip2-1.0.6"] | null | null | null |
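# Note: despite the file name, this recursion returns the sum of the integers 1..n, not the sum of n's digits.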
def rec_sum(n):
if(n<=1):
return n
else:
return(n+rec_sum(n-1))
| 14.666667 | 30 | 0.477273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |

36111dceb7e38307b2a633510d6f416394679b79 | 9,292 | py | Python | visualization/POF/data/Base2DReader.py | alvaro-budria/body2hands | 0eba438b4343604548120bdb03c7e1cb2b08bcd6 | ["BSD-3-Clause"] | 63 | 2021-05-14T02:55:16.000Z | 2022-03-13T01:51:12.000Z | visualization/POF/data/Base2DReader.py | human2b/body2hands | 8ab4b206dc397c3b326f2b4ec9448c84ee8801fe | ["BSD-3-Clause"] | 9 | 2021-06-24T09:59:41.000Z | 2021-12-31T08:15:20.000Z | visualization/POF/data/Base2DReader.py | human2b/body2hands | 8ab4b206dc397c3b326f2b4ec9448c84ee8801fe | ["BSD-3-Clause"] | 9 | 2021-05-17T03:33:28.000Z | 2022-02-17T02:30:44.000Z |
import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
class Base2DReader(BaseReader):
# inherit from BaseReader, implement different 2D cropping (cropping from 2D)
def __init__(self, objtype=0, shuffle=True, batch_size=1, crop_noise=False):
super(Base2DReader, self).__init__(objtype, shuffle, batch_size, crop_noise)
def get(self, withPAF=True, read_image=True, imw=1920, imh=1080):
assert type(withPAF) == bool
assert self.objtype in (0, 1)
# produce data from slice_input_producer
flow_list = tf.train.slice_input_producer(list(self.tensor_dict.values()), shuffle=self.shuffle)
flow_dict = {key: flow_list[ik] for ik, key in enumerate(self.tensor_dict.keys())}
# build data dictionary
data_dict = {}
data_dict['img_dir'] = flow_dict['img_dirs']
PAF_given = False
if self.objtype == 0:
body2d = flow_dict['body']
data_dict['body_valid'] = flow_dict['body_valid']
data_dict['keypoint_uv_origin'] = body2d
if 'body_3d' in flow_dict:
data_dict['keypoint_xyz_origin'] = flow_dict['body_3d']
data_dict['keypoint_xyz_local'] = flow_dict['body_3d']
PAF_given = True
elif self.objtype == 1:
cond_left = tf.reduce_any(tf.cast(flow_dict['left_hand_valid'], dtype=tf.bool)) # 0 for right hand, 1 for left hand
hand2d = tf.cond(cond_left, lambda: flow_dict['left_hand'], lambda: flow_dict['right_hand']) # in world coordinate
hand2d = tf.cast(hand2d, tf.float32)
data_dict['keypoint_uv_origin'] = hand2d
data_dict['left_hand_valid'] = flow_dict['left_hand_valid']
data_dict['right_hand_valid'] = flow_dict['right_hand_valid']
if 'left_hand_3d' in flow_dict and 'right_hand_3d' in flow_dict:
hand3d = tf.cond(cond_left, lambda: flow_dict['left_hand_3d'], lambda: flow_dict['right_hand_3d'])
data_dict['keypoint_xyz_origin'] = hand3d
data_dict['keypoint_xyz_local'] = hand3d
PAF_given = True
# read image
if read_image:
img_file = tf.read_file(flow_dict['img_dirs'])
image = tf.image.decode_image(img_file, channels=3)
image = tf.image.pad_to_bounding_box(image, 0, 0, imh, imw)
image.set_shape((imh, imw, 3))
image = tf.cast(image, tf.float32) / 255.0 - 0.5
data_dict['image'] = image
if 'mask_dirs' in flow_dict:
mask_file = tf.read_file(flow_dict['mask_dirs'])
mask = tf.image.decode_image(mask_file, channels=3)
mask = tf.image.pad_to_bounding_box(mask, 0, 0, imh, imw)
mask.set_shape((imh, imw, 3))
mask = mask[:, :, 0]
mask = tf.cast(mask, tf.float32)
else:
mask = tf.ones((imh, imw), dtype=tf.float32)
if 'other_bbox' in flow_dict:
ob = flow_dict['other_bbox']
Xindmap = tf.tile(tf.expand_dims(tf.range(imw, dtype=tf.int32), 0), [imh, 1])
Xindmap = tf.tile(tf.expand_dims(Xindmap, 2), [1, 1, 20])
Yindmap = tf.tile(tf.expand_dims(tf.range(imh, dtype=tf.int32), 1), [1, imw])
Yindmap = tf.tile(tf.expand_dims(Yindmap, 2), [1, 1, 20])
x_out = tf.logical_or(tf.less(Xindmap, ob[:, 0]), tf.greater_equal(Xindmap, ob[:, 2]))
y_out = tf.logical_or(tf.less(Yindmap, ob[:, 1]), tf.greater_equal(Yindmap, ob[:, 3]))
out = tf.cast(tf.logical_or(x_out, y_out), tf.float32)
out = tf.reduce_min(out, axis=2)
mask = tf.minimum(mask, out)
data_dict['mask'] = mask
if self.objtype in (0, 1):
if self.objtype == 0:
keypoints = body2d
valid = flow_dict['body_valid']
elif self.objtype == 1:
keypoints = hand2d
body2d = hand2d
valid = tf.cond(cond_left, lambda: flow_dict['left_hand_valid'], lambda: flow_dict['right_hand_valid'])
data_dict['hand_valid'] = valid
if PAF_given:
body3d = hand3d
crop_center2d, scale2d = self.calc_crop_scale2d(keypoints, valid)
data_dict['crop_center2d'] = crop_center2d
data_dict['scale2d'] = scale2d
if self.rotate_augmentation:
print('using rotation augmentation')
rotate_angle = tf.random_uniform([], minval=-np.pi * 40 / 180, maxval=np.pi * 40 / 180)
R2 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), tf.sin(rotate_angle), tf.cos(rotate_angle)]), [2, 2])
body2d = tf.matmul((body2d - crop_center2d), R2) + crop_center2d
data_dict['keypoint_uv_origin'] = body2d
if PAF_given:
R3 = tf.reshape(tf.stack([tf.cos(rotate_angle), -tf.sin(rotate_angle), 0., tf.sin(rotate_angle), tf.cos(rotate_angle), 0., 0., 0., 1.]), [3, 3])
body3d = tf.matmul(body3d, R3)
data_dict['keypoint_xyz_origin'] = body3d
data_dict['keypoint_xyz_local'] = body3d
body2d_local = self.update_keypoint2d(body2d, crop_center2d, scale2d)
data_dict['keypoint_uv_local'] = body2d_local
if read_image:
image_crop = self.crop_image(image, crop_center2d, scale2d)
data_dict['image_crop'] = image_crop
mask_crop = self.crop_image(tf.stack([mask] * 3, axis=2), crop_center2d, scale2d)
data_dict['mask_crop'] = mask_crop[:, :, 0]
if self.rotate_augmentation:
data_dict['image_crop'] = tf.contrib.image.rotate(data_dict['image_crop'], rotate_angle)
data_dict['mask_crop'] = tf.contrib.image.rotate(data_dict['mask_crop'], rotate_angle)
if self.blur_augmentation:
print('using blur augmentation')
rescale_factor = tf.random_uniform([], minval=0.1, maxval=1.0)
rescale = tf.cast(rescale_factor * self.crop_size, tf.int32)
resized_image = tf.image.resize_images(data_dict['image_crop'], [rescale, rescale])
data_dict['image_crop'] = tf.image.resize_images(resized_image, [self.crop_size, self.crop_size])
# create 2D gaussian map
scoremap2d = self.create_multiple_gaussian_map(body2d_local[:, ::-1], (self.crop_size, self.crop_size), self.sigma, valid_vec=valid, extra=True) # coord_hw, imsize_hw
data_dict['scoremap2d'] = scoremap2d
if withPAF:
from utils.PAF import createPAF
num_keypoint = body2d_local.get_shape().as_list()[0]
zeros = tf.zeros([num_keypoint, 1], dtype=tf.float32)
if PAF_given:
data_dict['PAF'] = createPAF(body2d_local, body3d, self.objtype, (self.crop_size, self.crop_size), normalize_3d=True, valid_vec=valid)
data_dict['PAF_type'] = tf.ones([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
else:
data_dict['PAF'] = createPAF(body2d_local, tf.concat([body2d, zeros], axis=1), self.objtype, (self.crop_size, self.crop_size), normalize_3d=False, valid_vec=valid)
data_dict['PAF_type'] = tf.zeros([], dtype=bool) # 0 for 2D PAF, 1 for 3D PAF
if self.objtype == 1: # this is hand, flip the image if it is right hand
data_dict['image_crop'] = tf.cond(cond_left, lambda: data_dict['image_crop'], lambda: data_dict['image_crop'][:, ::-1, :])
data_dict['mask_crop'] = tf.cond(cond_left, lambda: data_dict['mask_crop'], lambda: data_dict['mask_crop'][:, ::-1])
data_dict['scoremap2d'] = tf.cond(cond_left, lambda: data_dict['scoremap2d'], lambda: data_dict['scoremap2d'][:, ::-1, :])
data_dict['keypoint_uv_local'] = tf.cond(cond_left, lambda: data_dict['keypoint_uv_local'],
lambda: tf.constant([self.crop_size, 0], tf.float32) + tf.constant([-1, 1], tf.float32) * data_dict['keypoint_uv_local'])
if withPAF:
data_dict['PAF'] = tf.cond(cond_left, lambda: data_dict['PAF'],
lambda: (data_dict['PAF'][:, ::-1, :]) * tf.constant([-1, 1, 1] * (data_dict['PAF'].get_shape().as_list()[2] // 3), dtype=tf.float32))
names, tensors = zip(*data_dict.items())
if self.shuffle:
tensors = tf.train.shuffle_batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
min_after_dequeue=50,
enqueue_many=False)
else:
tensors = tf.train.batch_join([tensors],
batch_size=self.batch_size,
capacity=100,
enqueue_many=False)
return dict(zip(names, tensors))
| 57.006135 | 183 | 0.574365 | 9,207 | 0.990852 | 0 | 0 | 0 | 0 | 0 | 0 | 1,328 | 0.142919 |

361199dea80437ba6ce5df8eea417f22ea366fce | 301 | py | Python | api/indexer/tzprofiles_indexer/models.py | clehner/tzprofiles | e44497bccf28d2d75cfdfa0c417dbecc0f342c12 | ["Apache-2.0"] | null | null | null | api/indexer/tzprofiles_indexer/models.py | clehner/tzprofiles | e44497bccf28d2d75cfdfa0c417dbecc0f342c12 | ["Apache-2.0"] | null | null | null | api/indexer/tzprofiles_indexer/models.py | clehner/tzprofiles | e44497bccf28d2d75cfdfa0c417dbecc0f342c12 | ["Apache-2.0"] | null | null | null |
from tortoise import Model, fields
class TZProfile(Model):
account = fields.CharField(36, pk=True)
contract = fields.CharField(36)
valid_claims = fields.JSONField()
invalid_claims = fields.JSONField()
errored = fields.BooleanField()
class Meta:
table = "tzprofiles"
| 23.153846 | 43 | 0.69103 | 263 | 0.873754 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.039867 |

3611a8921184c2a719ec2f7a6c28b90498243d94 | 6,006 | py | Python | pyscripts/Backups/wikipull.py | mrchaos10/AGRICULTURAL-DOMAIN-SPECIES-IDENTIFICATION-AND-SEMI-SUPERVISED-QUERYING-SYSTEM | 2697c806e4de565767efac276d58b3b3696e4893 | ["MIT"] | null | null | null | pyscripts/Backups/wikipull.py | mrchaos10/AGRICULTURAL-DOMAIN-SPECIES-IDENTIFICATION-AND-SEMI-SUPERVISED-QUERYING-SYSTEM | 2697c806e4de565767efac276d58b3b3696e4893 | ["MIT"] | null | null | null | pyscripts/Backups/wikipull.py | mrchaos10/AGRICULTURAL-DOMAIN-SPECIES-IDENTIFICATION-AND-SEMI-SUPERVISED-QUERYING-SYSTEM | 2697c806e4de565767efac276d58b3b3696e4893 | ["MIT"] | null | null | null |
#api for extracting the results from wikidata
#https://www.wikidata.org/w/api.php?search=las&language=en&uselang=en&format=jsonfm&limit=25&action=wbsearchentities
# importing modules
import requests
from lxml import etree
import wikipedia
import sys
import re
import pickle
import numpy as np
import os
import sys
import pandas as pd
import seaborn as sns
import matplotlib as plt
from tensorflow import keras
from nltk.corpus import stopwords
from nltk.corpus import wordnet,words
# Ignore warnings
import warnings
warnings.filterwarnings('ignore')
SEARCHPAGE = str(sys.argv[1])
page=wikipedia.WikipediaPage(SEARCHPAGE)
content=page.content
content_list=content.split('.')
#for i in content_list:
# print(i)
pd.set_option('display.max_columns', None)
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(precision=3)
sns.set(style="darkgrid")
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
max_words = 50
tokenize = keras.preprocessing.text.Tokenizer(num_words=max_words, char_level=False)
#defining the text fields from training set as train_text and similiarly test_text
train_text= pd.DataFrame({'words':content_list})
#print(train_text)
#print("################################### TRAINING DATASET DESCRIPTION ###############################################################")
#print(train_text.describe())
#remove unwanted from the questions
#query = 'What is Nahuatl word for tomato and how did Aztecs called tomato ?'
query=str(sys.argv[2])
stopperwords = ['what','where','when','who','which','whom','whose','why','how','?']
querywords = query.split()
resultwords = [word for word in querywords if word.lower() not in stopperwords]
result = ' '.join(resultwords)
result=result.replace('?','')
#print(result)
stop_words = set(stopwords.words('english'))
word_tokens = result.split(' ')
filtered_sentence = [w for w in word_tokens if not w in stop_words]
filtered_sentence = []
for w in word_tokens:
if w not in stop_words:
filtered_sentence.append(w)
result=filtered_sentence
#print(result)
syn_result=[]
ant_result=[]
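# Cosine similarity over binary bag-of-words vectors built from the union of the two keyword sets.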
def similiarity(X_set,Y_set):
l1 =[];l2 =[]
rvector = X_set.union(Y_set)# form a set containing keywords of both strings
for w in rvector:
if w in X_set: l1.append(1) # create a vector
else: l1.append(0)
if w in Y_set: l2.append(1)
else: l2.append(0)
c = 0
# cosine formula
for i in range(len(rvector)):
c+= l1[i]*l2[i]
cosine = c / float((sum(l1)*sum(l2))**0.5)
return cosine
def jaccard_similarity(query, document):
intersection = query.intersection(document)
union = query.union(document)
return len(intersection)/len(union)
def cosine_distance_wordembedding_method(s1, s2):
import scipy
vector_1 = np.mean([model[word] for word in preprocess(s1)],axis=0)
vector_2 = np.mean([model[word] for word in preprocess(s2)],axis=0)
cosine = scipy.spatial.distance.cosine(vector_1, vector_2)
print('Word Embedding method with a cosine distance asses that our two sentences are similar to',round((1-cosine)*100,2),'%')
def google_encoder_similiarity(sentences):
import tensorflow as tf
import tensorflow_hub as hub
module_url = "https://tfhub.dev/google/universal-sentence-encoder/2"
embed = hub.Module(module_url)
#sentences = ["Python is a good language","Language a good python is"]
similarity_input_placeholder = tf.placeholder(tf.string, shape=(None))
similarity_sentences_encodings = embed(similarity_input_placeholder)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
sentences_embeddings = session.run(similarity_sentences_encodings, feed_dict={similarity_input_placeholder: sentences})
similarity = np.inner(sentences_embeddings[0], sentences_embeddings[1])
print("Similarity is %s" % similarity)
for res in result:
synonyms = []
antonyms = []
for syn in wordnet.synsets(res):
for l in syn.lemmas():
synonyms.append(l.name())
if l.antonyms():
antonyms.append(l.antonyms()[0].name())
syn_result.append(synonyms)
ant_result.append(antonyms)
#print(syn_result)
simil=[]
jaccard_simil=[]
for ind in train_text.index:
sentence=str(train_text['words'][ind])
stop_words = set(stopwords.words('english'))
word_tokens = re.sub(r"[^a-zA-Z0-9]+", ' ', sentence).split(' ')
filtered_sentence = [w for w in word_tokens if not w in stop_words]
filtered_sentence = []
for w in word_tokens:
if w not in stop_words and len(w)>=3 :
#print(w)
filtered_sentence.append(w)
#print(filtered_sentence)
X_set = {w for w in filtered_sentence}
Y_set = {w for w in result}
if len(filtered_sentence)>=1:
sim=similiarity(X_set,Y_set)
simil.append(sim)
jaccard_simil.append(jaccard_similarity(X_set,Y_set))
else:
simil.append(0)
jaccard_simil.append(0)
#str1=" ";str2=" "
#QA=[str1.join(filtered_sentence),str2.join(result)]
#print(QA)
#google_encoder_similiarity(QA)
#cosine similiarity of question with each sentence is found
#print(simil)
result_text= pd.DataFrame({'sentence':content_list,'cosine_similiarity':simil,'jaccard_similiarity':jaccard_simil})
#print(result_text)
result_text.to_csv('simils.csv')
#for visualization purposes
result_text.plot(x='sentence', y='cosine_similiarity')
result_text.plot(x='sentence', y='jaccard_similiarity')
max=result_text.max()
max_cos=max.cosine_similiarity
max_jac=max.jaccard_similiarity
filter1 = result_text['cosine_similiarity']==max_cos
filter2 = result_text['jaccard_similiarity']==max_jac
res_record=result_text.loc[(result_text['cosine_similiarity'] == max_cos) & (result_text['jaccard_similiarity']==max_jac)]
res_sent=res_record.sentence.item()
print(res_sent)
| 32.290323 | 141 | 0.705295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,671 | 0.278222 |

3612229195c84fc7e099e8d1a5caa6236355676b | 327 | py | Python | alice.py | atamurad/coinflip | ded3877c808baae843b55c1cfa4685459ba71b29 | ["MIT"] | 1 | 2022-02-24T09:29:53.000Z | 2022-02-24T09:29:53.000Z | alice.py | atamurad/coinflip | ded3877c808baae843b55c1cfa4685459ba71b29 | ["MIT"] | null | null | null | alice.py | atamurad/coinflip | ded3877c808baae843b55c1cfa4685459ba71b29 | ["MIT"] | null | null | null |
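# Alice's half of a coin-flip-by-telephone round: pick a random x mod N, reveal x^2 mod N,
# and declare heads if Bob's guess matches the Jacobi symbol of x.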
from Crypto.Util.number import getRandomRange
from sympy.ntheory.residue_ntheory import jacobi_symbol
N = int(input("N ? "))
x = getRandomRange(2, N)
x2 = (x*x) % N
J = jacobi_symbol(x, N)
print(f"x2 = {x2}")
guess = int(input("j_guess ? "))
print(f"x = {x}")
print("Outcome = Heads" if guess == J else "Outcome = Tails")
| 20.4375 | 61 | 0.663609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.2263 |

36134c0670c8fbaeb545400c9c8d63641cf7bd8e | 248 | py | Python | accounts/management/commands/run-stats.py | ChristianJStarr/Scratch-Bowling-Series-Website | 283c7b1b38ffce660464889de3f4dc8050b4008c | ["MIT"] | 1 | 2021-05-19T19:30:40.000Z | 2021-05-19T19:30:40.000Z | accounts/management/commands/run-stats.py | ChristianJStarr/Scratch-Bowling-Series-Website | 283c7b1b38ffce660464889de3f4dc8050b4008c | ["MIT"] | null | null | null | accounts/management/commands/run-stats.py | ChristianJStarr/Scratch-Bowling-Series-Website | 283c7b1b38ffce660464889de3f4dc8050b4008c | ["MIT"] | null | null | null |
from django.core.management.base import BaseCommand, CommandError
from scoreboard.ranking import calculate_statistics
class Command(BaseCommand):
help = 'Run Statistics'
def handle(self, *args, **options):
calculate_statistics()
| 24.8 | 65 | 0.758065 | 127 | 0.512097 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.064516 |

36136b9058bdd45bb7644ba4b0f512b2d1902d42 | 796 | py | Python | LeetCode/TwoSum.py | batumoglu/Python_Algorithms | f586f386693eaddb64d6a654a89af177fd0e838f | ["MIT"] | null | null | null | LeetCode/TwoSum.py | batumoglu/Python_Algorithms | f586f386693eaddb64d6a654a89af177fd0e838f | ["MIT"] | null | null | null | LeetCode/TwoSum.py | batumoglu/Python_Algorithms | f586f386693eaddb64d6a654a89af177fd0e838f | ["MIT"] | null | null | null |
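# Single-pass hash-map approach: remember each value's index and look up the complement (target - current) in O(1).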
class Solution(object):
def twoSum(self, nums, target):
seen = {}
output = []
for i in range(len(nums)):
k = target - nums[i]
if k in seen:
output.append(seen[k])
output.append(i)
del seen[k]
else:
seen[nums[i]] = i
return output
class Solution2(object):
"""
If there is exactly one solution
"""
def twoSum(self, nums, target):
h = {}
for i, num in enumerate(nums):
n = target - num
if n not in h:
h[num] = i
else:
return [h[n], i]
if __name__ == '__main__':
sol = Solution()
print(sol.twoSum([2,7,11,15], 9))
print(sol.twoSum([3,3], 6))
| 24.121212 | 38 | 0.442211 | 675 | 0.84799 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.072864 |

3613905669a706db1108a17ee990707e01f2f9a0 | 9,028 | py | Python | src/rospy_crazyflie/crazyflie_server/crazyflie_control.py | JGSuw/rospy_crazyflie | 696aef900138c764419d33e2c8d44ca3f3e33fa1 | ["BSD-2-Clause-FreeBSD"] | 5 | 2019-07-26T22:19:53.000Z | 2021-03-04T12:44:35.000Z | src/rospy_crazyflie/crazyflie_server/crazyflie_control.py | JGSuw/rospy_crazyflie | 696aef900138c764419d33e2c8d44ca3f3e33fa1 | ["BSD-2-Clause-FreeBSD"] | 4 | 2021-02-17T23:30:48.000Z | 2021-11-29T18:33:05.000Z | src/rospy_crazyflie/crazyflie_server/crazyflie_control.py | JGSuw/rospy_crazyflie | 696aef900138c764419d33e2c8d44ca3f3e33fa1 | ["BSD-2-Clause-FreeBSD"] | 1 | 2019-04-24T19:00:31.000Z | 2019-04-24T19:00:31.000Z |
"""
Copyright (c) 2018, Joseph Sullivan
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the <project name> project.
"""
import numpy as np
import pickle
import time
from cflib.crazyflie import Crazyflie
from cflib.positioning.motion_commander import MotionCommander
import rospy
import actionlib
from std_msgs.msg import UInt16
from geometry_msgs.msg import Vector3
from rospy_crazyflie.msg import *
from rospy_crazyflie.srv import *
from rospy_crazyflie.motion_commands import *
class CrazyflieControl:
def __init__(self, name, crazyflie):
# Instantiate motion commander
self._cf = crazyflie
self._name = name
self._mc = MotionCommander(self._cf)
# Topic Publishers
self._velocity_setpoint_pub = rospy.Publisher(
self._name + '/velocity_setpoint',
Vector3,
queue_size = 10
)
"""
Services hosted for this crazyflie controller
"""
self._reset_position_estimator_srv = rospy.Service(
self._name + '/reset_position_estimator',
ResetPositionEstimator,
self._reset_position_estimator_cb
)
self._send_hover_setpoint_srv = rospy.Service(
self._name + '/send_hover_setpoint',
SendHoverSetpoint,
self._send_hover_setpoint_cb
)
self._set_param_srv = rospy.Service(
self._name + '/set_param',
SetParam,
self._set_param_cb
)
self._velocity_control_srv = rospy.Service(
self._name + '/velocity_control',
VelocityControl,
self._velocity_control_cb
)
"""
Action servers for this crazyflie controller
"""
self._position_control_as = actionlib.SimpleActionServer(
self._name + '/position_control',
PositionControlAction,
self._position_control_cb,
False
)
self._position_control_as.start()
"""
Service Callbacks
"""
def _reset_position_estimator_cb(self, req):
pass
def _send_hover_setpoint_cb(self, req):
vx = req.vx
vy = req.vy
z = req.z
yaw_rate = req.yaw_rate
self._cf.commander.send_hover_setpoint(vx, vy, yaw_rate, z)
return []
def _set_param_cb(self, req):
self._cf.param.set_value(req.param, req.value)
print("set %s to %s" % (req.param, req.value))
return SetParamResponse()
def _velocity_control_cb(self, req):
try:
obj = pickle.loads(req.pickle)
print(self._mc)
if isinstance(obj, SetVelSetpoint):
self._mc._set_vel_setpoint(obj.vx, obj.vy, obj.vz, obj.rate_yaw)
elif isinstance(obj, StartBack):
self._mc.start_back(velocity = obj.velocity)
elif isinstance(obj, StartCircleLeft):
self._mc.start_circle_left(obj.radius_m, velocity = obj.velocity)
elif isinstance(obj, StartCircleRight):
self._mc.start_turn_right(obj.radius_m, velocity = obj.velocity)
elif isinstance(obj, StartDown):
self._mc.start_down(velocity = obj.velocity)
elif isinstance(obj, StartForward):
self._mc.start_forward(velocity = obj.velocity)
elif isinstance(obj, StartLeft):
self._mc.start_left(velocity = obj.velocity)
elif isinstance(obj, StartLinearMotion):
self._mc.start_linear_motion(obj.vx, obj.vy, obj.vz)
elif isinstance(obj, StartRight):
self._mc.start_right(velocity = obj.velocity)
elif isinstance(obj, StartTurnLeft):
self._mc.start_turn_left(rate = obj.rate)
elif isinstance(obj, StartTurnRight):
self._mc.start_turn_right(rate = obj.rate)
elif isinstance(obj, StartUp):
self._mc.start_up(velocity = obj.velocity)
elif isinstance(obj, Stop):
self._mc.stop()
else:
return 'Object is not a valid velocity command'
except Exception as e:
print(str(e))
raise e
return 'ok'
"""
Action Implementations
"""
def _position_control_cb(self, goal):
try:
obj = pickle.loads(goal.pickle)
if isinstance(obj, Back):
self._mc.back(obj.distance_m, velocity=obj.velocity)
elif isinstance(obj, CircleLeft):
self._mc.circle_left(obj.radius_m,
velocity = obj.velocity,
angle_degrees = obj.angle_degrees
)
elif isinstance(obj, CircleRight):
self._mc.circle_right(obj.radius_m,
velocity = obj.velocity,
angle_degrees = obj.angle_degrees
)
elif isinstance(obj, Down):
self._mc.down(obj.distance_m, velocity=obj.velocity)
elif isinstance(obj, Forward):
self._mc.forward(obj.distance_m, velocity=obj.velocity)
elif isinstance(obj, Land):
self._mc.land(velocity=obj.velocity)
elif isinstance(obj, Left):
self._mc.left(obj.distance_m, velocity=obj.velocity)
elif isinstance(obj, MoveDistance):
self._mc.move_distance(obj.x, obj.y, obj.z, velocity=obj.velocity)
elif isinstance(obj, Right):
self._mc.right(obj.distance_m, velocity=obj.velocity)
elif isinstance(obj, TakeOff):
self._mc.take_off(height=obj.height, velocity = obj.velocity)
elif isinstance(obj, TurnLeft):
self._mc.turn_left(obj.angle_degrees, rate=obj.rate)
elif isinstance(obj, TurnRight):
self._mc.turn_right(obj.angle_degrees, rate=obj.rate)
elif isinstance(obj, Up):
self._mc.up(obj.distance_m, velocity=obj.velocity)
except Exception as e:
print('Exception in action server %s' % self._name + '/position_control')
print(str(e))
print('Action aborted')
self._position_control_as.set_aborted()
return
self._position_control_as.set_succeeded()
def _takeoff(self, goal):
try:
self._mc.take_off(height = goal.height)
time.sleep(5)
except BaseException as e:
self._takeoff_as.set_aborted()
print(e)
return
self._takeoff_as.set_succeeded(TakeOffResult(True))
def _land(self, goal):
try:
self._mc.land(velocity=goal.velocity)
except BaseException as e:
self._land_as.set_aborted()
print(e)
return
self._land_as.set_succeeded(LandResult(True))
def _move_distance(self, goal):
try:
x = goal.x
y = goal.y
z = goal.z
velocity = goal.velocity
dist = np.sqrt(x**2 + y**2 + z**2)
vx = x / dist * velocity
vy = y / dist * velocity
vz = z / dist * velocity
# self._velocity_setpoint_pub.publish(Vector3(vx, vy, vz))
self._mc.move_distance(x, y, z, velocity = velocity)
# self._velocity_setpoint_pub.publish(Vector3(vx, vy, vz))
except BaseException as e:
self._move_distance_as.set_aborted()
print(e)
return
self._move_distance_as.set_succeeded()
| 37.305785 | 85 | 0.620735 | 7,131 | 0.789876 | 0 | 0 | 0 | 0 | 0 | 0 | 2,144 | 0.237483 |

3613d5c133ef8f38bb7353d844f6628f9fe5e6c6 | 901 | py | Python | examples/imagenet_resnet50.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | ["Apache-2.0"] | 1 | 2021-01-27T12:20:35.000Z | 2021-01-27T12:20:35.000Z | examples/imagenet_resnet50.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | ["Apache-2.0"] | null | null | null | examples/imagenet_resnet50.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | ["Apache-2.0"] | null | null | null |
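# Times batched ResNet50 inference over an 'imagenet/' directory, then classifies two standalone images.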
import numpy as np
import time
from inaccel.keras.applications.resnet50 import decode_predictions, ResNet50
from inaccel.keras.preprocessing.image import ImageDataGenerator, load_img
model = ResNet50(weights='imagenet')
data = ImageDataGenerator(dtype='int8')
images = data.flow_from_directory('imagenet/', target_size=(224, 224), class_mode=None, batch_size=64)
begin = time.monotonic()
preds = model.predict(images, workers=16)
end = time.monotonic()
print('Duration for', len(preds), 'images: %.3f sec' % (end - begin))
print('FPS: %.3f' % (len(preds) / (end - begin)))
dog = load_img('data/dog.jpg', target_size=(224, 224))
dog = np.expand_dims(dog, axis=0)
elephant = load_img('data/elephant.jpg', target_size=(224, 224))
elephant = np.expand_dims(elephant, axis=0)
images = np.vstack([dog, elephant])
preds = model.predict(images)
print('Predicted:', decode_predictions(preds, top=1))
| 30.033333 | 102 | 0.739179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.127636 |

3613f877b238035dadd508a419d964a6d0b3a50e | 1,084 | py | Python | api/permissions.py | letsdowork/yamdb_api | f493309dc52528d980463047d311d898714f3111 | ["MIT"] | null | null | null | api/permissions.py | letsdowork/yamdb_api | f493309dc52528d980463047d311d898714f3111 | ["MIT"] | null | null | null | api/permissions.py | letsdowork/yamdb_api | f493309dc52528d980463047d311d898714f3111 | ["MIT"] | null | null | null |
from rest_framework.permissions import BasePermission, SAFE_METHODS
from .models import User
class IsAdminOrReadOnly(BasePermission):
def has_permission(self, request, view):
return bool(
request.method in SAFE_METHODS or
request.user and request.user.is_authenticated and
request.user.is_admin() or
request.user.is_superuser)
class IsOwnerOrAdmin(BasePermission):
def has_object_permission(self, request, view, obj):
return bool(
obj.author == request.user or
request.user and request.user.is_authenticated and
request.user.is_admin() or
request.user.is_superuser)
class IsOwnerOrAllStaff(BasePermission):
ALLOWED_USER_ROLES = (User.Roles.MODERATOR, User.Roles.ADMIN)
def has_object_permission(self, request, view, obj):
return bool(
obj.author == request.user or
request.user and request.user.is_authenticated and
request.user.role in self.ALLOWED_USER_ROLES or
request.user.is_superuser)
| 31.882353 | 67 | 0.681734 | 982 | 0.905904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |

3613fd30924745bd186e0751c87237612b35913e | 8,090 | py | Python | morphological_classifier/classifier.py | selflect11/morphological_classifier | 2ef3c3e1e894220238a36b633d4a164a14fe820f | ["MIT"] | null | null | null | morphological_classifier/classifier.py | selflect11/morphological_classifier | 2ef3c3e1e894220238a36b633d4a164a14fe820f | ["MIT"] | null | null | null | morphological_classifier/classifier.py | selflect11/morphological_classifier | 2ef3c3e1e894220238a36b633d4a164a14fe820f | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from morphological_classifier.perceptron import AveragedPerceptron
from morphological_classifier.performance_metrics import PerformanceMetrics
from morphological_classifier.stats_plot import StatsPlotter
from morphological_classifier import constants, utils
import numpy as np
from collections import defaultdict
from sklearn import model_selection
import pickle
import random
class MorphologicalClassifier:
# Tags used for padding, since the _get_features method uses
# two words before and after the current word
START = ['__START__', '__START2__']
END = ['__END__', '__END2__']
def __init__(self, metrics, plotter, save_path, data_path, logging, n_splits):
self.model = AveragedPerceptron()
self.metrics = metrics
self.plotter = plotter
self.tags = constants.TAGS
self.tag_dict = dict()
self.save_path = save_path
self.data_path = data_path
self.n_splits = n_splits
self.logging = logging
self.isTrained = False
def predict(self, phrase):
''':type phrase: str
:rtype: list(tuple(str, str))'''
output = []
tags = []
words = phrase.split()
for i, word in enumerate(words):
tag = self.tag_dict.get(word)
if not tag:
features = self._get_features(words, tags, i)
tag = self.model.predict_tag(features)
output.append((word, tag))
tags.append(tag)
return output
def _get_features(self, words, tags, i):
'''
Map words into a feature representation.
:type words: list(str)
:type tags: list(str)
:type i: int
'''
features = defaultdict(int)
starts_capitalized = words[i][0].isupper()
# Padding the tags, words and index
words = self.START + [self.normalize(w) for w in words] + self.END
tags = self.START + tags
i += len(self.START)
def add_feature(feat_id, *values):
features[str.join(' ', (feat_id,) + tuple(values))] += 1
add_feature('bias')
#add_feature('word_i_pref_1', words[i][0])
add_feature('tag_(i-1)', tags[i-1])
add_feature('tag_(i-2)', tags[i-2])
add_feature('tag_(i-1) tag_(i-2)', tags[i-1], tags[i-2])
add_feature('word_i_suffix', utils.get_suffix(words[i]))
add_feature('word_i', words[i])
add_feature('tag_(i-1) word_i', tags[i-1], words[i])
add_feature('word_(i-1)', words[i-1])
add_feature('word_(i-1)_suffix', utils.get_suffix(words[i-1]))
add_feature('word_(i-2)', words[i-2])
add_feature('word_(i+1)', words[i+1])
add_feature('word_(i+1)_suffix', utils.get_suffix(words[i+1]))
add_feature('word_(i+2)', words[i+2])
#add_feature('word_i_starts_capitalized', str(starts_capitalized))
return features
def _make_tag_dict(self, sentences):
'''Make a tag dictionary for single-tag words.
:param sentences: A list of list of (word, tag) tuples.'''
counts = defaultdict(lambda: defaultdict(int))
for sentence in sentences:
for word, tag in sentence:
counts[word][tag] += 1
freq_thresh = 20
ambiguity_thresh = 0.97
for word, tag_freqs in counts.items():
tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
n = sum(tag_freqs.values())
# Don't add rare words to the tag dictionary
# Only add quite unambiguous words
if n >= freq_thresh and (mode / n) >= ambiguity_thresh:
self.tag_dict[word] = tag
def parse_sentence(self, sentence):
'''Gets "Word1_tag1 word2_tag2 word3_tag3..."
returns [("word1", "tag1"), ("word2", "tag2"), ...]'''
def parse_word_tag(string_element):
'''Parses an element of the form Word_tag1+tag2...|extra_info
into a (word, tags) tuple.'''
word, tags_str = string_element.split('_')
return self.normalize(word), tags_str
parsed_sentence = []
for word_tags in sentence.split():
parsed_sentence.append(parse_word_tag(word_tags))
return parsed_sentence
def normalize(self, word):
'''Normalization used in pre-processing.
- All words are lower cased
- All numeric words are returned as !DIGITS'''
if word.isdigit():
return '!DIGITS'
else:
return word.lower()
def train_test(self):
with open(self.data_path, 'r', encoding=constants.ENCODING) as f:
sentences = f.readlines()
parsed_sentences = np.array([self.parse_sentence(s) for s in sentences])
kf = model_selection.KFold(n_splits=self.n_splits)
for i, (train, test) in enumerate(kf.split(parsed_sentences)):
print('\nStarting train/test {} of {}'.format(i+1, self.n_splits))
self.train(train_sentences=parsed_sentences[train])
self.test(test_sentences=parsed_sentences[test], metrics=self.metrics)
self.reset()
if self.logging:
self.metrics.log()
self.plotter.plot_confusion_matrix(self.metrics.confusion_matrix, normalize=True)
#self.save()
def train(self, train_sentences, nr_iter=5):
if self.isTrained:
print('Classifier already trained')
return
print('Starting training phase...')
self._make_tag_dict(train_sentences)
num_sentences = len(train_sentences)
for iter_ in range(nr_iter):
# Padding
sent_padd = num_sentences * iter_
for sent_num, sentence in enumerate(train_sentences):
if not sentence:
continue
utils.update_progress((sent_num + sent_padd + 1)/(nr_iter * num_sentences))
words, true_tags = zip(*sentence)
guess_tags = []
for i, word in enumerate(words):
guess = self.tag_dict.get(word)
if not guess:
feats = self._get_features(words, guess_tags, i)
guess = self.model.predict_tag(feats)
self.model.update(feats, true_tags[i], guess)
guess_tags.append(guess)
random.shuffle(train_sentences)
self.model.average_weights()
self.erase_useless_data()
self.isTrained = True
def test(self, test_sentences, metrics):
if not self.isTrained:
print('Model not yet trained')
return
print('Starting testing phase...')
# Metrics stuff
num_sentences = len(test_sentences)
metrics.checkin()
for sent_num, sentence in enumerate(test_sentences):
utils.update_progress((sent_num + 1)/num_sentences)
words, true_tags = zip(*sentence)
test_phrase = str.join(' ', words)
wordtag_guess = self.predict(test_phrase)
for index, (word, guess_tag) in enumerate(wordtag_guess):
true_tag = true_tags[index]
metrics.update_predicted(true_tag, guess_tag)
metrics.checkout()
metrics.build_confusion_matrix()
def save(self):
with open(self.save_path, 'wb') as f:
pickle.dump(self.__dict__, f, -1)
def load(self):
with open(self.save_path, 'rb') as f:
self.__dict__ = pickle.load(f)
self.isTrained = True
def erase_useless_data(self):
self.model.erase_useless_data()
def reset(self):
self.model = AveragedPerceptron()
self.tag_dict = dict()
self.isTrained = False
def __getitem__(self, key):
return self.confusion_matrix[key]
| 38.341232
| 92
| 0.583931
| 7,673
| 0.948455
| 0
| 0
| 0
| 0
| 0
| 0
| 1,463
| 0.180841
|
361427d326c18b286127aad246549f8822f63a94
| 4,263
|
py
|
Python
|
autoprover/evaluation/evaluation.py
|
nclab-admin/autoprover
|
3fe5a0bb6132ae320461d538bb06c4f0fd604b27
|
[
"MIT"
] | 1
|
2019-01-10T08:04:58.000Z
|
2019-01-10T08:04:58.000Z
|
autoprover/evaluation/evaluation.py
|
nclab-admin/autoprover
|
3fe5a0bb6132ae320461d538bb06c4f0fd604b27
|
[
"MIT"
] | null | null | null |
autoprover/evaluation/evaluation.py
|
nclab-admin/autoprover
|
3fe5a0bb6132ae320461d538bb06c4f0fd604b27
|
[
"MIT"
] | 1
|
2019-10-08T16:47:58.000Z
|
2019-10-08T16:47:58.000Z
|
"""evaluation function for chromosome
"""
import subprocess
from subprocess import PIPE, STDOUT
from autoprover.evaluation.coqstate import CoqState
def preprocess(theorem, chromosome):
"""
convert chromosome to complete Coq script
Args:
theorem (list): a list of string contains theorem or some pre-provided
tactic.
chromosome (list): a list of string.
Returns:
byte: a byte string will be passed to coqtop
"""
script = b''
script += b'\n'.join(line.encode("utf-8") for line in theorem) + b'\n'
script += b'\n'.join(line.encode("utf-8") for line in chromosome) + b'\n'
script += b'Qed.'
return script
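# Illustrative call (theorem and tactic strings are made-up examples): with
# theorem=["Theorem t : True."] and chromosome=["auto."], the returned bytes
# are b"Theorem t : True.\nauto.\nQed."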
def run_coqtop(script):
"""run Coq script and return output
Args:
script (byte): a coq script
Returns:
string: the output of coqtop
"""
coqtop = subprocess.Popen('coqtop', shell=False,
stdin=PIPE, stdout=PIPE, stderr=STDOUT)
# communicate with coqtop
(out, _) = coqtop.communicate(input=script)
return out.decode('utf-8')
def get_coq_states(result, proof, chromosome, threshold=-1):
"""return valid coq states, will ignore useless and error steps
Args:
result (string): Plain text output from coqtop
proof (Proof): Proof instance
chromosome (list): the corresponse chromosome of result
threshold (int): the number of error tactic tolerance, -1 will ignore
all error.
Returns:
list of Coqstate
"""
# the first and the last are useless
splited_result = split_coqtop_result(result, proof.theorem_name)[1:]
offset = proof.offset
coq_states = []
tactics_set = set()
error_count = 0
def check_overlap(coq_states, append_state):
"""If a state is equal to previous state, remove all element from that.
"""
for index, state in enumerate(coq_states):
if state == append_state:
del coq_states[index+1:]
return
coq_states.append(append_state)
for (i, step) in enumerate(splited_result):
if i < offset:
coq_states.append(CoqState(step, proof.pre_feed_tactic[i]))
continue
# create a new state
if i == (len(splited_result)-1):
# lastest step
state = CoqState(step, "Qed.")
else:
state = CoqState(step, chromosome[i-offset])
if state.is_proof:
coq_states.append(state)
break
elif state.is_error_state or state == coq_states[-1]:
error_count += 1
elif proof.tactics.is_unrepeatable(chromosome[i-offset]):
if chromosome[i-offset] in tactics_set:
error_count += 1
check_overlap(coq_states, state)
else:
tactics_set.add(chromosome[i-offset])
check_overlap(coq_states, state)
else:
check_overlap(coq_states, state)
if error_count == threshold:
break
return coq_states
def split_coqtop_result(result, theorem_name):
""" split result into steps
Args:
result (string): the output of coqtop
Returns:
list: a list of states(string) of coqtop
"""
spliter = theorem_name + " <"
return [spliter+x for x in result.split(spliter)]
def calculate_fitness(coq_states, limit_hyp=100, limit_goal=300):
"""calculate fitness from coqstates
score = sum(len(hypothesis)/len(goal))
Args:
coq_states (list): a list of Coqstate
Returns:
double: represent fitness of a gene, higher is better.
If raise ZeroDivisionError, means there is a bug.
"""
score = 0.0
for state in coq_states:
l_hyp = len(state.hypothesis)
l_goal = len(state.goal)
if l_hyp > limit_hyp:
score -= l_hyp / (l_hyp + limit_hyp)
print(state.hypothesis)
continue
if l_goal > limit_goal:
score -= l_goal / (l_goal + limit_goal)
# print(state.goal)
continue
try:
score += l_hyp / l_goal
except ZeroDivisionError:
print(state.data)
exit(1)
return score
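# For example, a state with a 40-character hypothesis list and a 100-character
# goal adds 0.4 to the score, while oversized hypotheses or goals are penalised
# by the two branches above.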
| 29
| 79
| 0.601454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,535
| 0.360075
|
3616727077997c5d64715fd00bfc6be4f8ba4ad8
| 1,323
|
py
|
Python
|
steapy/velocity_field.py
|
Sparsh-Sharma/SteaPy
|
d6f3bee7eb1385c83f65f345d466ef740db4ed3b
|
[
"MIT"
] | 1
|
2017-04-28T13:05:13.000Z
|
2017-04-28T13:05:13.000Z
|
steapy/velocity_field.py
|
Sparsh-Sharma/SteaPy
|
d6f3bee7eb1385c83f65f345d466ef740db4ed3b
|
[
"MIT"
] | null | null | null |
steapy/velocity_field.py
|
Sparsh-Sharma/SteaPy
|
d6f3bee7eb1385c83f65f345d466ef740db4ed3b
|
[
"MIT"
] | null | null | null |
import os
import numpy
from numpy import *
import math
from scipy import integrate, linalg
from matplotlib import pyplot
from pylab import *
from .integral import *
def get_velocity_field(panels, freestream, X, Y):
"""
Computes the velocity field on a given 2D mesh.
Parameters
---------
panels: 1D array of Panel objects
The source panels.
freestream: Freestream object
The freestream conditions.
X: 2D Numpy array of floats
x-coordinates of the mesh points.
Y: 2D Numpy array of floats
y-coordinate of the mesh points.
Returns
-------
u: 2D Numpy array of floats
x-component of the velocity vector field.
v: 2D Numpy array of floats
y-component of the velocity vector field.
"""
# freestream contribution
u = freestream.u_inf * math.cos(freestream.alpha) * numpy.ones_like(X, dtype=float)
v = freestream.u_inf * math.sin(freestream.alpha) * numpy.ones_like(X, dtype=float)
# add the contribution from each source (superposition powers!!!)
vec_integral = numpy.vectorize(integral)
for panel in panels:
u += panel.sigma / (2.0 * math.pi) * vec_integral(X, Y, panel, 1, 0)
v += panel.sigma / (2.0 * math.pi) * vec_integral(X, Y, panel, 0, 1)
return u, v
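# Usage sketch (mesh bounds and resolution are illustrative, not part of the
# original module):
# X, Y = numpy.meshgrid(numpy.linspace(-1.0, 2.0, 50), numpy.linspace(-0.5, 0.5, 50))
# u, v = get_velocity_field(panels, freestream, X, Y)
# where `panels` and `freestream` come from the accompanying panel-method setup.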
| 29.4
| 87
| 0.652305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 661
| 0.499622
|
3616ce719b349e94d2bd7c4da3e42707eb0de49d
| 4,125
|
py
|
Python
|
admin/hams_admin/container_manager.py
|
hku-systems/hams
|
3a5720657252c650c9a6c5d9b674f7ea6153e557
|
[
"Apache-2.0"
] | 6
|
2020-08-19T11:46:23.000Z
|
2021-12-24T07:34:15.000Z
|
admin/hams_admin/container_manager.py
|
hku-systems/hams
|
3a5720657252c650c9a6c5d9b674f7ea6153e557
|
[
"Apache-2.0"
] | 1
|
2021-03-25T23:40:15.000Z
|
2021-03-25T23:40:15.000Z
|
admin/hams_admin/container_manager.py
|
hku-systems/hams
|
3a5720657252c650c9a6c5d9b674f7ea6153e557
|
[
"Apache-2.0"
] | 2
|
2020-10-31T16:48:39.000Z
|
2021-03-07T09:14:25.000Z
|
import abc
from .exceptions import HamsException
import logging
# Constants
HAMS_INTERNAL_QUERY_PORT = 1337
HAMS_INTERNAL_MANAGEMENT_PORT = 1338
HAMS_INTERNAL_RPC_PORT = 7000
HAMS_INTERNAL_METRIC_PORT = 1390
HAMS_INTERNAL_REDIS_PORT = 6379
HAMS_DOCKER_LABEL = "ai.hams.container.label"
HAMS_NAME_LABEL = "ai.hams.name"
HAMS_MODEL_CONTAINER_LABEL = "ai.hams.model_container.label"
HAMS_QUERY_FRONTEND_CONTAINER_LABEL = "ai.hams.query_frontend.label"
HAMS_MGMT_FRONTEND_CONTAINER_LABEL = "ai.hams.management_frontend.label"
HAMS_QUERY_FRONTEND_ID_LABEL = "ai.hams.query_frontend.id"
CONTAINERLESS_MODEL_IMAGE = "NO_CONTAINER"
HAMS_DOCKER_PORT_LABELS = {
'redis': 'ai.hams.redis.port',
'query_rest': 'ai.hams.query_frontend.query.port',
'query_rpc': 'ai.hams.query_frontend.rpc.port',
'management': 'ai.hams.management.port',
'metric': 'ai.hams.metric.port'
}
HAMS_METRIC_CONFIG_LABEL = 'ai.hams.metric.config'
# NOTE: we use '_' as the delimiter because kubernetes allows the use
# '_' in labels but not in deployment names. We force model names and
# versions to be compliant with both limitations, so this gives us an extra
# character to use when creating labels.
_MODEL_CONTAINER_LABEL_DELIMITER = "_"
class ClusterAdapter(logging.LoggerAdapter):
"""
This adapter adds cluster name to logging format.
Usage
-----
In ContainerManager init process, do:
self.logger = ClusterAdapter(logger, {'cluster_name': self.cluster_name})
"""
# def process(self, msg, kwargs):
# return "[{}] {}".format(self.extra['cluster_name'], msg), kwargs
def process(self, msg, kwargs):
return "{}".format(msg), kwargs
def create_model_container_label(name, version):
return "{name}{delim}{version}".format(
name=name, delim=_MODEL_CONTAINER_LABEL_DELIMITER, version=version)
def parse_model_container_label(label):
splits = label.split(_MODEL_CONTAINER_LABEL_DELIMITER)
if len(splits) != 2:
raise HamsException(
"Unable to parse model container label {}".format(label))
return splits
class ContainerManager(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def start_hams(self, query_frontend_image, mgmt_frontend_image,
frontend_exporter_image, cache_size,
qf_http_thread_pool_size, qf_http_timeout_request,
qf_http_timeout_content,
num_frontend_replicas):
return
@abc.abstractmethod
def connect(self):
return
@abc.abstractmethod
def connect_host(self, host_ip, host_port):
return
@abc.abstractmethod
def deploy_model(self, name, version, input_type, image):
return
@abc.abstractmethod
def add_replica(self, name, version, input_type, image, proxy_name="", proxy_port="", remove=True, runtime=""):
return
@abc.abstractmethod
def set_proxy(self, image, model_container_label, model_ip, host_ip):
return
@abc.abstractmethod
def get_num_replicas(self, name, version):
return
@abc.abstractmethod
def get_logs(self, logging_dir):
return
@abc.abstractmethod
def stop_models(self, models):
return
@abc.abstractmethod
def stop_all_model_containers(self):
return
@abc.abstractmethod
def stop_all(self, graceful=True):
pass
@abc.abstractmethod
def get_admin_addr(self):
return
@abc.abstractmethod
def get_query_addr(self):
return
@abc.abstractmethod
def get_container_ip(self, host_ip, container_id):
return
@abc.abstractmethod
def grpc_client(self, image, arg_list):
return
@abc.abstractmethod
def check_container_status(self, host_ip, container_id, timeout, threshold):
return
@abc.abstractmethod
def get_docker_client(self, host_ip):
return
@abc.abstractmethod
def add_frontend(self, host_ip, image, runtime_dag_id, entry_proxy_ip, entry_proxy_port, max_workers=64,stateful=False, remove=True):
return
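# Note: concrete backends (e.g. Docker- or Kubernetes-based managers) are
# expected to subclass ContainerManager and implement every abstract method
# above; this class only fixes the interface.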
| 28.448276
| 137
| 0.701333
| 2,476
| 0.600242
| 0
| 0
| 1,847
| 0.447758
| 0
| 0
| 1,047
| 0.253818
|
36170542f3bcc2d21452673199202e71e6245707
| 11,044
|
py
|
Python
|
solidata_api/api/api_auth/endpoint_user_tokens.py
|
co-demos/solidata-backend
|
2c67aecbd457cdec78b0772d78dcf699e20dd3dc
|
[
"MIT"
] | 2
|
2019-12-17T22:27:53.000Z
|
2020-06-22T12:47:37.000Z
|
solidata_api/api/api_auth/endpoint_user_tokens.py
|
co-demos/solidata-backend
|
2c67aecbd457cdec78b0772d78dcf699e20dd3dc
|
[
"MIT"
] | 13
|
2019-06-16T15:42:33.000Z
|
2022-02-26T05:12:34.000Z
|
solidata_api/api/api_auth/endpoint_user_tokens.py
|
co-demos/solidata-backend
|
2c67aecbd457cdec78b0772d78dcf699e20dd3dc
|
[
"MIT"
] | 1
|
2019-12-17T22:27:58.000Z
|
2019-12-17T22:27:58.000Z
|
# -*- encoding: utf-8 -*-
"""
endpoint_user_tokens.py
"""
from solidata_api.api import *
# from log_config import log, pformat
log.debug(">>> api_auth ... creating api endpoints for USER_TOKENS")
### create namespace
ns = Namespace('tokens', description='User : token refreshing related endpoints')
### import models
from solidata_api._models.models_user import * #User_infos, AnonymousUser
model_user = User_infos(ns)
model_user_access = model_user.model_access
model_user_login_out = model_user.model_login_out
model_old_refresh_token = ExpiredRefreshToken(ns).model
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### ROUTES
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### cf : response codes : https://restfulapi.net/http-status-codes/
# cf : http://flask-jwt-extended.readthedocs.io/en/latest/refresh_tokens.html
"""
RESPONSE CODES
cf : https://restfulapi.net/http-status-codes/
200 (OK)
201 (Created)
202 (Accepted)
204 (No Content)
301 (Moved Permanently)
302 (Found)
303 (See Other)
304 (Not Modified)
307 (Temporary Redirect)
400 (Bad Request)
401 (Unauthorized)
403 (Forbidden)
404 (Not Found)
405 (Method Not Allowed)
406 (Not Acceptable)
412 (Precondition Failed)
415 (Unsupported Media Type)
500 (Internal Server Error)
501 (Not Implemented)
"""
@ns.doc(security='apikey')
@ns.route('/token_claims')
class GetTokenClaims(Resource) :
@jwt_required
@distant_auth(func_name="token_claims", return_resp=True )
def get(self) :
"""
Get token claims given a token
>
--- needs : a token in the header
>>> returns : msg, claims
"""
### DEBUGGING
print()
print("-+- "*40)
log.debug( "ROUTE class : %s", self.__class__.__name__ )
### retrieve current token raw from token
raw_jwt = get_raw_jwt()
log.debug("raw_jwt : \n %s", pformat(raw_jwt) )
### retrieve current token claims from token
# claims = get_jwt_claims()
claims = returnClaims()
log.debug("claims : \n %s", pformat(claims) )
return {
"msg" : "token claims from token in header " ,
"data" : claims ,
# "raw_jwt" : raw_jwt ,
}, 200
@ns.doc(security='apikey')
@ns.route('/confirm_access')
class ConfirmAccessToken(Resource) :
# @jwt_required
@guest_required
@distant_auth(func_name="confirm_access", return_resp=True )
def get(self) :
"""
Confirm access_token given
>
--- needs : a valid access_token in the header
>>> returns : msg, a new_access_token
"""
### DEBUGGING
print()
print("-+- "*40)
log.debug( "ROUTE class : %s", self.__class__.__name__ )
# log.debug ("payload : \n{}".format(pformat(ns.payload)))
### retrieve current user identity from refresh token
# claims = get_jwt_claims()
claims = returnClaims()
log.debug("claims : \n %s", pformat(claims) )
user_id = claims["_id"]
if user_id == None :
return {
"msg" : "user not found " ,
}, 401
else :
return {
"msg" : "user found " ,
"data" : claims ,
}, 200
@ns.doc(security='apikey')
@ns.route('/new_access_token')
class NewAccessToken(Resource) :
# The jwt_refresh_token_required decorator insures a valid refresh
# token is present in the request before calling this endpoint. We
# can use the get_jwt_identity() function to get the identity of
# the refresh token, and use the create_access_token() function again
# to make a new access token for this identity.
@jwt_refresh_token_required
@distant_auth(func_name="new_access_token", return_resp=True )
def get(self) :
"""
Refresh the access_token given a valid refresh_token
>
--- needs : a valid refresh_token in the header
>>> returns : msg, a new_access_token
"""
### DEBUGGING
print()
print("-+- "*40)
log.debug( "ROUTE class : %s", self.__class__.__name__ )
# log.debug ("payload : \n{}".format(pformat(ns.payload)))
### retrieve current user identity from refresh token
user_identity = get_jwt_identity()
log.debug("user_identity : \n %s", user_identity)
### retrieve user from db to get all infos
# user = mongo_users.find_one( {"infos.email" : user_email } )
user = mongo_users.find_one( {"_id" : ObjectId(user_identity) } )
log.debug("user : \n %s", pformat(user))
# if user or user_email == "anonymous":
if user :
if user :
# user_light = marshal( user , model_user_access)
# user_light["_id"] = str(user["_id"])
user_light = marshal( user , model_user_login_out)
# elif user_email == "anonymous" :
# anon_user_class = AnonymousUser()
# user_light = anon_user_class.__dict__
### create new access token
new_access_token = create_access_token(identity=user_light, fresh=False)
log.debug("new_access_token : \n %s ", new_access_token)
### store tokens
tokens = {
'access_token' : new_access_token,
# 'salt_token' : public_key_str,
}
if app.config["RSA_MODE"] == "yes":
tokens["salt_token"] = public_key_str
return {
"msg" : "new access token for user : {} ".format(user_identity) ,
"data" : user_light,
"tokens" : tokens
}, 200 ### indicates to redirect to other URL
else :
return {
"msg" : "user not found or is anonymous" ,
}, 401
@ns.doc(security='apikey')
@ns.route("/fresh_access_token")
class FreshAccessToken(Resource):
@ns.doc('user_fresh_token')
@jwt_refresh_token_required
@distant_auth(func_name="fresh_access_token", return_resp=True )
def get(self):
"""
Create a fresh access_token
>
--- needs : valid refresh_token in the header
>>> returns : msg, fresh access_token, is_user_confirmed
"""
### DEBUGGING
print()
print("-+- "*40)
log.debug( "ROUTE class : %s", self.__class__.__name__ )
### check identity
user_identity = get_jwt_identity()
log.debug('useremail from jwt : \n%s', user_identity )
### find user
user = mongo_users.find_one( {"infos.email" : user_identity } )
log.debug("user : \n %s", pformat(user))
if user :
### marshal user's info
user_light = marshal( user , model_user_access)
user_light["_id"] = str(user["_id"])
# Use create_access_token() to create user's fresh access token
fresh_access_token = create_access_token(identity=user_light, fresh=True)
tokens = {
"access_token" : fresh_access_token,
}
if app.config["RSA_MODE"] == "yes":
tokens["salt_token"] = public_key_str
return {
"msg" : "fresh access_token created for user '{}' ".format(user_identity) ,
"is_user_confirmed" : user["auth"]["conf_usr"],
"tokens" : tokens
}, 200
else :
return {
"msg" : "incorrect user" ,
}, 401
# @ns.route('/new_refresh_token' )
# @ns.route('/new_refresh_token/', defaults={ 'old_refresh_token':'your_old_refresh_token' } )
@ns.doc(security='apikey')
@ns.route('/new_refresh_token/<string:old_refresh_token>' )
@ns.param('old_refresh_token', 'The expired refresh_token')
class NewRefreshToken(Resource) :
@ns.doc(params={'old_refresh_token': 'the old refresh token'})
@distant_auth(func_name="new_refresh_token", return_resp=True, ns_payload=True )
def post(self, old_refresh_token) :
"""
Refresh the refresh_token when an old refresh_token is POSTed (in the URL or in the header) ...
From old_refresh_token check if :
- user exists in DB
- if user's email is confirmed and not anonymous
- if user is blacklisted
>
--- needs : an old refresh_token in the header or in the URL
>>> returns : msg, a new_refresh_token
"""
### DEBUGGING
print()
print("-+- "*40)
log.debug( "ROUTE class : %s", self.__class__.__name__ )
log.debug ("payload : \n{}".format(pformat(ns.payload)))
### retrieve jwt
# raw_jwt = ns.payload["old_refresh_token"]
raw_jwt = old_refresh_token
log.debug("raw_jwt : \n %s", pformat(raw_jwt))
### decode jwt
# decoded_token = decode_token(raw_jwt)
decoded_token = jwt.decode(raw_jwt, app.config.get('JWT_SECRET_KEY'), options={'verify_exp': False})
log.debug("decoded_token : \n %s", pformat(decoded_token))
### check jwt and user's identity from old refresh_token
jwt_type = decoded_token["type"]
jwt_identity = decoded_token["jti"]
log.debug('jwt_type : {} / jwt_identity : {}'.format(jwt_type, jwt_identity) )
user_identity = decoded_token["identity"]
log.debug('user_identity from old refresh_token : \n%s', user_identity )
# if user_identity != "anonymous" and jwt_type == "refresh" :
if user_identity and jwt_type == "refresh" :
### find user in db
user = mongo_users.find_one( {"_id" : ObjectId(user_identity) } )
# user = mongo_users.find_one( {"infos.email" : user_identity } )
log.debug("user : \n %s", pformat(user))
if user :
### check if there is something wrong : user's email not confirmed | user blacklisted
if user["auth"]["conf_usr"] and user["auth"]["is_blacklisted"] == False :
### marshal user's info
user_light = marshal( user , model_user_login_out)
# user_light["_id"] = str(user["_id"])
log.debug("user_light : \n %s", pformat(user_light))
# create a new refresh_token
new_refresh_token = create_refresh_token(identity=user_light)
# and save it into user's data in DB
user["auth"]["refr_tok"] = new_refresh_token
mongo_users.save(user)
log.debug("new_refresh_token is saved in user's data : %s", new_refresh_token )
# create user's new_access_token
new_access_token = create_access_token(identity=user_light)
tokens = {
"access_token" : new_access_token,
"refresh_token" : new_refresh_token
}
if app.config["RSA_MODE"] == "yes":
tokens["salt_token"] = public_key_str
### return new tokens
return {
"msg" : "new refresh_token created for user '{}' ".format(user_identity) ,
"tokens" : tokens
}, 200
### user's email not confirmed or blacklisted
else :
return {
"msg" : "you need to confirm your email '{}' first before...".format(user_identity)
}, 401
### user not in DB
else :
return {
"msg" : "no such user in DB"
}, 401
### user is anonymous | wrong jwt
else :
return {
"msg" : "anonyous users can't renew their refresh_token OR wrong jwt type..."
}, 401
| 30.174863
| 104
| 0.611554
| 9,105
| 0.82443
| 0
| 0
| 9,480
| 0.858385
| 0
| 0
| 5,886
| 0.532959
|
361738fea8f68576a66d9ee50d5cd2a6da5685cc
| 4,750
|
py
|
Python
|
tektonbundle/tektonbundle.py
|
chmouel/tektonbundle
|
6d44e47f9b6d5c2d1da4663f9c7bfcab50108074
|
[
"MIT"
] | 3
|
2020-10-22T04:57:21.000Z
|
2021-06-03T16:03:44.000Z
|
tektonbundle/tektonbundle.py
|
chmouel/tektonbundle
|
6d44e47f9b6d5c2d1da4663f9c7bfcab50108074
|
[
"MIT"
] | 3
|
2020-10-27T14:30:33.000Z
|
2020-11-12T11:39:07.000Z
|
tektonbundle/tektonbundle.py
|
chmouel/tektonbundle
|
6d44e47f9b6d5c2d1da4663f9c7bfcab50108074
|
[
"MIT"
] | null | null | null |
"""Main module."""
import copy
import io
import logging
import re
from typing import Dict, List
import yaml
log = logging.getLogger(__name__)
TEKTON_TYPE = ("pipeline", "pipelinerun", "task", "taskrun", "condition")
class TektonBundleError(Exception):
pass
def tpl_apply(yaml_obj, parameters):
def _apply(param):
if param in parameters:
return parameters[param]
return "{{%s}}" % (param)
return io.StringIO(
re.sub(
r"\{\{([_a-zA-Z0-9\.]*)\}\}",
lambda m: _apply(m.group(1)),
open(yaml_obj).read(),
))
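# Illustrative behaviour (template contents assumed): with parameters={'name': 'foo'},
# a line "Theorem {{name}} : True." is rewritten to "Theorem foo : True.", while
# placeholders whose keys are missing from `parameters` are left as "{{key}}".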
def resolve_task(mpipe, name, yaml_documents, skip_task_inlining):
if 'pipelineSpec' in mpipe['spec']:
tasks = mpipe['spec']['pipelineSpec']['tasks']
else:
tasks = mpipe['spec']['tasks']
for task in tasks:
if 'taskRef' in task:
reftask = task['taskRef']['name']
if reftask in skip_task_inlining:
continue
if 'task' not in yaml_documents or reftask not in yaml_documents[
'task']:
raise TektonBundleError(
f"Pipeline: {name} reference a Task: {reftask} not in repository"
)
del task['taskRef']
task['taskSpec'] = yaml_documents['task'][reftask]['spec']
return mpipe
def parse(yamlfiles: List[str], parameters: Dict[str, str],
skip_inlining: List[str]) -> Dict[str, str]:
"""parse a bunch of yaml files"""
yaml_documents = {} # type: Dict[str, Dict]
results = []
notkube_ignored = []
nottekton_ignored = []
for yaml_file in yamlfiles:
for document in yaml.load_all(tpl_apply(yaml_file, parameters),
Loader=yaml.Loader):
if 'apiVersion' not in document or 'kind' not in document:
notkube_ignored.append(
yaml.dump(
document,
Dumper=yaml.Dumper,
))
continue
name = (document['metadata']['generateName']
if 'generateName' in document['metadata'].keys() else
document['metadata']['name'])
kind = document['kind'].lower()
if kind not in TEKTON_TYPE:
nottekton_ignored.append(
yaml.dump(
document,
Dumper=yaml.Dumper,
))
continue
yaml_documents.setdefault(kind, {})
yaml_documents[kind][name] = document
if 'pipelinerun' not in yaml_documents:
raise TektonBundleError("We need at least a PipelineRun")
# If we have standalone Pipeline documents (i.e. not embedded), expand all taskRefs inside them.
if 'pipeline' in yaml_documents:
for pipeline in yaml_documents['pipeline']:
mpipe = copy.deepcopy(yaml_documents['pipeline'][pipeline])
resolved = resolve_task(mpipe, pipeline, yaml_documents,
skip_inlining)
yaml_documents['pipeline'][pipeline] = copy.deepcopy(resolved)
# For every PipelineRun, expand its pipelineRef; keep it as is if it already
# carries a pipelineSpec.
for pipeline_run in yaml_documents['pipelinerun']:
mpr = copy.deepcopy(yaml_documents['pipelinerun'][pipeline_run])
if 'pipelineSpec' in mpr['spec']:
mpr = resolve_task(mpr, pipeline_run, yaml_documents,
skip_inlining)
elif 'pipelineRef' in mpr['spec']:
refpipeline = mpr['spec']['pipelineRef']['name']
if 'pipeline' not in yaml_documents or refpipeline not in yaml_documents[
'pipeline']:
raise TektonBundleError(
f"PR: {pipeline_run} reference a Pipeline: {refpipeline} not in repository"
)
del mpr['spec']['pipelineRef']
mpr['spec']['pipelineSpec'] = yaml_documents['pipeline'][
refpipeline]['spec']
# Adjust names with generateName if needed
# TODO(chmou): make it optional, we maybe don't want to do this sometime
if 'name' in mpr['metadata']:
name = mpr['metadata']['name']
mpr['metadata']['generateName'] = name + "-"
del mpr['metadata']['name']
results.append(mpr)
ret = {
'bundle':
yaml.dump_all(results,
Dumper=yaml.Dumper,
default_flow_style=False,
allow_unicode=True),
'ignored_not_tekton':
nottekton_ignored,
'ignored_not_k8':
notkube_ignored
}
return ret
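# Minimal usage sketch (file names and parameter key are assumptions, not part
# of the module):
# out = parse(["pipelinerun.yaml", "pipeline.yaml", "task.yaml"], {"revision": "main"}, [])
# out["bundle"] then holds the PipelineRun YAML with pipelines/tasks inlined, and
# out["ignored_not_tekton"] / out["ignored_not_k8"] list the skipped documents.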
| 32.986111
| 95
| 0.550105
| 44
| 0.009263
| 0
| 0
| 0
| 0
| 0
| 0
| 1,157
| 0.243579
|
3617e8e260511cf8ba4c78d54d81b23de02b0480
| 2,385
|
py
|
Python
|
Scripts/sims4communitylib/classes/time/common_alarm_handle.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 118
|
2019-08-31T04:33:18.000Z
|
2022-03-28T21:12:14.000Z
|
Scripts/sims4communitylib/classes/time/common_alarm_handle.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 15
|
2019-12-05T01:29:46.000Z
|
2022-02-18T17:13:46.000Z
|
Scripts/sims4communitylib/classes/time/common_alarm_handle.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 28
|
2019-09-07T04:11:05.000Z
|
2022-02-07T18:31:40.000Z
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
import os
from sims4.commands import Command, CommandType, CheatOutput
from sims4communitylib.utils.common_time_utils import CommonTimeUtils
from typing import Any, Callable
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
if not ON_RTD:
from scheduling import Timeline
from alarms import AlarmHandle
from date_and_time import DateAndTime, TimeSpan
else:
# noinspection PyMissingOrEmptyDocstring
class AlarmHandle:
def cancel(self):
pass
# noinspection PyMissingOrEmptyDocstring
class DateAndTime:
pass
# noinspection PyMissingOrEmptyDocstring
class TimeSpan:
pass
# noinspection PyMissingOrEmptyDocstring
class Timeline:
pass
class CommonAlarmHandle(AlarmHandle):
"""A custom alarm handle that keeps track of when it is slated to trigger for the first time."""
def __init__(
self,
owner: Any,
on_alarm_triggered_callback: Callable[['CommonAlarmHandle'], None],
timeline: Timeline,
when: DateAndTime,
should_repeat: bool=False,
time_until_repeat: TimeSpan=None,
accurate_repeat: bool=True,
persist_across_zone_loads: bool=False
):
self.started_at_date_and_time = when
super().__init__(
owner,
on_alarm_triggered_callback,
timeline,
when,
repeating=should_repeat,
repeat_interval=time_until_repeat,
accurate_repeat=accurate_repeat,
cross_zone=persist_across_zone_loads
)
if not ON_RTD:
@Command('s4clib.print_current_time', command_type=CommandType.Live)
def _s4clib_print_current_time(_connection: int=None):
output = CheatOutput(_connection)
output('Current time')
output('Hour {} Minute {}'.format(CommonTimeUtils.get_current_date_and_time().hour(), CommonTimeUtils.get_current_date_and_time().minute()))
output('Abs Hour {} Abs Minute {}'.format(CommonTimeUtils.get_current_date_and_time().absolute_hours(), CommonTimeUtils.get_current_date_and_time().absolute_minutes()))
| 33.125
| 176
| 0.704403
| 994
| 0.416771
| 0
| 0
| 526
| 0.220545
| 0
| 0
| 641
| 0.268763
|
3617f1fcc07ed43dd799a0a44d4cb775cd1c7478
| 1,884
|
py
|
Python
|
blackbook/migrations/0022_cleanup.py
|
bsiebens/blackbook
|
636d1adc8966db158914abba43e360c6a0d23173
|
[
"MIT"
] | 1
|
2021-05-10T19:15:48.000Z
|
2021-05-10T19:15:48.000Z
|
blackbook/migrations/0022_cleanup.py
|
bsiebens/BlackBook
|
636d1adc8966db158914abba43e360c6a0d23173
|
[
"MIT"
] | 20
|
2020-12-27T15:56:24.000Z
|
2021-09-22T18:25:02.000Z
|
blackbook/migrations/0022_cleanup.py
|
bsiebens/BlackBook
|
636d1adc8966db158914abba43e360c6a0d23173
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.4 on 2021-01-22 22:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blackbook', '0021_update_account_categories'),
]
operations = [
migrations.RemoveField(
model_name='budgetperiod',
name='budget',
),
migrations.RemoveField(
model_name='transaction',
name='account',
),
migrations.RemoveField(
model_name='transaction',
name='journal_entry',
),
migrations.RemoveField(
model_name='transactionjournalentry',
name='budget',
),
migrations.RemoveField(
model_name='transactionjournalentry',
name='category',
),
migrations.RemoveField(
model_name='transactionjournalentry',
name='from_account',
),
migrations.RemoveField(
model_name='transactionjournalentry',
name='tags',
),
migrations.RemoveField(
model_name='transactionjournalentry',
name='to_account',
),
migrations.RemoveField(
model_name='userprofile',
name='user',
),
migrations.DeleteModel(
name='Account',
),
migrations.DeleteModel(
name='AccountType',
),
migrations.DeleteModel(
name='Budget',
),
migrations.DeleteModel(
name='BudgetPeriod',
),
migrations.DeleteModel(
name='Category',
),
migrations.DeleteModel(
name='Transaction',
),
migrations.DeleteModel(
name='TransactionJournalEntry',
),
migrations.DeleteModel(
name='UserProfile',
),
]
| 25.459459
| 56
| 0.525478
| 1,799
| 0.954883
| 0
| 0
| 0
| 0
| 0
| 0
| 461
| 0.244692
|
36188c3a24365e2e84cb2983da3bc80cf1611d71
| 1,431
|
py
|
Python
|
core/myauthbackend.py
|
devendraotari/HRMS_project
|
c6480903c2a8212c6698987e8ced96a114c4d7c7
|
[
"BSD-2-Clause"
] | null | null | null |
core/myauthbackend.py
|
devendraotari/HRMS_project
|
c6480903c2a8212c6698987e8ced96a114c4d7c7
|
[
"BSD-2-Clause"
] | null | null | null |
core/myauthbackend.py
|
devendraotari/HRMS_project
|
c6480903c2a8212c6698987e8ced96a114c4d7c7
|
[
"BSD-2-Clause"
] | null | null | null |
from django.contrib.auth.backends import BaseBackend
from django.contrib.auth import get_user_model
class EmailPhoneBackend(BaseBackend):
"""
Authentication backend that accepts either an email address or a phone number together with a password.
"""
def authenticate(self,request, email=None,phone=None, password=None):
# Check the username/password and return a user.
my_user_model = get_user_model()
user = None
try:
print(f"{request.data['phone']}")
if request.data.get('email',None):
print(f"custom auth call{email}")
user = my_user_model.objects.get(email=request.data.get('email',None))
if request.data.get('phone',None):
print("in auth phone")
user = my_user_model.objects.get(phone=request.data.get('phone',None))
print(f"user{user}")
if user.check_password(password):
return user # return user on valid credentials
except my_user_model.DoesNotExist as mmode:
print(f"{mmode}")
return None # return None if custom user model does not exist
except Exception as e:
return None # return None in case of other exceptions
def get_user(self, user_id):
my_user_model = get_user_model()
try:
return my_user_model.objects.get(pk=user_id)
except my_user_model.DoesNotExist:
return None
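# Note (assumed wiring, inferred from the file path core/myauthbackend.py): to
# activate this backend, Django's AUTHENTICATION_BACKENDS setting would need to
# include "core.myauthbackend.EmailPhoneBackend".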
| 40.885714
| 87
| 0.596785
| 1,317
| 0.920335
| 0
| 0
| 0
| 0
| 0
| 0
| 322
| 0.225017
|
3618b1890763a3badcdbdde17119e78da0fca799
| 1,655
|
py
|
Python
|
apps/core/management/commands/update-banned-email.py
|
sparcs-kaist/sparcssso
|
9aeedc02652dadacb44c6a4ba06901f6d2372223
|
[
"MIT"
] | 18
|
2015-07-06T06:20:14.000Z
|
2022-03-20T23:45:40.000Z
|
apps/core/management/commands/update-banned-email.py
|
sparcs-kaist/sparcssso
|
9aeedc02652dadacb44c6a4ba06901f6d2372223
|
[
"MIT"
] | 170
|
2015-07-07T08:42:03.000Z
|
2022-03-24T17:31:17.000Z
|
apps/core/management/commands/update-banned-email.py
|
sparcs-kaist/sparcssso
|
9aeedc02652dadacb44c6a4ba06901f6d2372223
|
[
"MIT"
] | 11
|
2015-07-07T20:42:19.000Z
|
2022-01-12T22:39:59.000Z
|
import requests
from django.core.management.base import BaseCommand, CommandError
from apps.core.models import EmailDomain
DATA_URL = (
'https://raw.githubusercontent.com/martenson/disposable-email-domains'
'/master/disposable_email_blacklist.conf'
)
class Command(BaseCommand):
help = 'Update list of banned email domains'
def add_arguments(self, parser):
parser.add_argument(
'--overwrite',
action='store_true',
dest='overwrite',
help='Overwrite configured data',
)
parser.add_argument(
'--clean',
action='store_true',
dest='clean',
help='Empty the table and start from the beginning',
)
def handle(self, *args, **options):
try:
domains_raw = requests.get(DATA_URL).text.split('\n')
domains = [x for x in [y.strip() for y in domains_raw] if x]
except Exception:
raise CommandError(f'cannot fetch data from {DATA_URL}')
if options['clean']:
EmailDomain.objects.all().delete()
count_created, count_overwrited = 0, 0
for domain in domains:
obj, created = EmailDomain.objects.get_or_create(domain=domain)
if created:
count_created += 1
elif not obj.is_banned and options['overwrite']:
count_overwrited += 1
obj.is_banned = True
obj.save()
self.stdout.write(self.style.SUCCESS(
f'total {len(domains)}, '
f'created {count_created}, '
f'overwritten {count_overwrited}'))
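# Typical invocation (project setup assumed): python manage.py update-banned-email --overwrite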
| 30.648148
| 75
| 0.586103
| 1,390
| 0.839879
| 0
| 0
| 0
| 0
| 0
| 0
| 428
| 0.25861
|
361a68b0ba7eff6cb23d87bfa96dce0e03ec7a08
| 1,659
|
py
|
Python
|
LeetCode/Python3/Math/1323. Maximum 69 Number.py
|
WatsonWangZh/CodingPractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | 11
|
2019-09-01T22:36:00.000Z
|
2021-11-08T08:57:20.000Z
|
LeetCode/Python3/Math/1323. Maximum 69 Number.py
|
WatsonWangZh/LeetCodePractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | null | null | null |
LeetCode/Python3/Math/1323. Maximum 69 Number.py
|
WatsonWangZh/LeetCodePractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | 2
|
2020-05-27T14:58:52.000Z
|
2020-05-27T15:04:17.000Z
|
# Given a positive integer num consisting only of digits 6 and 9.
# Return the maximum number you can get by changing at most one digit (6 becomes 9, and 9 becomes 6).
# Example 1:
# Input: num = 9669
# Output: 9969
# Explanation:
# Changing the first digit results in 6669.
# Changing the second digit results in 9969.
# Changing the third digit results in 9699.
# Changing the fourth digit results in 9666.
# The maximum number is 9969.
# Example 2:
# Input: num = 9996
# Output: 9999
# Explanation: Changing the last digit 6 to 9 results in the maximum number.
# Example 3:
# Input: num = 9999
# Output: 9999
# Explanation: It is better not to apply any change.
# Constraints:
# 1 <= num <= 10^4
# num's digits are 6 or 9.
# Hints:
# Convert the number in an array of its digits.
# Brute force on every digit to get the maximum number.
class Solution(object):
def maximum69Number (self, num):
"""
:type num: int
:rtype: int
"""
# https://blog.csdn.net/CSerwangjun/article/details/104053280
# M1. Simulation: replace the first '6' with a '9'
return str(num).replace('6','9', 1)
# M2. Simulation via list surgery (unreachable after the return above; kept for reference)
str_num = str(num)
if '6' in str_num:
pos = str_num.index('6')
list_num = list(str_num)
list_num[pos] = '9'
str_num = ''.join(list_num)
return int(str_num)
else:
return num
# M3. Simulation with an explicit digit list (also unreachable; kept for reference)
s = str(num)
lst = []
for i in s:
lst.append(i)
for i in range(len(lst)):
if lst[i] == '6':
lst[i] = '9'
break
s = ''.join(lst)
return int(s)
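# All three variants run in O(d) time for a d-digit number; with the constraint
# num <= 10^4, d is at most 5.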
| 25.921875
| 101
| 0.57384
| 822
| 0.491921
| 0
| 0
| 0
| 0
| 0
| 0
| 997
| 0.596649
|
361c83b1b112f9b41fc07f6d3ac9327c01a72ef7
| 3,245
|
py
|
Python
|
ticketing/userticket/createqrcode.py
|
autlamps/tessera-backend
|
1d02e8e3651c1ad75bdf4e5d0e61765a2a6de0c2
|
[
"MIT"
] | null | null | null |
ticketing/userticket/createqrcode.py
|
autlamps/tessera-backend
|
1d02e8e3651c1ad75bdf4e5d0e61765a2a6de0c2
|
[
"MIT"
] | 1
|
2018-08-14T03:15:00.000Z
|
2018-08-21T00:33:34.000Z
|
ticketing/userticket/createqrcode.py
|
autlamps/tessera-backend
|
1d02e8e3651c1ad75bdf4e5d0e61765a2a6de0c2
|
[
"MIT"
] | null | null | null |
import base64
import rsa
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from ticketing.models import BalanceTicket, RideTicket
class VerifyFailedError(Exception):
pass
class QRCode:
"""
QRCode creator is used to create a user ticket/balance ID,
which is then signed and then returned
"""
# Refactor to QR Factory
# Make QR codes for RT tickets
def __init__(self, testing=False):
if not testing:
if settings.PRIVATE_KEY is None or settings.PUBLIC_KEY is None:
raise Exception("The settings file has an issue with the keys")
else:
self.private = rsa.PrivateKey.load_pkcs1(self.__getprivkey())
self.public = rsa.PublicKey.load_pkcs1(self.__getpubkey())
@staticmethod
def __getprivkey():
priv = settings.PRIVATE_KEY
header = priv[:32]
body = priv[32:len(priv)-29].replace(" ", "\n")
footer = priv[-29:]
privkey = header + "\n" + body + footer
return privkey
@staticmethod
def __getpubkey():
pub = settings.PUBLIC_KEY
header = pub[:31]
body = pub[31:len(pub)-28].replace(" ", "\n")
footer = pub[-28:]
pubkey = header + "\n" + body + footer
return pubkey
def createbtqrcode(self, btticket: BalanceTicket):
uid = btticket.qr_code_id
type = 'b'
val = 'x'
name = btticket.account.user.first_name
return self.__sign(uid, type, val, name)
def creatertqrcode(self, rtticket: RideTicket):
uid = rtticket.qr_code
type = 'r'
val = rtticket.initial_value
name = rtticket.short_name
return self.__sign(uid, type, val, name)
def __sign(self, uid, type, val, name):
tosign = str(uid) + '.' + type + '.' + str(val) + '.' + name
signed = base64.b64encode(rsa.sign(tosign.encode('UTF-8'),
self.private, 'SHA-256'))
toreturn = str(tosign) + ':' + str(signed.decode('UTF-8'))
self.ID = toreturn
return toreturn
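# The QR payload produced above has the form
# "<uid>.<type>.<value>.<name>:<base64 RSA/SHA-256 signature>"; verify() below
# splits on ':' and checks the signature against the public key.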
def verify(self, qrcode):
parts = qrcode.split(':')
hash = base64.b64decode(parts[1])
try:
rsa.verify(parts[0].encode(), hash, self.public)
print("Verified")
user = parts[0].split(".")
uuid = user[0]
ticketType = user[1]
if ticketType == "b":
try:
ticket = BalanceTicket.objects.get(qr_code_id=uuid)
return {"ticket": ticket, "type": ticketType}
except ObjectDoesNotExist:
raise VerifyFailedError()
elif ticketType == "r":
try:
ticket = RideTicket.objects.get(qr_code=uuid)
return {"ticket": ticket, "type": ticketType}
except ObjectDoesNotExist:
raise VerifyFailedError()
except rsa.VerificationError:
print("Verification Error")
raise VerifyFailedError
# Create an error for better usability
print("Hash 0 : " + parts[0])
print("Hash 1 : " + parts[1])
| 33.112245
| 79
| 0.561787
| 3,070
| 0.946071
| 0
| 0
| 501
| 0.154391
| 0
| 0
| 413
| 0.127273
|
361df2d9546970e2a42e2d2a91b1abc8fb87455f
| 3,015
|
py
|
Python
|
CollabMoodle.py
|
dantonbertuol/PyCollab
|
b36c968f5f1aabf1a322559854db24aa6691ac63
|
[
"MIT"
] | null | null | null |
CollabMoodle.py
|
dantonbertuol/PyCollab
|
b36c968f5f1aabf1a322559854db24aa6691ac63
|
[
"MIT"
] | null | null | null |
CollabMoodle.py
|
dantonbertuol/PyCollab
|
b36c968f5f1aabf1a322559854db24aa6691ac63
|
[
"MIT"
] | null | null | null |
import datetime
from webService import WebService
import Utilidades as ut
import sys
if __name__ == "__main__":
param = ut.mainMoodle(sys.argv[1:])
#param = 'moodle_plugin_sessions.txt', '', '2020-08-01 00:00:00,2020-12-31 00:00:00'
webService = WebService()
report = []
ret = 0
dates = param[2].split(",")
if param[0] != '' and param[1] == '':
print("Moodle Sesions...")
moodlSession = ut.leerUUID(param[0])
for sesion in moodlSession:
try:
nombre_session, date_session = webService.get_moodle_sesion_name(sesion)
except:
print('WS error')
nombre_session = None
if nombre_session == None or nombre_session == ' ':
print("Session name not found!")
else:
print(nombre_session)
try:
lista_grabaciones = webService.get_moodle_lista_grabaciones(nombre_session, dates, date_session)
except:
lista_grabaciones = None
if lista_grabaciones is None:
print("There's no recording for: " + nombre_session)
else:
for grabacion in lista_grabaciones:
try:
ret = ut.downloadrecording(grabacion['recording_id'],grabacion['recording_name'], dates)
except:
ret = 2
try:
if ret == 1:
report.append([grabacion['recording_id'], grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']])
elif ret == 2:
report.append(
['Erro no download', grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']])
elif ret == 3:
if [grabacion['recording_id'], grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']] in report:
print("EXISTE")
else:
report.append(
[grabacion['recording_id'], grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']])
except:
print("Nao foi possivel criar o relatorio")
if len(report) > 0:
try:
print(ut.crearReporteMoodle(report, dates))
except:
print("Nao foi possivel criar o relatorio")
else:
print('No recordings were found')
| 47.109375
| 125
| 0.469983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 572
| 0.189718
|
361df35a0da6b8703efd3e8c9fc20bd6344aa676
| 5,549
|
py
|
Python
|
eva/views_data.py
|
aqutor/CE_Backend
|
1265f7169aea0b6b8cff3fda742a8a5a295fe9ea
|
[
"MIT"
] | null | null | null |
eva/views_data.py
|
aqutor/CE_Backend
|
1265f7169aea0b6b8cff3fda742a8a5a295fe9ea
|
[
"MIT"
] | null | null | null |
eva/views_data.py
|
aqutor/CE_Backend
|
1265f7169aea0b6b8cff3fda742a8a5a295fe9ea
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework import status
from eva.serializers import WorkSerializer, PageSerializer, WordSerializer, RadicalSerializer
from eva.models import Work, Page, Word, Radical
from rest_framework.response import Response
from django.http import Http404
class WorkView(APIView):
def get(self, request, format=None):
"""return all works"""
works = Work.objects.all()
print(works)
serializer = WorkSerializer(works, many=True)
json = {
'works': serializer.data,
'count': works.count(),
'status': status.HTTP_200_OK,
}
return Response(json)
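# Response envelope: {"works": [...], "count": <int>, "status": 200}; the list
# views below reuse the same pattern, adding the filter id they were given.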
class WorkDetail(APIView):
"""
Retrieve a single Work instance.
"""
def get_object(self, pk):
try:
return Work.objects.get(pk=pk)
except Work.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
work = self.get_object(pk)
serializer = WorkSerializer(work)
json = serializer.data
json['status'] = status.HTTP_200_OK
return Response(json)
class PageView(APIView):
def get(self, request, format=None, *args, **kwargs):
"""return all works"""
try:
workId = request.query_params.get("workId")
if workId is None:
pages = Page.objects.all()
workId = 0
stats = status.HTTP_200_OK
else:
pages = Page.objects.filter(workId=workId)
if pages.count() == 0:
stats = status.HTTP_404_NOT_FOUND
else:
stats = status.HTTP_200_OK
except ValueError:
return Response({
'status': status.HTTP_400_BAD_REQUEST,
'message': 'invalid workId',
})
serializer = PageSerializer(pages, many=True)
json = {
'pages': serializer.data,
'count': pages.count(),
'workId': workId,
'status': stats,
}
return Response(json)
class PageDetail(APIView):
"""
Retrieve a single Page instance.
"""
def get_object(self, pk):
try:
return Page.objects.get(pk=pk)
except Page.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
work = self.get_object(pk)
serializer = PageSerializer(work)
json = serializer.data
json['status'] = status.HTTP_200_OK
return Response(json)
class WordView(APIView):
def get(self, request, format=None, *args, **kwargs):
"""return all works"""
pageId = request.query_params.get("pageId")
try:
if pageId is None:
words = Word.objects.all()
pageId = 0
stats = status.HTTP_200_OK
else:
words = Word.objects.filter(pageId=pageId)
if words.count() == 0:
stats = status.HTTP_404_NOT_FOUND
else:
stats = status.HTTP_200_OK
except ValueError:
return Response({
'status': status.HTTP_400_BAD_REQUEST,
'message': 'invalid pageId',
})
serializer = WordSerializer(words, many=True)
json = {
'words': serializer.data,
'count': words.count(),
'pageId': pageId,
'status': stats,
}
return Response(json)
class WordDetail(APIView):
"""
Retrieve a single Word instance.
"""
def get_object(self, pk):
try:
return Word.objects.get(pk=pk)
except Word.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
word = self.get_object(pk)
serializer = WordSerializer(word)
json = serializer.data
json['status'] = status.HTTP_200_OK
return Response(json)
class RadicalView(APIView):
def get(self, request, format=None, *args, **kwargs):
"""return all works"""
wordId = request.query_params.get("wordId")
try:
if wordId is None:
radicals = Radical.objects.all()
wordId = 0
stats = status.HTTP_200_OK
else:
radicals = Radical.objects.filter(wordId=wordId)
if radicals.count() == 0:
stats = status.HTTP_404_NOT_FOUND
else:
stats = status.HTTP_200_OK
except ValueError:
return Response({
'status': status.HTTP_400_BAD_REQUEST,
'message': 'invalid wordId',
})
serializer = RadicalSerializer(radicals, many=True)
json = {
'words': serializer.data,
'count': radicals.count(),
'wordId': wordId,
'status': stats,
}
return Response(json)
class RadicalDetail(APIView):
"""
Retrieve a single Radical instance.
"""
def get_object(self, pk):
try:
return Radical.objects.get(pk=pk)
except Radical.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
radical = self.get_object(pk)
serializer = RadicalSerializer(radical)
json = serializer.data
json['status'] = status.HTTP_200_OK
return Response(json)
| 29.359788
| 93
| 0.548567
| 5,229
| 0.942332
| 0
| 0
| 0
| 0
| 0
| 0
| 603
| 0.108668
|
361ee510413d5ff2e8e4d3a5aa90b44d49e73ac2
| 1,447
|
py
|
Python
|
program/appID3.py
|
trungvuong55555/FlaskAPI_ExpertSystem
|
6f7a557fefd093e901070fe2ec363e0c2ed8ffa2
|
[
"MIT"
] | null | null | null |
program/appID3.py
|
trungvuong55555/FlaskAPI_ExpertSystem
|
6f7a557fefd093e901070fe2ec363e0c2ed8ffa2
|
[
"MIT"
] | null | null | null |
program/appID3.py
|
trungvuong55555/FlaskAPI_ExpertSystem
|
6f7a557fefd093e901070fe2ec363e0c2ed8ffa2
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, render_template
import pickle
app = Flask(__name__)  # initialize the Flask app
model = pickle.load(open('modelID3.pkl', 'rb'))  # unpickle the trained model
@app.route('/',methods =["GET", "POST"])
def home():
if request.method == "POST":
# read the values from the form
one= request.form.get("a0")
two= request.form.get("a1")
three = request.form.get("a2")
four = request.form.get("a3")
five = request.form.get("a4")
six = request.form.get("a5")
seven = request.form.get("a6")
eight = request.form.get("a7")
nine = request.form.get("a8")
ten = request.form.get("a9")
eleven = request.form.get("a10")
# cast the values to int
one= int(one)
two= int(two)
three= int(three)
four= int(four)
five= int(five)
six= int(six)
seven= int(seven)
eight= int(eight)
nine= int(nine)
ten= int(ten)
eleven = int(eleven)
# assemble the values into a feature vector
input_value= [one,two,three,four,five,six,seven,eight,nine,ten,eleven]
# run the model prediction
prediction = model.predict([input_value])
prediction = str(prediction)  # cast to string so it can be rendered on screen
return "quality of wine is : "+ prediction;
return render_template('index.html')
if __name__ == "__main__":
app.run(debug=True)
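# Illustrative request (Flask dev-server defaults assumed): POST the integer
# form fields a0..a10 to http://127.0.0.1:5000/ to receive the predicted wine
# quality as plain text.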
| 28.94
| 99
| 0.57982
| 0
| 0
| 0
| 0
| 1,227
| 0.847961
| 0
| 0
| 315
| 0.217692
|
361ef035e9cacacdf5098c184cd2ac1fe4e53da4
| 474
|
py
|
Python
|
examples/deldup.py
|
rlan/pydmv
|
97619bbd2732b2ad8e64c97fe862a84dc147af93
|
[
"MIT"
] | null | null | null |
examples/deldup.py
|
rlan/pydmv
|
97619bbd2732b2ad8e64c97fe862a84dc147af93
|
[
"MIT"
] | null | null | null |
examples/deldup.py
|
rlan/pydmv
|
97619bbd2732b2ad8e64c97fe862a84dc147af93
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import os
import sys
import argparse
#Auto-import parent module
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import voc
#from pydmv import voc
parser = argparse.ArgumentParser(description="Print VOC index file without duplicates")
parser.add_argument("file", help="Input index file")
args = parser.parse_args()
in_file = args.file
my_bag = set()
for index in voc.stream(in_file):
if index not in my_bag:
print(index)
my_bag.add(index)
| 21.545455
| 87
| 0.736287
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 134
| 0.2827
|
362141754e09b014da8e86cb87845189f022576c
| 448
|
py
|
Python
|
home_work/App/views.py
|
jianghaiming0707/python1806homework
|
2509f75794ac0ef8711cb1d1c2c4378408619a75
|
[
"Apache-2.0"
] | 1
|
2018-06-28T01:01:35.000Z
|
2018-06-28T01:01:35.000Z
|
home_work/App/views.py
|
jianghaiming0707/python1806homework
|
2509f75794ac0ef8711cb1d1c2c4378408619a75
|
[
"Apache-2.0"
] | 6
|
2018-06-25T04:50:23.000Z
|
2018-07-03T10:24:08.000Z
|
home_work/App/views.py
|
jianghaiming0707/python1806homework
|
2509f75794ac0ef8711cb1d1c2c4378408619a75
|
[
"Apache-2.0"
] | 42
|
2018-06-19T09:48:04.000Z
|
2019-09-15T01:20:06.000Z
|
from django.shortcuts import render
from django.http import HttpResponse
from App.models import *
# Create your views here.
def search(request):
myclass = Myclass.objects.all()
return render(request, 'test.html', context={'myclass': myclass})
def students(req):
students_id=req.GET.get('classid')
studentt=Student.objects.all()
studentt=studentt.filter(cid_id=students_id)
return render(req,'student.html',context={'students':studentt})
| 34.461538
| 67
| 0.747768
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 78
| 0.174107
|
3621452a8a1c3599be31b149a9b725b8f48992db
| 962
|
py
|
Python
|
Xiaomi_8/day_start/show_screen.py
|
Lezaza/hotpoor_autoclick_xhs
|
52eafad8cce59353a9de5bf6e488e8a2602e5536
|
[
"Apache-2.0"
] | 1
|
2021-12-21T10:42:46.000Z
|
2021-12-21T10:42:46.000Z
|
Xiaomi_8/day_start/show_screen.py
|
2218084076/hotpoor_autoclick_xhs
|
a52446ba691ac19e43410a465dc63f940c0e444d
|
[
"Apache-2.0"
] | 2
|
2021-11-03T11:36:44.000Z
|
2021-11-05T07:58:13.000Z
|
Xiaomi_8/day_start/show_screen.py
|
2218084076/hotpoor_autoclick_xhs
|
a52446ba691ac19e43410a465dc63f940c0e444d
|
[
"Apache-2.0"
] | 1
|
2021-10-09T10:28:57.000Z
|
2021-10-09T10:28:57.000Z
|
import os
import cv2
import time
path = "C:/Users/lenovo/Documents/Sites/github/hotpoor_autoclick_xhs/Xiaomi_8/day_start/hotpoor_autoclick_cache"
cache = "hotpoor_autoclick_cache/screen.png"
def get_image():
os.system(f"adb shell screencap -p /sdcard/%s"%cache)
os.system(r"adb pull /sdcard/%s %s"%(cache,path))
def load_image():
i1 = cv2.imread("%s/screen.png"%path)
scale_percent=50
w=int(i1.shape[1]*scale_percent/100)
h=int(i1.shape[0]*scale_percent/100)
dim=(w,h)
resized = cv2.resize(i1,dim,interpolation=cv2.INTER_AREA)
cv2.imshow("path", resized)
k = cv2.waitKey(0)
while True:
get_image()
print("get_image")
# load_image()
i1 = cv2.imread("%s/screen.png"%path)
scale_percent=40
w=int(i1.shape[1]*scale_percent/100)
h=int(i1.shape[0]*scale_percent/100)
dim=(w,h)
resized = cv2.resize(i1,dim,interpolation=cv2.INTER_AREA)
cv2.imshow("path", resized)
k = cv2.waitKey(1)
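# Note: this loop re-captures and re-displays the phone screen indefinitely;
# the key returned by cv2.waitKey(1) is never checked, so stopping requires
# interrupting the process.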
| 30.0625
| 112
| 0.686071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 269
| 0.279626
|
3622cd97012a4b31faded8cb9b89d6c988e04256
| 3,359
|
py
|
Python
|
hknweb/events/views/event_transactions/show_event.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
hknweb/events/views/event_transactions/show_event.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
hknweb/events/views/event_transactions/show_event.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect, reverse
from django.contrib import messages
from django.shortcuts import get_object_or_404
from django.core.paginator import Paginator
from hknweb.utils import markdownify
from hknweb.utils import allow_public_access
from hknweb.events.constants import (
ACCESSLEVEL_TO_DESCRIPTION,
ATTR,
RSVPS_PER_PAGE,
)
from hknweb.events.models import Event, Rsvp, AttendanceForm
from hknweb.events.utils import format_url
from hknweb.utils import get_access_level
@allow_public_access
def show_details(request, id):
return show_details_helper(request, id, reverse("events:index"), True)
def show_details_helper(request, id, back_link: str, can_edit: bool):
event = get_object_or_404(Event, pk=id)
if event.access_level < get_access_level(request.user):
messages.warning(request, "Insufficent permission to access event.")
return redirect(back_link)
context = {
"event": event,
"event_description": markdownify(event.description),
"event_location": format_url(event.location),
"user_access_level": ACCESSLEVEL_TO_DESCRIPTION[get_access_level(request.user)],
"event_access_level": ACCESSLEVEL_TO_DESCRIPTION[event.access_level],
"back_link": back_link,
"can_edit": can_edit and request.user.has_perm("events.change_event"),
}
if not request.user.is_authenticated:
return render(request, "events/show_details.html", context)
rsvps = Rsvp.objects.filter(event=event)
waitlisted = False
waitlist_position = 0
rsvp = None
user_rsvps = rsvps.filter(user=request.user)
if user_rsvps.exists():
# Gets the rsvp object for the user
rsvp = user_rsvps.first()
# Check if waitlisted
if event.rsvp_limit:
rsvps_before = rsvps.filter(created_at__lt=rsvp.created_at).count()
waitlisted = rsvps_before >= event.rsvp_limit
# Get waitlist position
if waitlisted:
position = rsvps.filter(created_at__lt=rsvp.created_at).count()
waitlist_position = position - event.rsvp_limit + 1
# Render only non-waitlisted rsvps
rsvps = event.admitted_set()
waitlists = event.waitlist_set()
limit = event.rsvp_limit
rsvps_page = Paginator(rsvps, RSVPS_PER_PAGE).get_page(
request.GET.get("rsvps_page")
)
waitlists_page = Paginator(waitlists, RSVPS_PER_PAGE).get_page(
request.GET.get("waitlists_page")
)
data = [
{
ATTR.TITLE: "RSVPs",
ATTR.DATA: rsvps_page if len(rsvps_page) > 0 else None,
ATTR.PAGE_PARAM: "rsvps_page",
ATTR.COUNT: str(rsvps.count()) + " / {limit}".format(limit=limit),
},
]
if limit:
data.append(
{
ATTR.TITLE: "Waitlist",
ATTR.DATA: waitlists_page if len(waitlists_page) > 0 else None,
ATTR.PAGE_PARAM: "waitlists_page",
ATTR.COUNT: str(waitlists.count()),
}
)
context = {
**context,
ATTR.DATA: data,
"rsvp": rsvp,
"attendance_form": AttendanceForm.objects.filter(event=event).first(),
"waitlisted": waitlisted,
"waitlist_position": waitlist_position,
}
return render(request, "events/show_details.html", context)
| 33.59
| 88
| 0.669842
| 0
| 0
| 0
| 0
| 126
| 0.037511
| 0
| 0
| 482
| 0.143495
|
36230cd6aca7407d1176980b4ef533beffe100f8
| 9,756
|
py
|
Python
|
pysnmp-with-texts/HPN-ICF-VOICE-IF-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/HPN-ICF-VOICE-IF-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/HPN-ICF-VOICE-IF-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module HPN-ICF-VOICE-IF-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-VOICE-IF-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:41:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint")
hpnicfVoice, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfVoice")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, Unsigned32, Gauge32, NotificationType, MibIdentifier, ModuleIdentity, Counter32, IpAddress, iso, Counter64, ObjectIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "Gauge32", "NotificationType", "MibIdentifier", "ModuleIdentity", "Counter32", "IpAddress", "iso", "Counter64", "ObjectIdentity", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
hpnicfVoiceInterface = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13))
hpnicfVoiceInterface.setRevisions(('2007-12-10 17:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hpnicfVoiceInterface.setRevisionsDescriptions(('The initial version of this MIB file.',))
if mibBuilder.loadTexts: hpnicfVoiceInterface.setLastUpdated('200712101700Z')
if mibBuilder.loadTexts: hpnicfVoiceInterface.setOrganization('')
if mibBuilder.loadTexts: hpnicfVoiceInterface.setContactInfo('')
if mibBuilder.loadTexts: hpnicfVoiceInterface.setDescription('This MIB file is to provide the definition of the voice interface general configuration.')
hpnicfVoiceIfObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1))
hpnicfVoiceIfConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1), )
if mibBuilder.loadTexts: hpnicfVoiceIfConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfConfigTable.setDescription('The table contains configurable parameters for both analog voice interface and digital voice interface.')
hpnicfVoiceIfConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hpnicfVoiceIfConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfConfigEntry.setDescription('The entry of voice interface table.')
hpnicfVoiceIfCfgCngOn = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgCngOn.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgCngOn.setDescription('This object indicates whether the silence gaps should be filled with background noise. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line.')
hpnicfVoiceIfCfgNonLinearSwitch = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgNonLinearSwitch.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgNonLinearSwitch.setDescription('This object expresses the nonlinear processing is enable or disable for the voice interface. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line. Currently, only digital voice subscriber lines can be set disabled.')
hpnicfVoiceIfCfgInputGain = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-140, 139))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgInputGain.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgInputGain.setDescription('This object indicates the amount of gain added to the receiver side of the voice interface. Unit is 0.1 db. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line.')
hpnicfVoiceIfCfgOutputGain = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-140, 139))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgOutputGain.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgOutputGain.setDescription('This object indicates the amount of gain added to the send side of the voice interface. Unit is 0.1 db. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line.')
hpnicfVoiceIfCfgEchoCancelSwitch = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgEchoCancelSwitch.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgEchoCancelSwitch.setDescription('This object indicates whether the echo cancellation is enabled. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line.')
hpnicfVoiceIfCfgEchoCancelDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgEchoCancelDelay.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgEchoCancelDelay.setDescription("This object indicates the delay of the echo cancellation for the voice interface. This value couldn't be modified unless hpnicfVoiceIfCfgEchoCancelSwitch is enable. Unit is milliseconds. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line. The default value of this object is 32.")
hpnicfVoiceIfCfgTimerDialInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 300))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgTimerDialInterval.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgTimerDialInterval.setDescription('The interval, in seconds, between two dialing numbers. The default value of this object is 10 seconds. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 with loop-start or ground-start protocol voice subscriber line.')
hpnicfVoiceIfCfgTimerFirstDial = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 300))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgTimerFirstDial.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgTimerFirstDial.setDescription('The period of time, in seconds, before dialing the first number. The default value of this object is 10 seconds. It is applicable to FXO, FXS subscriber lines and E1/T1 with loop-start or ground-start protocol voice subscriber line.')
hpnicfVoiceIfCfgPrivateline = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 31))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgPrivateline.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgPrivateline.setDescription('This object indicates the E.164 phone number for plar mode. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line.')
hpnicfVoiceIfCfgRegTone = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 10), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(2, 3), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgRegTone.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgRegTone.setDescription('This object uses 2 or 3 letter country code specify voice parameters of different countrys. This value will take effect on all voice interfaces of all cards on the device.')
mibBuilder.exportSymbols("HPN-ICF-VOICE-IF-MIB", hpnicfVoiceInterface=hpnicfVoiceInterface, hpnicfVoiceIfCfgEchoCancelDelay=hpnicfVoiceIfCfgEchoCancelDelay, hpnicfVoiceIfConfigEntry=hpnicfVoiceIfConfigEntry, PYSNMP_MODULE_ID=hpnicfVoiceInterface, hpnicfVoiceIfObjects=hpnicfVoiceIfObjects, hpnicfVoiceIfCfgNonLinearSwitch=hpnicfVoiceIfCfgNonLinearSwitch, hpnicfVoiceIfCfgTimerFirstDial=hpnicfVoiceIfCfgTimerFirstDial, hpnicfVoiceIfCfgPrivateline=hpnicfVoiceIfCfgPrivateline, hpnicfVoiceIfCfgInputGain=hpnicfVoiceIfCfgInputGain, hpnicfVoiceIfCfgRegTone=hpnicfVoiceIfCfgRegTone, hpnicfVoiceIfCfgTimerDialInterval=hpnicfVoiceIfCfgTimerDialInterval, hpnicfVoiceIfCfgCngOn=hpnicfVoiceIfCfgCngOn, hpnicfVoiceIfCfgEchoCancelSwitch=hpnicfVoiceIfCfgEchoCancelSwitch, hpnicfVoiceIfCfgOutputGain=hpnicfVoiceIfCfgOutputGain, hpnicfVoiceIfConfigTable=hpnicfVoiceIfConfigTable)
| 154.857143
| 863
| 0.791513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,593
| 0.368286
|
3624a7b0fa4de41698f562d63ac67b0fc5033a54
| 1,230
|
py
|
Python
|
data_access_layer/abstract_classes/customer_dao.py
|
Alejandro-Fuste/python-bank-application
|
46e44c830ab8c13fd64c08e2db4f743a7d1d35de
|
[
"MIT"
] | null | null | null |
data_access_layer/abstract_classes/customer_dao.py
|
Alejandro-Fuste/python-bank-application
|
46e44c830ab8c13fd64c08e2db4f743a7d1d35de
|
[
"MIT"
] | 15
|
2021-11-22T16:05:42.000Z
|
2021-12-08T16:43:37.000Z
|
data_access_layer/abstract_classes/customer_dao.py
|
Alejandro-Fuste/python-bank-application
|
46e44c830ab8c13fd64c08e2db4f743a7d1d35de
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from entities.customers import Customer
from typing import List
class CustomerDao(ABC):
@abstractmethod
def create_customer_entry(self, customer: Customer) -> Customer:
pass
@abstractmethod
def get_customer_by_id(self, customer_id: str) -> Customer:
pass
@abstractmethod
def get_all_customers(self) -> List[Customer]:
pass
@abstractmethod
def get_customer_balance_by_id(self, customer_id: str, account_id: int) -> float:
pass
@abstractmethod
def deposit_into_account_by_id(self, customer_id: str, account_id: int, deposit: float) -> float:
pass
@abstractmethod
def withdraw_from_account_by_id(self, customer_id: str, account_id: int, withdraw: float) -> float:
pass
@abstractmethod
def transfer_money_by_their_ids(self, customer_id: str, from_account_id: int, to_account_id: int,
transfer_amount: float) -> float:
pass
@abstractmethod
def update_customer_by_id(self, customer_id: str, customer: Customer) -> Customer:
pass
@abstractmethod
def delete_customer_by_id(self, customer_id: int) -> bool:
pass
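# A hedged sketch, not part of the original module: a minimal in-memory subclass of
# the abstract CustomerDao above. The storage dict and the assumed `customer_id`
# attribute on Customer are illustrative; a real subclass must also implement the
# remaining abstract methods before it can be instantiated.
class InMemoryCustomerDao(CustomerDao):
    def __init__(self):
        self._customers = {}  # assumed: keyed by customer_id

    def create_customer_entry(self, customer: Customer) -> Customer:
        self._customers[customer.customer_id] = customer  # assumes a customer_id attribute
        return customer

    def get_customer_by_id(self, customer_id: str) -> Customer:
        return self._customers[customer_id]

    def get_all_customers(self) -> List[Customer]:
        return list(self._customers.values())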
| 27.954545
| 103
| 0.68374
| 1,127
| 0.91626
| 0
| 0
| 1,050
| 0.853659
| 0
| 0
| 0
| 0
|
3624ec443278ac728598d1df9f161910bd3e69fe
| 975
|
py
|
Python
|
examples/make_sphere_graphic.py
|
itamar-dw/spherecluster
|
7c9b81d8bb6c6c2a0c569c17093bf0b4550f2768
|
[
"MIT"
] | 186
|
2018-09-14T06:51:59.000Z
|
2022-03-30T12:56:01.000Z
|
examples/make_sphere_graphic.py
|
itamar-dw/spherecluster
|
7c9b81d8bb6c6c2a0c569c17093bf0b4550f2768
|
[
"MIT"
] | 20
|
2018-10-16T15:40:08.000Z
|
2022-03-23T14:37:52.000Z
|
examples/make_sphere_graphic.py
|
itamar-dw/spherecluster
|
7c9b81d8bb6c6c2a0c569c17093bf0b4550f2768
|
[
"MIT"
] | 40
|
2018-09-13T21:05:50.000Z
|
2022-03-09T16:05:53.000Z
|
import sys
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # NOQA
import seaborn # NOQA
from spherecluster import sample_vMF
plt.ion()
n_clusters = 3
mus = np.random.randn(3, n_clusters)
mus, r = np.linalg.qr(mus, mode='reduced')
kappas = [15, 15, 15]
num_points_per_class = 250
Xs = []
for nn in range(n_clusters):
new_X = sample_vMF(mus[nn], kappas[nn], num_points_per_class)
Xs.append(new_X.T)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(
1, 1, 1, aspect='equal', projection='3d',
adjustable='box-forced', xlim=[-1.1, 1.1], ylim=[-1.1, 1.1],
zlim=[-1.1, 1.1]
)
colors = ['b', 'r', 'g']
for nn in range(n_clusters):
ax.scatter(Xs[nn][0, :], Xs[nn][1, :], Xs[nn][2, :], c=colors[nn])
ax.set_aspect('equal')
plt.axis('off')
plt.show()
def r_input(val=None):
    # Prompt for input on both Python 2 and 3; the original eval(input(...))
    # was a broken 2to3-style conversion and is unnecessary (and unsafe) here.
    val = val or ''
    if sys.version_info[0] >= 3:
        return input(val)
    return raw_input(val)
r_input()
| 20.744681
| 70
| 0.644103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 67
| 0.068718
|
3626cc57d851fc7ca881f30af21ead100d822372
| 1,043
|
py
|
Python
|
pointnet2/tf_ops/sampling/tf_sampling.py
|
ltriess/pointnet2_keras
|
29be56161c8c772442b85b8fda300d10ff7fe7b3
|
[
"MIT"
] | 2
|
2022-02-06T23:12:15.000Z
|
2022-03-28T06:48:52.000Z
|
pointnet2/tf_ops/sampling/tf_sampling.py
|
ltriess/pointnet2_keras
|
29be56161c8c772442b85b8fda300d10ff7fe7b3
|
[
"MIT"
] | null | null | null |
pointnet2/tf_ops/sampling/tf_sampling.py
|
ltriess/pointnet2_keras
|
29be56161c8c772442b85b8fda300d10ff7fe7b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Furthest point sampling
Original author: Haoqiang Fan
Modified by Charles R. Qi
All Rights Reserved. 2017.
Modified by Larissa Triess (2020)
"""
import os
import sys
import tensorflow as tf
from tensorflow.python.framework import ops
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sampling_module = tf.load_op_library(os.path.join(BASE_DIR, "tf_sampling_so.so"))
def farthest_point_sample(k: int, points: tf.Tensor) -> tf.Tensor:
"""Returns the indices of the k farthest points in points
Arguments:
k : int
The number of points to consider.
points : tf.Tensor(shape=(batch_size, P1, 3), dtype=tf.float32)
The points with P1 dataset points given in xyz.
Returns:
indices : tf.Tensor(shape=(batch_size, k), dtype=tf.int32)
The indices of the k farthest points in points.
"""
return sampling_module.farthest_point_sample(points, k)
ops.NoGradient("FarthestPointSample")
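# A hedged usage sketch, not part of the original module: it assumes the compiled
# tf_sampling_so.so is available and that eager execution (TF 2.x) is enabled;
# the point cloud below is random placeholder data.
if __name__ == "__main__":
    points = tf.random.uniform((2, 1024, 3))        # batch of 2 clouds, 1024 xyz points each
    idx = farthest_point_sample(64, points)         # (2, 64) indices of the sampled points
    sampled = tf.gather(points, idx, batch_dims=1)  # (2, 64, 3) sampled coordinates
    print(idx.shape, sampled.shape)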
| 25.439024
| 81
| 0.701822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 650
| 0.623202
|
3626d45d010076e81364291684b9ea5d2493fb6c
| 561
|
py
|
Python
|
gql/resolvers/mutations/scope.py
|
apoveda25/graphql-python-server
|
eb7b911aa1116327120b857beb17da3e30523e74
|
[
"Apache-2.0"
] | 4
|
2020-06-20T11:54:04.000Z
|
2021-09-07T11:41:32.000Z
|
gql/resolvers/mutations/scope.py
|
apoveda25/graphql-python-server
|
eb7b911aa1116327120b857beb17da3e30523e74
|
[
"Apache-2.0"
] | null | null | null |
gql/resolvers/mutations/scope.py
|
apoveda25/graphql-python-server
|
eb7b911aa1116327120b857beb17da3e30523e74
|
[
"Apache-2.0"
] | null | null | null |
from ariadne import MutationType
from datetime import datetime as dt
from models.scope import Scope
from schemas.helpers.normalize import change_keys
from schemas.scope import ScopeCreate
mutations_resolvers = MutationType()
@mutations_resolvers.field("scopeCreate")
async def resolve_scope_create(_, info, scope) -> dict:
store_data = Scope.get_instance()
data = ScopeCreate(**scope, key=f'{scope["collection"]}{scope["action"]}')
normalize = change_keys(data.dict(exclude_none=True), key="_key")
return await store_data.create(normalize)
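# A hedged wiring sketch, not part of the original module: mutations_resolvers is
# normally combined with the SDL type definitions via ariadne.make_executable_schema;
# `type_defs` below is a hypothetical placeholder for the project's schema string.
# from ariadne import make_executable_schema
# schema = make_executable_schema(type_defs, mutations_resolvers)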
| 31.166667
| 78
| 0.773619
| 0
| 0
| 0
| 0
| 331
| 0.590018
| 289
| 0.515152
| 60
| 0.106952
|
36288867b24d81ec55fecb507750b334c645d763
| 5,188
|
py
|
Python
|
napari_subboxer/interactivity_utils.py
|
alisterburt/napari-subboxer
|
f450e72a5c1c64c527c4f999644f99f3109c36e8
|
[
"BSD-3-Clause"
] | 3
|
2021-11-01T18:18:43.000Z
|
2021-11-25T02:59:50.000Z
|
napari_subboxer/interactivity_utils.py
|
alisterburt/napari-subboxer
|
f450e72a5c1c64c527c4f999644f99f3109c36e8
|
[
"BSD-3-Clause"
] | 1
|
2021-11-24T20:59:18.000Z
|
2021-11-24T20:59:24.000Z
|
napari_subboxer/interactivity_utils.py
|
alisterburt/napari-subboxer
|
f450e72a5c1c64c527c4f999644f99f3109c36e8
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Optional
import napari
import napari.layers
import numpy as np
from napari.utils.geometry import project_point_onto_plane
def point_in_bounding_box(point: np.ndarray, bounding_box: np.ndarray) -> bool:
"""Determine whether an nD point is inside an nD bounding box.
Parameters
----------
point : np.ndarray
(n,) array containing nD point coordinates to check.
bounding_box : np.ndarray
(2, n) array containing the min and max of the nD bounding box.
As returned by `Layer._extent_data`.
"""
if np.all(point > bounding_box[0]) and np.all(point < bounding_box[1]):
return True
return False
def drag_data_to_projected_distance(
start_position, end_position, view_direction, vector
):
"""Calculate the projected distance between two mouse events.
Project the drag vector between two mouse events onto a 3D vector
specified in data coordinates.
The general strategy is to
1) find mouse drag start and end positions, project them onto a
pseudo-canvas (a plane aligned with the canvas) in data coordinates.
2) project the mouse drag vector onto the (normalised) vector in data
coordinates
Parameters
----------
start_position : np.ndarray
Starting point of the drag vector in data coordinates
end_position : np.ndarray
End point of the drag vector in data coordinates
view_direction : np.ndarray
Vector defining the plane normal of the plane onto which the drag
vector is projected.
vector : np.ndarray
(3,) unit vector or (n, 3) array thereof on which to project the drag
vector from start_event to end_event. This argument is defined in data
coordinates.
Returns
-------
projected_distance : (1, ) or (n, ) np.ndarray of float
"""
# enforce at least 2d input
vector = np.atleast_2d(vector)
# Store the start and end positions in world coordinates
start_position = np.array(start_position)
end_position = np.array(end_position)
# Project the start and end positions onto a pseudo-canvas, a plane
# parallel to the rendered canvas in data coordinates.
start_position_canvas = start_position
end_position_canvas = project_point_onto_plane(
end_position, start_position_canvas, view_direction
)
# Calculate the drag vector on the pseudo-canvas.
drag_vector_canvas = np.squeeze(
end_position_canvas - start_position_canvas
)
# Project the drag vector onto the specified vector(s), return the distance
return np.einsum('j, ij -> i', drag_vector_canvas, vector).squeeze()
def point_in_layer_bounding_box(point, layer):
bbox = layer._display_bounding_box(layer._dims_displayed).T
if np.any(point < bbox[0]) or np.any(point > bbox[1]):
return False
else:
return True
def rotation_matrices_to_align_vectors(a: np.ndarray, b: np.ndarray):
"""
Find rotation matrices r such that r @ a = b
Implementation designed to avoid trig calls, a and b must be normalised.
based on https://iquilezles.org/www/articles/noacos/noacos.htm
Parameters
----------
a : np.ndarray
(1 or n, 3) normalised vector(s) of length 3.
b : np.ndarray
(1 or n, 3) normalised vector(s) of length 3.
Returns
-------
r : np.ndarray
(3, 3) rotation matrix or (n, 3, 3) array thereof.
"""
# setup
a = a.reshape(-1, 3)
b = b.reshape(-1, 3)
n_vectors = a.shape[0]
# cross product to find axis about which rotation should occur
axis = np.cross(a, b, axis=1)
# dot product equals cosine of angle between normalised vectors
cos_angle = np.einsum('ij, ij -> i', a, b)
# k is a constant which appears as a factor in the rotation matrix
k = 1 / (1 + cos_angle)
# construct rotation matrix
r = np.empty((n_vectors, 3, 3))
r[:, 0, 0] = (axis[:, 0] * axis[:, 0] * k) + cos_angle
r[:, 0, 1] = (axis[:, 1] * axis[:, 0] * k) - axis[:, 2]
r[:, 0, 2] = (axis[:, 2] * axis[:, 0] * k) + axis[:, 1]
r[:, 1, 0] = (axis[:, 0] * axis[:, 1] * k) + axis[:, 2]
r[:, 1, 1] = (axis[:, 1] * axis[:, 1] * k) + cos_angle
r[:, 1, 2] = (axis[:, 2] * axis[:, 1] * k) - axis[:, 0]
r[:, 2, 0] = (axis[:, 0] * axis[:, 2] * k) - axis[:, 1]
r[:, 2, 1] = (axis[:, 1] * axis[:, 2] * k) + axis[:, 0]
r[:, 2, 2] = (axis[:, 2] * axis[:, 2] * k) + cos_angle
return r.squeeze()
def rotation_matrix_from_z_vector(z_vector: np.ndarray):
return rotation_matrices_to_align_vectors(np.array([0, 0, 1]), z_vector)
def theta2rotz(theta: np.ndarray) -> np.ndarray:
"""
Rz = [[c(t), -s(t), 0],
[s(t), c(t), 0],
[ 0, 0, 1]]
"""
theta = np.deg2rad(np.asarray(theta).reshape(-1))
rotation_matrices = np.zeros((theta.shape[0], 3, 3), dtype=float)
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
rotation_matrices[:, 2, 2] = 1
rotation_matrices[:, (0, 1), (0, 1)] = cos_theta[:, np.newaxis]
rotation_matrices[:, 0, 1] = -sin_theta
rotation_matrices[:, 1, 0] = sin_theta
return rotation_matrices.squeeze()
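# A hedged usage sketch, not part of the original module: it checks that the rotation
# returned by rotation_matrices_to_align_vectors(a, b) maps a onto b, and that
# theta2rotz(90) rotates the x axis onto the y axis (values illustrative only).
if __name__ == "__main__":
    a = np.array([1.0, 0.0, 0.0])
    b = np.array([0.0, 0.0, 1.0])
    r = rotation_matrices_to_align_vectors(a, b)
    print(np.allclose(r @ a, b))                              # expected: True
    print(np.allclose(theta2rotz(90) @ a, [0.0, 1.0, 0.0]))   # expected: True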
| 35.292517
| 79
| 0.632999
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,563
| 0.494025
|
3628f30f1da84eb0aeefd00f476c1a8932e5c523
| 1,245
|
py
|
Python
|
src/attribute_generator.py
|
neutron101/cs231A-project
|
a147a3cc7de66c852dfc6b8cb9c65780c9d55d07
|
[
"MIT"
] | null | null | null |
src/attribute_generator.py
|
neutron101/cs231A-project
|
a147a3cc7de66c852dfc6b8cb9c65780c9d55d07
|
[
"MIT"
] | null | null | null |
src/attribute_generator.py
|
neutron101/cs231A-project
|
a147a3cc7de66c852dfc6b8cb9c65780c9d55d07
|
[
"MIT"
] | null | null | null |
import numpy as np
class AttributeGenerator:
def __init__(self, RTs, Ks, Ps):
self._RTs = RTs
self._Ks = Ks
self._Ps = Ps
def generate(self):
self._update_intrinsics()
self._update_translation_and_rotation()
self._updateProjection()
def use_default_translation_and_rotation(self):
self.rotation_translation = self._RTs
def _updateProjection(self):
self.projection = []
for i in range(len(self.rotation_translation)):
kr = self.intrinsics[i].dot(self.rotation_translation[i][0:3,0:3].transpose())
kt = np.reshape(self.intrinsics[i].dot(-1*self.rotation_translation[i][0:3,0:3].transpose().dot(self.rotation_translation[i][0:3,3])), [3,1])
new_projection = np.hstack((kr, kt))
self.projection.append(new_projection)
def _update_translation_and_rotation(self):
self.rotation_translation = []
base_rot = self._RTs[0][0:3,0:3]
base_trans = self._RTs[0][0:3,3]
for i in range(0, len(self._RTs)):
current = np.linalg.inv(base_rot).dot(self._RTs[i][0:3,0:3])
t = np.linalg.inv(current).dot(self._RTs[i][0:3,3] - base_trans)
rt = np.hstack((current, np.reshape(t, [3,1])))
self.rotation_translation.append(rt)
def _update_intrinsics(self):
self.intrinsics = self._Ks
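# A hedged usage sketch, not part of the original module: RTs are assumed to be
# per-camera [R|t] matrices (3x4 or 4x4) and Ks the matching 3x3 intrinsics, as the
# indexing in the class above implies; the call sequence is illustrative only.
# generator = AttributeGenerator(RTs, Ks, Ps)
# generator.generate()
# projections = generator.projection  # one 3x4 projection matrix per camera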
| 24.411765
| 144
| 0.702811
| 1,218
| 0.978313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
362a49ef92737d73a5b3be88d93c98a6d215ec47
| 6,265
|
py
|
Python
|
theDarkArtsClass.py
|
biechuyangwang/UniversalAutomaticAnswer
|
4c558396cc04b36224e9be4409f80f9654c4aa88
|
[
"Apache-2.0"
] | 2
|
2021-12-11T19:11:59.000Z
|
2021-12-24T19:32:12.000Z
|
theDarkArtsClass.py
|
biechuyangwang/UniversalAutomaticAnswer
|
4c558396cc04b36224e9be4409f80f9654c4aa88
|
[
"Apache-2.0"
] | null | null | null |
theDarkArtsClass.py
|
biechuyangwang/UniversalAutomaticAnswer
|
4c558396cc04b36224e9be4409f80f9654c4aa88
|
[
"Apache-2.0"
] | null | null | null |
# Analyze the Defence Against the Dark Arts class screen
import cv2
import sys
sys.path.append(r"C:\\Users\\SAT") # 添加自定义包的路径
from UniversalAutomaticAnswer.conf.confImp import get_yaml_file
from UniversalAutomaticAnswer.screen.screenImp import ScreenImp # import the custom packages
from UniversalAutomaticAnswer.ocr.ocrImp import OCRImp
from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState
from paddleocr import PaddleOCR
# load the configuration file
conf_path = 'conf/conf.yml'
conf_data = get_yaml_file(conf_path)
# initialize the OCR model
ocr = OCRImp(conf_data)
# initialize the screen-capture module
screen = ScreenImp(conf_data)
# left click
import win32api
import win32con
def left_click(x,y,times=4):
win32api.SetCursorPos((x,y))
import time
while times:
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)
times -= 1
walk_coordinate = [[330,640],[1260,630],[740,550]] # left, right, middle
card_coordinate = [[522,820],[695,798],[838,821],[987,818],[1185,830]] # ~ 1 2 3 4
# charms_coordinate = [[200,770,300,855],[630,700,676,777],[765,690,818,778],[910,700,960,775],[1060,700,1108,786],[556, 878,637, 922]] # states: steps 1 2 3 4 HP
# copy_coordinate = [[540,400,650,500],[980,345,1090,445],[1160,320,1260,420]]
win_rect, img= screen.get_screenshot()
# img_path = './img/harry_charmsclass.png'
# img = cv2.imread(img_path)
# img_steps = img[770:855,200:300]
# img1 = img[700:800,600:700]
# img2 = img[690:778,765:818] # click 850 716
# img3 = img[700:775,910:960]
# img4 = img[700:786,1060:1108]
# img5 = img[878:932,556:637] # blue (mana) bar
# walk_coordinate = [[850,716],[846,712],[854,720]]
# card_coordinate = [[522,820],[695,798],[838,821],[987,818],[1122,830]] # ~ 1 2 3 4
import matplotlib.pyplot as plt
# result = ocr.ocr(img, det=True, cls=True)
# print(result)
# plt.imshow(img)
# plt.show()
# """
def is_start(img, str_start):
img_start = screen.get_startMatchBtn(img)
result_start = ocr.ocr(img_start)
content_start = ocr.ocr_content(result_start)
content_start = filterLine(content_start)
if len(content_start)>0 and content_start[0] == str_start:
time.sleep(5)
x, y = 1300, 840
left_click(win_rect[0]+x,win_rect[1]+y,2)
return True
return False
count_steps = 0
epoch_num = 3
while True:
if epoch_num == 0:
break
import time
time.sleep(2)
win_rect, img= screen.get_screenshot()
# img_path = './img/harry_darkclass3.png' #
# img = cv2.imread(img_path)
# print(img.shape)
# img = img[875:920,1185:1300] # [1185, 875, 1300, 920] "tap to continue" button
# img = img[830:880, 1234:1414] # [1234,830,1414,880] "match class" button
# detect the class-match buttons
flag1 = is_start(img, '匹配上课') # OCR target text: "match class"
flag2 = is_start(img, '学院活动匹配') # OCR target text: "house event match"
if flag1 or flag2: # if a match button was detected, skip and take a new screenshot
epoch_num -= 1
continue
# detect the "tap to continue" button
img_continue = img[875:920,1185:1300]
result_continue = ocr.ocr(img_continue)
content_continue = ocr.ocr_content(result_continue)
content_continue = filterLine(content_continue)
if len(content_continue)>0 and content_continue[0] == '点击继续': # OCR target text: "tap to continue"
x, y = 1200, 890
left_click(win_rect[0]+x,win_rect[1]+y,2)
time.sleep(1)
continue
img_steps, img_1, img_2, img_3, img_4, img_5 = '-1', '15', '15', '15', '15', '11'
img_steps = img[800:850, 200:265]
img_1 = img[710:777, 615:665] # 1
img_2 = img[710:777, 770:820] # 2
img_3 = img[710:777, 920:970] # 3
img_4 = img[720:787, 1060:1110] # 4
img_nextcard = img[768:816, 1205:1246,::-1] # next card
img_5 = img[878:932,556:637] # blue (mana) bar
result_steps = ocr.ocr(img_steps)
result_1 = ocr.ocr(img_1)
result_2 = ocr.ocr(img_2)
result_3 = ocr.ocr(img_3)
result_4 = ocr.ocr(img_4)
result_nextcard = ocr.ocr(img_nextcard)
result_5 = ocr.ocr(img_5)
result_steps = ocr.ocr_content(result_steps)
result_steps = filterLine(result_steps)
result_1 = ocr.ocr_content(result_1)
result_1 = filterLine(result_1)
result_2 = ocr.ocr_content(result_2)
result_2 = filterLine(result_2)
result_3 = ocr.ocr_content(result_3)
result_3 = filterLine(result_3)
result_4 = ocr.ocr_content(result_4)
result_4 = filterLine(result_4)
result_5 = ocr.ocr_content(result_5)
result_5 = filterLine(result_5)
if (result_steps!=None) and len(result_steps) > 0 and result_steps[0].isdigit():
result_steps = int(result_steps[0][0][0])
else:
result_steps = 0
if (result_1!=None) and len(result_1) > 0 and result_1[0].isdigit():
result_1 = int(result_1[0][0][0])
else:
result_1 = 15
if (result_2!=None) and len(result_2) > 0 and result_2[0].isdigit():
result_2 = int(result_2[0][0][0])
else:
result_2 = 15
if (result_3!=None) and len(result_3) > 0 and result_3[0].isdigit():
result_3 = int(result_3[0][0][0])
else:
result_3 = 15
if (result_4!=None) and len(result_4) > 0 and result_4[0].isdigit():
result_4 = int(result_4[0][0][0])
else:
result_4 = 15
if (result_5!=None) and len(result_5) > 0 and result_5[0].isdigit():
result_5 = int(result_5[0][0][0])
else:
result_5 = -1
fee = [result_1,result_2,result_3,result_4]
idx = fee.index(min(fee))
import random
# idx = random.randint(0, 3)
# if fee[idx]>7:
# continue
walk_idx = random.randint(0, 2)
x_walk, y_walk = walk_coordinate[walk_idx][0], walk_coordinate[walk_idx][1]
x_0, y_0 = card_coordinate[0][0], card_coordinate[0][1] # companion card
x, y = card_coordinate[idx+1][0], card_coordinate[idx+1][1]
if result_5 == -1 or result_5 > 5:
if count_steps % 3 == 0:
left_click(win_rect[0]+x_walk,win_rect[1]+y_walk,4) # take one step
left_click(win_rect[0]+x_0,win_rect[1]+y_0,4) # click the companion card
count_steps += 1
left_click(win_rect[0]+x,win_rect[1]+y,4) # click the target card
print('steps remaining:',result_steps)
print('card 1 cost:',result_1)
print('card 2 cost:',result_2)
print('card 3 cost:',result_3)
print('card 4 cost:',result_4)
print('mana remaining:',result_5)
print('click position:', x, y)
# """
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# import matplotlib.pyplot as plt
# plt.imshow(img)
# plt.show()
# cv2.imwrite('./img/harry_charmsclass.png',img)
| 34.423077
| 162
| 0.65012
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,755
| 0.267245
|
362bc4e36845077cd1de93becd4b863b9767b65f
| 521
|
py
|
Python
|
lt_104.py
|
fdzhonglin/trees
|
9a13412a5c424560722abf2caac797540fa508e4
|
[
"MIT"
] | null | null | null |
lt_104.py
|
fdzhonglin/trees
|
9a13412a5c424560722abf2caac797540fa508e4
|
[
"MIT"
] | null | null | null |
lt_104.py
|
fdzhonglin/trees
|
9a13412a5c424560722abf2caac797540fa508e4
|
[
"MIT"
] | null | null | null |
# standard traversal problem
class Solution(object):
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
# leaf condition
if root == None:
return 0
# recursive skeleton: the function returns a value, so store the results for later use
left_depth = self.maxDepth(root.left)
right_depth = self.maxDepth(root.right)
# this is according to the definition
return max(left_depth, right_depth) + 1
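# A hedged usage sketch, not part of the original solution: it assumes the usual
# LeetCode-style TreeNode definition, which is not included in this file.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# root = TreeNode(1, TreeNode(2), TreeNode(3))   # tree 1 -> (2, 3)
# print(Solution().maxDepth(root))               # expected: 2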
| 30.647059
| 91
| 0.589251
| 492
| 0.944338
| 0
| 0
| 0
| 0
| 0
| 0
| 228
| 0.43762
|
362d167af1df22dfcc0fab4281874b494b14c018
| 826
|
py
|
Python
|
src/anim.py
|
JovialKnoll/monsters
|
15d969d0220fd003c2c28ae690f66633da370682
|
[
"MIT"
] | 2
|
2017-05-14T06:37:14.000Z
|
2022-03-07T02:25:32.000Z
|
src/anim.py
|
JovialKnoll/monsters
|
15d969d0220fd003c2c28ae690f66633da370682
|
[
"MIT"
] | 2
|
2017-10-08T19:41:18.000Z
|
2021-04-08T04:40:50.000Z
|
src/anim.py
|
JovialKnoll/monsters
|
15d969d0220fd003c2c28ae690f66633da370682
|
[
"MIT"
] | null | null | null |
import pygame.mixer
from vec2d import Vec2d
from saveable import Saveable
class Anim(Saveable):
__slots__ = (
'func',
'time',
'pos',
'sound',
'positional_sound',
)
def __init__(self, func: str, time: int, x_or_pair, y=None,
sound: pygame.mixer.Sound = None, positional_sound: bool = False):
self.func = func
self.time = time
self.pos = Vec2d(x_or_pair, y)
self.sound = sound
self.positional_sound = positional_sound
def save(self):
# no sound right now, sorry
# if we need it, either start passing sounds as paths
# or don't save when there are pending Anims
return self.func, self.time, self.pos
@classmethod
def load(cls, save_data):
return cls(*save_data)
| 25.030303
| 83
| 0.596852
| 748
| 0.905569
| 0
| 0
| 73
| 0.088378
| 0
| 0
| 166
| 0.200969
|
362e01958a44c444693e75555e77973e632954c9
| 5,926
|
py
|
Python
|
nevermined_compute_api/workflow_utils.py
|
nevermined-io/compute-api
|
c0d3b1875b3b95ffa78374ff89a4fefd0d3af598
|
[
"Apache-2.0"
] | null | null | null |
nevermined_compute_api/workflow_utils.py
|
nevermined-io/compute-api
|
c0d3b1875b3b95ffa78374ff89a4fefd0d3af598
|
[
"Apache-2.0"
] | 3
|
2020-11-20T11:57:04.000Z
|
2021-04-06T10:56:49.000Z
|
nevermined_compute_api/workflow_utils.py
|
nevermined-io/compute-api
|
c0d3b1875b3b95ffa78374ff89a4fefd0d3af598
|
[
"Apache-2.0"
] | null | null | null |
import os
from pathlib import Path
import json
from contracts_lib_py.utils import get_account
from common_utils_py.ddo.ddo import DDO
from nevermined_sdk_py import Nevermined, Config
import yaml
from configparser import ConfigParser
config_parser = ConfigParser()
configuration = config_parser.read('config.ini')
GROUP = config_parser.get('resources', 'group') # str | The custom resource's group name
VERSION = config_parser.get('resources', 'version') # str | The custom resource's version
NAMESPACE = config_parser.get('resources', 'namespace') # str | The custom resource's namespace
KEYFILE = json.loads(Path(os.getenv("PROVIDER_KEYFILE")).read_text())
def create_execution(service_agreement_id, workflow):
"""Creates the argo workflow template
Args:
service_agreement_id (str): The id of the service agreement being executed
workflow (dict): The workflow submitted to the compute api
Returns:
dict: The workflow template filled by the compute api with all the parameters
"""
ddo = DDO(dictionary=workflow)
workflow_template = get_workflow_template()
workflow_template['apiVersion'] = GROUP + '/' + VERSION
workflow_template['metadata']['namespace'] = NAMESPACE
workflow_template['spec']['arguments']['parameters'] += create_arguments(ddo)
workflow_template["spec"]["workflowMetadata"]["labels"][
"serviceAgreementId"] = service_agreement_id
if ddo.metadata["main"]["type"] == "fl-coordinator":
workflow_template["spec"]["entrypoint"] = "coordinator-workflow"
else:
workflow_template["spec"]["entrypoint"] = "compute-workflow"
return workflow_template
def create_arguments(ddo):
"""Create the arguments that need to be add to the argo template.
Args:
ddo (:py:class:`common_utils_py.ddo.ddo.DDO`): The workflow DDO.
Returns:
list: The list of arguments to be appended to the argo workflow
"""
args = ''
image = ''
tag = ''
if ddo.metadata["main"]["type"] != "fl-coordinator":
workflow = ddo.metadata["main"]["workflow"]
options = {
"resources": {
"metadata.url": "http://172.17.0.1:5000",
},
"keeper-contracts": {
"keeper.url": "http://172.17.0.1:8545"
}
}
config = Config(options_dict=options)
nevermined = Nevermined(config)
# TODO: Currently this only supports one stage
transformation_did = workflow["stages"][0]["transformation"]["id"]
transformation_ddo = nevermined.assets.resolve(transformation_did)
transformation_metadata = transformation_ddo.get_service("metadata")
# get args and container
args = transformation_metadata.main["algorithm"]["entrypoint"]
image = transformation_metadata.main["algorithm"]["requirements"]["container"]["image"]
tag = transformation_metadata.main["algorithm"]["requirements"]["container"]["tag"]
arguments = [
{
"name": "credentials",
# remove white spaces
"value": json.dumps(KEYFILE, separators=(",", ":"))
},
{
"name": "password",
"value": os.getenv("PROVIDER_PASSWORD")
},
{
"name": "metadata_url",
"value": "http://172.17.0.1:5000"
},
{
"name": "gateway_url",
"value": "http://172.17.0.1:8030"
},
{
"name": "node",
"value": "http://172.17.0.1:8545"
},
{
"name": "secret_store_url",
"value": "http://172.17.0.1:12001"
},
{
"name": "workflow",
"value": f"did:nv:{ddo.asset_id[2:]}"
},
{
"name": "verbose",
"value": "false"
},
{
"name": "transformation_container_image",
"value": f"{image}:{tag}"
},
{
"name": "transformation_arguments",
"value": args
}
]
return arguments
def setup_keeper():
init_account_envvars()
account = get_account(0)
if account is None:
raise AssertionError(f'Nevermined Gateway cannot run without a valid '
f'ethereum account. Account address was not found in the environment '
f'variable `PROVIDER_ADDRESS`. Please set the following environment '
f'variables and try again: `PROVIDER_ADDRESS`, `PROVIDER_PASSWORD`, '
f'`PROVIDER_KEYFILE`, `RSA_KEYFILE` and `RSA_PASSWORD`.')
if not account.key_file and not (account.password and account.key_file):
raise AssertionError(f'Nevermined Gateway cannot run without a valid '
f'ethereum account with either a password and '
f'keyfile/encrypted-key-string '
f'or private key. Current account has password {account.password}, '
f'keyfile {account.key_file}, encrypted-key {account._encrypted_key} '
f'and private-key {account._private_key}.')
def init_account_envvars():
os.environ['PARITY_ADDRESS'] = os.getenv('PROVIDER_ADDRESS', '')
os.environ['PARITY_PASSWORD'] = os.getenv('PROVIDER_PASSWORD', '')
os.environ['PARITY_KEYFILE'] = os.getenv('PROVIDER_KEYFILE', '')
os.environ['PSK-RSA_PRIVKEY_FILE'] = os.getenv('RSA_PRIVKEY_FILE', '')
os.environ['PSK-RSA_PUBKEY_FILE'] = os.getenv('RSA_PUBKEY_FILE', '')
def get_workflow_template():
"""Returns a pre configured argo workflow template.
Returns:
dict: argo workflow template
"""
path = Path(__file__).parent / "argo-workflow.yaml"
with path.open() as f:
workflow_template = yaml.safe_load(f)
return workflow_template
| 35.065089
| 99
| 0.602599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,766
| 0.466757
|
362f493d8462bb8006f529fc1fed6929dd628362
| 1,206
|
py
|
Python
|
providers/scoop_mock_provider.py
|
prezesp/scoop-viewer
|
115f413979ba2e4e766e334f0240082a9343e314
|
[
"MIT"
] | 86
|
2018-07-17T14:21:05.000Z
|
2022-03-29T03:00:40.000Z
|
providers/scoop_mock_provider.py
|
prezesp/scoop-viewer
|
115f413979ba2e4e766e334f0240082a9343e314
|
[
"MIT"
] | 16
|
2018-04-24T22:45:24.000Z
|
2021-12-15T08:37:38.000Z
|
providers/scoop_mock_provider.py
|
prezesp/scoop-viewer
|
115f413979ba2e4e766e334f0240082a9343e314
|
[
"MIT"
] | 5
|
2018-03-28T18:24:52.000Z
|
2022-01-08T11:28:31.000Z
|
""" Module to interact with scoop. """
from subprocess import Popen, PIPE # nosec
import os
class ScoopMockProvider:
""" Module to interact with scoop. """
def __init__(self):
self.version = 'unknown'
def get_version(self):
pass
def __run_scoop(self, args, universal_newlines=False):
workdir = os.path.dirname(os.path.realpath(__file__))
return Popen(['python', os.path.join(workdir, 'mock', 'scoop.py')] + args,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=universal_newlines) # nosec
def get_installed(self): # pylint: disable=R0201
""" Get all installed app from scoop. """
stdout, _ = self.__run_scoop(['list']).communicate()
stdout = stdout.decode("utf-8")
return [a.strip().split(' ')[0] for a in stdout.split('\n')]
def install(self, app, file_size_wrapper): # pylint: disable=R0201
""" Install app through scoop. """
_, _ = self.__run_scoop(['install', app]).communicate()
def uninstall(self, app): # pylint: disable=R0201
""" Uninstal app. """
_, _ = self.__run_scoop(['uninstall', app]).communicate()
| 32.594595
| 82
| 0.609453
| 1,112
| 0.922056
| 0
| 0
| 0
| 0
| 0
| 0
| 328
| 0.271973
|
362ff49962e9b464199213d8822138a4aa8efdf5
| 515
|
py
|
Python
|
services/movies_streaming_converter/src/models/convertation.py
|
fuodorov/yacinema
|
43ad869575fbaab7c7056229538638666aa87110
|
[
"MIT"
] | null | null | null |
services/movies_streaming_converter/src/models/convertation.py
|
fuodorov/yacinema
|
43ad869575fbaab7c7056229538638666aa87110
|
[
"MIT"
] | null | null | null |
services/movies_streaming_converter/src/models/convertation.py
|
fuodorov/yacinema
|
43ad869575fbaab7c7056229538638666aa87110
|
[
"MIT"
] | 1
|
2021-09-30T09:49:40.000Z
|
2021-09-30T09:49:40.000Z
|
import datetime
import uuid
from typing import Optional
from models.base import CustomBaseModel
class ConvertVideoIn(CustomBaseModel):
source_path: str
destination_path: str
resolution: str
codec_name: Optional[str] = None
display_aspect_ratio: Optional[str] = None
fps: Optional[int] = None
class ConvertVideoCreate(ConvertVideoIn):
id: uuid.UUID = uuid.uuid4()
created_at: datetime.datetime = datetime.datetime.now()
class ConvertVideoOut(CustomBaseModel):
result: bool
| 21.458333
| 59
| 0.751456
| 409
| 0.794175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
36307c13abd4a232603e88d4d656fa8c1d5c6d39
| 3,965
|
py
|
Python
|
flasc/circular_statistics.py
|
NREL/flasc
|
ac734892efc1bc7684e2393ffa1ce7a97a54efa1
|
[
"Apache-2.0"
] | 3
|
2022-01-23T19:33:32.000Z
|
2022-03-14T10:29:36.000Z
|
flasc/circular_statistics.py
|
NREL/flasc
|
ac734892efc1bc7684e2393ffa1ce7a97a54efa1
|
[
"Apache-2.0"
] | 2
|
2022-03-02T20:45:30.000Z
|
2022-03-22T18:49:24.000Z
|
flasc/circular_statistics.py
|
NREL/flasc
|
ac734892efc1bc7684e2393ffa1ce7a97a54efa1
|
[
"Apache-2.0"
] | 4
|
2022-02-17T18:40:36.000Z
|
2022-03-24T05:44:31.000Z
|
# Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
from floris.utilities import wrap_360
def calc_wd_mean_radial(angles_array_deg, axis=0):
# Use unit vectors to calculate the mean
wd_x = np.cos(angles_array_deg * np.pi / 180.)
wd_y = np.sin(angles_array_deg * np.pi / 180.)
mean_wd = np.arctan2(np.sum(wd_y, axis=axis),
np.sum(wd_x, axis=axis))
mean_wd = wrap_360(mean_wd * 180. / np.pi)
return mean_wd
# def calc_wd_mean_radial_list(angles_array_list):
# if isinstance(angles_array_list, (pd.DataFrame, pd.Series)):
# array = np.array(angles_array_list)
# elif isinstance(angles_array_list, list):
# array = np.vstack(angles_array_list).T
# else:
# array = np.array(angles_array_list)
# # Use unit vectors to calculate the mean
# dir_x = np.cos(array * np.pi / 180.).sum(axis=1)
# dir_y = np.sin(array * np.pi / 180.).sum(axis=1)
# mean_dirs = np.arctan2(dir_y, dir_x)
# mean_out = wrap_360(mean_dirs * 180. / np.pi)
# return mean_out
def calculate_wd_statistics(angles_array_deg, axis=0,
calc_median_min_max_std=True):
"""Determine statistical properties of an array of wind directions.
This includes the mean of the array, the median, the standard deviation,
the minimum value and the maximum value.
Args:
angles_array_deg ([float/int]): Array of angles in degrees
Returns:
mean_wd (float): Mean wind direction in [0, 360] deg
median_wd (float): Median wind direction in [0, 360] deg
std_wd (float): Standard deviation in deg
min_wd (float): Minimum wind direction in [0, 360] deg
max_wd (float): Maximum wind direction in [0, 360] deg
"""
# Preprocessing
angles_array_deg = np.array(angles_array_deg, dtype=float)
angles_array_deg = wrap_360(angles_array_deg)
# Check for unique cases
if angles_array_deg.shape[0] <= 0:
if calc_median_min_max_std:
return np.nan, np.nan, np.nan, np.nan, np.nan
else:
return np.nan
if np.unique(angles_array_deg).shape[0] == 1:
mean_wd = angles_array_deg[0]
if not calc_median_min_max_std:
return mean_wd
median_wd = angles_array_deg[0]
std_wd = 0.0
min_wd = angles_array_deg[0]
max_wd = angles_array_deg[0]
return mean_wd, median_wd, std_wd, min_wd, max_wd
# Calculate the mean
mean_wd = calc_wd_mean_radial(angles_array_deg, axis=axis)
# Return if we dont need to calculate statistical properties
if not calc_median_min_max_std:
return mean_wd
# Upsample mean_wd for next calculations
new_shape = list(mean_wd.shape)
new_shape.insert(axis, 1) # Add dimension at axis
new_shape = tuple(new_shape)
mean_wd_full = mean_wd.reshape(new_shape).repeat(
angles_array_deg.shape[axis], axis=axis)
# Copy angles_array_deg and wrap values around its mean value
angles_wrp = angles_array_deg
angles_wrp[angles_wrp > (mean_wd_full + 180.)] += -360.
angles_wrp[angles_wrp < (mean_wd_full - 180.)] += 360.
median_wd = wrap_360(np.nanmedian(angles_wrp, axis=axis))
std_wd = np.nanstd(angles_wrp, axis=axis)
min_wd = wrap_360(np.nanmin(angles_wrp, axis=axis))
max_wd = wrap_360(np.nanmax(angles_wrp, axis=axis))
return mean_wd, median_wd, std_wd, min_wd, max_wd
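# A hedged usage sketch, not part of the original module: wind directions straddling
# north (350 and 10 degrees) should average to ~0/360 degrees, which a naive
# arithmetic mean (180 degrees) would get wrong.
if __name__ == "__main__":
    print(calc_wd_mean_radial(np.array([350.0, 10.0])))           # expected: ~0.0
    print(calculate_wd_statistics(np.array([355.0, 5.0, 15.0])))  # mean, median, std, min, max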
| 35.088496
| 79
| 0.682219
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,001
| 0.504666
|
36323555756558519c34b677df24af6e2865a756
| 2,797
|
py
|
Python
|
src/cltl/backend/source/pyaudio_source.py
|
leolani/cltl-backend
|
4ecc6227f9d48e40b9f59e6d78e0fcee9cdadbd4
|
[
"MIT"
] | null | null | null |
src/cltl/backend/source/pyaudio_source.py
|
leolani/cltl-backend
|
4ecc6227f9d48e40b9f59e6d78e0fcee9cdadbd4
|
[
"MIT"
] | null | null | null |
src/cltl/backend/source/pyaudio_source.py
|
leolani/cltl-backend
|
4ecc6227f9d48e40b9f59e6d78e0fcee9cdadbd4
|
[
"MIT"
] | null | null | null |
import logging
import uuid
from typing import Iterable
import numpy as np
import pyaudio
from cltl.backend.api.util import raw_frames_to_np
from cltl.backend.spi.audio import AudioSource
logger = logging.getLogger(__name__)
class PyAudioSource(AudioSource):
BUFFER = 8
def __init__(self, rate, channels, frame_size):
self.id = str(uuid.uuid4())[:6]
self._rate = rate
self._channels = channels
self._frame_size = frame_size
self._pyaudio = pyaudio.PyAudio()
self._active = False
self._start_time = None
self._time = None
@property
def audio(self) -> Iterable[np.array]:
return raw_frames_to_np(self, self.frame_size, self.channels, self.depth)
@property
def rate(self) -> int:
return self._rate
@property
def channels(self) -> int:
return self._channels
@property
def frame_size(self) -> int:
return self._frame_size
@property
def depth(self) -> int:
return 2
@property
def active(self):
return self._active
@property
def time(self):
return self._mic_time - self._start_time
@property
def _mic_time(self):
return self._time
@_mic_time.setter
def _mic_time(self, stream_time):
advanced = stream_time - self._time
if advanced > self._stream.get_input_latency():
logger.exception("Latency exceeded buffer (%.4fsec) - dropped frames: %.4fsec",
self._stream.get_input_latency(), advanced)
self._time = stream_time
def stop(self):
self._active = False
logger.debug("Stopped microphone (%s)", self.id)
def __enter__(self):
self._stream = self._pyaudio.open(self._rate, self._channels, pyaudio.paInt16, input=True,
frames_per_buffer=self.BUFFER * self._frame_size)
self._active = True
self._start_time = self._stream.get_time()
self._time = self._start_time
logger.debug("Opened microphone (%s) with rate: %s, channels: %s, frame_size: %s",
self.id, self._rate, self._channels, self._frame_size)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._active:
self._active = False
self._stream.close()
logger.debug("Closed microphone (%s)", self.id)
else:
logger.warning("Ignored close microphone (%s)", self.id)
def __iter__(self):
return self
def __next__(self):
if not self._active:
raise StopIteration()
data = self._stream.read(self._frame_size, exception_on_overflow=False)
self._mic_time = self._stream.get_time()
return data
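# A hedged usage sketch, not part of the original module: it opens the default input
# device, so it needs working audio hardware; the rate/channels/frame_size values
# are illustrative.
# with PyAudioSource(rate=16000, channels=1, frame_size=480) as source:
#     for frame in source.audio:   # numpy frames via raw_frames_to_np
#         ...                      # process the frame; call source.stop() to finish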
| 27.15534
| 98
| 0.620665
| 2,568
| 0.918127
| 0
| 0
| 944
| 0.337504
| 0
| 0
| 209
| 0.074723
|
363237db275189c9b2a840bb149422bab3cd8c25
| 21,097
|
py
|
Python
|
toolkit4nlp/optimizers.py
|
xv44586/toolkit4nlp
|
0ca8c45efe4ad4c6dc20b47016a13326aadcd0bd
|
[
"Apache-2.0"
] | 94
|
2020-07-16T03:07:59.000Z
|
2022-03-13T08:06:30.000Z
|
toolkit4nlp/optimizers.py
|
xv44586/toolkit4nlp
|
0ca8c45efe4ad4c6dc20b47016a13326aadcd0bd
|
[
"Apache-2.0"
] | 14
|
2020-11-24T04:26:26.000Z
|
2021-09-13T02:44:51.000Z
|
toolkit4nlp/optimizers.py
|
xv44586/toolkit4nlp
|
0ca8c45efe4ad4c6dc20b47016a13326aadcd0bd
|
[
"Apache-2.0"
] | 17
|
2020-09-04T07:24:24.000Z
|
2021-11-19T06:35:18.000Z
|
# -*- coding: utf-8 -*-
# @Date : 2020/7/6
# @Author : mingming.xu
# @Email : xv44586@gmail.com
# @File : optimizers.py
import re
import numpy as np
import tensorflow as tf
from keras.optimizers import *
from toolkit4nlp.backend import keras, K, is_tf_keras, piecewise_linear
from toolkit4nlp.utils import *
class Adam(keras.optimizers.Optimizer):
'''
w_t = w_t-1 - update_t
update_t = lr * m_t / sqrt(v_t)
m_t = beta_1 * m_t-1 + (1 - beta_1) * g_t
v_t = beta_2 * v_t-1 + (1 - beta_2) * g_t**2
Because the moment estimates are biased towards 0 during the early steps, a bias
correction is usually applied:
m_t_hat = m_t / (1 - beta_1**t)
v_t_hat = v_t / (1 - beta_2**t)
ref:
- [zhihu-zhuanlan](
https://zhuanlan.zhihu.com/p/32230623)
- [Adam - A Method for Stochastic Optimization](
https://arxiv.org/abs/1412.6980v8)
- [On the Convergence of Adam and Beyond](
https://openreview.net/forum?id=ryQu7f-RZ)
'''
def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.99, epsilon=1e-6, bias_correct=True, **kwargs):
kwargs['name'] = kwargs.get('name', 'Adam')
super(Adam, self).__init__(**kwargs)
self._set_hyper('learning_rate', learning_rate)
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self.epsilon = epsilon or K.epsilon()
self.bias_correct = bias_correct
def _create_slots(self, var_list):
for var in var_list:
self.add_slot(var, 'm')
self.add_slot(var, 'v')
def _resource_apply(self, grad, var, indices=None):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
beta_1_t = self._get_hyper('beta_1', var_dtype)
beta_2_t = self._get_hyper('beta_2', var_dtype)
local_step = K.cast(self.iterations + 1, var_dtype)
beta_1_power = K.pow(beta_1_t, local_step)
beta_2_power = K.pow(beta_2_t, local_step)
# update
if indices is None:
m_t = K.update(m, beta_1_t * m + (1 - beta_1_t) * grad)
v_t = K.update(v, beta_2_t * v + (1 - beta_2_t) * grad ** 2)
else:
mv_ops = [K.update(m, beta_1_t * m), K.update(v, beta_2_t * v)]
with tf.control_dependencies(mv_ops):
m_t = self._resource_scatter_add(m, indices, (1 - beta_1_t) * grad)
v_t = self._resource_scatter_add(v, indices, (1 - beta_2_t) * grad ** 2)
#
with tf.control_dependencies([m_t, v_t]):
if self.bias_correct:
# standard Adam bias correction divides by (1 - beta^t)
m_t = m_t / (1 - beta_1_power)
v_t = v_t / (1 - beta_2_power)
var_t = var - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
return K.update(var, var_t)
def _resource_apply_dense(self, grad, var):
return self._resource_apply(grad, var)
def _resource_apply_sparse(self, grad, var, indices):
return self._resource_apply(grad, var, indices)
def get_config(self):
config = {
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'beta_1': self._serialize_hyperparameter('beta_1'),
'beta_2': self._serialize_hyperparameter('beta_2'),
'epsilon': self.epsilon
}
basic_config = super(Adam, self).get_config()
return dict(list(basic_config.items()) + list(config.items()))
class AdaBelief(keras.optimizers.Optimizer):
"""AdaBelief optimizer.
Default parameters follow those provided in the original paper.
# Arguments
learning_rate: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
amsgrad: boolean. Whether to apply the AMSGrad variant of this
algorithm from the paper "On the Convergence of Adam and
Beyond".
# References
- [Adam - A Method for Stochastic Optimization](
https://arxiv.org/abs/1412.6980v8)
- [AdaBelief Optimizer: Adapting Stepsizes by the Belief in Observed Gradients](
https://arxiv.org/pdf/2010.07468.pdf)
"""
def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
amsgrad=False, **kwargs):
self.initial_decay = kwargs.pop('decay', 0.0)
self.epsilon = kwargs.pop('epsilon', K.epsilon())
learning_rate = kwargs.pop('lr', learning_rate)
super(AdaBelief, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.learning_rate = K.variable(learning_rate, name='learning_rate')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(self.initial_decay, name='decay')
self.amsgrad = amsgrad
@K.symbolic
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.learning_rate
if self.initial_decay > 0:
lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
t = K.cast(self.iterations, K.floatx()) + 1
lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
(1. - K.pow(self.beta_1, t)))
ms = [K.zeros(K.int_shape(p),
dtype=K.dtype(p),
name='m_' + str(i))
for (i, p) in enumerate(params)]
vs = [K.zeros(K.int_shape(p),
dtype=K.dtype(p),
name='v_' + str(i))
for (i, p) in enumerate(params)]
if self.amsgrad:
vhats = [K.zeros(K.int_shape(p),
dtype=K.dtype(p),
name='vhat_' + str(i))
for (i, p) in enumerate(params)]
else:
vhats = [K.zeros(1, name='vhat_' + str(i))
for i in range(len(params))]
self.weights = [self.iterations] + ms + vs + vhats
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g - m_t)
if self.amsgrad:
vhat_t = K.maximum(vhat, v_t)
p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
self.updates.append(K.update(vhat, vhat_t))
else:
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'learning_rate': float(K.get_value(self.learning_rate)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad}
base_config = super(AdaBelief, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class AdaBeliefTf(keras.optimizers.Optimizer):
"""tf.keras 版
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
bias_correct=True,
**kwargs
):
kwargs['name'] = kwargs.get('name') or 'AdaBelief'
super(AdaBeliefTf, self).__init__(**kwargs)
self._set_hyper('learning_rate', learning_rate)
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self.epsilon = epsilon or K.epsilon()
self.bias_correct = bias_correct
def _create_slots(self, var_list):
for var in var_list:
self.add_slot(var, 'm')
self.add_slot(var, 'v')
def _resource_apply(self, grad, var, indices=None):
# prepare variables
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
beta_1_t = self._get_hyper('beta_1', var_dtype)
beta_2_t = self._get_hyper('beta_2', var_dtype)
epsilon_t = K.cast(self.epsilon, var_dtype)
local_step = K.cast(self.iterations + 1, var_dtype)
beta_1_t_power = K.pow(beta_1_t, local_step)
beta_2_t_power = K.pow(beta_2_t, local_step)
# update rules
m_t = K.update(m, beta_1_t * m + (1 - beta_1_t) * grad)
v_t = K.update(v, beta_2_t * v + (1 - beta_2_t) * (grad - m_t) ** 2)
# return the update op
with tf.control_dependencies([m_t, v_t]):
if self.bias_correct:
m_t = m_t / (1.0 - beta_1_t_power)
v_t = v_t / (1.0 - beta_2_t_power)
var_t = var - lr_t * m_t / (K.sqrt(v_t) + epsilon_t)
return K.update(var, var_t)
def _resource_apply_dense(self, grad, var):
return self._resource_apply(grad, var)
def _resource_apply_sparse(self, grad, var, indices):
grad = tf.IndexedSlices(grad, indices, K.shape(var))
grad = tf.convert_to_tensor(grad)
return self._resource_apply_dense(grad, var)
def get_config(self):
config = {
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'beta_1': self._serialize_hyperparameter('beta_1'),
'beta_2': self._serialize_hyperparameter('beta_2'),
'epsilon': self.epsilon,
'bias_correct': self.bias_correct,
}
base_config = super(AdaBeliefTf, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def export_to_custom_objects(extend_with_func):
def new_extend_with_func(BaseOptimizer, name=None):
NewOptimizer = extend_with_func(BaseOptimizer)
if name:
NewOptimizer.__name__ = name
name = NewOptimizer.__name__
keras.utils.get_custom_objects()[name] = NewOptimizer
return NewOptimizer
return new_extend_with_func
@export_to_custom_objects
def extend_with_gradient_accumulation_tf2(BaseOptimizer):
class NewOptimizer(BaseOptimizer):
@insert_arguments(grad_accum_steps=2)
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
def _create_slots(self, var_list):
super(NewOptimizer, self)._create_slots(var_list)
for var in var_list:
self.add_slot(var, 'gradient_accumulation')
def _resource_apply(self, grad, var, indices=None):
"""interation % acc_steps==0 then update else accumulate
思路是先判断是否累计了 acc_steps,如果没有,则update时保持原样,
并累计当前梯度,否则,更新梯度并将累计的梯度置零
"""
# whether to apply the update on this step
cond = K.equal(self.iterations % self.grad_accum_steps, 0)
# fetch the gradient accumulator
gradient_accumulation = self.get_slot(var, 'gradient_accumulation')
# average the accumulated gradient
gradient_t = gradient_accumulation / self.grad_accum_steps
old_update = K.update
# only apply the real update when the condition holds
def new_update(x, new_x):
new_x = K.switch(cond, new_x, x)
return old_update(x, new_x)
K.update = new_update
op = super(NewOptimizer, self)._resource_apply(gradient_t, var)
K.update = old_update
# reset the accumulator to zero when the condition holds
with tf.control_dependencies([op]):
gradient_t = K.switch(cond, K.zeros_like(gradient_accumulation), gradient_accumulation)
with tf.control_dependencies([K.update(gradient_accumulation, gradient_t)]):
if indices is None:
gradient_t = K.update(gradient_accumulation, gradient_accumulation + grad)
else:
gradient_t = self._resource_scatter_add(gradient_accumulation, indices, grad)
return gradient_t
def get_config(self):
config = super(NewOptimizer, self).get_config()
config.update({'grad_accum_steps': self.grad_accum_steps})
return config
return NewOptimizer
@export_to_custom_objects
def extend_with_gradient_accumulation(BaseOptimizer):
"""原生keras版"""
class NewOptimizer(BaseOptimizer):
@insert_arguments(grad_accum_steps=2)
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
self._first_grad = True # first grad
@K.symbolic
def get_updates(self, loss, params):
# whether to apply the update on this step
cond = K.equal(self.iterations % self.grad_accum_steps, 0)
cond = K.cast(cond, K.floatx())
# get the gradients
grads = self.get_gradients(loss, params)
self.accum_grads = [K.zeros(
shape=K.int_shape(p), dtype=K.dtype(p), name='accum_grad_{}'.format(i)) for i, p in enumerate(params)]
old_update = K.update
def new_update(x, new_x):
new_x = cond * new_x + (1 - cond) * x
return old_update(x, new_x)
K.update = new_update
updates = super(NewOptimizer, self).get_updates(loss, params)
K.update = old_update
# accumulate the gradients
with K.control_dependencies(updates):
acc_updates = [
K.update(ag, g + (1 - cond) * ag) for ag, g in zip(self.accum_grads, grads)
]
return acc_updates
def get_gradients(self, loss, params):
if self._first_grad:
self._first_grad = False
return super(NewOptimizer, self).get_gradients(loss, params)
else:
return [ag / self.grad_accum_steps for ag in self.accum_grads]
def get_config(self):
config = {'grad_accum_steps': self.grad_accum_steps}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
@export_to_custom_objects
def extend_with_weight_decay_tf2(BaseOptimizer):
"""增加权重衰减
ref: [DECOUPLED WEIGHT DECAY REGULARIZATION](https://arxiv.org/pdf/1711.05101.pdf)
大多数框架在实现L2 regularization时是使用weight decay,然而L2 regularization 与 weight decay 在标准 SGD下是等价的,
但是当使用Adam时,缺不是等价的,原因是:
g_t = ▽f_t-1 + λθ,其中λθ是 L2 loss的梯度
m_t = β_1 * m_t-1 + (1 - β_1) * g_t
v_t = β_2 * v_t-2 + (1 - β_2) * g_t^2
θ_t = θ_t - 1 - α(m_t / v_t^0.5 + ε)
代入上面三式后带有θ的项为 α(λθ/ v_t^0.5 + ε),这导致梯度变化越大的方向,权重约束越小,这显然不合理。
L2 regularization应该是各向同性。一种改进这个问题的方法就是将梯度下降与weight decay 解耦,
不在求梯度时代入weight decay ,而是在整个梯度下降完成后,加入weight decay,这样将梯度下降与weight decay解耦,
达到L2 regularization效果
"""
class NewOptimizer(BaseOptimizer):
@insert_arguments(weight_decay_rate=0.01, exclude_from_weight_decay=[])
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
def _resource_apply(self, grad, var, indices=None):
old_update = K.update
def new_update(x, new_x):
if x is var and self._do_use_weight_decay(x):
lr_t = self._decayed_lr(x.dtype.base_dtype)
new_x = new_x - lr_t * self.weight_decay_rate * x
return old_update(x, new_x)
K.update = new_update
op = super(NewOptimizer, self)._resource_apply(grad, var, indices)
K.update = old_update
return op
def _do_use_weight_decay(self, param):
"""Whether to use L2 weight decay for `param_name`."""
param_name = param.name
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def get_config(self):
config = super(NewOptimizer, self).get_config()
config.update({'weight_decay_rate': self.weight_decay_rate,
'exclude_from_weight_decay': self.exclude_from_weight_decay})
return config
return NewOptimizer
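# Usage sketch (illustrative only; 'AdamW' and the values below are assumptions
# for the example):
#     AdamW = extend_with_weight_decay_tf2(keras.optimizers.Adam, name='AdamW')
#     optimizer = AdamW(learning_rate=1e-3, weight_decay_rate=0.01,
#                       exclude_from_weight_decay=['bias', 'Norm'])
# The decay is applied to the weights after the Adam step (decoupled from the
# gradient), matching the AdamW scheme from the paper cited in the docstring.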
@export_to_custom_objects
def extend_with_weight_decay(BaseOptimizer):
"""原生keras版"""
class NewOptimizer(BaseOptimizer):
@insert_arguments(weight_decay_rate=0.01, exclude_from_weight_decay=[])
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
if not hasattr(self, 'learning_rate'):
self.learning_rate = self.lr
@K.symbolic
        def get_updates(self, loss, params):
old_update = K.update
def new_update(x, new_x):
if x in params and self._do_use_weight_decay(x):
new_x = new_x - self.learning_rate * self.weight_decay_rate * x
return old_update(x, new_x)
K.update = new_update
            updates = super(NewOptimizer, self).get_updates(loss, params)
K.update = old_update
return updates
def _do_use_weight_decay(self, param):
"""Whether to use L2 weight decay for `param_name`."""
param_name = param.name
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def get_config(self):
config = {'weight_decay_rate': self.weight_decay_rate,
'exclude_from_weight_decay': self.exclude_from_weight_decay}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
@export_to_custom_objects
def extend_with_piecewise_linear_lr_tf2(BaseOptimizer):
"""
分段线性学习率,使用场景如 warmup
"""
class NewOptimzer(BaseOptimizer):
"""
schedule 是一个{ point: value} 的字典,如 {10: 1, 20: 0.5}代表从 0 到 10 步 lr 从 0 线性增加到 100% ,
然后从 10 到 20 线性降低到 50%,之后一直保持 50% 不变
"""
@insert_arguments(lr_schedule={0: 1})
def __init__(self, *args, **kwargs):
super(NewOptimzer, self).__init__(*args, **kwargs)
self.lr_schedule = {int(t): v for t, v in self.lr_schedule.items()}
def _decayed_lr(self, var_dtypes):
"""重写获取decayed learning rate 方法"""
lr_t = super(NewOptimzer, self)._decayed_lr(var_dtypes)
lr_rate = piecewise_linear(self.iterations, self.lr_schedule)
return lr_t * K.cast(lr_rate, var_dtypes)
def get_config(self):
config = super(NewOptimzer, self).get_config()
config.update({'lr_schedule': self.lr_schedule})
return config
return NewOptimzer
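# Usage sketch (illustrative only; the name and schedule numbers are arbitrary examples):
#     WarmupAdam = extend_with_piecewise_linear_lr_tf2(keras.optimizers.Adam, name='WarmupAdam')
#     optimizer = WarmupAdam(learning_rate=1e-3, lr_schedule={1000: 1, 10000: 0.1})
# The learning rate warms up linearly from 0 to 100% over the first 1000 steps,
# then decays linearly to 10% by step 10000 and stays constant afterwards.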
@export_to_custom_objects
def extend_with_piecewise_linear_lr(BaseOptimizer):
"""原生keras版"""
class NewOptimizer(BaseOptimizer):
@insert_arguments(lr_schedule={0:1})
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
self.lr_schedule = {int(t): v for t, v in self.lr_schedule.items()}
@K.symbolic
        def get_updates(self, loss, params):
            # get the lr multiplier for the current step
lr_rate_t = piecewise_linear(self.iterations, self.lr_schedule)
old_update = K.update
def new_update(x, new_x):
                new_x = x + (new_x - x) * lr_rate_t  # scale the update by the current lr multiplier
return old_update(x, new_x)
K.update = new_update
            updates = super(NewOptimizer, self).get_updates(loss, params)
K.update = old_update
return updates
def get_config(self):
config = {'lr_schedule': self.lr_schedule}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
# keras or tf.keras
if is_tf_keras:
extend_with_piecewise_linear_lr = extend_with_piecewise_linear_lr_tf2
extend_with_gradient_accumulation = extend_with_gradient_accumulation_tf2
extend_with_weight_decay = extend_with_weight_decay_tf2
AdaBelief = AdaBeliefTf
else:
Adam = keras.optimizers.Adam
custom_objects = {
'Adam': Adam,
'AdaBelief': AdaBelief,
}
keras.utils.get_custom_objects().update(custom_objects)
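# Combined usage sketch (illustrative only; every name on the left-hand side is an
# assumption for the example). The extenders return ordinary optimizer classes, so
# they can be stacked:
#     AdamW = extend_with_weight_decay(Adam, 'AdamW')
#     AdamWLR = extend_with_piecewise_linear_lr(AdamW, 'AdamWLR')
#     optimizer = AdamWLR(weight_decay_rate=0.01, lr_schedule={1000: 1})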
| 37.273852
| 118
| 0.593592
| 18,958
| 0.865978
| 0
| 0
| 12,931
| 0.590672
| 0
| 0
| 4,305
| 0.196647
|
3632e12e345819f464e0f6feced15ba246770c00
| 5,832
|
py
|
Python
|
quadpy/triangle/_laursen_gellert.py
|
dariusarnold/quadpy
|
9dc7c1ebff99d15ae57ed9195cde94d97a599be8
|
[
"MIT"
] | null | null | null |
quadpy/triangle/_laursen_gellert.py
|
dariusarnold/quadpy
|
9dc7c1ebff99d15ae57ed9195cde94d97a599be8
|
[
"MIT"
] | null | null | null |
quadpy/triangle/_laursen_gellert.py
|
dariusarnold/quadpy
|
9dc7c1ebff99d15ae57ed9195cde94d97a599be8
|
[
"MIT"
] | null | null | null |
from sympy import Rational as frac
from ..helpers import article
from ._helpers import TriangleScheme, concat, s1, s2, s3
citation = article(
authors=["M.E. Laursen", "M. Gellert"],
title="Some criteria for numerically integrated matrices and quadrature formulas for triangles",
journal="International Journal for Numerical Methods in Engineering",
volume="12",
number="1",
year="1978",
pages="67–76",
url="https://doi.org/10.1002/nme.1620120107",
)
def laursen_gellert_01():
weights, points = s3(1)
return TriangleScheme("Laursen-Gellert 1", weights, points, 1, citation)
def laursen_gellert_02a():
weights, points = s2([frac(1, 3), frac(1, 6)])
return TriangleScheme("Laursen-Gellert 2a", weights, points, 2, citation)
def laursen_gellert_02b():
weights, points = s2([frac(1, 3), frac(1, 2)])
return TriangleScheme("Laursen-Gellert 2b", weights, points, 2, citation)
def laursen_gellert_03():
weights, points = concat(s3(-frac(9, 16)), s2([frac(25, 48), frac(1, 5)]))
return TriangleScheme("Laursen-Gellert 3", weights, points, 3, citation)
def laursen_gellert_04():
weights, points = s1([1.0 / 6.0, 0.659027622374092, 0.231933368553031])
return TriangleScheme("Laursen-Gellert 4", weights, points, 3, citation)
def laursen_gellert_05():
weights, points = s2(
[0.109951743655322, 0.091576213509771], [0.223381589678011, 0.445948490915965]
)
return TriangleScheme("Laursen-Gellert 5", weights, points, 4, citation)
def laursen_gellert_06():
weights, points = concat(
s3(3.0 / 8.0), s1([5.0 / 48.0, 0.736712498968435, 0.237932366472434])
)
return TriangleScheme("Laursen-Gellert 6", weights, points, 4, citation)
def laursen_gellert_07():
weights, points = concat(
s3(9.0 / 40.0),
s2(
[0.125939180544827, 0.101286507323456],
[0.132394152788506, 0.470142064105115],
),
)
return TriangleScheme("Laursen-Gellert 7", weights, points, 5, citation)
def laursen_gellert_08():
weights, points = concat(
s2([0.205950504760887, 0.437525248383384]),
s1([0.063691414286223, 0.797112651860071, 0.165409927389841]),
)
return TriangleScheme("Laursen-Gellert 8", weights, points, 5, citation)
def laursen_gellert_09():
weights, points = concat(
s2(
[0.050844906370207, 0.063089014491502],
[0.116786275726379, 0.249286745170910],
),
s1([0.082851075618374, 0.636502499121399, 0.310352451033785]),
)
return TriangleScheme("Laursen-Gellert 9", weights, points, 6, citation)
def laursen_gellert_10():
weights, points = concat(
s3(-0.149570044467670),
s2(
[+0.175615257433204, 0.260345966079038],
[+0.053347235608839, 0.065130102902216],
),
s1([+0.077113760890257, 0.638444188569809, 0.312865496004875]),
)
return TriangleScheme("Laursen-Gellert 10", weights, points, 7, citation)
def laursen_gellert_11():
weights, points = concat(
s2([0.053077801790233, 0.064930513159165]),
s1(
[0.070853083692136, 0.284575584249173, 0.517039939069325],
[0.069274682079415, 0.313559184384932, 0.043863471792371],
),
)
return TriangleScheme("Laursen-Gellert 11", weights, points, 7, citation)
def laursen_gellert_12():
weights, points = concat(
s3(0.144315607677787),
s2(
[0.103217370534718, 0.170569307751761],
[0.032458497623198, 0.050547228317031],
[0.095091634267284, 0.459292588292723],
),
s1([0.027230314174435, 0.008394777409958, 0.263112829634638]),
)
return TriangleScheme("Laursen-Gellert 12", weights, points, 8, citation)
def laursen_gellert_13():
weights, points = concat(
s3(0.097135796282799),
s2(
[0.031334700227139, 0.489682519198738],
[0.077827541004774, 0.437089591492937],
[0.079647738927210, 0.188203535619033],
[0.025577675658698, 0.044729513394453],
),
s1([0.043283539377289, 0.036838412054736, 0.221962989160766]),
)
return TriangleScheme("Laursen-Gellert 13", weights, points, 9, citation)
def laursen_gellert_14():
weights, points = concat(
s2(
[0.051617202569021, 0.481519834783311],
[0.094080073458356, 0.403603979817940],
[0.025993571032320, 0.045189009784377],
),
s1(
[0.045469538047619, 0.136991201264904, 0.218290070971381],
[0.035351705089199, 0.030424361728820, 0.222063165537318],
),
)
return TriangleScheme("Laursen-Gellert 14", weights, points, 9, citation)
def laursen_gellert_15a():
weights, points = concat(
s3(0.079894504741240),
s2(
[0.071123802232377, 0.425086210602091],
[0.008223818690464, 0.023308867510000],
),
s1(
[0.045430592296170, 0.147925626209534, 0.223766973576973],
[0.037359856234305, 0.029946031954171, 0.358740141864431],
[0.030886656884564, 0.035632559587504, 0.143295370426867],
),
)
return TriangleScheme("Laursen-Gellert 15a", weights, points, 10, citation)
def laursen_gellert_15b():
weights, points = concat(
s3(0.081743329146286),
s2(
[0.045957963604745, 0.142161101056564],
[0.013352968813150, 0.032055373216944],
),
s1(
[0.063904906396424, 0.148132885783821, 0.321812995288835],
[0.034184648162959, 0.029619889488730, 0.369146781827811],
[0.025297757707288, 0.028367665339938, 0.163701733737182],
),
)
return TriangleScheme("Laursen-Gellert 15b", weights, points, 10, citation)
| 32.043956
| 100
| 0.641632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 571
| 0.097875
|
36359877c7a4f6573f92718849e22bc0b0b933eb
| 624
|
py
|
Python
|
python2/examples/tutorial_threadednotifier.py
|
openEuler-BaseService/pyinotify
|
d6c8b832177945106901fb6c0cd5ae7d54df8247
|
[
"MIT"
] | 1,509
|
2015-01-04T01:20:06.000Z
|
2022-03-29T08:06:41.000Z
|
python2/examples/tutorial_threadednotifier.py
|
openEuler-BaseService/pyinotify
|
d6c8b832177945106901fb6c0cd5ae7d54df8247
|
[
"MIT"
] | 98
|
2015-01-09T20:58:57.000Z
|
2022-03-29T11:53:44.000Z
|
python2/examples/tutorial_threadednotifier.py
|
openEuler-BaseService/pyinotify
|
d6c8b832177945106901fb6c0cd5ae7d54df8247
|
[
"MIT"
] | 333
|
2015-01-02T09:22:01.000Z
|
2022-03-24T01:51:40.000Z
|
# ThreadedNotifier example from tutorial
#
# See: http://github.com/seb-m/pyinotify/wiki/Tutorial
#
import pyinotify
wm = pyinotify.WatchManager() # Watch Manager
mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE # watched events
class EventHandler(pyinotify.ProcessEvent):
def process_IN_CREATE(self, event):
print "Creating:", event.pathname
def process_IN_DELETE(self, event):
print "Removing:", event.pathname
#log.setLevel(10)
notifier = pyinotify.ThreadedNotifier(wm, EventHandler())
notifier.start()
wdd = wm.add_watch('/tmp', mask, rec=True)
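# In a real program the notifier thread would keep running here, printing a line for
# every file created or deleted under /tmp; the watch is removed and the notifier
# stopped immediately below only to demonstrate the API.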
wm.rm_watch(wdd.values())
notifier.stop()
| 24
| 66
| 0.735577
| 208
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 172
| 0.275641
|
36360d07dd0f1e6bcc68b6986125359b768850eb
| 885
|
py
|
Python
|
VersionMonitorDeamonForPy/deamon/ZTest.py
|
xblia/Upgrade-service-for-java-application
|
6118cb270daba5d6511f41a2b3f0784c5a444c17
|
[
"Apache-2.0"
] | null | null | null |
VersionMonitorDeamonForPy/deamon/ZTest.py
|
xblia/Upgrade-service-for-java-application
|
6118cb270daba5d6511f41a2b3f0784c5a444c17
|
[
"Apache-2.0"
] | null | null | null |
VersionMonitorDeamonForPy/deamon/ZTest.py
|
xblia/Upgrade-service-for-java-application
|
6118cb270daba5d6511f41a2b3f0784c5a444c17
|
[
"Apache-2.0"
] | null | null | null |
#coding=utf-8
'''/*
* Copyright 2015 lixiaobo
*
* VersionUpgrade project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/'''
'''
Created on December 30, 2015
@author: xiaobolx
'''
import os
if __name__ == '__main__':
os.rename(r"D:\eclipse_workspace\VersionMonitorDeamonForPy\build\aaa", r"D:\eclipse_workspace\VersionMonitorDeamonForPy\build\exe.win32xxxx")
| 34.038462
| 145
| 0.748023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 842
| 0.945006
|
3636162e87cf5572646ae4d4770a37dc7c29083e
| 9,158
|
py
|
Python
|
ROBOTIS/DynamixelSDK/python/tests/protocol2_0/sync_read_write.py
|
andy-Chien/timda_dual_arm
|
94170d8889218ea0dc4e6031dcbbf59b7e37e70c
|
[
"MIT"
] | 3
|
2020-02-17T12:56:22.000Z
|
2020-09-30T11:17:03.000Z
|
ROBOTIS/DynamixelSDK/python/tests/protocol2_0/sync_read_write.py
|
andy-Chien/timda_dual_arm
|
94170d8889218ea0dc4e6031dcbbf59b7e37e70c
|
[
"MIT"
] | 12
|
2019-05-14T12:24:02.000Z
|
2020-03-24T14:00:48.000Z
|
ROBOTIS/DynamixelSDK/python/tests/protocol2_0/sync_read_write.py
|
andy-Chien/timda_dual_arm
|
94170d8889218ea0dc4e6031dcbbf59b7e37e70c
|
[
"MIT"
] | 9
|
2021-02-01T08:20:53.000Z
|
2021-09-17T05:52:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright 2017 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Author: Ryu Woon Jung (Leon)
#
# ********* Sync Read and Sync Write Example *********
#
#
# Available Dynamixel model on this example : All models using Protocol 2.0
# This example is tested with two Dynamixel PRO 54-200, and a USB2DYNAMIXEL
# Be sure that Dynamixel PRO properties are already set as %% ID : 1 / Baudnum : 1 (Baudrate : 57600)
#
import os
if os.name == 'nt':
import msvcrt
def getch():
return msvcrt.getch().decode()
else:
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
from dynamixel_sdk import * # Uses Dynamixel SDK library
# Control table address
ADDR_PRO_TORQUE_ENABLE = 64 # Control table address is different in Dynamixel model
ADDR_PRO_GOAL_POSITION = 116
ADDR_PRO_PRESENT_POSITION = 132
# Data Byte Length
LEN_PRO_GOAL_POSITION = 4
LEN_PRO_PRESENT_POSITION = 4
# Protocol version
PROTOCOL_VERSION = 2.0 # See which protocol version is used in the Dynamixel
# Default setting
DXL1_ID = 1 # Dynamixel#1 ID : 1
DXL2_ID                     = 2                 # Dynamixel#2 ID : 2
BAUDRATE = 57600 # Dynamixel default baudrate : 57600
DEVICENAME = '/dev/ttyUSB0' # Check which port is being used on your controller
# ex) Windows: "COM1" Linux: "/dev/ttyUSB0" Mac: "/dev/tty.usbserial-*"
TORQUE_ENABLE = 1 # Value for enabling the torque
TORQUE_DISABLE = 0 # Value for disabling the torque
DXL_MINIMUM_POSITION_VALUE = 100 # Dynamixel will rotate between this value
DXL_MAXIMUM_POSITION_VALUE = 4000 # and this value (note that the Dynamixel would not move when the position value is out of movable range. Check e-manual about the range of the Dynamixel you use.)
DXL_MOVING_STATUS_THRESHOLD = 20 # Dynamixel moving status threshold
index = 0
dxl_goal_position = [DXL_MINIMUM_POSITION_VALUE, DXL_MAXIMUM_POSITION_VALUE] # Goal position
# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
portHandler = PortHandler(DEVICENAME)
# Initialize PacketHandler instance
# Set the protocol version
# Get methods and members of Protocol1PacketHandler or Protocol2PacketHandler
packetHandler = PacketHandler(PROTOCOL_VERSION)
# Initialize GroupSyncWrite instance
groupSyncWrite = GroupSyncWrite(portHandler, packetHandler, ADDR_PRO_GOAL_POSITION, LEN_PRO_GOAL_POSITION)
# Initialize GroupSyncRead instace for Present Position
groupSyncRead = GroupSyncRead(portHandler, packetHandler, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
# Open port
if portHandler.openPort():
print("Succeeded to open the port")
else:
print("Failed to open the port")
print("Press any key to terminate...")
getch()
quit()
# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
print("Succeeded to change the baudrate")
else:
print("Failed to change the baudrate")
print("Press any key to terminate...")
getch()
quit()
# Enable Dynamixel#1 Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL1_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
else:
print("Dynamixel#%d has been successfully connected" % DXL1_ID)
# Enable Dynamixel#2 Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL2_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
else:
print("Dynamixel#%d has been successfully connected" % DXL2_ID)
# Add parameter storage for Dynamixel#1 present position value
dxl_addparam_result = groupSyncRead.addParam(DXL1_ID)
if dxl_addparam_result != True:
print("[ID:%03d] groupSyncRead addparam failed" % DXL1_ID)
quit()
# Add parameter storage for Dynamixel#2 present position value
dxl_addparam_result = groupSyncRead.addParam(DXL2_ID)
if dxl_addparam_result != True:
print("[ID:%03d] groupSyncRead addparam failed" % DXL2_ID)
quit()
while 1:
print("Press any key to continue! (or press ESC to quit!)")
if getch() == chr(0x1b):
break
# Allocate goal position value into byte array
param_goal_position = [DXL_LOBYTE(DXL_LOWORD(dxl_goal_position[index])), DXL_HIBYTE(DXL_LOWORD(dxl_goal_position[index])), DXL_LOBYTE(DXL_HIWORD(dxl_goal_position[index])), DXL_HIBYTE(DXL_HIWORD(dxl_goal_position[index]))]
# Add Dynamixel#1 goal position value to the Syncwrite parameter storage
dxl_addparam_result = groupSyncWrite.addParam(DXL1_ID, param_goal_position)
if dxl_addparam_result != True:
print("[ID:%03d] groupSyncWrite addparam failed" % DXL1_ID)
quit()
# Add Dynamixel#2 goal position value to the Syncwrite parameter storage
dxl_addparam_result = groupSyncWrite.addParam(DXL2_ID, param_goal_position)
if dxl_addparam_result != True:
print("[ID:%03d] groupSyncWrite addparam failed" % DXL2_ID)
quit()
# Syncwrite goal position
dxl_comm_result = groupSyncWrite.txPacket()
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
# Clear syncwrite parameter storage
groupSyncWrite.clearParam()
while 1:
# Syncread present position
dxl_comm_result = groupSyncRead.txRxPacket()
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
# Check if groupsyncread data of Dynamixel#1 is available
dxl_getdata_result = groupSyncRead.isAvailable(DXL1_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
if dxl_getdata_result != True:
print("[ID:%03d] groupSyncRead getdata failed" % DXL1_ID)
quit()
# Check if groupsyncread data of Dynamixel#2 is available
dxl_getdata_result = groupSyncRead.isAvailable(DXL2_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
if dxl_getdata_result != True:
print("[ID:%03d] groupSyncRead getdata failed" % DXL2_ID)
quit()
# Get Dynamixel#1 present position value
dxl1_present_position = groupSyncRead.getData(DXL1_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
# Get Dynamixel#2 present position value
dxl2_present_position = groupSyncRead.getData(DXL2_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
print("[ID:%03d] GoalPos:%03d PresPos:%03d\t[ID:%03d] GoalPos:%03d PresPos:%03d" % (DXL1_ID, dxl_goal_position[index], dxl1_present_position, DXL2_ID, dxl_goal_position[index], dxl2_present_position))
if not ((abs(dxl_goal_position[index] - dxl1_present_position) > DXL_MOVING_STATUS_THRESHOLD) and (abs(dxl_goal_position[index] - dxl2_present_position) > DXL_MOVING_STATUS_THRESHOLD)):
break
# Change goal position
if index == 0:
index = 1
else:
index = 0
# Clear syncread parameter storage
groupSyncRead.clearParam()
# Disable Dynamixel#1 Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL1_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
# Disable Dynamixel#2 Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL2_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
# Close port
portHandler.closePort()
| 40.166667
| 226
| 0.700371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,706
| 0.404674
|
3636470ba1388bdc81e02a4d210d625e92578097
| 2,063
|
py
|
Python
|
models/globalsenti.py
|
movabo/newstsc
|
dcf0cff31c0e463c9a96cdaa24e9b662ed53f7ed
|
[
"MIT"
] | 3
|
2021-02-28T19:14:49.000Z
|
2022-03-29T12:10:14.000Z
|
models/globalsenti.py
|
movabo/newstsc
|
dcf0cff31c0e463c9a96cdaa24e9b662ed53f7ed
|
[
"MIT"
] | null | null | null |
models/globalsenti.py
|
movabo/newstsc
|
dcf0cff31c0e463c9a96cdaa24e9b662ed53f7ed
|
[
"MIT"
] | 1
|
2021-05-13T10:27:12.000Z
|
2021-05-13T10:27:12.000Z
|
# -*- coding: utf-8 -*-
# file: lcf_bert.py
# author: yangheng <yangheng@m.scnu.edu.cn>
# Copyright (C) 2019. All Rights Reserved.
# The code is based on repository: https://github.com/yangheng95/LCF-ABSA
import torch
import torch.nn as nn
from models.lcf import LCF_BERT
class Global_LCF(nn.Module):
def __init__(self, bert, opt):
super(Global_LCF, self).__init__()
self.max_num_components = 20
self.lcf = LCF_BERT(bert, opt, is_global_configuration=True)
self.linear_merge_remainder_comps = nn.Linear(opt.bert_dim * self.max_num_components, opt.bert_dim)
self.linear_merge_lcf_and_remainder = nn.Linear(opt.bert_dim * 2, opt.polarities_dim)
def _get_inputs_for_component(self, inputs, component_index):
assert component_index < self.max_num_components, "component_index({}) >= max_num_components({})".format(
component_index, self.max_num_components)
return [inputs[component_index * 4], inputs[component_index * 4 + 1], inputs[component_index * 4 + 2], inputs[
component_index * 4 + 3]]
def forward(self, inputs):
# this is the main component, which we want to classify
main_comp_inputs = self._get_inputs_for_component(inputs, 0)
main_lcf_output = self.lcf(main_comp_inputs)
# process remaining document components, which we don't want to classify but use as context
# TODO maybe disable gradient in these components? or at least in BERT in them?
lst_remainder_comp_outputs = []
for i in range(1, self.max_num_components):
cur_comp_inputs = self._get_inputs_for_component(inputs, i)
cur_comp_output = self.lcf(cur_comp_inputs)
lst_remainder_comp_outputs.append(cur_comp_output)
remainder_comp_outputs = torch.cat(lst_remainder_comp_outputs, dim=-1)
remainder_merged = self.linear_merge_remainder_comps(remainder_comp_outputs)
        dense_out = self.linear_merge_lcf_and_remainder(
            torch.cat((main_lcf_output, remainder_merged), dim=-1))
return dense_out
| 38.924528
| 118
| 0.713039
| 1,783
| 0.864275
| 0
| 0
| 0
| 0
| 0
| 0
| 472
| 0.228793
|
36364741a2a1bcdc096a9a1390acb2038c00084b
| 10,351
|
py
|
Python
|
analysis/outflows/__init__.py
|
lconaboy/seren3
|
5a2ec80adf0d69664d2ee874f5ba12cc02d6c337
|
[
"CNRI-Python"
] | 1
|
2017-09-21T14:58:23.000Z
|
2017-09-21T14:58:23.000Z
|
analysis/outflows/__init__.py
|
lconaboy/seren3
|
5a2ec80adf0d69664d2ee874f5ba12cc02d6c337
|
[
"CNRI-Python"
] | 1
|
2020-09-09T08:52:43.000Z
|
2020-09-09T08:52:43.000Z
|
analysis/outflows/__init__.py
|
lconaboy/seren3
|
5a2ec80adf0d69664d2ee874f5ba12cc02d6c337
|
[
"CNRI-Python"
] | 1
|
2019-01-21T10:57:41.000Z
|
2019-01-21T10:57:41.000Z
|
def integrate_surface_flux(flux_map, r):
'''
Integrates a healpix surface flux to compute the total
net flux out of the sphere.
    r is the radius of the sphere (a SimArray with length units; converted to kpc below)
'''
import numpy as np
import healpy as hp
from scipy.integrate import trapz
from seren3.array import SimArray
if not ((isinstance(flux_map, SimArray) or isinstance(r, SimArray))):
raise Exception("Must pass SimArrays")
# Compute theta/phi
npix = len(flux_map)
nside = hp.npix2nside(npix)
# theta, phi = hp.pix2ang(nside, range(npix))
theta, phi = hp.pix2ang(nside, range(npix))
    r = r.in_units("kpc")  # convert r to kpc so its units match the flux map (Msol yr**-1 kpc**-2)
# Compute the integral
# integrand = np.zeros(len(theta))
ix = theta.argsort()
integrand = r**2 * np.sin(theta[ix]) * flux_map[ix]
# for i in range(len(theta)):
# th, ph = (theta[i], phi[i])
# integrand[i] = r**2 * np.sin(th) * flux_map[i] # mass_flux_radial function already deals with unit vev
# integrand = integrand[:, None] + np.zeros(len(phi)) # 2D over theta and phi
# I = trapz(trapz(integrand, phi), theta)
I = trapz(integrand, theta[ix]) * 2.*np.pi
return SimArray(I, "Msol yr**-1")
def dm_by_dt(subsnap, filt=False, **kwargs):
'''
Compute mass flux at the virial sphere
'''
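    # Renders the radial mass flux on a healpix sphere at rvir/4 and integrates it,
    # returning a (net, outflow-only, inflow-only) tuple in Msol/yr plus the flux map.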
import numpy as np
from seren3.array import SimArray
from seren3.analysis.render import render_spherical
reload(render_spherical)
rvir = SimArray(subsnap.region.radius, subsnap.info["unit_length"])
to_distance = rvir/4.
# to_distance = rvir
in_units = "kg s**-1 m**-2"
s = kwargs.pop("s", subsnap.pynbody_snapshot(filt=filt))
if "nside" not in kwargs:
kwargs["nside"] = 2**3
kwargs["radius"] = to_distance
kwargs["denoise"] = True
im = render_spherical.render_quantity(subsnap.g, "mass_flux_radial", s=s, in_units=in_units, out_units=in_units, **kwargs)
im.convert_units("Msol yr**-1 kpc**-2")
def _compute_flux(im, to_distance, direction=None):
im_tmp = im.copy()
ix = None
if ("out" == direction):
ix = np.where(im_tmp < 0)
im_tmp[ix] = 0
elif ("in" == direction):
ix = np.where(im_tmp > 0)
im_tmp[ix] = 0
else:
return integrate_surface_flux(im, to_distance)
return integrate_surface_flux(im_tmp, to_distance)
F = _compute_flux(im, to_distance)
F_plus = _compute_flux(im, to_distance, direction="out")
F_minus = _compute_flux(im, to_distance, direction="in")
return (F, F_plus, F_minus), im
def integrate_dm_by_dt(I1, I2, lbtime):
from scipy.integrate import trapz
return trapz(I1, lbtime) / trapz(I2, lbtime)
def mass_flux_hist(halo, back_to_aexp, return_data=True, **kwargs):
'''
Compute history of in/outflows
'''
import numpy as np
from seren3.scripts.mpi import write_mass_flux_hid_dict
db = kwargs.pop("db", write_mass_flux_hid_dict.load_db(halo.base.path, halo.base.ioutput))
if (int(halo["id"]) in db.keys()):
catalogue = halo.base.halos(finder="ctrees")
F = []
age_arr = []
hids = []
iouts = []
def _compute(h, db):
hid = int(h["id"])
res = db[hid]
F.append(res["F"])
age_arr.append(h.base.age)
hids.append(hid)
iouts.append(h.base.ioutput)
_compute(halo, db)
for prog in catalogue.iterate_progenitors(halo, back_to_aexp=back_to_aexp):
prog_db = write_mass_flux_hid_dict.load_db(prog.base.path, prog.base.ioutput)
if (int(prog["id"]) in prog_db.keys()):
_compute(prog, prog_db)
else:
break
F = np.array(F)
age_arr = np.array(age_arr)
hids = np.array(hids, dtype=np.int64)
iouts = np.array(iouts)
lbtime = halo.base.age - age_arr
if return_data:
return F, age_arr, lbtime, hids, iouts
return F
else:
return None
def fesc_tot_outflow(snapshot):
'''
Integrate the total mass ourflowed and photons escaped for all haloes
'''
import numpy as np
from scipy.integrate import trapz
from seren3.array import SimArray
from seren3.scripts.mpi import time_int_fesc_all_halos, history_mass_flux_all_halos
fesc_db = time_int_fesc_all_halos.load(snapshot)
mass_flux_db = history_mass_flux_all_halos.load(snapshot)
mass_flux_hids = np.array( [int(res.idx) for res in mass_flux_db] )
def _integrate_halo(fesc_res, mass_flux_res):
photons_escaped = SimArray(fesc_res["I1"], "s**-1").in_units("yr**-1")
cum_photons_escaped = trapz(photons_escaped, fesc_res["lbtime"].in_units("yr"))
F, F_plus, F_minus = mass_flux_res["F"].transpose()
F_plus = SimArray(F_plus, "Msol yr**-1")
F_minus = SimArray(F_minus, "Msol yr**-1")
if (len(F_plus) != len(photons_escaped)):
return np.nan, np.nan
cum_outflowed_mass = trapz(F_plus, mass_flux_res["lbtime"].in_units("yr"))
cum_inflowed_mass = np.abs(trapz(F_minus, mass_flux_res["lbtime"].in_units("yr")))
# return cum_photons_escaped, cum_outflowed_mass - cum_inflowed_mass
return cum_photons_escaped, cum_outflowed_mass
nphotons_escaped = np.zeros(len(fesc_db))
tot_mass_outflowed = np.zeros(len(fesc_db))
mvir = np.zeros(len(fesc_db))
for i in range(len(fesc_db)):
hid = int(fesc_db[i].idx)
fesc_res = fesc_db[i].result
mass_flux_res_ix = np.abs(mass_flux_hids - hid).argmin()
mass_flux_res = mass_flux_db[mass_flux_res_ix].result
nphotons_escaped[i], tot_mass_outflowed[i] = _integrate_halo(fesc_res, mass_flux_res)
mvir[i] = fesc_res["Mvir"]
ix = np.where( np.logical_and( ~np.isnan(nphotons_escaped), ~np.isnan(tot_mass_outflowed)) )
nphotons_escaped = nphotons_escaped[ix]
tot_mass_outflowed = tot_mass_outflowed[ix]
mvir = mvir[ix]
return nphotons_escaped, tot_mass_outflowed, mvir
def fesc_mean_time_outflow(snapshot):
'''
Integrate the total mass outflowed and photons escaped for all haloes
'''
import numpy as np
from scipy.integrate import trapz
from seren3.array import SimArray
from seren3.scripts.mpi import time_int_fesc_all_halos, history_mass_flux_all_halos
fesc_db = time_int_fesc_all_halos.load(snapshot)
mass_flux_db = history_mass_flux_all_halos.load(snapshot)
mass_flux_hids = np.array( [int(res.idx) for res in mass_flux_db] )
def _integrate_halo(fesc_res, mass_flux_res):
photons_escaped = SimArray(fesc_res["I1"], "s**-1").in_units("yr**-1")
# cum_photons_escaped = trapz(photons_escaped, fesc_res["lbtime"].in_units("yr"))
cum_photons_escaped = fesc_res["tint_fesc_hist"][0]
F, F_plus, F_minus = mass_flux_res["F"].transpose()
F_plus = SimArray(F_plus, "Msol yr**-1")
F_minus = SimArray(F_minus, "Msol yr**-1")
if (len(F_plus) != len(photons_escaped)):
return np.nan, np.nan
lbtime = mass_flux_res["lbtime"]
F_net_outflow = F_plus - np.abs(F_minus)
        if len(np.where(np.isnan(F_net_outflow))[0]) > 0:
return np.nan, np.nan
ix = np.where(F_net_outflow < 0.)
        if len(ix[0]) == 0:
return cum_photons_escaped, lbtime[-1]
else:
time_outflow = [0]
for i in ix[0]:
if (i == 0):
continue
time_outflow.append(lbtime[i - 1])
time_spent = np.zeros(len(time_outflow) - 1)
for i in range(len(time_spent)):
time_spent[i] = time_outflow[i+1] - time_outflow[i]
return cum_photons_escaped, time_spent.mean()
nphotons_escaped = np.zeros(len(fesc_db))
time_spent_net_outflow = np.zeros(len(fesc_db))
mvir = np.zeros(len(fesc_db))
for i in range(len(fesc_db)):
hid = int(fesc_db[i].idx)
fesc_res = fesc_db[i].result
mass_flux_res_ix = np.abs(mass_flux_hids - hid).argmin()
mass_flux_res = mass_flux_db[mass_flux_res_ix].result
nphotons_escaped[i], time_spent_net_outflow[i] = _integrate_halo(fesc_res, mass_flux_res)
mvir[i] = fesc_res["Mvir"]
ix = np.where( np.logical_and( ~np.isnan(nphotons_escaped),\
np.logical_and(~np.isnan(time_spent_net_outflow),\
time_spent_net_outflow > 0) ) )
nphotons_escaped = nphotons_escaped[ix]
time_spent_net_outflow = time_spent_net_outflow[ix]
mvir = mvir[ix]
return nphotons_escaped, SimArray(time_spent_net_outflow, "Gyr"), mvir
def plot(sims, iout, labels, cols, ax=None, **kwargs):
import numpy as np
import matplotlib.pylab as plt
from seren3.analysis import plots
if (ax is None):
ax = plt.gca()
ls = ["-", "--"]
lw = [3., 1.5]
for sim, label, col, lsi, lwi in zip(sims, labels, cols, ls, lw):
snap = sim[iout]
nphotons_escaped, tot_mass_outflowed, mvir = fesc_tot_outflow(snap)
print "%e" % nphotons_escaped.sum()
log_mvir = np.log10(mvir)
x = np.log10(tot_mass_outflowed)
y = np.log10(nphotons_escaped)
ix = np.where(np.logical_and(log_mvir >= 7.5, x>=5.5))
x = x[ix]
y = y[ix]
ix = np.where(np.logical_and(np.isfinite(x), np.isfinite(y)))
x = x[ix]
y = y[ix]
bc, mean, std, sterr = plots.fit_scatter(x, y, ret_sterr=True, **kwargs)
ax.scatter(x, y, alpha=0.10, s=5, color=col)
e = ax.errorbar(bc, mean, yerr=std, color=col, label=label,\
fmt="o", markerfacecolor=col, mec='k',\
capsize=2, capthick=2, elinewidth=2, linewidth=lwi, linestyle=lsi)
# ax.plot(bc, mean, color=col, label=None, linewidth=3., linestyle="-")
# ax.fill_between(bc, mean-std, mean+std, facecolor=col, alpha=0.35, interpolate=True, label=label)
ax.set_xlabel(r"log$_{10}$ $\int_{0}^{t_{\mathrm{H}}}$ $\vec{F}_{+}(t)$ $dt$ [M$_{\odot}$]", fontsize=20)
ax.set_ylabel(r'log$_{10}$ $\int_{0}^{t_{\mathrm{H}}}$ $\dot{\mathrm{N}}_{\mathrm{ion}}(t)$ f$_{\mathrm{esc}}$ ($t$) $dt$ [#]', fontsize=20)
ax.legend(loc='lower right', frameon=False, prop={"size" : 16})
| 34.734899
| 144
| 0.628925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,775
| 0.171481
|
3637422656965fc8f3771e5007feaef41fa1973f
| 2,859
|
py
|
Python
|
evalution/composes/utils/matrix_utils.py
|
esantus/evalution2
|
622a9faf729b7c704ad45047911b9a03cf7c8dae
|
[
"MIT"
] | 1
|
2017-12-06T21:46:26.000Z
|
2017-12-06T21:46:26.000Z
|
evalution/composes/utils/matrix_utils.py
|
esantus/EVALution-2.0
|
622a9faf729b7c704ad45047911b9a03cf7c8dae
|
[
"MIT"
] | 5
|
2020-03-24T15:27:40.000Z
|
2021-06-01T21:47:18.000Z
|
evalution/composes/utils/matrix_utils.py
|
esantus/EVALution-2.0
|
622a9faf729b7c704ad45047911b9a03cf7c8dae
|
[
"MIT"
] | 1
|
2018-02-15T17:13:02.000Z
|
2018-02-15T17:13:02.000Z
|
import numpy as np
from composes.matrix.sparse_matrix import SparseMatrix
from composes.matrix.dense_matrix import DenseMatrix
from composes.matrix.matrix import Matrix
from scipy.sparse import issparse
from composes.utils.py_matrix_utils import is_array
from warnings import warn
def to_matrix(matrix_):
"""
Converts an array-like structure to a DenseMatrix/SparseMatrix
"""
if issparse(matrix_):
return SparseMatrix(matrix_)
else:
return DenseMatrix(matrix_)
def is_array_or_matrix(data):
return is_array(data) or isinstance(data, Matrix)
def assert_is_array_or_matrix(data):
if not is_array_or_matrix(data):
raise TypeError("expected array-like or matrix, received %s"
% (type(data)))
def padd_matrix(matrix_, axis, value=1):
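    # Pads the matrix with a constant row (axis=0) or column (axis=1) of the given
    # value, preserving the dense/sparse matrix type.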
matrix_type = type(matrix_)
if axis == 0:
append_mat = matrix_type(np.ones((1, matrix_.shape[1]))*value)
return matrix_.vstack(append_mat)
elif axis == 1:
append_mat = matrix_type(np.ones((matrix_.shape[0], 1))*value)
return matrix_.hstack(append_mat)
else:
raise ValueError("Invalid axis value:%s" % axis)
def assert_same_shape(matrix1, matrix2, axis=None):
if axis is None:
if matrix1.shape != matrix2.shape:
raise ValueError("Inconsistent shapes")
else:
if not axis in [0, 1]:
raise ValueError("Invalid axis value: %s, expected 0 or 1." % axis)
if matrix1.shape[axis] != matrix2.shape[axis]:
raise ValueError("Inconsistent shapes")
def to_compatible_matrix_types(v1, v2):
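    # Coerces v1 and v2 to the same Matrix subclass (dense or sparse), converting
    # plain array-like inputs via to_matrix() first.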
if isinstance(v1, Matrix) and isinstance(v2, Matrix):
v2 = type(v1)(v2)
elif not isinstance(v1, Matrix) and isinstance(v2, Matrix):
v1 = type(v2)(v1)
elif not isinstance(v2, Matrix) and isinstance(v1, Matrix):
v2 = type(v1)(v2)
else:
v1 = to_matrix(v1)
v2 = type(v1)(v2)
return v1, v2
def get_type_of_largest(matrix_list):
max_dim = 0
max_type = None
for matrix_ in matrix_list:
if matrix_.shape[0] * matrix_.shape[1] > max_dim:
max_type = type(matrix_)
max_dim = matrix_.shape[0] * matrix_.shape[1]
return max_type
def resolve_type_conflict(matrix_list, matrix_type):
new_matrix_list = []
if matrix_type_conflict(matrix_list):
warn("Efficiency warning: matrices should have the same dense/sparse type!")
for matrix_ in matrix_list:
new_matrix_list.append(matrix_type(matrix_))
return new_matrix_list
return list(matrix_list)
def matrix_type_conflict(matrix_list):
if not matrix_list:
return False
matrix_type = type(matrix_list[0])
for matrix_ in matrix_list:
if not isinstance(matrix_, matrix_type):
return True
return False
| 27.490385
| 84
| 0.666317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 299
| 0.104582
|
3637cf787bdf4e4784cdc6527a8256c98d6b4fec
| 1,646
|
py
|
Python
|
cpu/pipeline/writeback_unit.py
|
tim-roderick/simple-cpu-simulator
|
334baf1934751527b7e5ffa0ad85d5e53e7215a1
|
[
"MIT"
] | 2
|
2019-12-09T12:02:50.000Z
|
2019-12-09T22:40:01.000Z
|
cpu/pipeline/writeback_unit.py
|
tim-roderick/simple-cpu-simulator
|
334baf1934751527b7e5ffa0ad85d5e53e7215a1
|
[
"MIT"
] | null | null | null |
cpu/pipeline/writeback_unit.py
|
tim-roderick/simple-cpu-simulator
|
334baf1934751527b7e5ffa0ad85d5e53e7215a1
|
[
"MIT"
] | 1
|
2020-05-04T09:13:50.000Z
|
2020-05-04T09:13:50.000Z
|
from .component import Component
from cpu.Memory import SCOREBOARD
from isa.Instructions import ALUInstruction as alu
class writeback_unit(Component):
def add_result(self, result):
result.finished = True
self.pipeline_register = self.pipeline_register + [result]
self.clean()
def clean(self):
self.pipeline_register = list(filter(None, self.pipeline_register))
def run(self, cpu):
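        # Retire every instruction whose reorder-buffer entry is ready: perform its
        # writeback, refresh the reservation stations, and drop it from this stage.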
if not self.halt:
cpu.update_reservation()
for instruction in self.pipeline_register:
if cpu.reorder_buffer.is_retirable(cpu, instruction):
instruction.writeback(cpu)
instruction.reservation_update()
#
# if str(instruction.eo[0]).startswith('r'):
# cpu.update_reservation()
#
cpu.increment_ie()
if instruction in self.pipeline_register:
index = self.pipeline_register.index(instruction)
self.pipeline_register[index] = ""
self.clean()
def flush(self, cpu, instruction):
self.halt = True
for instruction in self.pipeline_register:
if instruction not in cpu.reorder_buffer.buffer:
#
if isinstance(instruction, alu) or instruction.opcode in ["LD", "LDC", "MOV"]:
SCOREBOARD[instruction.operands[0]] = 1
#
index = self.pipeline_register.index(instruction)
self.pipeline_register[index] = ""
self.clean()
| 38.27907
| 94
| 0.567436
| 1,526
| 0.927096
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 0.055893
|
36397c2f3323af879bfcf0a875f647ed132668eb
| 273
|
py
|
Python
|
ex026.py
|
juniorpedroso/Exercicios-CEV-Python
|
4adad3b6f3994cf61f9ead5564124b8b9c58d304
|
[
"MIT"
] | null | null | null |
ex026.py
|
juniorpedroso/Exercicios-CEV-Python
|
4adad3b6f3994cf61f9ead5564124b8b9c58d304
|
[
"MIT"
] | null | null | null |
ex026.py
|
juniorpedroso/Exercicios-CEV-Python
|
4adad3b6f3994cf61f9ead5564124b8b9c58d304
|
[
"MIT"
] | null | null | null |
frase = str(input('Digite uma frase: ').strip().upper())
print('A letra a aparece {} vezes'.format(frase.count('A')))
print('Sua primeira aparição é na posição {}'.format(frase.find('A') + 1))
print('Ela aparece pela última vez na posição {}'.format(frase.rfind('A') + 1))
| 54.6
| 79
| 0.673993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.523132
|
363ab7e49354291dcd24ad4beee0131449a7700e
| 3,269
|
py
|
Python
|
MyDataLoader.py
|
WynMew/WaifuLite
|
fbd9680dda4a5f501b7c66515c9fef1444f2d9e7
|
[
"Apache-2.0"
] | 22
|
2019-07-16T13:59:18.000Z
|
2022-01-17T02:58:01.000Z
|
MyDataLoader.py
|
WynMew/WaifuLite
|
fbd9680dda4a5f501b7c66515c9fef1444f2d9e7
|
[
"Apache-2.0"
] | null | null | null |
MyDataLoader.py
|
WynMew/WaifuLite
|
fbd9680dda4a5f501b7c66515c9fef1444f2d9e7
|
[
"Apache-2.0"
] | 3
|
2020-02-19T19:37:52.000Z
|
2021-05-11T05:48:09.000Z
|
import glob
import io
import numpy as np
import re
import os
from io import BytesIO
import random
from uuid import uuid4
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision.transforms import RandomCrop
from torchvision.transforms.functional import to_tensor
class ListDatasetLite(Dataset):
def __init__(self, root, list_file, patch_size=96, shrink_size=2, noise_level=1, down_sample_method=None, transform=None):
self.root = root
self.transform = transform
self.random_cropper = RandomCrop(size=patch_size)
self.img_augmenter = ImageAugment(shrink_size, noise_level, down_sample_method)
self.transform = transform
self.fnames = []
if isinstance(list_file, list):
tmp_file = '/tmp/listfile.txt'
os.system('cat %s > %s' % (' '.join(list_file), tmp_file))
list_file = tmp_file
with open(list_file) as f:
lines = f.readlines()
self.num_imgs = len(lines)
for line in lines:
self.fnames.append(line)
def __getitem__(self, idx):
fname = self.fnames[idx].strip()
img = Image.open(os.path.join(self.root, fname))
if img.mode != 'RGB':
img = img.convert('RGB')
img_patch = self.random_cropper(img)
lr_img, hr_img = self.img_augmenter.process(img_patch)
return self.transform(lr_img), self.transform(hr_img)
#return to_tensor(lr_img), to_tensor(hr_img)
def __len__(self):
return self.num_imgs
class ImageAugment:
def __init__(self,
shrink_size=2,
noise_level=1,
down_sample_method=None
):
# noise_level (int): 0: no noise; 1: 75-95% quality; 2:50-75%
if noise_level == 0:
self.noise_level = [0, 0]
elif noise_level == 1:
self.noise_level = [5, 25]
elif noise_level == 2:
self.noise_level = [25, 50]
else:
raise KeyError("Noise level should be either 0, 1, 2")
self.shrink_size = shrink_size
self.down_sample_method = down_sample_method
def shrink_img(self, hr_img):
if self.down_sample_method is None:
resample_method = random.choice([Image.BILINEAR, Image.BICUBIC, Image.LANCZOS])
else:
resample_method = self.down_sample_method
img_w, img_h = tuple(map(lambda x: int(x / self.shrink_size), hr_img.size))
lr_img = hr_img.resize((img_w, img_h), resample_method)
return lr_img
def add_jpeg_noise(self, hr_img):
quality = 100 - round(random.uniform(*self.noise_level))
lr_img = BytesIO()
hr_img.save(lr_img, format='JPEG', quality=quality)
lr_img.seek(0)
lr_img = Image.open(lr_img)
return lr_img
def process(self, hr_patch_pil):
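        # Downscale the HR patch by shrink_size and, if noise is enabled, add JPEG
        # compression artifacts, returning the (LR, HR) training pair.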
lr_patch_pil = self.shrink_img(hr_patch_pil)
if self.noise_level[1] > 0:
lr_patch_pil = self.add_jpeg_noise(lr_patch_pil)
return lr_patch_pil, hr_patch_pil
def up_sample(self, img, resample):
width, height = img.size
return img.resize((self.shrink_size * width, self.shrink_size * height), resample=resample)
| 32.69
| 126
| 0.632303
| 2,967
| 0.907617
| 0
| 0
| 0
| 0
| 0
| 0
| 194
| 0.059345
|
363b300b4584703dde103216ec3118b56fec2aec
| 179
|
py
|
Python
|
model/get_data.py
|
qq1010903229/OIer
|
ec1f4c60d76188efd18af157f46849b27dd8ddae
|
[
"Apache-2.0"
] | null | null | null |
model/get_data.py
|
qq1010903229/OIer
|
ec1f4c60d76188efd18af157f46849b27dd8ddae
|
[
"Apache-2.0"
] | null | null | null |
model/get_data.py
|
qq1010903229/OIer
|
ec1f4c60d76188efd18af157f46849b27dd8ddae
|
[
"Apache-2.0"
] | null | null | null |
f = open("OI_school.csv")
op = open("mdt.txt","w")
for i in f.readlines():
c = i.split('","')
op.write(c[-3]+','+c[-2]+','+"".join([i+',' for i in eval(c[1])])[:-1]+'\n')
| 29.833333
| 80
| 0.463687
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.26257
|
363b4bc29bcc02e72a0083db4df10c04444ae917
| 523
|
py
|
Python
|
pythontabcmd2/parsers/global_options.py
|
playkazoomedia/tabcmd2
|
a89db9be6047d95379a7c88264236e9cb3e78189
|
[
"MIT"
] | 11
|
2020-09-02T03:41:01.000Z
|
2022-01-20T12:38:20.000Z
|
pythontabcmd2/parsers/global_options.py
|
playkazoomedia/tabcmd2
|
a89db9be6047d95379a7c88264236e9cb3e78189
|
[
"MIT"
] | 19
|
2020-09-03T04:54:47.000Z
|
2022-01-31T17:41:19.000Z
|
pythontabcmd2/parsers/global_options.py
|
playkazoomedia/tabcmd2
|
a89db9be6047d95379a7c88264236e9cb3e78189
|
[
"MIT"
] | 6
|
2020-11-21T15:45:51.000Z
|
2022-01-24T12:26:20.000Z
|
class GlobalOptions:
""" Class to evaluate global options for example: project path"""
@staticmethod
def evaluate_project_path(path):
""" Method to parse the project path provided by the user"""
first_dir_from_end = None
if path[-1] != "/":
path = path + "/"
new_path = path.rsplit('/')[-2]
for directory in new_path[::-1]:
if directory != " ":
first_dir_from_end = new_path
break
return first_dir_from_end
| 34.866667
| 69
| 0.565966
| 522
| 0.998088
| 0
| 0
| 427
| 0.816444
| 0
| 0
| 137
| 0.26195
|
363ecc9fcc777c09f95b187bd0eb4e97cd4e05fe
| 2,068
|
py
|
Python
|
power_data_to_sat_passes/filtersatpowerfiles.py
|
abrahamneben/orbcomm_beam_mapping
|
71b3e7d6e4214db0a6f4e68ebeeb7d7f846f5004
|
[
"MIT"
] | 1
|
2019-04-10T02:50:19.000Z
|
2019-04-10T02:50:19.000Z
|
power_data_to_sat_passes/filtersatpowerfiles.py
|
abrahamneben/orbcomm_beam_mapping
|
71b3e7d6e4214db0a6f4e68ebeeb7d7f846f5004
|
[
"MIT"
] | null | null | null |
power_data_to_sat_passes/filtersatpowerfiles.py
|
abrahamneben/orbcomm_beam_mapping
|
71b3e7d6e4214db0a6f4e68ebeeb7d7f846f5004
|
[
"MIT"
] | null | null | null |
#!/users/aneben/python/bin/python
import sys
import commands
import numpy as np
import string
np.set_printoptions(precision=3,linewidth=200)
months={'Jan':'01','Feb':'02','Mar':'03','Apr':'04','May':'05','Jun':'06','Jul':'07','Aug':'08','Sept':'09','Oct':'10','Nov':'11','Dec':'12'}
def make_datetime_numeric(dt):
dt_elts = dt.split()
month = months[dt_elts[2]]
day = dt_elts[3]
time = ''.join(dt_elts[4].split(':'))
year = dt_elts[5]
return year+month+day+time
def read_next_refew_spectrum(f):
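    # Reads one acquisition: the free-form header lines up to the channel-table marker,
    # then the 512-channel reference-EW (CH 2) power spectrum; returns [[], []] at EOF.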
header = ''
inheader = True
while inheader:
nextline = f.readline()
if len(nextline) == 0:
return [[],[]]
elif nextline == ' CH 1 CH 2 CH 3 CH 4\n':
break
else:
header += nextline
spectrum = np.zeros(512)
# cols: tileEW=0, refEW=1, tileNS=2, refNS=3
for i in range(512): spectrum[i] = float(f.readline().split()[1])
return [header,spectrum]
label = sys.argv[1]
satpowerdir = '/media/disk-1/MWA_Tile/newdata/'+label
satpowerfnames = commands.getoutput('ls '+satpowerdir+'/satpower*').split()
outf = open('../phase3/composite_'+label+'/'+label+'_filteredsatpows.txt','w')
satbins = np.array([102, 115, 128, 225, 236, 339, 352, 365 ,378, 410])
skip=4
for fname in satpowerfnames:
f = open(fname)
print 'reading '+fname
acq_num = 0
[header,spect] = read_next_refew_spectrum(f)
while len(spect) != 0:
satstrs = header.split('\n')[3:-2]
allsats = np.zeros(8,dtype=int)
sats = [int(satstr[2:4]) for satstr in satstrs]
allsats[0:len(sats)] = sats
if acq_num%skip == 0:
datetime = header.split('\n')[2]
outf.write('\n'+make_datetime_numeric(datetime))
for i in range(len(satbins)): outf.write(",%1.3f"%(20*np.log10(spect[satbins[i]])))
outf.write(',')
outf.write(','.join(map(str,allsats)))
acq_num += 1
if acq_num%5000==0: print acq_num/50000.
[header,spect] = read_next_refew_spectrum(f)
f.close()
outf.close()
| 26.512821
| 141
| 0.597679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 370
| 0.178917
|
363f007b5be683fdae2cae98f2ef185659366c8a
| 6,060
|
py
|
Python
|
scripts/utils/prepare.py
|
Glaciohound/VCML
|
5a0f01a0baba238cef2f63131fccd412e3d7822b
|
[
"MIT"
] | 52
|
2019-12-04T22:26:56.000Z
|
2022-03-31T17:04:15.000Z
|
scripts/utils/prepare.py
|
guxiwuruo/VCML
|
5a0f01a0baba238cef2f63131fccd412e3d7822b
|
[
"MIT"
] | 6
|
2020-08-25T07:35:14.000Z
|
2021-09-09T04:57:09.000Z
|
scripts/utils/prepare.py
|
guxiwuruo/VCML
|
5a0f01a0baba238cef2f63131fccd412e3d7822b
|
[
"MIT"
] | 5
|
2020-02-10T07:39:24.000Z
|
2021-06-23T02:53:42.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : prepare.py
# Author : Chi Han, Jiayuan Mao
# Email : haanchi@gmail.com, maojiayuan@gmail.com
# Date : 17.07.2019
# Last Modified Date: 03.12.2019
# Last Modified By : Chi Han
#
# This file is part of the VCML codebase
# Distributed under MIT license
import os
from dataset.visual_dataset.visual_dataset import Dataset
from dataset.question_dataset.question_dataset import Dataset as QDataset
from dataset.visual_dataset.utils.sceneGraph_loader import \
load_multiple_sceneGraphs
from utility.common import load, make_dir
from utility.cache import Cache
from reason.models.parser import Seq2seqParser
from . import register
def load_visual_dataset(args, logger, process_fn):
with Cache(args.task+'_sceneGraphs', logger, args) as cache:
if not cache.exist():
cache.cache(load_multiple_sceneGraphs(
args.sceneGraph_dir, args, logger, process_fn))
sceneGraphs = cache.obj
visual_dataset = Dataset(args, logger, sceneGraphs, 0).get_agent()
logger(f'SceneGraphs size: {len(visual_dataset)}')
return visual_dataset
def split_visual_dataset(dataset_dir, visual_dataset, config, logger):
visual_split_config = load(
os.path.join(dataset_dir, config.visual_split_dir))
visual_splits = visual_dataset.resplit(visual_split_config)
return visual_splits
def print_args(args, logger):
logger.showtime()
logger.split_line()
logger('Printing Arguments')
logger(args.str)
logger.split_line()
logger.line_break()
def load_training_visual_dataset(args, filename, logger, index):
# filename = os.path.join(path, dataset, 'sceneGraphs.pkl')
sceneGraphs = load(filename)
logger(f'Loaded sceneGraphs from: {filename}')
logger(f'SceneGraphs size: {len(sceneGraphs)}', resume=True)
visual_dataset = Dataset(args, logger, sceneGraphs, index).get_agent()
return visual_dataset
def load_model(args, tools, device, logger):
if args.model in ('VCML', 'NSCL', 'BERTvariant'):
from models.model.vcml_model import VCML_Model
model = VCML_Model(args, tools, device, logger)
elif args.model == 'BERT':
from models.model.bert_model import BERTModel
model = BERTModel(args, tools, device, logger)
elif args.model.startswith('GRU'):
use_vision = args.model == 'GRUCNN'
use_pretrained = args.pretrained_embedding
finetune = args.finetune_embedding
assert not (args.force_off_lm and args.force_on_lm), \
'force-on / off can not be both true'
use_lm = (not use_pretrained and not args.force_off_lm) or \
args.force_on_lm
from models.model.gru_model import GRUModel
model = GRUModel(args, tools, device, logger,
use_vision=use_vision,
fix_vision=args.fix_resnet,
use_pretrained=use_pretrained,
finetune=finetune,
use_lm=use_lm)
'''
elif args.model == 'MAC':
from models.model.mac_model import MAC_agent
model = MAC_agent(args, tools, device)
'''
return model
def load_for_schedule(schedule, visual_dataset, tools):
for stage in schedule:
for dataset in stage['question_splits'].values():
dataset.load_parts(visual_dataset, tools)
def questions_directly(path, args, logger=None):
if logger is not None:
logger(f'Loading questions from {path}')
suite = {split: QDataset(load(os.path.join(
path, f'{split}_questions.json'))['questions'],
args).get_agent()
for split in ['train', 'test', 'val']}
schedule = [
{
'length': args.epochs,
'question_splits': suite,
'test_concepts': None,
}
]
if logger is not None:
for split in ('train', 'val', 'test'):
logger(f'{split} questions size = {len(suite[split])}',
resume=True)
return schedule
def load_ref_dataset(args, logger, index, process_fn):
filename = os.path.join(args.ref_scene_json)
logger(f'Loading referential-expression dataset from {filename}')
sceneGraphs = load(filename)
processed = process_fn(sceneGraphs, '', logger, args)
visual_dataset = Dataset(args, logger, processed, index,
image_dir=args.ref_image_dir
).get_agent()
logger(f'SceneGraphs size: {len(visual_dataset)}', resume=True)
return visual_dataset
def get_parser(args, device, logger, index, is_main):
class fixed_opt:
def __init__(self, **kwarg):
self.__dict__.update(kwarg)
if args.task in ['CLEVR', 'GQA']:
ckpt_name_dir = args.task + '_reason'
elif 'meronym' in args.name:
ckpt_name_dir = 'CUB_meronym_reason'
else:
ckpt_name_dir = 'CUB_hypernym_reason'
ckpt_name = ckpt_name_dir + '.tgz'
temp_dir = os.path.join(args.temp_dir, 'vcml_reason', str(index))
make_dir(temp_dir)
ckpt_link = os.path.join(args.webpage, 'ckpt', ckpt_name)
ckpt_file = os.path.join(temp_dir, ckpt_name)
ckpt_dir = os.path.join(temp_dir, ckpt_name_dir)
logger(f'Loading question parser from {ckpt_link}')
os.system(f'rm -f {ckpt_file}')
if is_main:
os.system(f'wget {ckpt_link} -P {temp_dir}')
else:
os.system(f'wget -q {ckpt_link} -P {temp_dir}')
os.system(f'mkdir {ckpt_dir} && tar xf {ckpt_file} -C {ckpt_dir}')
opt = fixed_opt(
load_checkpoint_path=os.path.join(
temp_dir, ckpt_name_dir, 'checkpoint.pt'),
gpu_ids=[0],
fix_embedding=False
)
with logger.levelup():
tools = register.init_word2index(logger)
tools.load(ckpt_dir)
tools.operations.register_special()
parser = Seq2seqParser(opt, tools, device)
os.system(f'rm -r {ckpt_dir} {ckpt_file}')
return parser
| 33.854749
| 74
| 0.64769
| 93
| 0.015347
| 0
| 0
| 0
| 0
| 0
| 0
| 1,417
| 0.233828
|
363f6b85601d80ec792d9609a878c76ff8a2a456
| 14,280
|
py
|
Python
|
burst_paper/all_ds/plot_allband_ds.py
|
jackievilladsen/dynspec
|
87101b188d7891644d848e781bca00f044fe3f0b
|
[
"MIT"
] | 2
|
2019-05-01T00:34:28.000Z
|
2021-02-10T09:18:10.000Z
|
burst_paper/all_ds/plot_allband_ds.py
|
jackievilladsen/dynspec
|
87101b188d7891644d848e781bca00f044fe3f0b
|
[
"MIT"
] | null | null | null |
burst_paper/all_ds/plot_allband_ds.py
|
jackievilladsen/dynspec
|
87101b188d7891644d848e781bca00f044fe3f0b
|
[
"MIT"
] | null | null | null |
'''
plot_allband_ds.py - Load P,L,S band dynamic spectrum for a given epoch, bin to specified resolution, and plot to file
'''
import dynspec.plot
reload(dynspec.plot)
from dynspec import load_dict
from dynspec.plot import *
from pylab import *
import os, subprocess
import matplotlib.gridspec as gridspec
'''
def get_obsname(obsfile):
# take a file directory such as '/data/jrv/15A-416/YZCMi/1' and
# convert to obs name such as '15A-416_YZCMi_1' and srcname 'YZCMi'
names = obsfile.split('/')
srcname = names[4]
obsname = names[3]+'_'+names[4]+'_'+names[5]
return obsname,srcname
'''
def get_obsfile(obsname):
# take an obs name such as '15A-416_YZCMi_1' and return srcname ('YZCMi')
# and file directory ('/data/jrv/15A-416/YZCMi/1')
names = obsname.split('_')
srcname = names[1]
obsfile = '/data/jrv/'+names[0]+'/'+names[1]+'/'+names[2]
return obsfile, srcname
params = {'legend.fontsize': 'small',
'axes.titlesize': 'small',
'axes.labelsize': 'small',
'xtick.labelsize': 'x-small',
'ytick.labelsize': 'x-small',
'image.interpolation': 'nearest'}
rcParams.update(params)
loadfile = '/data/jrv/burst_paper/all_burst_epoch_dynspec_LSband.npy'
ds_list = load_dict(loadfile)
loadfileP = '/data/jrv/burst_paper/all_burst_epoch_dynspec_Pband.npy'
dsP_list = load_dict(loadfileP)
ds_dir = '/data/jrv/burst_paper/ds/all_burst_dynspec/' # where to save ds plots
if not os.path.exists(ds_dir):
os.system('mkdir '+ds_dir)
close('all')
# note: throughout, "LS" can also include C band, I initially wrote this code for 2015 data (which only has LS band)
# but it works for the 2013 data with LSC band
# params that can be changed are listed in default_fig_params
default_fig_params = {
'tint_P': 300,
'tint_LS': 60,
'df_MHz_P': 16,
'df_MHz_LS': 16,
'smax_P': None,
'smax_LS': None,
'pixflag_sigfacP': 7.,
'pixflag_sigfacLS': 10.,
'chanflag_sigfacP': 3.,
'chanflag_sigfacLS': 7.,
'colorscale_P':'linear',
'colorscale_LS':'linear',
'maskpartial_P':0.5,
'maskpartial_LS':0.5,
'linthresh_P':None,
'linthresh_LS':None}
fig_params_dict = {
'13A-423_UVCet_1':{'tint_LS':60,'df_MHz_LS':32,'smax_LS':None,'colorscale_LS':'symlog','pixflag_sigfacLS':100,'maskpartial_LS':1.0},
'13A-423_UVCet_2':{'tint_LS':60,'df_MHz_LS':32,'smax_LS':0.015,'maskpartial_LS':0.55},
'13A-423_UVCet_2_b':{'tint_LS':300,'df_MHz_LS':64,'smax_LS':0.008,'linthresh_LS':0.002,'maskpartial_LS':0.55,'colorscale_LS':'symlog'},
'15A-416_ADLeo_3':{'smax_LS':0.03,'smax_P':0.02},
'15A-416_ADLeo_4':{'smax_LS':0.045,'smax_P':0.02,'pixflag_sigfacLS':50.},
'15A-416_ADLeo_5':{'tint_LS':120,'df_MHz_LS':32,'tint_P':150,'df_MHz_P':8},
'15A-416_EQPeg_2':{'tint_LS':120,'df_MHz_LS':32,'tint_P':180,'df_MHz_P':8,'chanflag_sigfacP':2.5,'maskpartial_P':0.9,'pixflag_sigfacP':5.,'smax_P':0.1,'maskpartial_LS':0.7},
'15A-416_UVCet_1':{'df_MHz_LS':32},
'15A-416_UVCet_2':{'tint_P':150,'smax_P':0.05},
'15A-416_UVCet_3':{'tint_P':180,'df_MHz_P':16,'smax_P':0.05},
'15A-416_UVCet_4':{'colorscale_LS':'symlog','smax_LS':0.1,'df_MHz_LS':16,'maskpartial_LS':0.9,'linthresh_LS':0.012,'tint_P':180,'smax_P':0.05},
'15A-416_UVCet_5':{'smax_P':0.04,'maskpartial_P':0.7,'maskpartial_LS':0.9},
'15A-416_YZCMi_1':{'smax_P':0.05,'maskpartial_P':0.7,'maskpartial_LS':0.8,'tint_LS':150,'df_MHz_LS':32,'colorscale_LS':'symlog','smax_LS':0.05,'linthresh_LS':0.0075,'chanflag_sigfacLS':4.},
'15A-416_YZCMi_2':{'smax_P':0.05,'tint_LS':120,'df_MHz_LS':32,'smax_LS':0.015}
}
### PLOT INDIVIDUAL OBSERVATIONS ###
obs_list = fig_params_dict.keys()
#obs_list = ['15A-416_EQPeg_2'] # so I can work on just this event
fig_max_width=6.5
fig_max_height=8.25
for obsname in obs_list:
for func in [real,imag]:
# load dynamic spectra for this observation
print '\n-----', obsname, '-----'
obsfile,srcname = get_obsfile(obsname)
ds = ds_list[obsfile]
dsP = dsP_list.get(obsfile,None)
# load custom parameters for plotting this epoch (binning, RFI flagging, color scale)
fig_params = deepcopy(default_fig_params)
fp_dict_temp = fig_params_dict.get(obsname,{})
for k in fp_dict_temp:
fig_params[k] = fp_dict_temp[k]
# Duration of observation relative to 3h40m (max duration of any) - scale x-axis by this
# so they are all on the same time scale
duration = ds.get_tlist()[-1]*ds.dt()
print 'Duration:',duration,'sec'
frac_duration = duration/(3*3600+40*60)
print 'Fractional duration compared to 3h40m:', frac_duration
# Bandwidth of >1 GHz data relative to 3 GHz (default for 2015) - scale y-axis of >1 GHz dynspec by this
BW_LSC = max(ds.f)-min(ds.f)
frac_BW = BW_LSC/3.e9
print 'Fractional bandwidth of >1 GHz data compared to 3 GHz:',frac_BW
# bin LS band dynamic spectrum to desired resolution
# mask RFI pix and chans before binning, pix after binning
ds.mask_RFI_pixels(rmsfac=fig_params['pixflag_sigfacLS'],func=imag)
ds.mask_RFI(rmsfac=fig_params['chanflag_sigfacLS'])
nt = int(round(fig_params['tint_LS']/ds.dt())) # number of integrations to bin together
nf = int(round(fig_params['df_MHz_LS']/(ds.df()/1e6))) # number of channels to bin together
ds = ds.bin_dynspec(nt=nt,nf=nf,mask_partial=fig_params['maskpartial_LS'])
ds.mask_RFI_pixels(rmsfac=fig_params['pixflag_sigfacLS'],func=imag)
if dsP:
dsP.mask_RFI_pixels(rmsfac=fig_params['pixflag_sigfacP'])
dsP.mask_RFI(rmsfac=fig_params['chanflag_sigfacP'])
# bin P band dynamic spectrum to desired resolution
nt = int(round(fig_params['tint_P']/dsP.dt())) # number of integrations to bin together
nf = int(round(fig_params['df_MHz_P']/(dsP.df()/1e6))) # number of channels to bin together
dsP = dsP.bin_dynspec(nt=nt,nf=nf,mask_partial=fig_params['maskpartial_P'])
dsP.mask_RFI(rmsfac=fig_params['chanflag_sigfacP'])
# calculate horizontal positions of subplots in units from 0 to 1
# (0 is left edge)
dsplot_w = 3.2 * frac_duration # width of dynamic spectrum in inches
gap_l = 0.55 # width of x-axis blank space (left) in inches
gap_c = 0.15 # width of x-axis blank space (center) in inches
gap_cbar = 0.45 # width of blank space between V plot & cbar in inches
gap_r = 0.57 # width of x-axis blank space (right) in inches
cbar_w = 0.13 # width of colorbar in inches
tot_w = 2*dsplot_w + cbar_w + gap_l + gap_c + gap_cbar + gap_r # total width in inches
#if obs == '13A-423_UVCet_2':
# tot_w += gap_c + dsplot_w + gap_cbar + gap_r
print 'Total width of figure in inches:', tot_w, '(goal: <=8.25)'
x1 = gap_l/tot_w # left edge of Stokes I dynspec
x2 = x1 + dsplot_w/tot_w # right edge of Stokes I dynspec
x3 = x2 + gap_c/tot_w # left edge of Stokes V dynspec
x4 = x3 + dsplot_w/tot_w # right edge of Stokes V dynspec
x5 = x4 + gap_cbar/tot_w # left edge of colorbar
x6 = x5+cbar_w/tot_w # right edge of colorbar
#if obs == '13A-423_UVCet_2':
# x7 = x6 + (gap_r+gap_c)/tot_w # left edge of second Stokes V dynspec
# x8 = x
# calculate vertical positions of subplots in units from 0 to 1
# (0 is bottom edge)
dsLS_h = 3.2 * frac_BW # height of LS band dynspec in inches
dsP_h = 0.9 # height of P band dynspec in inches
gap_t = 0.43 # height of y-axis blank space at top (includes titles) in inches
gap_rows = 0.5 # heights of each gap between rows of dynspecs in inches
gap_b = 0.36 # height of y-axis blank space at bottom in inches
if dsP:
tot_h = dsLS_h + 2*dsP_h + gap_t + 2*gap_rows + gap_b # total height in inches
else:
tot_h = gap_t + dsLS_h + gap_b # total height in inches if no P band data
print 'Total height of figure in inches:', tot_h, '(goal: <=6.8)'
y1 = 1-(gap_t/tot_h) # top edge of LS band dynspec
y2 = y1 - dsLS_h/tot_h # bottom edge of LS band dynspec
y3 = y2 - gap_rows/tot_h # top edge of P band I,V dynspecs
y4 = y3 - dsP_h/tot_h # bottom edge of P band I,V dynspecs
y5 = y4 - gap_rows/tot_h # top edge of P band U dynspec
y6 = y5 - dsP_h/tot_h # bottom edge of P band U dynspec
cbarP_h = (2*dsP_h + gap_rows)/tot_h
# create figure
close('all')
figname = ds_dir+obsname+'.pdf'
if func == imag:
figname = ds_dir+obsname+'_imag.pdf'
fig=figure(figsize=(tot_w,tot_h))
# First row of plots: Stokes I LS, Stokes V LS, colorbar LS
# Format for axes command is axes([x_left, y_bottom, width, height])
# First row: y_bottom is y2, x_left is x1, x3, x5
# set flux limits for LS band
smax = fig_params['smax_LS']
if smax is None:
smax = max(percentile(real(ds.spec['i']),99)*1.1,median(real(ds.spec['i']))*2)
smin = -smax # make colorbar symmetric about zero
# set axis ratio to 'auto' in order to fill specified subplot areas
# IMPORTANT: must not include 'cbar' and 'cbar_label' in axis_labels
ar0 = 'auto'
# plot Stokes I real, LS band
ax = axes([x1,y2,dsplot_w/tot_w,dsLS_h/tot_h])
#ax.set_autoscale_on(False)
pp = {'pol':'i','smin':smin,'smax':smax,'trim_mask':False,'axis_labels':[],'ar0':ar0,'dy':0.5,'scale':fig_params['colorscale_LS'],'func':func}
if fig_params['linthresh_LS']:
pp['linthresh']=fig_params['linthresh_LS']
plt,cbar_ticks,cbar_ticklbls = ds.plot_dynspec(plot_params=pp)
#gca().xaxis.set_visible(False)
#gca().yaxis.set_label_coords(-0.2,0)
if dsP:
title('Stokes I, 1-4 GHz')
else:
title('Stokes I')
fig.text(0.01,0.5,'Frequency (GHz)',va='center',rotation='vertical',fontsize='small')
# plot Stokes V real, LS band
ax=axes([x3,y2,dsplot_w/tot_w,dsLS_h/tot_h])
pp = {'pol':'v','smin':smin,'smax':smax,'trim_mask':False,'axis_labels':['xlabel'],'ar0':ar0,'dy':0.5,'scale':fig_params['colorscale_LS'],'func':func}
if fig_params['linthresh_LS']:
pp['linthresh']=fig_params['linthresh_LS']
ds.plot_dynspec(plot_params=pp)
gca().yaxis.tick_right()
xlabel_text = ax.xaxis.get_label_text()
ax.set_xlabel('')
#gca().xaxis.set_visible(False)
if dsP:
title('Stokes V, 1-4 GHz')
else:
title('Stokes V')
# plot LS band colorbar
ax = axes([x5,y2,cbar_w/tot_w,dsLS_h/tot_h])
cbar=colorbar(plt,cax=ax)
cbar.set_ticks(cbar_ticks)
cbar.set_ticklabels(cbar_ticklbls)
ax = cbar.ax
if dsP:
cbar_label = '1-4 Flux Density (mJy)'
ycbar = 0.75
else:
cbar_label = 'Flux Density (mJy)'
ycbar=0.65
if obsname=='15A-416_UVCet_1':
ycbar=0.98
ax.text(4.2,ycbar,cbar_label,rotation=90,fontsize='small')
if dsP:
# Second row of plots: Stokes I P, apparent Stokes V P
# Format for axes command is axes([x_left, y_bottom, width, height])
# Second row: y_bottom is y4, x_left is x1, x3
# set flux limits for P band
smaxP = fig_params['smax_P']
if smaxP is None:
smaxP = dsP.get_rms('v')*6.
sminP = -smaxP
# plot Stokes I real, P band
ax = axes([x1,y4,dsplot_w/tot_w,dsP_h/tot_h])
pp = {'pol':'i','smin':sminP,'smax':smaxP,'trim_mask':False,'axis_labels':[],'dy':0.05,'ar0':ar0,'scale':fig_params['colorscale_P'],'func':func}
if fig_params['linthresh_P']:
pp['linthresh']=fig_params['linthresh_P']
dsP.plot_dynspec(plot_params=pp)
title('Stokes I, 0.2-0.5 GHz')
# plot Stokes V real, P band
ax = axes([x3,y4,dsplot_w/tot_w,dsP_h/tot_h])
pp = {'pol':'v','smin':sminP,'smax':smaxP,'trim_mask':False,'axis_labels':[],'dy':0.05,'ar0':ar0,'scale':fig_params['colorscale_P'],'func':func}
if fig_params['linthresh_P']:
pp['linthresh']=fig_params['linthresh_P']
plt,cbar_ticks,cbar_ticklbls=dsP.plot_dynspec(plot_params=pp)
gca().yaxis.tick_right()
title('Stokes V\', 0.2-0.5 GHz')
# Third row of plots: [empty], apparent Stokes U P, P band colorbar (extra height)
# Format for axes command is axes([x_left, y_bottom, width, height])
# Third row: y_bottom is y6
# x_left is x3 (Stokes U), x5 (colorbar)
# height is dsP_h (Stokes U), 2*dsP_h+gap_rows (colorbar)
# plot Stokes U real, P band
ax = axes([x3,y6,dsplot_w/tot_w,dsP_h/tot_h])
pp = {'pol':'u','smin':sminP,'smax':smaxP,'trim_mask':False,'axis_labels':[],'dy':0.05,'ar0':ar0,'scale':fig_params['colorscale_P'],'func':func}
if fig_params['linthresh_P']:
pp['linthresh']=fig_params['linthresh_P']
dsP.plot_dynspec(plot_params=pp)
gca().yaxis.tick_right()
title('Stokes U\', 0.2-0.5 GHz')
# plot P band colorbar
ax = axes([x5,y6,cbar_w/tot_w,cbarP_h])
cbar=colorbar(plt,cax=ax)
cbar.set_ticks(cbar_ticks)
cbar.set_ticklabels(cbar_ticklbls)
ax = cbar.ax
ax.text(4.2,0.9,'0.2-0.5 GHz Flux Density (mJy)',rotation=90,fontsize='small')
fig.text(0.5,0.01,xlabel_text,ha='center',fontsize='small')
date = ds.t0().split()[0]
fig_title = srcname[0:2]+' '+srcname[2:5]+' - '+date
if func == imag:
fig_title += ' - Imag(vis)'
suptitle(fig_title,y=0.99,fontsize='medium')
savefig(figname)
| 45.769231
| 193
| 0.614566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,054
| 0.493978
|
363f98c059fbed994ba92f98a94c9d889c901242
| 2,518
|
py
|
Python
|
src/utils.py
|
jungtaekkim/On-Uncertainty-Estimation-by-Tree-based-Surrogate-Models-in-SMO
|
de195a391f1f9bfc4428dadda9400850408e88ca
|
[
"MIT"
] | null | null | null |
src/utils.py
|
jungtaekkim/On-Uncertainty-Estimation-by-Tree-based-Surrogate-Models-in-SMO
|
de195a391f1f9bfc4428dadda9400850408e88ca
|
[
"MIT"
] | null | null | null |
src/utils.py
|
jungtaekkim/On-Uncertainty-Estimation-by-Tree-based-Surrogate-Models-in-SMO
|
de195a391f1f9bfc4428dadda9400850408e88ca
|
[
"MIT"
] | null | null | null |
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
def plot_1d(X_train, Y_train, X_test, Y_test, mean=None, std=None, str_figure=None, show_fig=True):
plt.rc('text', usetex=True)
fig = plt.figure(figsize=(8, 6))
ax = fig.gca()
ax.plot(X_test, Y_test, linewidth=4)
if mean is not None:
line, = ax.plot(X_test, mean, linewidth=4)
if mean is not None and std is not None:
ax.fill_between(X_test.flatten(), mean - 1.96 * std, mean + 1.96 * std, alpha=0.25, color=line.get_color())
ax.plot(X_train, Y_train, 'x', linestyle='none', markersize=10, mew=4)
ax.set_xlabel('$x$', fontsize=32)
ax.set_ylabel('$y$', fontsize=32)
ax.tick_params(labelsize=24)
ax.set_xlim([np.min(X_test), np.max(X_test)])
ax.grid()
plt.tight_layout()
if str_figure is not None:
path_figures = '../figures'
if not os.path.exists(path_figures):
os.mkdir(path_figures)
plt.savefig(
os.path.join(path_figures, str_figure + '.pdf'),
format='pdf',
transparent=True
)
if show_fig:
plt.show()
plt.close('all')
def get_parser():
parser = argparse.ArgumentParser(description='')
parser.add_argument('-f', '--function', type=str)
args = parser.parse_args()
return parser, args
def compute_nll(preds_mu, preds_sigma, X_test, Y_test, X_train):
assert len(preds_mu.shape) == len(preds_sigma.shape) == len(X_test.shape) == len(Y_test.shape) == len(X_train.shape) == 1
assert preds_mu.shape[0] == preds_sigma.shape[0] == X_test.shape[0] == Y_test.shape[0]
nll = 0.0
for mu, sigma, x, y in zip(preds_mu, preds_sigma, X_test, Y_test):
if np.any(np.abs(X_train - x) < 0.025):
continue
log_pdf = norm.logpdf(y, loc=mu, scale=sigma)
nll -= log_pdf
nll /= preds_mu.shape[0]
return nll
def compute_kl(preds_mu, preds_sigma, mean_gp, std_gp):
assert len(preds_mu.shape) == len(preds_sigma.shape) == len(mean_gp.shape) == len(std_gp.shape) == 1
assert preds_mu.shape[0] == preds_sigma.shape[0] == mean_gp.shape[0] == std_gp.shape[0]
kl = 0.0
for mu, sigma, mu_gp, sigma_gp in zip(preds_mu, preds_sigma, mean_gp, std_gp):
cur_kl = np.log(sigma_gp / (sigma + 1e-7)) + (sigma**2 + (mu - mu_gp)**2) / (2 * sigma_gp**2) - 1 / 2
        kl += cur_kl  # accumulate, so the division below yields the mean KL over test points
kl /= preds_mu.shape[0]
return kl
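# The loop in compute_kl evaluates the closed-form KL divergence between two 1-D Gaussians,
#   KL( N(mu, sigma^2) || N(mu_gp, sigma_gp^2) )
#     = log(sigma_gp / sigma) + (sigma^2 + (mu - mu_gp)^2) / (2 * sigma_gp^2) - 1/2,
# averaged over the test points by the final division. A minimal sanity check with
# illustrative values (identical Gaussians should give a KL of approximately zero):
#
#   compute_kl(np.array([0.0]), np.array([1.0]), np.array([0.0]), np.array([1.0]))  # ~0.0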
if __name__ == '__main__':
pass
| 27.67033
| 125
| 0.623511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 81
| 0.032168
|
363fef05c1d19fcf588faad011da861494aa03e5
| 1,191
|
py
|
Python
|
cocos-example.py
|
halflings/terrasim
|
a51c0e7cb28d3a3ec0d9c687d58c1c753d956c2d
|
[
"Apache-2.0"
] | null | null | null |
cocos-example.py
|
halflings/terrasim
|
a51c0e7cb28d3a3ec0d9c687d58c1c753d956c2d
|
[
"Apache-2.0"
] | null | null | null |
cocos-example.py
|
halflings/terrasim
|
a51c0e7cb28d3a3ec0d9c687d58c1c753d956c2d
|
[
"Apache-2.0"
] | null | null | null |
import random
import cocos
from cocos.tiles import TileSet, RectCell, RectMapLayer
from cocos.director import director
from cocos.layer.scrolling import ScrollingManager
import pyglet
from game import Game
from views import WorldMap, CharacterView2
class MainLayer(cocos.layer.Layer):
is_event_handler = True
def __init__(self):
super(MainLayer, self).__init__()
# World/map management
self.seed = random.Random()
self.game = Game(seed=self.seed, world_width=30, world_height=15)
# Children
scroller = ScrollingManager()
scroller.add(WorldMap(self.game.world))
for character in self.game.characters:
scroller.add(CharacterView2(character))
self.add(scroller)
self.schedule(self.update)
def update(self, dt):
self.game.update(dt)
def on_key_press(self, symbol, modifiers):
print("Pressed " + str(symbol))
if __name__ == '__main__':
director.init(width=800, height=600, resizable=False, autoscale=False)
director.set_show_FPS(True)
main_layer = MainLayer()
main_scene = cocos.scene.Scene(main_layer)
director.run(main_scene)
| 26.466667
| 74
| 0.687657
| 691
| 0.580185
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.043661
|
3641d06d971b0ebba597cba4a1a138c64156e641
| 3,532
|
py
|
Python
|
view.py
|
ykmoon04/2021-2-OSSP1-Smith-3
|
66d86e01444b822414a254d0944657ca4ce7dc22
|
[
"Apache-2.0"
] | 1
|
2021-10-31T13:01:08.000Z
|
2021-10-31T13:01:08.000Z
|
view.py
|
ykmoon04/2021-2-OSSP1-Smith-3
|
66d86e01444b822414a254d0944657ca4ce7dc22
|
[
"Apache-2.0"
] | null | null | null |
view.py
|
ykmoon04/2021-2-OSSP1-Smith-3
|
66d86e01444b822414a254d0944657ca4ce7dc22
|
[
"Apache-2.0"
] | 4
|
2021-11-04T09:03:37.000Z
|
2021-12-28T06:28:15.000Z
|
from itertools import takewhile
from eunjeon import Mecab
import queue
from jamo import h2j, j2hcj
import numpy as np
import re
import json
import sys
from pkg_resources import VersionConflict
global script_table
global voice_table
global s_idx
global v_idx
script_table = []
voice_table = []
def main(ans,speak):
global script_table
global voice_table
ans = remove_marks(ans)
speak = remove_marks(speak)
mecab = Mecab()
    script_table = [] # reference (correct) sentence
    voice_table = [] # sentence received from speech recognition
falseWord = {}
    totalcount = len(ans.replace(" ","")) # total number of characters
    falsecount = 0 # number of incorrect characters
percent = 0.00
    # morphological analysis
script_table = mecab.morphs(ans)
voice_table = mecab.morphs(speak)
    # reshape the morpheme analysis results so the two tables can be compared
reconstruct()
    # compare the two tables and extract the incorrect parts
for voice, script in zip(voice_table,script_table):
if voice != script:
tmp = []
for v,s in zip(voice, script):
if v!=s:
tmp.append(v)
falsecount += 1
falseWord[voice] = tmp
    # if the speaker stopped partway, count the remaining part of the reference sentence as incorrect
if len(voice_table) < len(script_table):
for script in script_table[len(voice_table):]:
falsecount += len(script)
    # compute accuracy
percent = round((totalcount - falsecount)/totalcount * 100,2)
    data = { # build the data to return as JSON
        'script_table': script_table, # morpheme analysis of the reference sentence
        'voice_table': voice_table, # morpheme analysis of the sentence the user spoke
        'false':falseWord, # incorrect parts, "incorrect morpheme": "incorrect characters"
        'percent' : percent # accuracy
}
print(json.dumps(data))
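# Shape of the JSON payload printed above (the values shown are placeholders, not real output):
#
#   {
#     "script_table": [...],  # morphemes of the reference sentence
#     "voice_table": [...],   # morphemes of the spoken sentence
#     "false": {...},         # {"incorrect morpheme": ["incorrect characters"]}
#     "percent": 95.0         # accuracy in percent
#   }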
# make the tables match the format of script_table
def reconstruct():
global s_idx
global v_idx
    s_idx = 0 # index into script_table
    v_idx = 0 # index into voice_table
while needReconstruct():
        # voice is split more finely than script, e.g. script[idx] = '해외여행' ("overseas travel"), voice[idx] = '해외' ("overseas")
if len(script_table[s_idx])>len(voice_table[v_idx]):
diff = len(script_table[s_idx])-len(voice_table[v_idx])
while diff>0:
if len(voice_table[v_idx+1]) >= diff:
voice_table[v_idx] = voice_table[v_idx]+voice_table[v_idx+1][0:diff]
voice_table[v_idx+1] = voice_table[v_idx+1][diff:]
if(voice_table[v_idx+1]==''):
del voice_table[v_idx+1]
v_idx+=1
diff = 0
else:
voice_table[v_idx] += voice_table[v_idx+1][0:]
diff -= len(voice_table[v_idx+1])
del voice_table[v_idx+1]
s_idx +=1
        # voice is split less finely than script, e.g. script[idx] = '해외' ("overseas"), voice[idx] = '해외여행' ("overseas travel")
elif len(script_table[s_idx]) < len(voice_table[v_idx]):
voice_table.insert(v_idx+1,voice_table[v_idx][:len(script_table[s_idx])])
voice_table.insert(v_idx+2,voice_table[v_idx][len(script_table[s_idx]):])
del voice_table[v_idx]
s_idx+=1
v_idx+=1
def needReconstruct():
global s_idx
global v_idx
tmp = 0
for voice, script in zip(voice_table[v_idx:],script_table[s_idx:]):
if(len(voice)!=len(script)):
v_idx += tmp
s_idx += tmp
return True
tmp += 1;
return False
def remove_marks(string): # removes special characters (including periods)
return re.sub('[.-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', string)
if __name__=="__main__":
    main(sys.argv[1], sys.argv[2]) # argv[1]: reference sentence, argv[2]: practice (spoken) sentence
| 29.433333
| 88
| 0.558607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 909
| 0.234158
|
364307863e32ccdc999357c039cf0832ac94b380
| 103
|
py
|
Python
|
rboard/board/__init__.py
|
joalon/rboard
|
cc743d8c08837c20bcc9382655e36bb79aecd524
|
[
"MIT"
] | null | null | null |
rboard/board/__init__.py
|
joalon/rboard
|
cc743d8c08837c20bcc9382655e36bb79aecd524
|
[
"MIT"
] | null | null | null |
rboard/board/__init__.py
|
joalon/rboard
|
cc743d8c08837c20bcc9382655e36bb79aecd524
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
blueprint = Blueprint('board', __name__)
from rboard.board import routes
| 17.166667
| 40
| 0.796117
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.067961
|
36446df7ecc8c55d638710c593c4957d62d9704f
| 615
|
py
|
Python
|
examples/second_node.py
|
csunny/kademlia
|
5513ff7851aa00601ebc7fd9eb610de4e2147f96
|
[
"MIT"
] | 1
|
2018-11-30T13:52:37.000Z
|
2018-11-30T13:52:37.000Z
|
examples/second_node.py
|
csunny/kademlia
|
5513ff7851aa00601ebc7fd9eb610de4e2147f96
|
[
"MIT"
] | null | null | null |
examples/second_node.py
|
csunny/kademlia
|
5513ff7851aa00601ebc7fd9eb610de4e2147f96
|
[
"MIT"
] | null | null | null |
import logging
import asyncio
import sys
from kademlia.network import Server
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log = logging.getLogger('kademlia')
log.addHandler(handler)
log.setLevel(logging.DEBUG)
loop = asyncio.get_event_loop()
loop.set_debug(True)
server = Server()
server.listen(1234)
bootstrap_node = ('0.0.0.0', 8468)
loop.run_until_complete(server.bootstrap([bootstrap_node]))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.stop()
loop.close()
| 21.206897
| 85
| 0.749593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 73
| 0.118699
|
3645c7b92794db29663c1c763622e5f0554a803c
| 1,771
|
py
|
Python
|
src/commons/big_query/big_query_job_reference.py
|
Morgenz/bbq
|
f0fd3f626841c610aee80ad08a61123b7cccb775
|
[
"Apache-2.0"
] | 41
|
2018-05-08T11:54:37.000Z
|
2022-02-09T21:19:17.000Z
|
src/commons/big_query/big_query_job_reference.py
|
Morgenz/bbq
|
f0fd3f626841c610aee80ad08a61123b7cccb775
|
[
"Apache-2.0"
] | 139
|
2018-06-07T13:45:21.000Z
|
2021-04-30T20:44:06.000Z
|
src/commons/big_query/big_query_job_reference.py
|
Morgenz/bbq
|
f0fd3f626841c610aee80ad08a61123b7cccb775
|
[
"Apache-2.0"
] | 5
|
2019-09-11T12:28:24.000Z
|
2022-02-04T21:38:29.000Z
|
from src.commons.big_query.copy_job_async.result_check.result_check_request import \
ResultCheckRequest
from src.commons.big_query.copy_job_async.task_creator import TaskCreator
class BigQueryJobReference(object):
def __init__(self, project_id, job_id, location):
self.project_id = project_id
self.job_id = job_id
self.location = location
def __str__(self):
return "BigQueryJobReference(projectId:{}, job_id:{}, location: {})" \
.format(self.project_id, self.job_id, self.location)
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return type(other) is BigQueryJobReference \
and self.project_id == other.project_id \
and self.job_id == other.job_id \
and self.location == other.location
def __ne__(self, other):
return not (self == other)
def create_post_copy_action(self, copy_job_request):
TaskCreator.create_copy_job_result_check(
ResultCheckRequest(
task_name_suffix=copy_job_request.task_name_suffix,
copy_job_type_id=copy_job_request.copy_job_type_id,
job_reference=self,
retry_count=copy_job_request.retry_count,
post_copy_action_request=copy_job_request.post_copy_action_request
)
)
def to_json(self):
return dict(project_id=self.project_id,
job_id=self.job_id,
location=self.location)
@classmethod
def from_json(cls, json):
return BigQueryJobReference(project_id=json["project_id"],
job_id=json["job_id"],
location=json["location"])
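# Round-trip sketch (illustrative values only):
#
#   ref = BigQueryJobReference("my-project", "job_123", "EU")
#   BigQueryJobReference.from_json(ref.to_json()) == ref   # True, by the __eq__ above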
| 36.142857
| 84
| 0.631846
| 1,586
| 0.895539
| 0
| 0
| 231
| 0.130435
| 0
| 0
| 91
| 0.051383
|
36472112e71a6f099b1f967e54265e83e3ef22d7
| 2,068
|
py
|
Python
|
PyInstaller/hooks/hook-numpy.py
|
mathiascode/pyinstaller
|
eaad76a75a5cc7be90e445f974f4bf1731045496
|
[
"Apache-2.0"
] | 9,267
|
2015-01-01T04:08:45.000Z
|
2022-03-31T11:42:38.000Z
|
PyInstaller/hooks/hook-numpy.py
|
bwoodsend/pyinstaller
|
2a16bc2fe0a1234d0f89836d39b7877c74b3bca1
|
[
"Apache-2.0"
] | 5,150
|
2015-01-01T12:09:56.000Z
|
2022-03-31T18:06:12.000Z
|
PyInstaller/hooks/hook-numpy.py
|
bwoodsend/pyinstaller
|
2a16bc2fe0a1234d0f89836d39b7877c74b3bca1
|
[
"Apache-2.0"
] | 2,101
|
2015-01-03T10:25:27.000Z
|
2022-03-30T11:04:42.000Z
|
#!/usr/bin/env python3
# --- Copyright Disclaimer ---
#
# In order to support PyInstaller with numpy<1.20.0 this file will be duplicated for a short period inside
# PyInstaller's repository [1]. However this file is the intellectual property of the NumPy team and is
# under the terms and conditions outlined in their repository [2].
#
# .. refs:
#
# [1] PyInstaller: https://github.com/pyinstaller/pyinstaller/
# [2] NumPy's license: https://github.com/numpy/numpy/blob/master/LICENSE.txt
#
"""
This hook should collect all binary files and any hidden modules that numpy needs.
Our (some-what inadequate) docs for writing PyInstaller hooks are kept here:
https://pyinstaller.readthedocs.io/en/stable/hooks.html
PyInstaller has a lot of NumPy users so we consider maintaining this hook a high priority.
Feel free to @mention either bwoodsend or Legorooj on Github for help keeping it working.
"""
from PyInstaller.compat import is_conda, is_pure_conda
from PyInstaller.utils.hooks import collect_dynamic_libs
# Collect all DLLs inside numpy's installation folder, dump them into built app's root.
binaries = collect_dynamic_libs("numpy", ".")
# If using Conda without any non-conda virtual environment manager:
if is_pure_conda:
    # Assume the NumPy being used comes from Conda-forge and collect its DLLs from the communal Conda bin directory. DLLs from
# NumPy's dependencies must also be collected to capture MKL, OpenBlas, OpenMP, etc.
from PyInstaller.utils.hooks import conda_support
datas = conda_support.collect_dynamic_libs("numpy", dependencies=True)
# Submodules PyInstaller cannot detect (probably because they are only imported by extension modules, which PyInstaller
# cannot read).
hiddenimports = ['numpy.core._dtype_ctypes']
if is_conda:
hiddenimports.append("six")
# Remove testing and building code and packages that are referenced throughout NumPy but are not really dependencies.
excludedimports = [
"scipy",
"pytest",
"nose",
"distutils",
"f2py",
"setuptools",
"numpy.f2py",
"numpy.distutils",
]
| 38.296296
| 119
| 0.758704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,617
| 0.781915
|
36480cab3e7b7b34c639f6dcb640a7d9ee3f2cc1
| 4,480
|
py
|
Python
|
test_proj/blog/admin.py
|
Ivan-Feofanov/django-inline-actions
|
a9410a67e9932152d65a063bea0848c98f5c8d73
|
[
"BSD-3-Clause"
] | 204
|
2016-05-10T05:38:27.000Z
|
2022-03-25T11:22:28.000Z
|
test_proj/blog/admin.py
|
Ivan-Feofanov/django-inline-actions
|
a9410a67e9932152d65a063bea0848c98f5c8d73
|
[
"BSD-3-Clause"
] | 45
|
2016-07-18T15:39:48.000Z
|
2022-02-28T17:06:38.000Z
|
test_proj/blog/admin.py
|
Ivan-Feofanov/django-inline-actions
|
a9410a67e9932152d65a063bea0848c98f5c8d73
|
[
"BSD-3-Clause"
] | 40
|
2016-09-23T07:27:50.000Z
|
2022-03-22T09:44:10.000Z
|
from django.contrib import admin, messages
from django.shortcuts import render
from django.utils.translation import gettext_lazy as _
from inline_actions.actions import DefaultActionsMixin, ViewAction
from inline_actions.admin import InlineActionsMixin, InlineActionsModelAdminMixin
from . import forms
from .models import Article, Author, AuthorProxy
class UnPublishActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(UnPublishActionsMixin, self).get_inline_actions(request, obj)
if obj:
if obj.status == Article.DRAFT:
actions.append('publish')
elif obj.status == Article.PUBLISHED:
actions.append('unpublish')
return actions
def publish(self, request, obj, parent_obj=None):
obj.status = Article.PUBLISHED
obj.save()
messages.info(request, _("Article published."))
publish.short_description = _("Publish") # type: ignore
def unpublish(self, request, obj, parent_obj=None):
obj.status = Article.DRAFT
obj.save()
messages.info(request, _("Article unpublished."))
unpublish.short_description = _("Unpublish") # type: ignore
class TogglePublishActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(TogglePublishActionsMixin, self).get_inline_actions(
request=request, obj=obj
)
actions.append('toggle_publish')
return actions
def toggle_publish(self, request, obj, parent_obj=None):
if obj.status == Article.DRAFT:
obj.status = Article.PUBLISHED
else:
obj.status = Article.DRAFT
obj.save()
status = 'unpublished' if obj.status == Article.DRAFT else 'published'
messages.info(request, _("Article {}.".format(status)))
def get_toggle_publish_label(self, obj):
label = 'publish' if obj.status == Article.DRAFT else 'unpublish'
return 'Toggle {}'.format(label)
def get_toggle_publish_css(self, obj):
return 'button object-tools' if obj.status == Article.DRAFT else 'default'
class ChangeTitleActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(ChangeTitleActionsMixin, self).get_inline_actions(request, obj)
actions.append('change_title')
return actions
def change_title(self, request, obj, parent_obj=None):
        # explicitly check whether the submit button has been pressed
if '_save' in request.POST:
form = forms.ChangeTitleForm(request.POST, instance=obj)
form.save()
return None # return back to list view
elif '_back' in request.POST:
return None # return back to list view
else:
form = forms.ChangeTitleForm(instance=obj)
return render(request, 'change_title.html', context={'form': form})
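# change_title above assumes forms.ChangeTitleForm behaves like a ModelForm bound to Article.
# The real form lives in the local forms module (not shown here); a minimal sketch under that
# assumption could be:
#
#   class ChangeTitleForm(forms.ModelForm):
#       class Meta:
#           model = Article
#           fields = ('title',)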
class ArticleInline(
DefaultActionsMixin,
UnPublishActionsMixin,
TogglePublishActionsMixin,
InlineActionsMixin,
admin.TabularInline,
):
model = Article
fields = (
'title',
'status',
)
readonly_fields = (
'title',
'status',
)
def has_add_permission(self, request, obj=None):
return False
class ArticleNoopInline(InlineActionsMixin, admin.TabularInline):
model = Article
fields = (
'title',
'status',
)
readonly_fields = (
'title',
'status',
)
def get_inline_actions(self, request, obj=None):
actions = super(ArticleNoopInline, self).get_inline_actions(
request=request, obj=obj
)
actions.append('noop_action')
return actions
def noop_action(self, request, obj, parent_obj=None):
pass
@admin.register(AuthorProxy)
class AuthorMultipleInlinesAdmin(InlineActionsModelAdminMixin, admin.ModelAdmin):
inlines = [ArticleInline, ArticleNoopInline]
list_display = ('name',)
inline_actions = None
@admin.register(Author)
class AuthorAdmin(InlineActionsModelAdminMixin, admin.ModelAdmin):
inlines = [ArticleInline]
list_display = ('name',)
inline_actions = None
@admin.register(Article)
class ArticleAdmin(
UnPublishActionsMixin,
TogglePublishActionsMixin,
ChangeTitleActionsMixin,
ViewAction,
InlineActionsModelAdminMixin,
admin.ModelAdmin,
):
list_display = ('title', 'status', 'author')
| 29.668874
| 87
| 0.667857
| 4,024
| 0.898214
| 0
| 0
| 644
| 0.14375
| 0
| 0
| 497
| 0.110938
|
3649038aeb95961f992580df722315d018924dd9
| 12,731
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/macInMACv42_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/macInMACv42_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/macInMACv42_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class MacInMACv42(Base):
__slots__ = ()
_SDM_NAME = 'macInMACv42'
_SDM_ATT_MAP = {
'HeaderBDstAddress': 'macInMACv42.header.bDstAddress-1',
'HeaderBSrcAddress': 'macInMACv42.header.bSrcAddress-2',
'BTAGEthertypeEthertypeValue': 'macInMACv42.header.bTAGEthertype.ethertypeValue-3',
'BTagPcp': 'macInMACv42.header.bTAGEthertype.bTag.pcp-4',
'BTagDei': 'macInMACv42.header.bTAGEthertype.bTag.dei-5',
'BTagVlanID': 'macInMACv42.header.bTAGEthertype.bTag.vlanID-6',
'ITAGEthertypeEthertypeValue': 'macInMACv42.header.iTAGEthertype.ethertypeValue-7',
'ITAGPcp': 'macInMACv42.header.iTAGEthertype.iTAG.pcp-8',
'ITAGDrop': 'macInMACv42.header.iTAGEthertype.iTAG.drop-9',
'ITAGFmt': 'macInMACv42.header.iTAGEthertype.iTAG.fmt-10',
'ITAGReserved': 'macInMACv42.header.iTAGEthertype.iTAG.reserved-11',
'ITAGISID': 'macInMACv42.header.iTAGEthertype.iTAG.iSID-12',
'HeaderCDstAddress': 'macInMACv42.header.cDstAddress-13',
'HeaderCSrcAddress': 'macInMACv42.header.cSrcAddress-14',
'STAGSTAGEthertype': 'macInMACv42.header.sTAGCTAG.tag.sTAG.sTAGEthertype-15',
'STAGPcp': 'macInMACv42.header.sTAGCTAG.tag.sTAG.sTAG.pcp-16',
'STAGDei': 'macInMACv42.header.sTAGCTAG.tag.sTAG.sTAG.dei-17',
'STAGVlanID': 'macInMACv42.header.sTAGCTAG.tag.sTAG.sTAG.vlanID-18',
'CTAGCTAGEthertype': 'macInMACv42.header.sTAGCTAG.tag.cTAG.cTAGEthertype-19',
'CTAGUserPriority': 'macInMACv42.header.sTAGCTAG.tag.cTAG.cTAG.userPriority-20',
'CTAGCfi': 'macInMACv42.header.sTAGCTAG.tag.cTAG.cTAG.cfi-21',
'CTAGVlanId': 'macInMACv42.header.sTAGCTAG.tag.cTAG.cTAG.vlanId-22',
'BothSTAGCTAGSTAGEthertype': 'macInMACv42.header.sTAGCTAG.tag.bothSTAGCTAG.sTAGEthertype-23',
'BothstagctagSTAGPcp': 'macInMACv42.header.sTAGCTAG.tag.bothSTAGCTAG.sTAG.pcp-24',
'BothstagctagSTAGDei': 'macInMACv42.header.sTAGCTAG.tag.bothSTAGCTAG.sTAG.dei-25',
'BothstagctagSTAGVlanID': 'macInMACv42.header.sTAGCTAG.tag.bothSTAGCTAG.sTAG.vlanID-26',
'BothSTAGCTAGCTAGEthertype': 'macInMACv42.header.sTAGCTAG.tag.bothSTAGCTAG.cTAGEthertype-27',
'BothstagctagCTAGUserPriority': 'macInMACv42.header.sTAGCTAG.tag.bothSTAGCTAG.cTAG.userPriority-28',
'BothstagctagCTAGCfi': 'macInMACv42.header.sTAGCTAG.tag.bothSTAGCTAG.cTAG.cfi-29',
'BothstagctagCTAGVlanId': 'macInMACv42.header.sTAGCTAG.tag.bothSTAGCTAG.cTAG.vlanId-30',
'TagNoSTAGCTAG': 'macInMACv42.header.sTAGCTAG.tag.noSTAGCTAG-31',
}
def __init__(self, parent, list_op=False):
super(MacInMACv42, self).__init__(parent, list_op)
@property
def HeaderBDstAddress(self):
"""
Display Name: B-Destination Address (Ethernet)
Default Value: 00:00:00:00:00:00
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderBDstAddress']))
@property
def HeaderBSrcAddress(self):
"""
Display Name: B-Source Address (Ethernet)
Default Value: 00:00:00:00:00:00
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderBSrcAddress']))
@property
def BTAGEthertypeEthertypeValue(self):
"""
Display Name: Ethertype value
Default Value: 0x88A8
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BTAGEthertypeEthertypeValue']))
@property
def BTagPcp(self):
"""
Display Name: B-TAG PCP
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BTagPcp']))
@property
def BTagDei(self):
"""
Display Name: B-TAG DEI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BTagDei']))
@property
def BTagVlanID(self):
"""
Display Name: B-TAG VLAN ID
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BTagVlanID']))
@property
def ITAGEthertypeEthertypeValue(self):
"""
Display Name: Ethertype value
Default Value: 0x88E7
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ITAGEthertypeEthertypeValue']))
@property
def ITAGPcp(self):
"""
Display Name: I-TAG PCP
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ITAGPcp']))
@property
def ITAGDrop(self):
"""
Display Name: I-TAG DEI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ITAGDrop']))
@property
def ITAGFmt(self):
"""
Display Name: FMT
Default Value: 0
Value Format: decimal
Available enum values: Payload Encapsulated Wi Fcs, 0, Payload Encapsulated Wo Fcs, 1, No Encapsulation, 2, Reserved, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ITAGFmt']))
@property
def ITAGReserved(self):
"""
Display Name: Reserved
Default Value: 0x0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ITAGReserved']))
@property
def ITAGISID(self):
"""
Display Name: I-SID
Default Value: 256
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ITAGISID']))
@property
def HeaderCDstAddress(self):
"""
Display Name: C-Destination Address (Ethernet)
Default Value: 00:00:00:00:00:00
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderCDstAddress']))
@property
def HeaderCSrcAddress(self):
"""
Display Name: C-Source Address (Ethernet)
Default Value: 00:00:00:00:00:00
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderCSrcAddress']))
@property
def STAGSTAGEthertype(self):
"""
Display Name: S-TAG Ethertype
Default Value: 0x88A8
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['STAGSTAGEthertype']))
@property
def STAGPcp(self):
"""
Display Name: S-TAG PCP
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['STAGPcp']))
@property
def STAGDei(self):
"""
Display Name: S-TAG DEI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['STAGDei']))
@property
def STAGVlanID(self):
"""
Display Name: S-TAG VLAN ID
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['STAGVlanID']))
@property
def CTAGCTAGEthertype(self):
"""
Display Name: C-TAG Ethertype
Default Value: 0x8100
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CTAGCTAGEthertype']))
@property
def CTAGUserPriority(self):
"""
Display Name: C-TAG User Priority
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CTAGUserPriority']))
@property
def CTAGCfi(self):
"""
Display Name: C-TAG CFI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CTAGCfi']))
@property
def CTAGVlanId(self):
"""
Display Name: C-TAG VLAN ID
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CTAGVlanId']))
@property
def BothSTAGCTAGSTAGEthertype(self):
"""
Display Name: S-TAG Ethertype
Default Value: 0x88A8
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BothSTAGCTAGSTAGEthertype']))
@property
def BothstagctagSTAGPcp(self):
"""
Display Name: S-TAG PCP
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BothstagctagSTAGPcp']))
@property
def BothstagctagSTAGDei(self):
"""
Display Name: S-TAG DEI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BothstagctagSTAGDei']))
@property
def BothstagctagSTAGVlanID(self):
"""
Display Name: S-TAG VLAN ID
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BothstagctagSTAGVlanID']))
@property
def BothSTAGCTAGCTAGEthertype(self):
"""
Display Name: C-TAG Ethertype
Default Value: 0x8100
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BothSTAGCTAGCTAGEthertype']))
@property
def BothstagctagCTAGUserPriority(self):
"""
Display Name: C-TAG User Priority
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BothstagctagCTAGUserPriority']))
@property
def BothstagctagCTAGCfi(self):
"""
Display Name: C-TAG CFI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BothstagctagCTAGCfi']))
@property
def BothstagctagCTAGVlanId(self):
"""
Display Name: C-TAG VLAN ID
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BothstagctagCTAGVlanId']))
@property
def TagNoSTAGCTAG(self):
"""
Display Name: No S-TAG/C-TAG
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TagNoSTAGCTAG']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 35.561453
| 127
| 0.657843
| 12,648
| 0.99348
| 0
| 0
| 9,681
| 0.760427
| 0
| 0
| 6,153
| 0.483308
|
36490aa7054830d00893922cc4300184b33b2ea9
| 1,037
|
py
|
Python
|
aries_cloudagent/commands/__init__.py
|
ldej/aries-cloudagent-python
|
25b7a9c08921e67b0962c434102489884ac403b2
|
[
"Apache-2.0"
] | 1
|
2021-01-15T01:04:43.000Z
|
2021-01-15T01:04:43.000Z
|
aries_cloudagent/commands/__init__.py
|
ldej/aries-cloudagent-python
|
25b7a9c08921e67b0962c434102489884ac403b2
|
[
"Apache-2.0"
] | 1
|
2020-03-06T12:11:29.000Z
|
2020-03-06T12:11:29.000Z
|
aries_cloudagent/commands/__init__.py
|
ldej/aries-cloudagent-python
|
25b7a9c08921e67b0962c434102489884ac403b2
|
[
"Apache-2.0"
] | 1
|
2021-01-15T08:45:02.000Z
|
2021-01-15T08:45:02.000Z
|
"""Commands module common setup."""
from importlib import import_module
from typing import Sequence
def available_commands():
"""Index available commands."""
return [
{"name": "help", "summary": "Print available commands"},
{"name": "provision", "summary": "Provision an agent"},
{"name": "start", "summary": "Start a new agent process"},
]
def load_command(command: str):
"""Load the module corresponding with a named command."""
module = None
module_path = None
for cmd in available_commands():
if cmd["name"] == command:
module = cmd["name"]
module_path = cmd.get("module")
break
if module and not module_path:
module_path = f"{__package__}.{module}"
if module_path:
return import_module(module_path)
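# Illustrative usage (assumes the package is importable as installed):
#
#   from aries_cloudagent.commands import load_command, run_command
#   provision = load_command("provision")   # imports f"{__package__}.provision"
#   run_command("does-not-exist", [])       # unknown names fall back to the help command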
def run_command(command: str, argv: Sequence[str] = None):
"""Execute a named command with command line arguments."""
module = load_command(command) or load_command("help")
module.execute(argv)
| 29.628571
| 66
| 0.636451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 374
| 0.360656
|
36493db41d822a42cd12a9cb95ab495245aeb761
| 3,646
|
py
|
Python
|
AI/Lab 2/astar.py
|
abikoraj/CSIT
|
68ba4944d2b6366a8d5b70b92bdc16b19b7e9208
|
[
"MIT"
] | 9
|
2021-11-29T00:56:41.000Z
|
2022-03-19T04:41:05.000Z
|
AI/Lab 2/astar.py
|
abikoraj/CSIT
|
68ba4944d2b6366a8d5b70b92bdc16b19b7e9208
|
[
"MIT"
] | null | null | null |
AI/Lab 2/astar.py
|
abikoraj/CSIT
|
68ba4944d2b6366a8d5b70b92bdc16b19b7e9208
|
[
"MIT"
] | 3
|
2021-11-29T06:30:33.000Z
|
2022-03-18T14:27:23.000Z
|
gScore = 0 #use this to index g(n)
fScore = 1 #use this to index f(n)
previous = 2 #use this to index previous node
inf = 10000 #use this for value of infinity
#we represent the graph usind adjacent list
#as dictionary of dictionaries
G = {
'biratnagar' : {'itahari' : 22, 'biratchowk' : 30, 'rangeli': 25},
'itahari' : {'biratnagar' : 22, 'dharan' : 20, 'biratchowk' : 11},
'dharan' : {'itahari' : 20},
'biratchowk' : {'biratnagar' : 30, 'itahari' : 11, 'kanepokhari' :10},
'rangeli' : {'biratnagar' : 25, 'kanepokhari' : 25, 'urlabari' : 40},
'kanepokhari' : {'rangeli' : 25, 'biratchowk' : 10, 'urlabari' : 12},
'urlabari' : {'rangeli' : 40, 'kanepokhari' : 12, 'damak' : 6},
'damak' : {'urlabari' : 6}
}
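# Each entry in the visited/unvisited tables below is a 3-element record indexed by the
# constants above: [g(n), f(n), previous], with f(n) = g(n) + h(n). For example, expanding
# biratnagar gives biratchowk a tentative record of [30, 30 + 29, 'biratnagar'] = [30, 59, 'biratnagar'],
# using the edge weight from G and the straight-line estimate from h below.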
def h(city):
#returns straight line distance from a city to damak
h = {
'biratnagar' : 46,
'itahari' : 39,
'dharan' : 41,
'rangeli' : 28,
'biratchowk' : 29,
'kanepokhari' : 17,
'urlabari' : 6,
'damak' : 0
}
return h[city]
def getMinimum(unvisited):
#returns city with minimum f(n)
currDist = inf
leastFScoreCity = ''
for city in unvisited:
if unvisited[city][fScore] < currDist:
currDist = unvisited[city][fScore]
leastFScoreCity = city
return leastFScoreCity
def aStar(G, start, goal):
visited = {} #we declare visited list as empty dict
unvisited = {} #we declare unvisited list as empty dict
#we now add every city to the unvisited
for city in G.keys():
unvisited[city] = [inf, inf, ""]
hScore = h(start)
#for starting node, the g(n) is 0, so f(n) will be h(n)
unvisited[start] = [0, hScore, ""]
finished = False
while finished == False:
#if there are no nodes to evaluate in unvisited
if len(unvisited) == 0:
finished = True
else:
#find the node with lowest f(n) from open list
currentNode = getMinimum(unvisited)
if currentNode == goal:
finished = True
#copy data to visited list
visited[currentNode] = unvisited[currentNode]
else:
#we examine the neighbors of currentNode
for neighbor in G[currentNode]:
#we only check unvisited neighbors
if neighbor not in visited:
newGScore = unvisited[currentNode][gScore] + G[currentNode][neighbor]
if newGScore < unvisited[neighbor][gScore]:
unvisited[neighbor][gScore] = newGScore
unvisited[neighbor][fScore] = newGScore + h(neighbor)
unvisited[neighbor][previous] = currentNode
#we now add currentNode to the visited list
visited[currentNode] = unvisited[currentNode]
#we now remove the currentNode from unvisited
del unvisited[currentNode]
return visited
def findPath(visitSequence, goal):
answer = []
answer.append(goal)
currCity = goal
while visitSequence[currCity][previous] != '':
prevCity = visitSequence[currCity][previous]
answer.append(prevCity)
currCity = prevCity
return answer[::-1]
start = 'biratnagar'
goal = 'damak'
visitSequence = aStar(G, start, goal)
path = findPath(visitSequence, goal)
print(path)
| 33.145455
| 94
| 0.545804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,135
| 0.3113
|
3649d664027df60736783975e94228ac3542abe3
| 19,918
|
py
|
Python
|
plio/io/io_controlnetwork.py
|
jlaura/plio
|
980c92d88cc78d27729392c14b3113cfac4f89cd
|
[
"Unlicense"
] | 11
|
2018-02-01T02:56:26.000Z
|
2022-02-21T12:08:12.000Z
|
plio/io/io_controlnetwork.py
|
jlaura/plio
|
980c92d88cc78d27729392c14b3113cfac4f89cd
|
[
"Unlicense"
] | 151
|
2016-06-15T21:31:37.000Z
|
2021-11-15T16:55:53.000Z
|
plio/io/io_controlnetwork.py
|
jlaura/plio
|
980c92d88cc78d27729392c14b3113cfac4f89cd
|
[
"Unlicense"
] | 21
|
2016-06-17T17:02:39.000Z
|
2021-03-08T20:47:50.000Z
|
from enum import IntEnum
from time import gmtime, strftime
import warnings
import pandas as pd
import numpy as np
import pvl
import struct
from plio.io import ControlNetFileV0002_pb2 as cnf
from plio.io import ControlNetFileHeaderV0005_pb2 as cnh5
from plio.io import ControlPointFileEntryV0005_pb2 as cnp5
from plio.utils.utils import xstr, find_in_dict
HEADERSTARTBYTE = 65536
DEFAULTUSERNAME = 'None'
def write_filelist(lst, path="fromlist.lis"):
"""
Writes a filelist to a file so it can be used in ISIS3.
Parameters
----------
lst : list
A list containing full paths to the images used, as strings.
path : str
The name of the file to write out. Default: fromlist.lis
"""
handle = open(path, 'w')
for filename in lst:
handle.write(filename)
handle.write('\n')
return
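# Illustrative usage (the cube file names are placeholders):
#
#   write_filelist(['/work/images/a.cub', '/work/images/b.cub'], path='cubes.lis')
#
# which writes one path per line to cubes.lis for use as an ISIS3 file list.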
class MeasureMessageType(IntEnum):
"""
An enum to mirror the ISIS3 MeasureLogData enum.
"""
GoodnessOfFit = 2
MinimumPixelZScore = 3
MaximumPixelZScore = 4
PixelShift = 5
WholePixelCorrelation = 6
SubPixelCorrelation = 7
class MeasureLog():
def __init__(self, messagetype, value):
"""
A protobuf compliant measure log object.
Parameters
----------
messagetype : int or str
Either the integer or string representation from the MeasureMessageType enum
value : int or float
The value to be stored in the message log
"""
if isinstance(messagetype, int):
# by value
self.messagetype = MeasureMessageType(messagetype)
else:
# by name
self.messagetype = MeasureMessageType[messagetype]
if not isinstance(value, (float, int)):
raise TypeError(f'{value} is not a numeric type')
self.value = value
def __repr__(self):
return f'{self.messagetype.name}: {self.value}'
def to_protobuf(self, version=2):
"""
Return protobuf compliant measure log object representation
of this class.
Returns
-------
log_message : obj
MeasureLogData object suitable to append to a MeasureLog
repeated field.
"""
# I do not see a better way to get to the inner MeasureLogData obj than this
# imports were not working because it looks like these need to instantiate off
# an object
if version == 2:
log_message = cnf.ControlPointFileEntryV0002().Measure().MeasureLogData()
elif version == 5:
log_message = cnp5.ControlPointFileEntryV0005().Measure().MeasureLogData()
log_message.doubleDataValue = self.value
log_message.doubleDataType = self.messagetype
return log_message
@classmethod
def from_protobuf(cls, protobuf):
return cls(protobuf.doubleDataType, protobuf.doubleDataValue)
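# Illustrative usage (values are made up):
#
#   log = MeasureLog('GoodnessOfFit', 0.92)   # equivalently MeasureLog(2, 0.92) by enum value
#   repr(log)                                 # 'GoodnessOfFit: 0.92'
#   msg = log.to_protobuf(version=2)          # MeasureLogData with doubleDataType == 2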
class IsisControlNetwork(pd.DataFrame):
# normal properties
_metadata = ['header']
@property
def _constructor(self):
return IsisControlNetwork
def from_isis(path, remove_empty=True):
# Now get ready to work with the binary
with IsisStore(path, mode='rb') as store:
df = store.read()
return df
def to_isis(obj, path, mode='wb', version=2,
headerstartbyte=HEADERSTARTBYTE,
networkid='None', targetname='None',
description='None', username=DEFAULTUSERNAME,
creation_date=None, modified_date=None,
pointid_prefix=None, pointid_suffix=None):
if targetname == 'None':
warnings.warn("Users should provide a targetname to this function such as 'Moon' or 'Mars' in order to generate a valid ISIS control network.")
with IsisStore(path, mode) as store:
if not creation_date:
creation_date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
if not modified_date:
modified_date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
point_messages, point_sizes = store.create_points(obj, pointid_prefix, pointid_suffix)
points_bytes = sum(point_sizes)
buffer_header, buffer_header_size = store.create_buffer_header(networkid,
targetname,
description,
username,
point_sizes,
creation_date,
modified_date)
# Write the buffer header
store.write(buffer_header, HEADERSTARTBYTE)
# Then write the points, so we know where to start writing, + 1 to avoid overwrite
point_start_offset = HEADERSTARTBYTE + buffer_header_size
for i, point in enumerate(point_messages):
store.write(point, point_start_offset)
point_start_offset += point_sizes[i]
header = store.create_pvl_header(version, headerstartbyte, networkid,
targetname, description, username,
buffer_header_size, points_bytes,
creation_date, modified_date)
store.write(header.encode('utf-8'))
class IsisStore(object):
"""
Class to manage IO of an ISIS control network (version 2).
Attributes
----------
pointid : int
The current index to be assigned to newly added points
"""
point_field_map = {
'type' : 'pointType',
'chooserName' : 'pointChoosername',
'datetime' : 'pointDatetime',
'editLock' : 'pointEditLock',
'ignore' : 'pointIgnore',
'jigsawRejected' : 'pointJigsawRejected',
'log' : 'pointLog'
}
measure_field_map = {
'type' : 'measureType',
'choosername' : 'measureChoosername',
'datetime' : 'measureDatetime',
'editLock' : 'measureEditLock',
'ignore' : 'measureIgnore',
'jigsawRejected' : 'measureJigsawRejected',
'log' : 'measureLog'
}
def __init__(self, path, mode=None, **kwargs):
self.nmeasures = 0
self.npoints = 0
# Conversion from buffer types to Python types
bt = {1: float,
5: int,
8: bool,
9: str,
11: list,
14: int}
self.header_attrs = [(i.name, bt[i.type]) for i in cnf._CONTROLNETFILEHEADERV0002.fields]
self.point_attrs = [(i.name, bt[i.type]) for i in cnf._CONTROLPOINTFILEENTRYV0002.fields]
self.measure_attrs = [(i.name, bt[i.type]) for i in cnf._CONTROLPOINTFILEENTRYV0002_MEASURE.fields]
self._path = path
if not mode:
mode = 'a' # pragma: no cover
self._mode = mode
self._handle = None
self._open()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, traceback):
self.close()
def close(self):
if self._handle is not None:
self._handle.close()
self._handle = None
def _open(self):
self._handle = open(self._path, self._mode)
def read(self):
"""
Given an ISIS store, read the underlying ISIS3 compatible control network and
return an IsisControlNetwork dataframe.
"""
pvl_header = pvl.load(self._path, grammar=pvl.grammar.ISISGrammar())
header_start_byte = find_in_dict(pvl_header, 'HeaderStartByte')
header_bytes = find_in_dict(pvl_header, 'HeaderBytes')
point_start_byte = find_in_dict(pvl_header, 'PointsStartByte')
version = find_in_dict(pvl_header, 'Version')
if version == 2:
self.point_attrs = [i for i in cnf._CONTROLPOINTFILEENTRYV0002.fields_by_name if i != 'measures']
self.measure_attrs = [i for i in cnf._CONTROLPOINTFILEENTRYV0002_MEASURE.fields_by_name]
cp = cnf.ControlPointFileEntryV0002()
self._handle.seek(header_start_byte)
pbuf_header = cnf.ControlNetFileHeaderV0002()
pbuf_header.ParseFromString(self._handle.read(header_bytes))
self._handle.seek(point_start_byte)
cp = cnf.ControlPointFileEntryV0002()
pts = []
for s in pbuf_header.pointMessageSizes:
cp.ParseFromString(self._handle.read(s))
pt = [getattr(cp, i) for i in self.point_attrs if i != 'measures']
for measure in cp.measures:
meas = pt + [getattr(measure, j) for j in self.measure_attrs]
pts.append(meas)
elif version == 5:
self.point_attrs = [i for i in cnp5._CONTROLPOINTFILEENTRYV0005.fields_by_name if i != 'measures']
self.measure_attrs = [i for i in cnp5._CONTROLPOINTFILEENTRYV0005_MEASURE.fields_by_name]
cp = cnp5.ControlPointFileEntryV0005()
self._handle.seek(header_start_byte)
pbuf_header = cnh5.ControlNetFileHeaderV0005()
pbuf_header.ParseFromString(self._handle.read(header_bytes))
self._handle.seek(point_start_byte)
cp = cnp5.ControlPointFileEntryV0005()
pts = []
byte_count = 0
while byte_count < find_in_dict(pvl_header, 'PointsBytes'):
message_size = struct.unpack('I', self._handle.read(4))[0]
cp.ParseFromString(self._handle.read(message_size))
pt = [getattr(cp, i) for i in self.point_attrs if i != 'measures']
for measure in cp.measures:
meas = pt + [getattr(measure, j) for j in self.measure_attrs]
pts.append(meas)
byte_count += 4 + message_size
# Some point and measure fields have the same name, so mangle them as point_ and measure_
point_cols = [self.point_field_map[attr] if attr in self.point_field_map else attr for attr in self.point_attrs]
measure_cols = [self.measure_field_map[attr] if attr in self.measure_field_map else attr for attr in self.measure_attrs]
cols = point_cols + measure_cols
df = IsisControlNetwork(pts, columns=cols)
# Convert the (0.5, 0.5) origin pixels back to (0,0) pixels
df['line'] -= 0.5
df['sample'] -= 0.5
if 'aprioriline' in df.columns:
df['aprioriline'] -= 0.5
df['apriorisample'] -= 0.5
# Munge the MeasureLogData into Python objs
df['measureLog'] = df['measureLog'].apply(lambda x: [MeasureLog.from_protobuf(i) for i in x])
df.header = pvl_header
return df
def write(self, data, offset=0):
"""
Parameters
----------
data : bytes
Encoded header to be written to the file
offset : int
The byte offset into the output binary
"""
self._handle.seek(offset)
self._handle.write(data)
def create_points(self, df, pointid_prefix, pointid_suffix):
"""
Step through a control network (C) and return protocol buffer point objects
Parameters
----------
df : DataFrame
with the appropriate attributes: point_id, point_type, serial,
measure_type, x, y required.
The entries in the list must support grouping by the point_id attribute.
Returns
-------
point_messages : list
of serialized points buffers
point_sizes : list
of integer point sizes
"""
def _set_pid(pointid):
return '{}{}{}'.format(xstr(pointid_prefix),
pointid,
xstr(pointid_suffix))
# TODO: Rewrite using apply syntax for performance
point_sizes = []
point_messages = []
for i, g in df.groupby('id'):
# Get the point specification from the protobuf
point_spec = cnf.ControlPointFileEntryV0002()
# Set the ID and then loop over all of the attributes that the
# point has and check for corresponding columns in the group and
# set with the correct type
#point_spec.id = _set_pid(i)
point_spec.id = _set_pid(i)
point_spec.type = g.iloc[0].pointType
try:
point_spec.referenceIndex = g.iloc[0].referenceIndex
except:
warnings.warn(f'Unable to identify referenceIndex for point {point_spec.id}. Defaulting to index 0.')
point_spec.referenceIndex = 0
for attr, attrtype in self.point_attrs:
# Un-mangle common attribute names between points and measures
df_attr = self.point_field_map.get(attr, attr)
if df_attr in g.columns:
if df_attr == 'pointLog':
# Currently pointLog is not supported.
warnings.warn('The pointLog field is currently unsupported. Any pointLog data will not be saved.')
continue
# As per protobuf docs for assigning to a repeated field.
if df_attr == 'aprioriCovar' or df_attr == 'adjustedCovar':
arr = g.iloc[0][df_attr]
if isinstance(arr, np.ndarray):
arr = arr.ravel().tolist()
if arr:
point_spec.aprioriCovar.extend(arr)
# If field is repeated you must extend instead of assign
elif cnf._CONTROLPOINTFILEENTRYV0002.fields_by_name[attr].label == 3:
getattr(point_spec, attr).extend(g.iloc[0][df_attr])
else:
setattr(point_spec, attr, attrtype(g.iloc[0][df_attr]))
# A single extend call is cheaper than many add calls to pack points
measure_iterable = []
for node_id, m in g.iterrows():
measure_spec = point_spec.Measure()
            # For all of the attributes, set them if they are a dict-accessible attr of the obj.
for attr, attrtype in self.measure_attrs:
# Un-mangle common attribute names between points and measures
df_attr = self.measure_field_map.get(attr, attr)
if df_attr in g.columns:
if df_attr == 'measureLog':
[getattr(measure_spec, attr).extend([i.to_protobuf()]) for i in m[df_attr]]
# If field is repeated you must extend instead of assign
elif cnf._CONTROLPOINTFILEENTRYV0002_MEASURE.fields_by_name[attr].label == 3:
getattr(measure_spec, attr).extend(m[df_attr])
else:
setattr(measure_spec, attr, attrtype(m[df_attr]))
# ISIS pixels are centered on (0.5, 0.5). NDArrays are (0,0) based.
measure_spec.sample = m['sample'] + 0.5
measure_spec.line = m['line'] + 0.5
if 'apriorisample' in g.columns:
measure_spec.apriorisample = m['apriorisample'] + 0.5
measure_spec.aprioriline = m['aprioriline'] + 0.5
measure_iterable.append(measure_spec)
self.nmeasures += 1
self.npoints += 1
point_spec.measures.extend(measure_iterable)
point_message = point_spec.SerializeToString()
point_sizes.append(point_spec.ByteSize())
point_messages.append(point_message)
return point_messages, point_sizes
def create_buffer_header(self, networkid, targetname,
description, username, point_sizes,
creation_date,
modified_date):
"""
Create the Google Protocol Buffer header using the
protobuf spec.
Parameters
----------
networkid : str
The user defined identifier of this control network
targetname : str
The name of the target, e.g. Moon
description : str
A description for the network.
username : str
The name of the user / application that created the control network
    point_sizes : list
        of the point sizes for each point message
    creation_date : str
        Creation timestamp recorded in the header
    modified_date : str
        Last-modified timestamp recorded in the header
Returns
-------
header_message : str
The serialized message to write
header_message_size : int
The size of the serialized header, in bytes
"""
raw_header_message = cnf.ControlNetFileHeaderV0002()
raw_header_message.created = creation_date
raw_header_message.lastModified = modified_date
raw_header_message.networkId = networkid
raw_header_message.description = description
raw_header_message.targetName = targetname
raw_header_message.userName = username
raw_header_message.pointMessageSizes.extend(point_sizes)
header_message_size = raw_header_message.ByteSize()
header_message = raw_header_message.SerializeToString()
return header_message, header_message_size
def create_pvl_header(self, version, headerstartbyte,
networkid, targetname, description, username,
buffer_header_size, points_bytes,
creation_date, modified_date):
"""
Create the PVL header object
Parameters
----------
version : int
The current ISIS version to write, defaults to 2
headerstartbyte : int
The seek offset that the protocol buffer header starts at
networkid : str
The name of the network
targetname : str
The name of the target, e.g. Moon
description : str
A description for the network.
username : str
The name of the user / application that created the control network
buffer_header_size : int
Total size of the header in bytes
    points_bytes : int
        The total number of bytes all points require
    creation_date : str
        Creation timestamp written into the header
    modified_date : str
        Last-modified timestamp written into the header
Returns
-------
: object
An ISIS compliant PVL header object
"""
encoder = pvl.encoder.ISISEncoder(end_delimiter=False)
header_bytes = buffer_header_size
points_start_byte = HEADERSTARTBYTE + buffer_header_size
header = pvl.PVLModule([
('ProtoBuffer',
({'Core':{'HeaderStartByte': headerstartbyte,
'HeaderBytes': header_bytes,
'PointsStartByte': points_start_byte,
'PointsBytes': points_bytes},
'ControlNetworkInfo': pvl.PVLGroup([
('NetworkId', networkid),
('TargetName', targetname),
('UserName', username),
('Created', creation_date),
('LastModified', modified_date),
('Description', description),
('NumberOfPoints', self.npoints),
('NumberOfMeasures', self.nmeasures),
('Version', version)
])
}),
)
])
return pvl.dumps(header, encoder=encoder)
| 39.836
| 151
| 0.570288
| 16,697
| 0.838287
| 0
| 0
| 191
| 0.009589
| 0
| 0
| 6,543
| 0.328497
|
364ab9b65eb9e9388a14433c72e77abdba6bec4c
| 4,028
|
py
|
Python
|
resources.py
|
slowiklukasz/qgis-inventories
|
6bd247f41ec3340964522b3cac9dd9a924cefbf2
|
[
"MIT"
] | null | null | null |
resources.py
|
slowiklukasz/qgis-inventories
|
6bd247f41ec3340964522b3cac9dd9a924cefbf2
|
[
"MIT"
] | null | null | null |
resources.py
|
slowiklukasz/qgis-inventories
|
6bd247f41ec3340964522b3cac9dd9a924cefbf2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x02\x05\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x12\x74\x00\x00\x12\x74\x01\xde\x66\
\x1f\x78\x00\x00\x01\x9a\x49\x44\x41\x54\x58\x47\xc5\x94\x3b\x4e\
\x03\x41\x10\x44\x7d\x01\x22\x12\x02\x9c\x20\x0e\x40\xc2\x2d\xe0\
\x42\xdc\x84\x63\x10\x70\x25\x32\x62\x42\xa3\xb2\x54\xab\x47\x6f\
\xf5\x78\x96\x9f\x83\x27\xe1\xe9\xea\xee\xb7\xe3\xc5\xbb\xd7\xb7\
\xfd\xe1\x9c\x4c\x0b\xdc\x3f\xdd\xc5\x73\x32\x93\xa9\x4c\x09\x68\
\xb0\x49\x75\x31\x93\x49\xfc\x89\xc0\xe3\xf3\x65\xcc\x24\x4e\x0a\
\x6c\x19\xcc\xec\xcd\xcb\xc3\x42\xca\x9a\x4d\x02\xa9\x4e\x98\x95\
\xec\xc5\xc7\xd5\x91\x91\xc4\xbf\x08\x8c\x24\x86\x02\x75\x60\xca\
\x54\xd8\xf3\xab\x02\xa9\x9e\x60\xcf\xd9\x05\xfc\x35\x74\xcb\xdf\
\xaf\x6f\xd7\x02\x0a\x8b\x3a\xa8\xe6\x46\xb0\x77\xb4\x7c\x25\xa0\
\xb0\xaf\x8c\x43\x98\x99\xe1\x54\xaf\x97\xeb\xef\x45\x80\xcb\xab\
\x40\xf7\x14\x1d\xec\x4d\x75\x2f\x17\x51\x80\x03\x74\xfd\x3f\x11\
\x10\xac\xf1\xe9\xc5\x49\x01\x7d\xde\x2a\x20\x38\x43\xfd\xa2\x2e\
\x17\xab\x77\x80\x8d\x6e\x66\x66\x16\xce\xf0\x62\x51\xe7\x7d\x11\
\x10\x6c\xdc\xfa\xf6\x13\xce\x11\x5a\xee\x1b\xa6\xc4\x50\xa0\xd6\
\xcc\x4c\x46\x30\xe7\x1b\x18\x0a\xb0\x41\xb0\xd6\x65\xba\x9c\x60\
\x46\x8b\x2d\xc1\x4c\x2b\x90\xae\x9f\xf5\x4a\xcd\xa6\xbc\x9e\xbc\
\x4a\xb4\x02\x3c\xaf\xb5\x0e\xe6\xb5\x44\x0f\x91\xea\x94\x58\x04\
\x18\x64\x38\xd5\x7c\x3b\x75\x81\xe1\x02\x9e\x73\xa6\x33\x51\x80\
\xd7\xcf\x73\xe1\x73\xd3\x49\xb8\x9e\xce\x4c\x2b\x90\xce\x78\x5e\
\x19\x49\xd4\x5a\xed\x3d\x0a\x30\xe0\xa7\xe7\x99\x60\x93\xd0\x0b\
\x45\xd4\xd7\x89\x90\x3a\x67\x25\x50\x3f\xfb\x8c\x68\xa1\x7f\x54\
\xcc\xac\x44\x9d\xb5\x12\xa8\xd4\x86\xb4\xdc\xa8\xa6\xcc\x16\x89\
\x5d\x0a\x18\x06\xcd\x8c\x80\x18\xdd\x06\xe7\xb5\x02\x0c\x91\x59\
\x01\xd1\x49\x30\x13\xbf\x02\x06\x12\x49\xa2\x2e\x37\x49\x82\xf5\
\xe5\xdf\x70\x2b\x5a\x48\x52\x66\x86\x6f\x0b\xfc\x0e\xfb\xc3\x27\
\x2f\x90\x9e\xc6\xb7\x8c\xf7\x21\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x13\
\x0e\xb7\x46\xa2\
\x00\x69\
\x00\x6e\x00\x76\x00\x65\x00\x6e\x00\x74\x00\x6f\x00\x72\x00\x79\x00\x5f\x00\x76\x00\x61\x00\x6c\x00\x69\x00\x64\x00\x61\x00\x74\
\x00\x6f\x00\x72\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x40\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x40\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x7e\xb7\x66\x8e\xd2\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 41.102041
| 130
| 0.708292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,376
| 0.838133
|
364b2da593ffc26a8e80419fd18f3ad6526af7ad
| 2,130
|
py
|
Python
|
pulsarpy_to_encodedcc/scripts/patch_r2_paired_with.py
|
yunhailuo/pulsarpy-to-encodedcc
|
9fd0ce2b81b502dbd2e1e39910f373bd9635f787
|
[
"MIT"
] | null | null | null |
pulsarpy_to_encodedcc/scripts/patch_r2_paired_with.py
|
yunhailuo/pulsarpy-to-encodedcc
|
9fd0ce2b81b502dbd2e1e39910f373bd9635f787
|
[
"MIT"
] | null | null | null |
pulsarpy_to_encodedcc/scripts/patch_r2_paired_with.py
|
yunhailuo/pulsarpy-to-encodedcc
|
9fd0ce2b81b502dbd2e1e39910f373bd9635f787
|
[
"MIT"
] | 1
|
2020-02-21T18:09:12.000Z
|
2020-02-21T18:09:12.000Z
|
#!/usr/bin/env python
"""
Given one or more DCC experiment IDs, looks at all read2s that were submitted and updates each r2 file
object such that its paired_with property points to the correct r1. This works by looking at the aliases
in the r2 file object to see if there is one with _R2_001 in it. If so, it sets paired_with to be
the same alias, but with that segment replaced with _R1_001. Thus, this script is useful if submissions
went wrong with regard to the file pairings, and this is one way to fix that.
"""
import argparse
import encode_utils.connection as euc
import re
def get_parser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-i", "--infile", required=True, help="""
The input file with a DCC experiment on each line.""")
return parser
def main():
conn = euc.Connection("prod")
reg = re.compile("_R2_001")
parser = get_parser()
args = parser.parse_args()
ids = []
fh = open(args.infile)
for line in fh:
line = line.strip()
if not line or line.startswith("#"):
continue
ids.append(line)
for i in ids:
h = conn.get_fastqfile_replicate_hash(exp_id=i)
for bio_rep in h:
for tech_rep in h[bio_rep]:
read_files = h[bio_rep][tech_rep].get(2)
# read_files is a list of file objects
if not read_files:
continue
for r in read_files:
aliases = r["aliases"]
for a in aliases:
match = reg.search(a)
if match:
paired_with_name = a.replace(reg.pattern, "_R1_001")
payload = {conn.ENCID_KEY: a}
payload["paired_with"] = paired_with_name
try:
conn.patch(payload=payload)
except Exception:
break
break
if __name__ == "__main__":
main()
| 37.368421
| 105
| 0.553521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 690
| 0.323944
|
364b86ae80f99f078899fde9b937f621e0386d77
| 1,022
|
py
|
Python
|
ibsng/handler/bw/update_node.py
|
ParspooyeshFanavar/pyibsng
|
d48bcf4f25e3f23461528bf0ff8870cc3d537444
|
[
"MIT"
] | 6
|
2018-03-06T10:16:36.000Z
|
2021-12-05T12:43:10.000Z
|
ibsng/handler/bw/update_node.py
|
ParspooyeshFanavar/pyibsng
|
d48bcf4f25e3f23461528bf0ff8870cc3d537444
|
[
"MIT"
] | 3
|
2018-03-06T10:27:08.000Z
|
2022-01-02T15:21:27.000Z
|
ibsng/handler/bw/update_node.py
|
ParspooyeshFanavar/pyibsng
|
d48bcf4f25e3f23461528bf0ff8870cc3d537444
|
[
"MIT"
] | 3
|
2018-01-06T16:28:31.000Z
|
2018-09-17T19:47:19.000Z
|
"""Update node API method."""
from ibsng.handler.handler import Handler
class updateNode(Handler):
"""Update node class."""
def control(self):
"""Validate inputs after setup method.
:return: None
:rtype: None
"""
self.is_valid(self.node_id, int)
self.is_valid_content(self.node_id, self.ID_PATTERN)
self.is_valid(self.rate_kbits, int)
self.is_valid(self.ceil_kbits, int)
self.is_valid(self.priority, int)
self.is_valid_content(self.priority, self.POSITIVE_NUMBER)
def setup(self, node_id, rate_kbits, ceil_kbits, priority):
"""Setup required parameters.
:param int node_id: node ID
        :param int rate_kbits: new rate in kilobits
        :param int ceil_kbits: new ceiling in kilobits
:param int priority: new priority
:return: None
:rtype: None
"""
self.node_id = node_id
self.rate_kbits = rate_kbits
self.ceil_kbits = ceil_kbits
self.priority = priority
| 28.388889
| 66
| 0.629159
| 947
| 0.926614
| 0
| 0
| 0
| 0
| 0
| 0
| 401
| 0.392368
|
364be06117f32488ff211fd30ee702031b4b63f0
| 935
|
py
|
Python
|
make_mozilla/events/tests/test_paginators.py
|
Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c
|
f80e7c0cff97a1e9b301aa04015db983c7645778
|
[
"BSD-3-Clause"
] | 4
|
2015-05-08T16:58:53.000Z
|
2019-09-06T05:30:59.000Z
|
make_mozilla/events/tests/test_paginators.py
|
Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c
|
f80e7c0cff97a1e9b301aa04015db983c7645778
|
[
"BSD-3-Clause"
] | 2
|
2019-02-17T17:44:53.000Z
|
2019-03-28T03:54:39.000Z
|
make_mozilla/events/tests/test_paginators.py
|
Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c
|
f80e7c0cff97a1e9b301aa04015db983c7645778
|
[
"BSD-3-Clause"
] | 7
|
2015-05-21T15:38:29.000Z
|
2019-10-28T23:39:06.000Z
|
from django.utils import unittest
from nose.tools import eq_, ok_
from make_mozilla.events.paginators import results_page
sample_results = [1,2,3,4,5,6,7,8,9,0]
class TestResultsPage(unittest.TestCase):
def test_returns_page_1_if_page_unspecified(self):
page = results_page(sample_results, 4)
eq_(page.number, 1)
def test_returns_page_2_if_asked_for_it(self):
page = results_page(sample_results, 4, page = '2')
eq_(page.number, 2)
def test_returns_page_1_if_asked_for_a_non_number(self):
page = results_page(sample_results, 4, page = 'NaN')
eq_(page.number, 1)
def test_returns_page_3_if_asked_for_a_page_gt_3(self):
page = results_page(sample_results, 4, page = '4')
eq_(page.number, 3)
def test_still_returns_something_if_there_are_no_results(self):
page = results_page([], 4)
eq_(page.number, 1)
| 28.333333
| 67
| 0.686631
| 770
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.011765
|
364bea1cc3a268196e079579f42571e86d9befb2
| 644
|
py
|
Python
|
river/ensemble/test_streaming_random_patches.py
|
brcharron/creme
|
25290780f6bba0eb030215194e81b120d0219389
|
[
"BSD-3-Clause"
] | 1
|
2020-12-04T18:56:19.000Z
|
2020-12-04T18:56:19.000Z
|
river/ensemble/test_streaming_random_patches.py
|
brcharron/creme
|
25290780f6bba0eb030215194e81b120d0219389
|
[
"BSD-3-Clause"
] | null | null | null |
river/ensemble/test_streaming_random_patches.py
|
brcharron/creme
|
25290780f6bba0eb030215194e81b120d0219389
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
import pytest
from river import utils
from river import ensemble
estimator = ensemble.SRPClassifier(
n_models=3, # Smaller ensemble than the default to avoid bottlenecks
seed=42)
@pytest.mark.parametrize('estimator, check', [
pytest.param(
estimator,
check,
id=f'{estimator}:{check.__name__}'
)
for check in utils.estimator_checks.yield_checks(estimator)
# Skipping this test since shuffling features is expected to impact SRP
if check.__name__ not in {'check_shuffle_features_no_impact'}
])
def test_check_estimator(estimator, check):
check(copy.deepcopy(estimator))
| 24.769231
| 75
| 0.726708
| 0
| 0
| 0
| 0
| 436
| 0.677019
| 0
| 0
| 210
| 0.326087
|
364c9e4c77bae14954a377098632009151fcd659
| 2,071
|
py
|
Python
|
antlir/nspawn_in_subvol/plugin_hooks.py
|
lhl2617/antlir
|
1041732e8163c1316d3e45c0ba4db7937faa4809
|
[
"MIT"
] | null | null | null |
antlir/nspawn_in_subvol/plugin_hooks.py
|
lhl2617/antlir
|
1041732e8163c1316d3e45c0ba4db7937faa4809
|
[
"MIT"
] | null | null | null |
antlir/nspawn_in_subvol/plugin_hooks.py
|
lhl2617/antlir
|
1041732e8163c1316d3e45c0ba4db7937faa4809
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"The core logic of how plugins integrate with `popen_nspawn`"
import functools
import subprocess
from contextlib import contextmanager
from typing import Callable, ContextManager, Iterable, Tuple, Union
from antlir.subvol_utils import Subvol
from .args import PopenArgs, _NspawnOpts
from .cmd import _nspawn_setup, _NspawnSetup, _nspawn_subvol_setup
from .plugins import NspawnPlugin
_PopenResult = Tuple[subprocess.Popen, subprocess.Popen]
_SetupSubvolCtxMgr = Callable[[_NspawnOpts], ContextManager[Subvol]]
_NspawnSetupCtxMgr = Callable[
[_NspawnOpts, PopenArgs], ContextManager[_NspawnSetup]
]
_PostSetupPopenCtxMgr = Callable[[_NspawnSetup], ContextManager[_PopenResult]]
@contextmanager
def _setup_subvol(opts: _NspawnOpts) -> Iterable[Subvol]:
with _nspawn_subvol_setup(opts) as subvol:
yield subvol
@contextmanager
def _setup(
subvol: Subvol, opts: _NspawnOpts, popen_args: PopenArgs
) -> Iterable[_NspawnSetup]:
with _nspawn_setup(subvol, opts, popen_args) as setup:
yield setup
@contextmanager
def _popen_plugin_driver(
opts: _NspawnOpts,
popen_args: PopenArgs,
post_setup_popen: _PostSetupPopenCtxMgr,
plugins: Iterable[NspawnPlugin],
) -> _PopenResult:
# Apply the plugins
setup = _setup
setup_subvol = _setup_subvol
for p in plugins:
if p.wrap_setup_subvol is not None:
setup_subvol = functools.partial(p.wrap_setup_subvol, setup_subvol)
if p.wrap_setup is not None:
setup = functools.partial(p.wrap_setup, setup)
if p.wrap_post_setup_popen is not None:
post_setup_popen = functools.partial(
p.wrap_post_setup_popen, post_setup_popen
)
with setup_subvol(opts) as subvol, setup(
subvol, opts, popen_args
) as setup, post_setup_popen(setup) as popen_res:
yield popen_res
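# Illustrative sketch (an assumption, not part of antlir): a plugin hooks in by
# wrapping one of the context-manager factories above; `_popen_plugin_driver`
# threads the inner factory through as the first argument via
# `functools.partial`. A minimal no-op wrapper could look like this
# (hypothetical plugin code):
@contextmanager
def _example_wrap_setup_subvol(
    setup_subvol: _SetupSubvolCtxMgr, opts: _NspawnOpts
) -> Iterable[Subvol]:
    # hypothetical pre-setup work would happen here
    with setup_subvol(opts) as subvol:
        yield subvol
    # hypothetical post-teardown work would happen here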
| 30.910448
| 79
| 0.740222
| 0
| 0
| 1,127
| 0.544182
| 1,175
| 0.567359
| 0
| 0
| 275
| 0.132786
|
364d2da5e343e2ce74256399a60ddf18ac23eadf
| 4,577
|
py
|
Python
|
horseDB.py
|
maf-kakimoto/bet
|
3da7c57bca88cee8f5565e605fae38168f6b21fa
|
[
"Apache-2.0"
] | null | null | null |
horseDB.py
|
maf-kakimoto/bet
|
3da7c57bca88cee8f5565e605fae38168f6b21fa
|
[
"Apache-2.0"
] | null | null | null |
horseDB.py
|
maf-kakimoto/bet
|
3da7c57bca88cee8f5565e605fae38168f6b21fa
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import time
import pandas as pd
# self-made
import manage_mysql
def horseDB(table,search):
con = manage_mysql.connect()
c = con.cursor()
column=[]
value=[]
for i in range(len(search)):
if i%2 == 0:
column.append(search[i])
else:
value.append(search[i])
sql='SELECT * FROM '+table+' where '
for i in range(len(column)):
if i != 0:
sql+=' and '
sql+=column[i]+' = "'+str(value[i])+'"'
result = pd.read_sql(sql,con)
con.close()
return result
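# Illustrative sketch (hypothetical table/column names): `search` is a flat
# list alternating column names and values, so the call below expands to
#   SELECT * FROM mergedresult where horse = "Deep Impact" and year = "2005"
def _example_horse_query():
    return horseDB("mergedresult", ["horse", "Deep Impact", "year", 2005])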
def fukusho(group,search,limit):
sql='SELECT '+group+',count(*),sum(fukusho),sum(fukusho)/count(*) From mergedresult '
if search['column'] == '':
sql+='where '
if limit != 0:
ut=time.time()
epoch=int(ut)-60*60*24*limit
else:
epoch=0
sql+='epoch >= '+str(epoch)+' '
elif search['column'] == 'sex':
sql+='where '
sql+=search['column']+' = "'+search['value']+'" '
elif search['column'] == 'age':
sql+='where '
if search['value'] == 7:
sql+='year - birth >= '+str(search['value'])+' '
else:
sql+='year - birth = '+str(search['value'])+' '
elif search['column'] == 'road':
sql+='where '
value=search['value'].split("_")
roadbed=value[1]
roadCondition=value[2]
if roadbed == 'turf':
roadbed = '0'
elif roadbed == 'dirt':
roadbed = '1'
sql+='roadbed = "'+roadbed+'" and '
if roadCondition == 'good':
sql+='(roadCondition = "0") '
elif roadCondition == 'bad':
sql+='(roadCondition = "1" or roadCondition = "2" or roadCondition = "3") '
elif search['column'] == 'distance_category':
sql+='where distance_category = "'
value=search['value'].split("_")
category=value[1]
if category == 'sprint':
category = '0'
elif category == 'mile':
category = '1'
elif category == 'intermediate':
category = '2'
elif category == 'long':
category = '3'
elif category == 'extended':
category = '4'
sql+=category+'" '
elif search['column'] == 'win_class':
sql+='where '
value=search['value'].split("_")
grade=value[1]
sql+=search['column']+' = "'+str(grade)+'" '
elif search['column'] == 'track':
sql+='where '
track=search['value']
track=track.split("_")
course=track[1]
roadbed=track[2]
if roadbed == 'turf':
roadbed = '0'
elif roadbed == 'dirt':
roadbed = '1'
sql+='course = "'+course+'" and roadbed = "'+roadbed+'" '
if len(track) == 4: # (04 or 08) and turf
inout=track[3]
            if course == '04' or course == '08':
                if inout == 'in':
                    sql+=' and distance like "%in" '
                elif inout == 'out':
                    sql+=' and distance like "%out" '
elif search['column'] == 'rotation_epoch':
sql+='where '
value=search['value'].split("_")
rotationEpoch=value[2]
if rotationEpoch == 'short': # threshold: 6weeks
sql+=search['column']+' <= 60*60*24*7*6 and '+search['column']+' != 0 '
elif rotationEpoch == 'long':
sql+=search['column']+' > 60*60*24*7*6 '
elif search['column'] == 'rotation_roadbed':
sql+='where '
value=search['value'].split("_")
rotationRoadbed=value[2]
if rotationRoadbed == 'toTurf':
sql+=search['column']+' = 1 and roadbed = "0" '
elif rotationRoadbed == 'toDirt':
sql+=search['column']+' = 1 and roadbed = "1" '
elif search['column'] == 'rotation_distance':
sql+='where '
value=search['value'].split("_")
rotationDistance=value[2]
if rotationDistance == 'shortening':
sql+='distance/(distance-'+search['column']+') < 0.9 '
elif rotationDistance == 'extension':
sql+='distance/(distance-'+search['column']+') > 1.1 '
sql+='GROUP BY '+group
print(sql)
con = manage_mysql.connect()
result = pd.read_sql(sql,con)
con.commit()
con.close()
return result
| 28.968354
| 89
| 0.489404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,343
| 0.293424
|
364dafdcafb142ed50351e67323fec6552de2b84
| 325
|
py
|
Python
|
maskrcnn_benchmark/data/datasets/__init__.py
|
lipengfeizju/Detection
|
efe00b221725be05e30fd67957958f97ae42b3cf
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/data/datasets/__init__.py
|
lipengfeizju/Detection
|
efe00b221725be05e30fd67957958f97ae42b3cf
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/data/datasets/__init__.py
|
lipengfeizju/Detection
|
efe00b221725be05e30fd67957958f97ae42b3cf
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .coco import COCODataset
from .voc import PascalVOCDataset
from .concat_dataset import ConcatDataset
__all__ = ["COCODataset", "ConcatDataset", "PascalVOCDataset"]
# if isinstance(dataset, datasets.MyDataset):
# return coco_evaluation(**args)
| 36.111111
| 71
| 0.778462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 198
| 0.609231
|
364dc99efa920ea79a2d2856b41d0a11a59412b1
| 68
|
py
|
Python
|
social/actions.py
|
raccoongang/python-social-auth
|
81c0a542d158772bd3486d31834c10af5d5f08b0
|
[
"BSD-3-Clause"
] | 1,987
|
2015-01-01T16:12:45.000Z
|
2022-03-29T14:24:25.000Z
|
social/actions.py
|
raccoongang/python-social-auth
|
81c0a542d158772bd3486d31834c10af5d5f08b0
|
[
"BSD-3-Clause"
] | 731
|
2015-01-01T22:55:25.000Z
|
2022-03-10T15:07:51.000Z
|
virtual/lib/python3.6/site-packages/social/actions.py
|
dennismwaniki67/awards
|
80ed10541f5f751aee5f8285ab1ad54cfecba95f
|
[
"MIT"
] | 1,082
|
2015-01-01T16:27:26.000Z
|
2022-03-22T21:18:33.000Z
|
from social_core.actions import do_auth, do_complete, do_disconnect
| 34
| 67
| 0.867647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
364de668db5e04abf8c4ddb3813bc74fcc464515
| 3,097
|
py
|
Python
|
src/alphazero/data.py
|
Whillikers/seldon
|
0d3ff7b25c7272d76a9aba38ee22efd910750f84
|
[
"MIT"
] | 1
|
2019-11-03T20:18:16.000Z
|
2019-11-03T20:18:16.000Z
|
src/alphazero/data.py
|
Whillikers/seldon
|
0d3ff7b25c7272d76a9aba38ee22efd910750f84
|
[
"MIT"
] | null | null | null |
src/alphazero/data.py
|
Whillikers/seldon
|
0d3ff7b25c7272d76a9aba38ee22efd910750f84
|
[
"MIT"
] | null | null | null |
"""
Code for working with data.
In-memory format (as a list):
- board: Tensor (8, 8, 2) [bool; one-hot]
- move: Tensor (64,) [bool; one-hot]
- value: Tensor () [float32]
On-disk format (to save space and quicken loading):
- board: int64
- move: int64
- value: float32
"""
from typing import Dict, Tuple
import tensorflow as tf # type: ignore
from board import BOARD_SHAPE, BOARD_SQUARES, Board, Loc
EXAMPLE_SPEC = {
"board": tf.io.FixedLenFeature([2], tf.int64),
"move": tf.io.FixedLenFeature([], tf.int64),
"value": tf.io.FixedLenFeature([], tf.float32),
}
# Hack to allow storing bitboards efficiently as tf.Int64.
# Necessary because boards are all valid uint64 but not necessarily valid int64.
# Taken from: https://stackoverflow.com/questions/20766813/how-to-convert-signed-to-
# unsigned-integer-in-python
def _signed_representation(unsigned: int) -> int:
"""Convert an "unsigned" int to its equivalent C "signed" representation."""
return (unsigned & ((1 << 63) - 1)) - (unsigned & (1 << 63))
def _unsigned_representation(signed: int) -> int:
"""Convert a "signed" int to its equivalent C "unsigned" representation."""
return signed & 0xFFFFFFFFFFFFFFFF
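# Illustrative sketch (not part of the original module): the two helpers above
# are inverses, so any 64-bit bitboard survives the int64 round trip used for
# on-disk storage; the value below is a hypothetical bitboard.
def _bitboard_roundtrip_example() -> bool:
    example_bitboard = (1 << 63) | 1  # sign bit plus lowest bit set
    stored = _signed_representation(example_bitboard)  # what goes to disk
    restored = _unsigned_representation(stored)  # what gets read back
    return restored == example_bitboard  # True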
# See: https://stackoverflow.com/questions/48333210/tensorflow-how-to-convert-an-
# integer-tensor-to-the-corresponding-binary-tensor
def decode_bitboard(encoded: tf.Tensor) -> tf.Tensor:
"""
Convert from uint64 board representation to a tf.Tensor board.
"""
flat = tf.math.mod(
tf.bitwise.right_shift(encoded, tf.range(BOARD_SQUARES, dtype=tf.int64)), 2
)
board = tf.reshape(flat, BOARD_SHAPE)
# Hack to allow using rot90 on a 2D tensor
return tf.image.rot90(tf.expand_dims(board, axis=-1), k=2)[:, :, 0]
def serialize_example(board: Board, move: Loc, value: float) -> str:
"""
Serialize a single training example into a string.
"""
black = _signed_representation(int(board.black))
white = _signed_representation(int(board.white))
features = {
"board": tf.train.Feature(int64_list=tf.train.Int64List(value=[black, white])),
"move": tf.train.Feature(int64_list=tf.train.Int64List(value=[move.as_int])),
"value": tf.train.Feature(float_list=tf.train.FloatList(value=[value])),
}
ex = tf.train.Example(features=tf.train.Features(feature=features))
return ex.SerializeToString()
def preprocess_example(
serialized: str
) -> Tuple[Dict[str, tf.Tensor], Dict[str, tf.Tensor]]:
"""
Turn a serialized example into the training-ready format.
"""
example = tf.io.parse_single_example(serialized, EXAMPLE_SPEC)
bitboards = example["board"]
black_bb = bitboards[0]
white_bb = bitboards[1]
black = decode_bitboard(black_bb)
white = decode_bitboard(white_bb)
board = tf.stack([black, white], axis=-1)
move = tf.one_hot(example["move"], BOARD_SQUARES)
# TODO: better solution to multi-input Keras model training
return (
{"board": board},
{"policy_softmax": move, "tf_op_layer_Tanh": example["value"]},
)
| 34.032967
| 87
| 0.680336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,262
| 0.407491
|
364e9aa16c6e94c8b02ddd7f95c3691e35f760e3
| 140
|
py
|
Python
|
Codechef Factorial.py
|
zoya-0509/Practice-codes
|
4d71b1b004f309025c215e55a504c7817b00e8c9
|
[
"MIT"
] | null | null | null |
Codechef Factorial.py
|
zoya-0509/Practice-codes
|
4d71b1b004f309025c215e55a504c7817b00e8c9
|
[
"MIT"
] | null | null | null |
Codechef Factorial.py
|
zoya-0509/Practice-codes
|
4d71b1b004f309025c215e55a504c7817b00e8c9
|
[
"MIT"
] | null | null | null |
t=int(input(""))
while (t>0):
n=int(input(""))
f=1
for i in range(1,n+1):
f=f*i
print(f)
t=t-1
| 15.555556
| 28
| 0.378571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0.028571
|
364ed94d670a685e7be3d3182211a00338f863e8
| 269
|
py
|
Python
|
LC/263.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | 2
|
2018-02-24T17:20:02.000Z
|
2018-02-24T17:25:43.000Z
|
LC/263.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | null | null | null |
LC/263.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | null | null | null |
class Solution(object):
def isUgly(self, num):
"""
:type num: int
:rtype: bool
"""
if num <= 0:
return False
for x in [2,3,5]:
while(num % x ==0):
num /= x
return num==1
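# Illustrative check (not in the original solution): 6 = 2*3 is ugly, while
# 14 = 2*7 has the prime factor 7, so:
#   Solution().isUgly(6)   -> True
#   Solution().isUgly(14)  -> False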
| 22.416667
| 31
| 0.386617
| 269
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 59
| 0.219331
|
364f0f926540cab854ab6053e2e09c0e19eaaacc
| 1,569
|
py
|
Python
|
pkgcore/ebuild/errors.py
|
pombreda/pkgcore
|
b438fc573af1a031d7ce12adbbf299bab5338451
|
[
"BSD-3-Clause"
] | 1
|
2021-07-05T13:10:18.000Z
|
2021-07-05T13:10:18.000Z
|
pkgcore/ebuild/errors.py
|
vapier/pkgcore
|
35a7e4f4f0fc61dd9c4dc72d35a57e2e9d5b832f
|
[
"BSD-3-Clause"
] | 8
|
2015-03-24T14:21:44.000Z
|
2015-03-24T14:21:44.000Z
|
pkgcore/ebuild/errors.py
|
vapier/pkgcore
|
35a7e4f4f0fc61dd9c4dc72d35a57e2e9d5b832f
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright: 2005 Brian Harring <ferringb@gmail.com>
# License: GPL2/BSD
# "More than one statement on a single line"
# pylint: disable-msg=C0321
"""
atom exceptions
"""
__all__ = ("MalformedAtom", "InvalidVersion", "InvalidCPV", "ParseError")
from pkgcore.package import errors
class MalformedAtom(errors.InvalidDependency):
def __init__(self, atom, err=''):
errors.InvalidDependency.__init__(
self, "atom '%s' is malformed: error %s" % (atom, err))
self.atom, self.err = atom, err
class InvalidVersion(errors.InvalidDependency):
def __init__(self, ver, rev, err=''):
errors.InvalidDependency.__init__(
self,
"Version restriction ver='%s', rev='%s', is malformed: error %s" %
(ver, rev, err))
self.ver, self.rev, self.err = ver, rev, err
class InvalidCPV(errors.InvalidPackageName):
"""Raised if an invalid cpv was passed in.
:ivar args: single-element tuple containing the invalid string.
:type args: C{tuple}
"""
class ParseError(errors.InvalidDependency):
def __init__(self, s, token=None, msg=None):
if msg is None:
str_msg = ''
else:
str_msg = ': %s' % msg
if token is not None:
Exception.__init__(self,
"%s is unparseable%s\nflagged token- %s" %
(s, str_msg, token))
else:
Exception.__init__(self,
"%s is unparseable%s" % (s, str_msg))
self.dep_str, self.token, self.msg = s, token, msg
| 27.526316
| 78
| 0.599745
| 1,274
| 0.811982
| 0
| 0
| 0
| 0
| 0
| 0
| 535
| 0.340982
|
3652d826b86718a511de34e46553dd7eade808bc
| 10,578
|
py
|
Python
|
trainer/train_doc_ml.py
|
dainis-boumber/AA_CNN
|
649612215c7e290ede1c51625268ad9fd7b46228
|
[
"MIT"
] | 1
|
2021-09-27T09:39:11.000Z
|
2021-09-27T09:39:11.000Z
|
trainer/train_doc_ml.py
|
dainis-boumber/AA_CNN
|
649612215c7e290ede1c51625268ad9fd7b46228
|
[
"MIT"
] | null | null | null |
trainer/train_doc_ml.py
|
dainis-boumber/AA_CNN
|
649612215c7e290ede1c51625268ad9fd7b46228
|
[
"MIT"
] | 4
|
2018-03-21T23:19:40.000Z
|
2021-03-05T15:09:01.000Z
|
#! /usr/bin/env python
import datetime
import os
import time
import tensorflow as tf
from datahelpers import data_helper_ml_mulmol6_OnTheFly as dh
from evaluators import eval_pan_archy as evaler
from networks.cnn_ml_archy import TextCNN
def init_data(embed_dimension, do_dev_split=False):
dater = dh.DataHelperMLFly(doc_level=True, embed_dim=embed_dimension, target_sent_len=40, target_doc_len=200)
# Model Hyperparameters
tf.flags.DEFINE_integer("num_classes", dater.num_of_classes, "Number of possible labels")
tf.flags.DEFINE_integer("embedding_dim", dater.embedding_dim,
"Dimensionality of character embedding")
tf.flags.DEFINE_string("filter_sizes", "3", "Comma-separated filter sizes (default: '3,4,5')")
# tf.flags.DEFINE_integer("num_filters", 100, "Number of filters per filter size (default: 128)")
# tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
# tf.flags.DEFINE_float("l2_reg_lambda", 0.6, "L2 regularizaion lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 4, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 200, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 250, "Save model after this many steps (default: 100)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
print("")
# Load data
print("Loading data...")
x_shuffled, y_shuffled, vocabulary, vocabulary_inv, embed_matrix = dater.load_data()
print(("Vocabulary Size: {:d}".format(len(vocabulary))))
# Split train/test set
# TODO: This is very crude, should use cross-validation
if do_dev_split:
x_train, x_dev = x_shuffled[:-500], x_shuffled[-500:]
y_train, y_dev = y_shuffled[:-500], y_shuffled[-500:]
print(("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev))))
else:
x_train = x_shuffled
x_dev = None
y_train = y_shuffled
y_dev = None
print("No Train/Dev split")
return dater, FLAGS, x_train, x_dev, y_train, y_dev, vocabulary, embed_matrix
# Training
def training(DO_DEV_SPLIT, FLAGS, scheme_name, vocabulary, embed_matrix, x_train, x_dev, y_train, y_dev,
num_filters, dropout_prob, l2_lambda, test_x, test_y):
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
cnn = TextCNN(
doc_sent_len=x_train.shape[1],
sent_len=x_train.shape[2],
num_classes=FLAGS.num_classes, # Number of classification classes
vocab_size=len(vocabulary),
embedding_size=FLAGS.embedding_dim,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
num_filters=num_filters,
l2_reg_lambda=l2_lambda,
init_embedding=embed_matrix)
# Define Training procedure
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(1e-3)
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Keep track of gradient values and sparsity (optional)
with tf.name_scope('grad_summary'):
grad_summaries = []
for g, v in grads_and_vars:
if g is not None:
grad_hist_summary = tf.histogram_summary("{}/grad/hist".format(v.name), g)
sparsity_summary = tf.scalar_summary("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.merge_summary(grad_summaries)
# Output directory for models and summaries
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", scheme_name, timestamp))
print(("Writing to {}\n".format(out_dir)))
# Summaries for loss and accuracy
loss_summary = tf.scalar_summary("loss", cnn.loss)
pred_ratio_summary = []
for i in range(FLAGS.num_classes):
pred_ratio_summary.append(
tf.scalar_summary("prediction/label_" + str(i) + "_percentage", cnn.rate_percentage[i]))
acc_summary = tf.scalar_summary("accuracy", cnn.accuracy)
# Train Summaries
with tf.name_scope('train_summary'):
train_summary_op = tf.merge_summary(
[loss_summary, acc_summary, pred_ratio_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.train.SummaryWriter(train_summary_dir, sess.graph_def)
# Dev summaries
with tf.name_scope('dev_summary'):
dev_summary_op = tf.merge_summary([loss_summary, acc_summary, pred_ratio_summary])
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.train.SummaryWriter(dev_summary_dir, sess.graph_def)
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=7)
# Initialize all variables
sess.run(tf.global_variables_initializer())
def train_step(x_batch, y_batch):
"""
A single training step
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: dropout_prob
}
_, step, summaries, loss, accuracy = sess.run(
[train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print(("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy)))
train_summary_writer.add_summary(summaries, step)
def dev_step(x_batch, y_batch, writer=None):
"""
Evaluates model on a dev set
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: 1
}
step, summaries, loss, accuracy = sess.run(
[global_step, dev_summary_op, cnn.loss, cnn.accuracy],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print(("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy)))
if writer:
writer.add_summary(summaries, step)
# Generate batches
batches = dh.DataHelperMLFly.batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
if test_x is not None and test_y is not None:
test_x_1 = test_x[:100]
test_y_1 = test_y[:100]
test_x_2 = test_x[100:200]
test_y_2 = test_y[100:200]
# Training loop. For each batch...
for batch in batches:
if len(batch) == 0:
continue
x_batch, y_batch = list(zip(*batch))
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
if DO_DEV_SPLIT and current_step % FLAGS.evaluate_every == 0:
print("\nEvaluation:")
dev_batches = dh.DataHelperMLFly.batch_iter(list(zip(x_dev, y_dev)), 100, 1)
for dev_batch in dev_batches:
if len(dev_batch) > 0:
small_dev_x, small_dev_y = list(zip(*dev_batch))
dev_step(small_dev_x, small_dev_y, writer=dev_summary_writer)
print("")
elif test_x is not None and test_y is not None and current_step % 200 == 0:
dev_step(test_x_1, test_y_1, writer=dev_summary_writer)
dev_step(test_x_2, test_y_2, writer=dev_summary_writer)
if current_step % FLAGS.checkpoint_every == 0:
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print(("Saved model checkpoint to {}\n".format(path)))
# if current_step >= 3000:
# break
return timestamp
DO_DEV_SPLIT = False
bold_step = [2500, 3000, 3500, 4000, 4500]
bold_step2 = [2000, 2250, 2500, 2750, 3000, 3250, 3500]
embed_dim = 100
output_file = open("ml_100d_doc.txt", mode="a")  # append mode ("aw" is not a valid mode)
dir_name = "ml_100d_doc"
[dater, FLAGS, x_train, x_dev, y_train, y_dev, vocabulary, embed_matrix] =\
init_data(embed_dim, DO_DEV_SPLIT)
ev = evaler.evaler()
test_x, test_y, test_y_scalar = ev.load(dater)
for f_size in [50]:
for l2 in [0.1]:
for drop in [0.50]:
output_file.write("===== Filter Size: "+str(f_size)+"\n")
output_file.write("===== L2 Norm: "+str(l2)+"\n")
output_file.write("===== Drop Out: "+str(drop)+"\n\n\n")
ts = training(DO_DEV_SPLIT, FLAGS, dir_name, vocabulary, embed_matrix, x_train, x_dev, y_train, y_dev,
f_size, drop, l2, test_x, test_y)
for train_step in [3000]:
checkpoint_dir = "./runs/" + dir_name + "/" + str(ts) + "/checkpoints/"
ev.test(checkpoint_dir, train_step, output_file, documentAcc=True)
output_file.close()
| 45.012766
| 119
| 0.616752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,095
| 0.198053
|
365493226ef8ff623b67882ccdb1d5957462cc3b
| 80
|
py
|
Python
|
releases/_version.py
|
Nicusor97/releases
|
97a763e41bbe7374106a1c648b89346a0d935429
|
[
"BSD-2-Clause"
] | null | null | null |
releases/_version.py
|
Nicusor97/releases
|
97a763e41bbe7374106a1c648b89346a0d935429
|
[
"BSD-2-Clause"
] | null | null | null |
releases/_version.py
|
Nicusor97/releases
|
97a763e41bbe7374106a1c648b89346a0d935429
|
[
"BSD-2-Clause"
] | null | null | null |
__version_info__ = (1, 6, 1)
__version__ = '.'.join(map(str, __version_info__))
| 26.666667
| 50
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0.0375
|
3656cab44b971cc68a2561efdd667d02fb35d8b4
| 442
|
py
|
Python
|
areaofrectangle.py
|
Ahmad-Aiman/Calculate-Area-of-Rectangle
|
ff33a2eab14bffc1a8c29a9134cabea48b69538b
|
[
"MIT"
] | null | null | null |
areaofrectangle.py
|
Ahmad-Aiman/Calculate-Area-of-Rectangle
|
ff33a2eab14bffc1a8c29a9134cabea48b69538b
|
[
"MIT"
] | null | null | null |
areaofrectangle.py
|
Ahmad-Aiman/Calculate-Area-of-Rectangle
|
ff33a2eab14bffc1a8c29a9134cabea48b69538b
|
[
"MIT"
] | null | null | null |
#Area of a rectangle = width x length
#Perimeter of a rectangle = 2 x [length + width]
width_input = float (input("\nPlease enter width: "))
length_input = float (input("Please enter length: "))
areaofRectangle = width_input * length_input
perimeterofRectangle = 2 * (width_input + length_input)
print ("\nArea of Rectangle is: " , areaofRectangle, "CM")
print("\nPerimeter of Rectangle is: ", perimeterofRectangle, "CM")
| 29.466667
| 67
| 0.70362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 199
| 0.450226
|
3657fec70ed8fedb4b5b14e5e0ae343bef42588d
| 2,324
|
py
|
Python
|
python/src/main/python/pygw/store/rocksdb/options.py
|
Maxar-Corp/sh-geowave
|
675781d3898b50c09ee66f57e74cf788286b05d5
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pygw/store/rocksdb/options.py
|
Maxar-Corp/sh-geowave
|
675781d3898b50c09ee66f57e74cf788286b05d5
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pygw/store/rocksdb/options.py
|
Maxar-Corp/sh-geowave
|
675781d3898b50c09ee66f57e74cf788286b05d5
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
#===============================================================================================
from pygw.config import geowave_pkg
from pygw.store import DataStoreOptions
class RocksDBOptions(DataStoreOptions):
"""
RocksDB data store options.
"""
def __init__(self):
"""
Initializes the RocksDB options class.
"""
super().__init__(geowave_pkg.datastore.rocksdb.config.RocksDBOptions())
def set_directory(self, directory):
"""
Sets the directory of the RocksDB database.
Args:
directory (str): The directory for the database.
"""
self._java_ref.setDirectory(directory)
def get_directory(self):
"""
Returns:
The directory for the RocksDB database.
"""
return self._java_ref.getDirectory()
def set_compact_on_write(self, compact_on_write):
"""
Sets whether or not to perform compaction on write.
Args:
compact_on_write (bool): Whether or not to perform compaction on write.
"""
self._java_ref.setCompactOnWrite(compact_on_write)
def is_compact_on_write(self):
"""
Returns:
True if compaction on write is enabled, False otherwise.
"""
return self._java_ref.isCompactOnWrite()
def set_batch_write_size(self, batch_write_size):
"""
Sets the number of entries to be gathered before performing a batch write
operation on the data store.
Args:
batch_write_size (int): The number of entries to write in batch write operations.
"""
self._java_ref.setBatchWriteSize(batch_write_size)
def get_batch_write_size(self):
"""
Returns:
The number of entries to write in batch write operations.
"""
return self._java_ref.getBatchWriteSize()
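# Illustrative usage sketch (not part of the original module). Assumes a pygw
# environment where the GeoWave Java gateway is already running; the directory
# below is a hypothetical path.
def _example_rocksdb_options():
    opts = RocksDBOptions()
    opts.set_directory("/tmp/rocksdb-store")  # hypothetical store location
    opts.set_compact_on_write(False)
    opts.set_batch_write_size(1000)
    return opts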
| 31.835616
| 96
| 0.63167
| 1,727
| 0.743115
| 0
| 0
| 0
| 0
| 0
| 0
| 1,461
| 0.628657
|
36595769c1ee20b5e029d4e12f235050f6967122
| 33,084
|
py
|
Python
|
server/miscellaneous.py
|
dewancse/SMT-PMR
|
8d280ff5d169a021a73ffa30c8159581ab859c62
|
[
"MIT"
] | null | null | null |
server/miscellaneous.py
|
dewancse/SMT-PMR
|
8d280ff5d169a021a73ffa30c8159581ab859c62
|
[
"MIT"
] | 10
|
2017-05-16T22:08:40.000Z
|
2017-10-30T21:07:47.000Z
|
server/miscellaneous.py
|
dewancse/SMT-PMR
|
8d280ff5d169a021a73ffa30c8159581ab859c62
|
[
"MIT"
] | null | null | null |
import requests
from libcellml import *
import lxml.etree as ET
# pre-generated model recipe in JSON format
model_recipe = [
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_84666",
"med_pr": "http://purl.obolibrary.org/obo/PR_P26433",
"med_pr_text": "sodium/hydrogen exchanger 3 (rat)",
"med_pr_text_syn": "NHE3",
"model_entity": "weinstein_1995.cellml#NHE3.J_NHE3_Na",
"model_entity2": "",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/PR_P26433",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma2": "",
"sink_fma3": "",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_29101",
"solute_chebi2": "",
"solute_chebi3": "",
"solute_text": "Na+",
"solute_text2": "",
"solute_text3": "",
"source_fma": "http://purl.obolibrary.org/obo/FMA_74550",
"source_fma2": "",
"source_fma3": "",
"variable_text": "J_NHE3_Na",
"variable_text2": "flux",
"variable_text3": "flux"
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_84669",
"med_pr": "http://purl.obolibrary.org/obo/PR_Q9ET37",
"med_pr_text": "low affinity sodium-glucose cotransporter (mouse)",
"med_pr_text_syn": "Q9ET37",
"model_entity": "mackenzie_1996-mouse-baso.cellml#NBC_current.J_Na",
"model_entity2": "mackenzie_1996-mouse-baso.cellml#NBC_current.J_Na",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/PR_Q9ET37",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_9673",
"sink_fma2": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma3": "",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_29101",
"solute_chebi2": "http://purl.obolibrary.org/obo/CHEBI_29101",
"solute_chebi3": "",
"solute_text": "Na+",
"solute_text2": "Na+",
"solute_text3": "",
"source_fma": "http://purl.obolibrary.org/obo/FMA_66836",
"source_fma2": "http://purl.obolibrary.org/obo/FMA_9673",
"source_fma3": "",
"variable_text": "J_Na",
"variable_text2": "J_Na",
"variable_text3": ""
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_84666",
"med_pr": "http://purl.obolibrary.org/obo/PR_P55018",
"med_pr_text": "solute carrier family 12 member 3 (rat)",
"med_pr_text_syn": "TSC",
"model_entity": "chang_fujita_b_1999.cellml#total_transepithelial_sodium_flux.J_mc_Na",
"model_entity2": "chang_fujita_b_1999.cellml#solute_concentrations.J_mc_Cl",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/CL_0000066",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma2": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma3": "",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_29101",
"solute_chebi2": "http://purl.obolibrary.org/obo/CHEBI_17996",
"solute_chebi3": "",
"solute_text": "Na+",
"solute_text2": "Cl-",
"solute_text3": "",
"source_fma": "http://purl.obolibrary.org/obo/FMA_74550",
"source_fma2": "http://purl.obolibrary.org/obo/FMA_74550",
"source_fma3": "",
"variable_text": "J_mc_Na",
"variable_text2": "J_mc_Cl",
"variable_text3": ""
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_84666",
"med_pr": "http://purl.obolibrary.org/obo/PR_Q63633",
"med_pr_text": "solute carrier family 12 member 5 (rat)",
"med_pr_text_syn": "Q63633",
"model_entity": "chang_fujita_b_1999.cellml#solute_concentrations.J_mc_Cl",
"model_entity2": "chang_fujita_b_1999.cellml#total_transepithelial_potassium_flux.J_mc_K",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/CL_0000066",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma2": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma3": "",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_17996",
"solute_chebi2": "http://purl.obolibrary.org/obo/CHEBI_29103",
"solute_chebi3": "",
"solute_text": "Cl-",
"solute_text2": "K+",
"solute_text3": "",
"source_fma": "http://purl.obolibrary.org/obo/FMA_74550",
"source_fma2": "http://purl.obolibrary.org/obo/FMA_74550",
"source_fma3": "",
"variable_text": "J_mc_Cl",
"variable_text2": "J_mc_K",
"variable_text3": ""
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_84666",
"med_pr": "http://purl.obolibrary.org/obo/PR_P37089",
"med_pr_text": "amiloride-sensitive sodium channel subunit alpha (rat)",
"med_pr_text_syn": "RENAC",
"model_entity": "chang_fujita_b_1999.cellml#mc_sodium_flux.G_mc_Na",
"model_entity2": "",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/CL_0000066",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma2": "channel",
"sink_fma3": "channel",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_29101",
"solute_chebi2": "channel",
"solute_chebi3": "channel",
"solute_text": "Na+",
"solute_text2": "channel",
"solute_text3": "channel",
"source_fma": "http://purl.obolibrary.org/obo/FMA_74550",
"source_fma2": "channel",
"source_fma3": "channel",
"variable_text": "G_mc_Na",
"variable_text2": "channel",
"variable_text3": "channel"
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_84666",
"med_pr": "http://purl.obolibrary.org/obo/PR_Q06393",
"med_pr_text": "chloride channel protein ClC-Ka (rat)",
"med_pr_text_syn": "CLCNK1",
"model_entity": "chang_fujita_b_1999.cellml#mc_chloride_flux.G_mc_Cl",
"model_entity2": "",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/CL_0000066",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma2": "channel",
"sink_fma3": "channel",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_17996",
"solute_chebi2": "channel",
"solute_chebi3": "channel",
"solute_text": "Cl-",
"solute_text2": "channel",
"solute_text3": "channel",
"source_fma": "http://purl.obolibrary.org/obo/FMA_74550",
"source_fma2": "channel",
"source_fma3": "channel",
"variable_text": "G_mc_Cl",
"variable_text2": "channel",
"variable_text3": "channel"
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_84666",
"med_pr": "http://purl.obolibrary.org/obo/PR_P15387",
"med_pr_text": "potassium voltage-gated channel subfamily B member 1 (rat)",
"med_pr_text_syn": "P15387",
"model_entity": "chang_fujita_b_1999.cellml#mc_potassium_flux.G_mc_K",
"model_entity2": "",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/CL_0000066",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma2": "channel",
"sink_fma3": "channel",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_29103",
"solute_chebi2": "channel",
"solute_chebi3": "channel",
"solute_text": "K+",
"solute_text2": "channel",
"solute_text3": "channel",
"source_fma": "http://purl.obolibrary.org/obo/FMA_74550",
"source_fma2": "channel",
"source_fma3": "channel",
"variable_text": "G_mc_K",
"variable_text2": "channel",
"variable_text3": "channel"
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_84669",
"med_pr": "http://purl.obolibrary.org/obo/PR_P06685",
"med_pr_text": "sodium/potassium-transporting ATPase subunit alpha-1 (rat)",
"med_pr_text_syn": "P06685",
"model_entity": "chang_fujita_b_1999.cellml#solute_concentrations.J_sc_Na",
"model_entity2": "chang_fujita_b_1999.cellml#sc_potassium_flux.J_sc_K",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/CL_0000066",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_9673",
"sink_fma2": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma3": "",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_29101",
"solute_chebi2": "http://purl.obolibrary.org/obo/CHEBI_29103",
"solute_chebi3": "",
"solute_text": "Na+",
"solute_text2": "K+",
"solute_text3": "",
"source_fma": "http://purl.obolibrary.org/obo/FMA_66836",
"source_fma2": "http://purl.obolibrary.org/obo/FMA_9673",
"source_fma3": "",
"variable_text": "J_sc_Na",
"variable_text2": "J_sc_K",
"variable_text3": ""
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_84669",
"med_pr": "http://purl.obolibrary.org/obo/PR_Q06393",
"med_pr_text": "chloride channel protein ClC-Ka (rat)",
"med_pr_text_syn": "CLCNK1",
"model_entity": "chang_fujita_b_1999.cellml#sc_chloride_flux.G_sc_Cl",
"model_entity2": "",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/CL_0000066",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma2": "channel",
"sink_fma3": "channel",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_17996",
"solute_chebi2": "channel",
"solute_chebi3": "channel",
"solute_text": "Cl-",
"solute_text2": "channel",
"solute_text3": "channel",
"source_fma": "http://purl.obolibrary.org/obo/FMA_9673",
"source_fma2": "channel",
"source_fma3": "channel",
"variable_text": "G_sc_Cl",
"variable_text2": "channel",
"variable_text3": "channel"
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_84669",
"med_pr": "http://purl.obolibrary.org/obo/PR_P15387",
"med_pr_text": "potassium voltage-gated channel subfamily B member 1 (rat)",
"med_pr_text_syn": "P15387",
"model_entity": "chang_fujita_b_1999.cellml#sc_potassium_flux.G_sc_K",
"model_entity2": "",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/CL_0000066",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_66836",
"sink_fma2": "channel",
"sink_fma3": "channel",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_29103",
"solute_chebi2": "channel",
"solute_chebi3": "channel",
"solute_text": "K+",
"solute_text2": "channel",
"solute_text3": "channel",
"source_fma": "http://purl.obolibrary.org/obo/FMA_9673",
"source_fma2": "channel",
"source_fma3": "channel",
"variable_text": "G_sc_K",
"variable_text2": "channel",
"variable_text3": "channel"
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_67394",
"med_pr": "http://purl.obolibrary.org/obo/PR_Q9Z0S6",
"med_pr_text": "claudin-10 (mouse)",
"med_pr_text_syn": "CLDN10A",
"model_entity": "chang_fujita_b_1999.cellml#ms_sodium_flux.G_ms_Na",
"model_entity2": "",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/CL_0000066",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_9673",
"sink_fma2": "diffusiveflux",
"sink_fma3": "diffusiveflux",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_29101",
"solute_chebi2": "diffusiveflux",
"solute_chebi3": "diffusiveflux",
"solute_text": "Na+",
"solute_text2": "diffusiveflux",
"solute_text3": "diffusiveflux",
"source_fma": "http://purl.obolibrary.org/obo/FMA_74550",
"source_fma2": "diffusiveflux",
"source_fma3": "diffusiveflux",
"variable_text": "G_ms_Na",
"variable_text2": "diffusiveflux",
"variable_text3": "diffusiveflux"
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_67394",
"med_pr": "http://purl.obolibrary.org/obo/PR_O35054",
"med_pr_text": "claudin-4 (mouse)",
"med_pr_text_syn": "CPETR1",
"model_entity": "chang_fujita_b_1999.cellml#ms_chloride_flux.G_ms_Cl",
"model_entity2": "",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/CL_0000066",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_9673",
"sink_fma2": "diffusiveflux",
"sink_fma3": "diffusiveflux",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_17996",
"solute_chebi2": "diffusiveflux",
"solute_chebi3": "diffusiveflux",
"solute_text": "Cl-",
"solute_text2": "diffusiveflux",
"solute_text3": "diffusiveflux",
"source_fma": "http://purl.obolibrary.org/obo/FMA_74550",
"source_fma2": "diffusiveflux",
"source_fma3": "diffusiveflux",
"variable_text": "G_ms_Cl",
"variable_text2": "diffusiveflux",
"variable_text3": "diffusiveflux"
},
{
"med_fma": "http://purl.obolibrary.org/obo/FMA_67394",
"med_pr": "http://purl.obolibrary.org/obo/PR_F1LZ52",
"med_pr_text": "kelch-like protein 3 (rat)",
"med_pr_text_syn": "F1LZ52",
"model_entity": "chang_fujita_b_1999.cellml#ms_potassium_flux.G_ms_K",
"model_entity2": "",
"model_entity3": "",
"protein_name": "http://purl.obolibrary.org/obo/CL_0000066",
"sink_fma": "http://purl.obolibrary.org/obo/FMA_9673",
"sink_fma2": "diffusiveflux",
"sink_fma3": "diffusiveflux",
"solute_chebi": "http://purl.obolibrary.org/obo/CHEBI_29103",
"solute_chebi2": "diffusiveflux",
"solute_chebi3": "diffusiveflux",
"solute_text": "K+",
"solute_text2": "diffusiveflux",
"solute_text3": "diffusiveflux",
"source_fma": "http://purl.obolibrary.org/obo/FMA_74550",
"source_fma2": "diffusiveflux",
"source_fma3": "diffusiveflux",
"variable_text": "G_ms_K",
"variable_text2": "diffusiveflux",
"variable_text3": "diffusiveflux"
}
]
# sparql endpoint in PMR
sparqlendpoint = "https://models.physiomeproject.org/pmr2_virtuoso_search"
# workspace url where we have all models
workspaceURL = "https://models.physiomeproject.org/workspace/267/rawfile/HEAD/"
# reference URIs of anatomical locations
lumen_fma = "http://purl.obolibrary.org/obo/FMA_74550"
cytosol_fma = "http://purl.obolibrary.org/obo/FMA_66836"
interstitialfluid_fma = "http://purl.obolibrary.org/obo/FMA_9673"
# solutes dictionary to map URI to name
dict_solutes = [
{
"http://purl.obolibrary.org/obo/CHEBI_29101": "Na",
"http://purl.obolibrary.org/obo/CHEBI_17996": "Cl",
"http://purl.obolibrary.org/obo/CHEBI_29103": "K"
}
]
# get channels and diffusive fluxes equations from source model
def getChannelsEquation(str_channel, v, compartment, importedModel, m, epithelial):
# string index of "id=" and "</math>" inside MathML
str_index = []
# save here required variables to make channels and diffusive fluxes equations
# e.g. ['C_c_Na', 'RT', 'psi_c', 'P_mc_Na', 'F', 'psi_m']
list_of_variables = []
# remove C_c_Na from here ['C_c_Na', 'RT', 'psi_c', 'P_mc_Na', 'F', 'psi_m'] and save in this variable
list_of_variables_2 = []
for i in range(len(str_channel)):
if "id=" in str_channel[i]:
str_index.append(i) # insert variables equation
elif "</math>" in str_channel[i]:
str_index.append(i) # insert math index to note end of math
# print(str_index)
for i in range(len(str_index)):
flag = False
if i + 1 == len(str_index):
break
else:
my_str = str_channel[str_index[i]:str_index[i + 1] - 1]
for i in range(len(my_str)):
if "<eq/>" in my_str[i] and "<ci>" + v + "</ci>" in my_str[i + 1]:
channel_str = ""
for s in my_str:
channel_str += s
channel_str = "<math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" + channel_str + "</apply>\n</math>\n"
# check that whether this channel already exists in this component
# we are doing this because G_mc_Na, etc comes twice in the epithelial component!
mth = compartment.math()
if channel_str not in mth:
compartment.appendMath(channel_str)
# extract variables from this math string
for i in range(len(my_str)):
if "<ci>" in my_str[i]:
start_index = my_str[i].find("<ci>")
end_index = my_str[i].find("</ci>")
if my_str[i][start_index + 4:end_index] != v:
list_of_variables.append(my_str[i][start_index + 4:end_index])
flag = True
break
if flag == True:
break
# remove variables if already exists in the component
for i in range(compartment.variableCount()):
var = compartment.variable(i)
# we will remove C_c_Na from the list below after constructing lumen, cytosol and interstitial fluid component
# e.g. ['C_c_Na', 'RT', 'psi_c', 'P_mc_Na', 'F', 'psi_m']
if var.name() in list_of_variables:
list_of_variables.remove(var.name())
# unique elements in the list
list_of_variables = list(set(list_of_variables))
# gather all components, including nested child components, into the mycomponent container
# for now, we handle 3 encapsulation levels: grandparent -> parent -> children
mycomponent = Component()
for i in range(importedModel.componentCount()):
c = importedModel.component(i)
mycomponent.addComponent(c)
for j in range(c.componentCount()):
c2 = c.component(j)
mycomponent.addComponent(c2)
for k in range(c2.componentCount()):
c3 = c2.component(k)
mycomponent.addComponent(c3)
for item in list_of_variables:
# iterate over components
for i in range(mycomponent.componentCount()):
c = mycomponent.component(i)
# variables within a component
for j in range(c.variableCount()):
v = c.variable(j)
if v.name() == item and v.initialValue() != "":
# add units
addUnitsModel(v.units(), importedModel, m)
if epithelial.variable(v.name()) == None:
v_epithelial = Variable()
# insert this variable in the epithelial component
createComponent(v_epithelial, v.name(), v.units(), "public_and_private",
v.initialValue(), epithelial, v)
if compartment.variable(v.name()) == None:
v_compartment = Variable()
# insert this variable in the lumen/cytosol/interstitial fluid component
createComponent(v_compartment, v.name(), v.units(), "public", None, compartment, v)
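# illustrative walk-through (variable names are examples only): called with v = "G_mc_Na",
# the function scans str_channel for the MathML block whose <eq/> defines <ci>G_mc_Na</ci>,
# appends that block to the given compartment (lumen/cytosol/interstitial fluid) unless it is
# already present, and then copies every referenced variable that carries an initial value
# (e.g. P_mc_Na, F, RT) into both the epithelial component and the compartment component.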
# user-defined function that returns a substring used in the ODE-based equations
def subMath(sign, vFlux):
return " <apply>\n" \
" <" + sign + "/>\n" + \
" <ci>" + vFlux + "</ci>\n" + \
" </apply>"
# user-defined function that assembles a complete ODE-based equation in MathML
def fullMath(vConcentration, subMath):
return "<math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" \
" <apply id=" + '"' + vConcentration + "_diff_eq" + '"' + ">\n" + \
" <eq/>\n" \
" <apply>\n" \
" <diff/>\n" \
" <bvar>\n" \
" <ci>time</ci>\n" \
" </bvar>\n" \
" <ci>" + vConcentration + "</ci>\n" + \
" </apply>\n" \
" <apply>\n" \
" <plus/>\n" \
"" + subMath + "\n" + \
" </apply>\n" \
" </apply>\n" \
"</math>\n"
# insert ODE equations for the lumen, cytosol and interstitial fluid components
def insertODEMathEquation(math_dict, compartment, v_cons, v_flux, sign):
# ODE equations for lumen
if compartment.name() == "lumen":
if v_cons.name() not in math_dict[0]["lumen"].keys():
math_dict[0]["lumen"][v_cons.name()] = subMath(sign, v_flux.name())
else:
math_dict[0]["lumen"][v_cons.name()] = \
math_dict[0]["lumen"][v_cons.name()] + "\n" + subMath(sign, v_flux.name())
# ODE equations for cytosol
if compartment.name() == "cytosol":
if v_cons.name() not in math_dict[0]["cytosol"].keys():
math_dict[0]["cytosol"][v_cons.name()] = subMath(sign, v_flux.name())
else:
math_dict[0]["cytosol"][v_cons.name()] = \
math_dict[0]["cytosol"][v_cons.name()] + "\n" + subMath(sign, v_flux.name())
# ODE equations for interstitial fluid
if compartment.name() == "interstitialfluid":
if v_cons.name() not in math_dict[0]["interstitialfluid"].keys():
math_dict[0]["interstitialfluid"][v_cons.name()] = subMath(sign, v_flux.name())
else:
math_dict[0]["interstitialfluid"][v_cons.name()] = \
math_dict[0]["interstitialfluid"][v_cons.name()] + "\n" + subMath(sign, v_flux.name())
# math for total fluxes in the lumen, cytosol and interstitial fluid components
def fullMathTotalFlux(vTotalFlux, sMath):
return "<math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" \
" <apply id=" + '"' + vTotalFlux + "_calculation" + '"' + ">\n" + \
" <eq/>\n" \
" <ci>" + vTotalFlux + "</ci>\n" + \
" <apply>\n" \
" <plus/>\n" \
"" + sMath + "\n" + \
" </apply>\n" \
" </apply>\n" \
"</math>\n"
# user-defined function that returns a substring used in the total-flux and channel equations
def subMathTotalFluxAndChannel(sign, vFlux):
return " <apply>\n" \
" <" + sign + "/>\n" + \
" <ci>" + vFlux + "</ci>\n" + \
" </apply>"
# insert equations for total fluxes
def insertMathsForTotalFluxes(compartment, math_dict_Total_Flux, dict_solutes, chebi, sign, v_flux):
if compartment.name() == "lumen":
lumen_flux = "J_" + dict_solutes[0][chebi] + "_lumen"
if lumen_flux not in math_dict_Total_Flux[0]["lumen"].keys():
math_dict_Total_Flux[0]["lumen"][lumen_flux] = subMathTotalFluxAndChannel(sign, v_flux.name())
else:
math_dict_Total_Flux[0]["lumen"][lumen_flux] = \
math_dict_Total_Flux[0]["lumen"][lumen_flux] + "\n" + \
subMathTotalFluxAndChannel(sign, v_flux.name())
if compartment.name() == "cytosol":
cytosol_flux = "J_" + dict_solutes[0][chebi] + "_cytosol"
if cytosol_flux not in math_dict_Total_Flux[0]["cytosol"].keys():
math_dict_Total_Flux[0]["cytosol"][cytosol_flux] = \
subMathTotalFluxAndChannel(sign, v_flux.name())
else:
math_dict_Total_Flux[0]["cytosol"][cytosol_flux] = \
math_dict_Total_Flux[0]["cytosol"][cytosol_flux] + "\n" + \
subMathTotalFluxAndChannel(sign, v_flux.name())
if compartment.name() == "interstitialfluid":
interstitialfluid_flux = "J_" + dict_solutes[0][chebi] + "_interstitialfluid"
if interstitialfluid_flux not in math_dict_Total_Flux[0]["interstitialfluid"].keys():
math_dict_Total_Flux[0]["interstitialfluid"][interstitialfluid_flux] = \
subMathTotalFluxAndChannel(sign, v_flux.name())
else:
math_dict_Total_Flux[0]["interstitialfluid"][interstitialfluid_flux] = \
math_dict_Total_Flux[0]["interstitialfluid"][interstitialfluid_flux] + "\n" + \
subMathTotalFluxAndChannel(sign, v_flux.name())
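# naming convention used above: the per-compartment total flux variable is
# "J_" + solute name + "_" + compartment, e.g. "J_Na_lumen" for sodium in the lumen,
# and each call appends one more signed term to the sum stored in math_dict_Total_Flux.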
# insert equations for channels and diffusive fluxes
def insertMathsForTotalChannels(compartment, math_dict_Total_Flux, dict_solutes, chebi, sign, flux_name):
if compartment.name() == "lumen":
lumen_flux = "J_" + dict_solutes[0][chebi] + "_lumen"
if lumen_flux not in math_dict_Total_Flux[0]["lumen"].keys():
math_dict_Total_Flux[0]["lumen"][lumen_flux] = subMathTotalFluxAndChannel(sign, flux_name)
else:
math_dict_Total_Flux[0]["lumen"][lumen_flux] = \
math_dict_Total_Flux[0]["lumen"][lumen_flux] + "\n" + subMathTotalFluxAndChannel(sign, flux_name)
if compartment.name() == "cytosol":
cytosol_flux = "J_" + dict_solutes[0][chebi] + "_cytosol"
if cytosol_flux not in math_dict_Total_Flux[0]["cytosol"].keys():
math_dict_Total_Flux[0]["cytosol"][cytosol_flux] = subMathTotalFluxAndChannel(sign, flux_name)
else:
math_dict_Total_Flux[0]["cytosol"][cytosol_flux] = \
math_dict_Total_Flux[0]["cytosol"][cytosol_flux] + "\n" + subMathTotalFluxAndChannel(sign, flux_name)
if compartment.name() == "interstitialfluid":
interstitialfluid_flux = "J_" + dict_solutes[0][chebi] + "_interstitialfluid"
if interstitialfluid_flux not in math_dict_Total_Flux[0]["interstitialfluid"].keys():
math_dict_Total_Flux[0]["interstitialfluid"][interstitialfluid_flux] = \
subMathTotalFluxAndChannel(sign, flux_name)
else:
math_dict_Total_Flux[0]["interstitialfluid"][interstitialfluid_flux] = \
math_dict_Total_Flux[0]["interstitialfluid"][interstitialfluid_flux] + "\n" + \
subMathTotalFluxAndChannel(sign, flux_name)
# assign plus or minus sign in the equations
def odeSignNotation(compartment, source_fma, sink_fma):
# lumen
if compartment.name() == "lumen":
if source_fma == lumen_fma and sink_fma == cytosol_fma:
sign = "minus"
elif source_fma == lumen_fma and sink_fma == interstitialfluid_fma:
sign = "minus"
elif source_fma == cytosol_fma and sink_fma == lumen_fma:
sign = "plus"
elif source_fma == interstitialfluid_fma and sink_fma == lumen_fma:
sign = "plus"
# cytosol
if compartment.name() == "cytosol":
if source_fma == cytosol_fma and sink_fma == lumen_fma:
sign = "minus"
elif source_fma == cytosol_fma and sink_fma == interstitialfluid_fma:
sign = "minus"
elif source_fma == lumen_fma and sink_fma == cytosol_fma:
sign = "plus"
elif source_fma == interstitialfluid_fma and sink_fma == cytosol_fma:
sign = "plus"
# interstitial fluid
if compartment.name() == "interstitialfluid":
if source_fma == interstitialfluid_fma and sink_fma == cytosol_fma:
sign = "minus"
elif source_fma == interstitialfluid_fma and sink_fma == lumen_fma:
sign = "minus"
elif source_fma == cytosol_fma and sink_fma == interstitialfluid_fma:
sign = "plus"
elif source_fma == lumen_fma and sink_fma == interstitialfluid_fma:
sign = "plus"
return sign
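# worked example: in the lumen compartment, a flux with source_fma == lumen_fma and
# sink_fma == cytosol_fma leaves the lumen, so the function returns "minus"; the reverse
# direction returns "plus". Note that sign is only assigned for the source/sink pairs listed
# above, so an unexpected pair would leave it undefined at the return statement.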
# user-defined function to instantiate a variable, set its attributes, and add it to a component
# if v2 == None then variable comes from this component, e.g. environment.time
# else variable comes from other component, e.g. lumen.P_mc_Na where P_mc_Na comes from a source model
def createComponent(v, name, unit, interface, initialvalue, component, v2):
v.setName(name)
v.setUnits(unit)
v.setInterfaceType(interface)
if initialvalue != None:
v.setInitialValue(initialvalue)
if v2 == None:
v.setId(component.name() + "." + v.name())
else:
v.setId(component.name() + "." + v2.name())
component.addVariable(v)
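# usage sketch (hypothetical names; an "environment" component and the unit "second" are assumed):
# for the environment.time case mentioned above one would write
#   v_time = Variable()
#   createComponent(v_time, "time", "second", "public", None, environment, None)
# which sets the variable id to "environment.time" and adds it to that component.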
# concentration SPARQL query: returns the model entities for the concentration of a solute (chebi) in a compartment (fma)
# fma and chebi are the two input values to this function
def concentrationSparql(fma, chebi):
return "PREFIX semsim: <http://www.bhi.washington.edu/SemSim#>" \
"PREFIX ro: <http://www.obofoundry.org/ro/ro.owl#>" \
"PREFIX dcterms: <http://purl.org/dc/terms/>" \
"SELECT ?modelEntity " \
"WHERE { " \
"?modelEntity semsim:isComputationalComponentFor ?model_prop. " \
"?model_prop semsim:hasPhysicalDefinition <http://identifiers.org/opb/OPB_00340>. " \
"?model_prop semsim:physicalPropertyOf ?source_entity. " \
"?source_entity ro:part_of ?source_part_of_entity. " \
"?source_part_of_entity semsim:hasPhysicalDefinition <" + fma + ">. " + \
"?source_entity semsim:hasPhysicalDefinition <" + chebi + ">. " + \
"}"
# add required units from the imported models
def addUnitsModel(unit_name, importedModel, m):
i = 0
while importedModel.units(i) != None:
u = importedModel.units(i)
# u.getUnitAttributes(reference, prefix, exponent, multiplier, id)
if u.name() == unit_name:
# if this unit does not exist yet, add it to the model
if m.units(unit_name) == None:
m.addUnits(u)
break
i += 1
# instantiate source url and create an imported component in the import section of the new model
def instantiateImportedComponent(sourceurl, component, epithelial, m):
imp = ImportSource()
imp.setUrl(sourceurl)
importedComponent = Component()
importedComponent.setName(component)
importedComponent.setSourceComponent(imp, component)
# m.addComponent(importedComponent)
if m.component(importedComponent.name()) is None:
m.addComponent(importedComponent)
# if epithelial.component(importedComponent.name()) == None:
# epithelial.addComponent(importedComponent)
# make an HTTP request to fetch the source model
r = requests.get(sourceurl)
# parse the string representation of the model to access by libcellml
p = Parser()
impModel = p.parseModel(r.text)
# check whether the parsed model is valid
if p.errorCount() > 0:
for i in range(p.errorCount()):
desc = p.error(i).description()
cellmlNullNamespace = "Model element is in invalid namespace 'null'"
cellml10Namespace = "Model element is in invalid namespace 'http://www.cellml.org/cellml/1.0#'"
cellml11Namespace = "Model element is in invalid namespace 'http://www.cellml.org/cellml/1.1#'"
if desc.find(cellmlNullNamespace) != -1:
print("Error in miscellaneous.py: ", p.error(i).description())
exit()
elif desc.find(cellml10Namespace) != -1 or desc.find(cellml11Namespace) != -1:
print("Msg in miscellaneous.py: ", p.error(i).description())
# convert CellML 1.0 or 1.1 to CellML 2.0 via XSLT
dom = ET.fromstring(r.text.encode("utf-8"))
xslt = ET.parse("cellml1to2.xsl")
transform = ET.XSLT(xslt)
newdom = transform(dom)
mstr = ET.tostring(newdom, pretty_print=True)
mstr = mstr.decode("utf-8")
# parse the string representation of the model to access by libcellml
impModel = p.parseModel(mstr)
else:
print("Error in miscellaneous.py: ", p.error(i).description())
exit()
impComponent = impModel.component(importedComponent.name())
# in order to define the connections we need later on, we must make sure that all the variables
# from the source model are present in the imported component; we only need the names, so we
# just grab those from the source
for i in range(impComponent.variableCount()):
impVariable = impComponent.variable(i)
v = Variable()
v.setName(impVariable.name())
importedComponent.addVariable(v)
# process a model entity and the corresponding source model's URL
def processModelEntity(modelentity, epithelial, m):
cellml_model_name = modelentity[0:modelentity.find('#')]
component_variable = modelentity[modelentity.find('#') + 1:len(modelentity)]
component = component_variable[:component_variable.find('.')]
sourceurl = workspaceURL + cellml_model_name
instantiateImportedComponent(sourceurl, component, epithelial, m)
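# worked example using the "model_entity" value from the annotation data above:
# for modelentity = "chang_fujita_b_1999.cellml#ms_potassium_flux.G_ms_K"
#   cellml_model_name  -> "chang_fujita_b_1999.cellml"
#   component_variable -> "ms_potassium_flux.G_ms_K"
#   component          -> "ms_potassium_flux"
# so the ms_potassium_flux component is imported from workspaceURL + "chang_fujita_b_1999.cellml".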
| 44.647773
| 127
| 0.599716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16,959
| 0.512604
|
365d9e30b62ef2b43194d13cd3b143b547c83df7
| 2,383
|
py
|
Python
|
epg_grabber/sites/beinsports_id.py
|
akmalharith/epg-grabber
|
ee6bdd20f7cbb4c780d96a8ce0fe2ca68b553c33
|
[
"MIT"
] | 1
|
2022-03-16T00:42:21.000Z
|
2022-03-16T00:42:21.000Z
|
epg_grabber/sites/beinsports_id.py
|
akmalharith/epg-grabber
|
ee6bdd20f7cbb4c780d96a8ce0fe2ca68b553c33
|
[
"MIT"
] | null | null | null |
epg_grabber/sites/beinsports_id.py
|
akmalharith/epg-grabber
|
ee6bdd20f7cbb4c780d96a8ce0fe2ca68b553c33
|
[
"MIT"
] | 1
|
2022-03-17T17:16:30.000Z
|
2022-03-17T17:16:30.000Z
|
from typing import List
import requests
from pathlib import Path
from datetime import date, datetime
from bs4 import BeautifulSoup
from helper.classes import Channel, Program
from helper.utils import get_channel_by_name, get_epg_datetime
TIMEZONE_OFFSET = "+0800"
PROGRAM_URL = "https://epg.beinsports.com/utctime_id.php?cdate={date}&offset=+8&mins=00&category=sports&id=123"
def get_all_channels():
return [Channel(
"channels_1",
"beInSPORTS1.Id",
"beIN SPORTS 1",
"",
True),
Channel(
"channels_2",
"beInSPORTS2.Id",
"beIN SPORTS 2",
"",
True)]
def get_programs_by_channel(channel_name: str, *args) -> List[Program]:
# TODO: Accept days as input and increment the date_input in an outer for
# loop
date_input = date.today()
datetime_today = datetime.now().replace(
hour=0, minute=0, second=0, microsecond=0)
url = PROGRAM_URL.format(
date=date_input)
channel = get_channel_by_name(channel_name, Path(__file__).stem)
try:
r = requests.get(url)
except requests.exceptions.RequestException as e:
raise SystemExit(e)
if r.status_code != 200:
raise Exception(r.raise_for_status())
soup = BeautifulSoup(r.text, features="html.parser")
divs = soup.find_all("div", {"id": channel.id})
programs = []
for div in divs:
line = div.find_all("li", {"parent": "slider_1"})
for value in line:
time_period = str(value.find("p", {"class": "time"}).string)
time_start, time_end = time_period.split("-")
start_hour, start_minute = time_start.split(":")
start_time = datetime_today.replace(
hour=int(start_hour), minute=int(start_minute))
end_hour, end_minute = time_end.split(":")
end_time = datetime_today.replace(
hour=int(end_hour), minute=int(end_minute))
obj = Program(
channel_name=channel.tvg_id,
title=value.find("p", {"class": "title"}).string,
description=value.find("p", {"class": "format"}).string,
start=get_epg_datetime(start_time, TIMEZONE_OFFSET),
stop=get_epg_datetime(end_time, TIMEZONE_OFFSET)
)
programs.append(obj)
return programs
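# parsing illustration (assumes the beIN EPG page renders each slot as e.g. "19:00-21:00"):
# splitting that string on "-" and ":" yields start_time = today at 19:00 and
# end_time = today at 21:00, which get_epg_datetime presumably converts to XMLTV timestamps
# using the "+0800" offset defined above.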
| 31.773333
| 111
| 0.61645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 377
| 0.158204
|
365ebdd95b4706dd8e23a6549e2d402adf342132
| 1,608
|
py
|
Python
|
C19/19-1_Blog/blogs/views.py
|
Triple-Z/Python-Crash-Course
|
7e59104420f6110e4d60668314264105534016ce
|
[
"MIT"
] | null | null | null |
C19/19-1_Blog/blogs/views.py
|
Triple-Z/Python-Crash-Course
|
7e59104420f6110e4d60668314264105534016ce
|
[
"MIT"
] | null | null | null |
C19/19-1_Blog/blogs/views.py
|
Triple-Z/Python-Crash-Course
|
7e59104420f6110e4d60668314264105534016ce
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from .models import BlogPost
from django.http import HttpResponseRedirect, Http404
from django.urls import reverse
from .forms import BlogForm
from django.contrib.auth.decorators import login_required
from .auth import check_blog_author
def index(request):
blogs = BlogPost.objects.order_by('-date_added')
context = {'blogs': blogs}
return render(request, 'blogs/index.html', context)
@login_required
def new_blog(request):
if request.method != 'POST':
form = BlogForm()
else:
form = BlogForm(request.POST)
if form.is_valid():
new_blog = form.save(commit=False)
new_blog.author = request.user
new_blog.save()
return HttpResponseRedirect(reverse('blogs:index'))
context = {'form': form}
return render(request, 'blogs/new_blog.html', context)
def blog(request, blog_id):
blog = BlogPost.objects.get(id=blog_id)
context = {'blog': blog}
return render(request, 'blogs/blog.html', context)
@login_required
def edit_blog(request, blog_id):
blog = BlogPost.objects.get(id=blog_id)
if request.method != 'POST':
if check_blog_author(request, blog_id):
form = BlogForm(instance=blog)
else:
raise Http404
else:
form = BlogForm(instance=blog, data=request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('blogs:blog', args=[blog_id]))
context = {'blog': blog, 'form': form}
return render(request, 'blogs/edit_blog.html', context)
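# A blogs/urls.py consistent with the reverse() calls above might look as follows
# (illustrative only; the project's actual URL patterns are defined elsewhere):
#   from django.urls import path
#   from . import views
#   app_name = 'blogs'
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('new_blog/', views.new_blog, name='new_blog'),
#       path('blogs/<int:blog_id>/', views.blog, name='blog'),
#       path('edit_blog/<int:blog_id>/', views.edit_blog, name='edit_blog'),
#   ]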
| 26.8
| 78
| 0.663557
| 0
| 0
| 0
| 0
| 1,005
| 0.625
| 0
| 0
| 159
| 0.098881
|
365f44e59be4486a64ab3380f2d229d1dcacfbe6
| 34
|
py
|
Python
|
SmartAPI/__init__.py
|
Kreastr/SmartAPI-HEILA
|
97dbe9e6e27267c60a4f94f60692d5f391e2ef7f
|
[
"BSD-2-Clause"
] | null | null | null |
SmartAPI/__init__.py
|
Kreastr/SmartAPI-HEILA
|
97dbe9e6e27267c60a4f94f60692d5f391e2ef7f
|
[
"BSD-2-Clause"
] | null | null | null |
SmartAPI/__init__.py
|
Kreastr/SmartAPI-HEILA
|
97dbe9e6e27267c60a4f94f60692d5f391e2ef7f
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
import site
import os
| 6.8
| 11
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
365f7a0a4d0f8739674bb26bb9651db410c27bb4
| 410
|
py
|
Python
|
compiler/dna/components/DNASuitEdge.py
|
AnonymousDeveloper65535/libpandadna
|
3110a8d576d22093e4c735081c5f639d28397a17
|
[
"BSD-3-Clause"
] | 36
|
2015-01-29T19:43:45.000Z
|
2022-01-19T11:49:28.000Z
|
compiler/dna/components/DNASuitEdge.py
|
AnonymousDeveloper65535/libpandadna
|
3110a8d576d22093e4c735081c5f639d28397a17
|
[
"BSD-3-Clause"
] | 44
|
2015-01-16T16:09:30.000Z
|
2022-01-25T02:29:15.000Z
|
compiler/dna/components/DNASuitEdge.py
|
AnonymousDeveloper65535/libpandadna
|
3110a8d576d22093e4c735081c5f639d28397a17
|
[
"BSD-3-Clause"
] | 42
|
2015-01-03T08:43:21.000Z
|
2022-01-11T04:29:11.000Z
|
class DNASuitEdge:
COMPONENT_CODE = 22
def __init__(self, startPoint, endPoint, zoneId):
self.startPoint = startPoint
self.endPoint = endPoint
self.zoneId = zoneId
def setStartPoint(self, startPoint):
self.startPoint = startPoint
def setEndPoint(self, endPoint):
self.endPoint = endPoint
def setZoneId(self, zoneId):
self.zoneId = zoneId
| 24.117647
| 53
| 0.656098
| 409
| 0.997561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3660a38e9b27f00fca04ce1ae0246262cce312d3
| 3,455
|
py
|
Python
|
cpplint_junit.py
|
johnthagen/cpplint-junit
|
9de4ed6762fdb415e1ebe94f1cd82d2027c2b96f
|
[
"MIT"
] | 5
|
2016-02-15T19:24:46.000Z
|
2020-05-12T12:35:24.000Z
|
cpplint_junit.py
|
johnthagen/cpplint-junit
|
9de4ed6762fdb415e1ebe94f1cd82d2027c2b96f
|
[
"MIT"
] | 2
|
2019-10-14T12:25:38.000Z
|
2019-12-15T18:34:34.000Z
|
cpplint_junit.py
|
johnthagen/cpplint-junit
|
9de4ed6762fdb415e1ebe94f1cd82d2027c2b96f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
"""Converts cpplint output to JUnit XML format."""
import argparse
import collections
import os
import re
import sys
from typing import Dict, List
from xml.etree import ElementTree
from exitstatus import ExitStatus
class CpplintError(object):
def __init__(self, file: str, line: int, message: str) -> None:
"""Constructor.
Args:
file: File error originated on.
line: Line error originated on.
message: Error message.
"""
self.file = file
self.line = line
self.message = message
def parse_arguments() -> argparse.Namespace:
parser = argparse.ArgumentParser(description='Converts cpplint output to JUnit XML format.')
parser.add_argument('input_file', type=str, help='cpplint stdout text file.')
parser.add_argument('output_file', type=str, help='JUnit XML output file.')
return parser.parse_args()
def parse_cpplint(file_name: str) -> Dict[str, List[CpplintError]]:
"""Parses a cpplint output file.
Args:
file_name: cpplint output file.
Returns:
Parsed errors grouped by file name.
Raises:
IOError: File does not exist (More specifically FileNotFoundError on Python 3).
"""
with open(file_name, 'rt') as file:
lines = file.readlines()
errors = collections.defaultdict(list)
for line in lines:
line = line.rstrip()
match = re.search(r'(\S+):(\d+):\s+(.+)', line)
if match is not None:
error = CpplintError(file=match.group(1),
line=int(match.group(2)),
message=match.group(3))
errors[error.file].append(error)
return errors
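# Worked example (cpplint's usual "path:line:  message" output layout is assumed): a line like
#   src/widget.cpp:42:  Missing space before {  [whitespace/braces] [5]
# matches the regex above and is stored as
#   CpplintError(file='src/widget.cpp', line=42, message='Missing space before {  [whitespace/braces] [5]')
# under errors['src/widget.cpp'].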
def generate_test_suite(errors: Dict[str, List[CpplintError]]) -> ElementTree.ElementTree:
"""Creates a JUnit XML tree from parsed cpplint errors.
Args:
errors: Parsed cpplint errors.
Returns:
XML test suite.
"""
test_suite = ElementTree.Element('testsuite')
test_suite.attrib['errors'] = str(len(errors))
test_suite.attrib['failures'] = str(0)
test_suite.attrib['name'] = 'cpplint errors'
test_suite.attrib['tests'] = str(len(errors))
test_suite.attrib['time'] = str(1)
for file_name, errors in errors.items():
test_case = ElementTree.SubElement(test_suite,
'testcase',
name=os.path.relpath(file_name))
for error in errors:
ElementTree.SubElement(test_case,
'error',
file=os.path.relpath(error.file),
line=str(error.line),
message='{}: {}'.format(error.line, error.message))
return ElementTree.ElementTree(test_suite)
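# For reference, the tree built above serializes to XML of roughly this shape
# (attribute order may vary):
#   <testsuite errors="1" failures="0" name="cpplint errors" tests="1" time="1">
#     <testcase name="src/widget.cpp">
#       <error file="src/widget.cpp" line="42" message="42: Missing space before { ..."/>
#     </testcase>
#   </testsuite>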
def main() -> ExitStatus: # pragma: no cover
"""Main function.
Returns:
Exit code.
"""
args = parse_arguments()
try:
errors = parse_cpplint(args.input_file)
except IOError as e:
print(str(e))
return ExitStatus.failure
if len(errors) > 0:
tree = generate_test_suite(errors)
tree.write(args.output_file, encoding='utf-8', xml_declaration=True)
return ExitStatus.success
if __name__ == '__main__': # pragma: no cover
sys.exit(main())
| 29.279661
| 96
| 0.58958
| 351
| 0.101592
| 0
| 0
| 0
| 0
| 0
| 0
| 988
| 0.285962
|