Dataset schema (one row per source file; fields appear in each row below in this order):

| column | dtype | values / range |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 5–2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–248 |
| max_stars_repo_name | string | lengths 5–125 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| max_issues_repo_path | string | lengths 3–248 |
| max_issues_repo_name | string | lengths 5–125 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| max_forks_repo_path | string | lengths 3–248 |
| max_forks_repo_name | string | lengths 5–125 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| content | string | lengths 5–2.06M |
| avg_line_length | float64 | 1–1.02M |
| max_line_length | int64 | 3–1.03M |
| alphanum_fraction | float64 | 0–1 |
| count_classes | int64 | 0–1.6M |
| score_classes | float64 | 0–1 |
| count_generators | int64 | 0–651k |
| score_generators | float64 | 0–1 |
| count_decorators | int64 | 0–990k |
| score_decorators | float64 | 0–1 |
| count_async_functions | int64 | 0–235k |
| score_async_functions | float64 | 0–1 |
| count_documentation | int64 | 0–1.04M |
| score_documentation | float64 | 0–1 |
e3af0c54fc348474f9b9d9f22f0f2e2bbfffd5d0
| 4,049
|
py
|
Python
|
Dragon/python/dragon/vm/tensorflow/contrib/learn/datasets/base.py
|
neopenx/Dragon
|
0e639a7319035ddc81918bd3df059230436ee0a1
|
[
"BSD-2-Clause"
] | 212
|
2015-07-05T07:57:17.000Z
|
2022-02-27T01:55:35.000Z
|
Dragon/python/dragon/vm/tensorflow/contrib/learn/datasets/base.py
|
neopenx/Dragon
|
0e639a7319035ddc81918bd3df059230436ee0a1
|
[
"BSD-2-Clause"
] | 6
|
2016-07-07T14:31:56.000Z
|
2017-12-12T02:21:15.000Z
|
Dragon/python/dragon/vm/tensorflow/contrib/learn/datasets/base.py
|
neopenx/Dragon
|
0e639a7319035ddc81918bd3df059230436ee0a1
|
[
"BSD-2-Clause"
] | 71
|
2016-03-24T09:02:41.000Z
|
2021-06-03T01:52:41.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base utilities for loading datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import time
import shutil
from six.moves import urllib
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
def retry(initial_delay,
max_delay,
factor=2.0,
jitter=0.25,
is_retriable=None):
"""Simple decorator for wrapping retriable functions.
Args:
initial_delay: the initial delay.
max_delay: the maximum delay allowed (actual max is
max_delay * (1 + jitter)).
factor: each subsequent retry multiplies the delay by this value
(must be >= 1).
jitter: to avoid lockstep, the returned delay is multiplied by a random
number between (1-jitter) and (1+jitter). To add a 20% jitter, set
jitter = 0.2. Must be < 1.
is_retriable: (optional) a function that takes an Exception as an argument
and returns true if retry should be applied.
"""
if factor < 1:
raise ValueError('factor must be >= 1; was %f' % (factor,))
if jitter >= 1:
raise ValueError('jitter must be < 1; was %f' % (jitter,))
# Generator to compute the individual delays
def delays():
delay = initial_delay
while delay <= max_delay:
yield delay * random.uniform(1 - jitter, 1 + jitter)
delay *= factor
def wrap(fn):
"""Wrapper function factory invoked by decorator magic."""
def wrapped_fn(*args, **kwargs):
"""The actual wrapper function that applies the retry logic."""
for delay in delays():
try:
return fn(*args, **kwargs)
except Exception as e:  # pylint: disable=broad-except
if is_retriable is None:
continue
if is_retriable(e):
time.sleep(delay)
else:
raise
return fn(*args, **kwargs)
return wrapped_fn
return wrap
_RETRIABLE_ERRNOS = {
110, # Connection timed out [socket.py]
}
def _is_retriable(e):
return isinstance(e, IOError) and e.errno in _RETRIABLE_ERRNOS
@retry(initial_delay=1.0, max_delay=16.0, is_retriable=_is_retriable)
def urlretrieve_with_retry(url, filename=None):
return urllib.request.urlretrieve(url, filename)
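# Illustrative note on the decorator values used above: with initial_delay=1.0,
# max_delay=16.0 and the default factor=2.0, jitter=0.25, delays() yields
# roughly 1, 2, 4, 8 and 16 seconds (each scaled by a random factor in
# [0.75, 1.25]), so a persistently failing retriable call is attempted at most
# six times: five inside the retry loop plus one final attempt whose exception
# propagates to the caller. A hedged usage sketch of the same pattern
# (fetch_bytes is a hypothetical example, not part of this module):
#
#     @retry(initial_delay=1.0, max_delay=16.0, is_retriable=_is_retriable)
#     def fetch_bytes(url):
#         return urllib.request.urlopen(url).read()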
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not os.path.exists(work_directory):
os.makedirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
temp_file_name, _ = urlretrieve_with_retry(source_url)
shutil.copy(temp_file_name, filepath)
size = os.path.getsize(filepath)
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
| 32.918699
| 80
| 0.637935
| 0
| 0
| 1,840
| 0.454433
| 170
| 0.041986
| 0
| 0
| 2,043
| 0.504569
|
e3afe486c89ac3a00730c37ba0aa5141f39fe3fe
| 97
|
py
|
Python
|
test/integration/steps/pds.py
|
NHSDigital/list-reconciliation
|
37b1ebe99a64275e23b0e7fb6a89415b92d14306
|
[
"MIT"
] | 4
|
2021-06-25T08:28:54.000Z
|
2021-12-16T11:03:42.000Z
|
test/integration/steps/pds.py
|
NHSDigital/list-reconciliation
|
37b1ebe99a64275e23b0e7fb6a89415b92d14306
|
[
"MIT"
] | 184
|
2021-06-24T15:27:08.000Z
|
2022-03-17T12:44:28.000Z
|
test/integration/steps/pds.py
|
NHSDigital/list-reconciliation
|
37b1ebe99a64275e23b0e7fb6a89415b92d14306
|
[
"MIT"
] | 3
|
2021-11-05T10:21:44.000Z
|
2022-03-04T14:29:24.000Z
|
from behave import given
@given("we have processed PDS data")
def step_impl(context):
pass
| 13.857143
| 36
| 0.731959
| 0
| 0
| 0
| 0
| 69
| 0.71134
| 0
| 0
| 28
| 0.28866
|
e3b045a473bd87ba50f0bc065652bd367fcdfb8d
| 19,208
|
py
|
Python
|
iSearch/isearch.py
|
Twilightgo/iSearch
|
600398dc22c07ef1211209769f9fda4d2c1151d7
|
[
"MIT"
] | null | null | null |
iSearch/isearch.py
|
Twilightgo/iSearch
|
600398dc22c07ef1211209769f9fda4d2c1151d7
|
[
"MIT"
] | null | null | null |
iSearch/isearch.py
|
Twilightgo/iSearch
|
600398dc22c07ef1211209769f9fda4d2c1151d7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import sys
import argparse
import os
import re
import sqlite3
import requests
import bs4
from termcolor import colored
# Python2 compatibility
if sys.version_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf-8')
# Default database path is ~/.iSearch.
DEFAULT_PATH = os.path.join(os.path.expanduser('~'), '.iSearch')
CREATE_TABLE_WORD = '''
CREATE TABLE IF NOT EXISTS Word
(
name TEXT PRIMARY KEY,
expl TEXT,
pr INT DEFAULT 1,
aset CHAR[1],
addtime TIMESTAMP NOT NULL DEFAULT (DATETIME('NOW', 'LOCALTIME'))
)
'''
def get_text(url):
my_headers = {
'Accept': 'text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN, zh;q=0.8',
'Upgrade-Insecure-Requests': '1',
'Host': 'dict.youdao.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/48.0.2564.116 Safari/537.36'
}
res = requests.get(url, headers=my_headers)
data = res.text
soup = bs4.BeautifulSoup(data, 'html.parser')
expl = ''
# -----------------collins-----------------------
collins = soup.find('div', id="collinsResult")
ls1 = []
if collins:
for s in collins.descendants:
if isinstance(s, bs4.element.NavigableString):
if s.strip():
ls1.append(s.strip())
if ls1[1].startswith('('):
# Phrase
expl = expl + ls1[0] + '\n'
line = ' '.join(ls1[2:])
else:
expl = expl + (' '.join(ls1[:2])) + '\n'
line = ' '.join(ls1[3:])
text1 = re.sub('例:', '\n\n例:', line)
text1 = re.sub(r'(\d+\. )', r'\n\n\1', text1)
text1 = re.sub(r'(\s+?→\s+)', r' → ', text1)
text1 = re.sub('(\")', '\'', text1)
text1 = re.sub('\s{10}\s+', '', text1)
expl += text1
# -----------------word_group--------------------
word_group = soup.find('div', id='word_group')
ls2 = []
if word_group:
for s in word_group.descendants:
if isinstance(s, bs4.element.NavigableString):
if s.strip():
ls2.append(s.strip())
text2 = ''
expl = expl + '\n\n' + '【词组】\n\n'
if len(ls2) < 3:
text2 = text2 + ls2[0] + ' ' + ls2[1] + '\n'
else:
for i, x in enumerate(ls2[:-3]):
if i % 2:
text2 = text2 + x + '\n'
else:
text2 = text2 + x + ' '
text2 = re.sub('(\")', '\'', text2)
expl += text2
# ------------------synonyms---------------------
synonyms = soup.find('div', id='synonyms')
ls3 = []
if synonyms:
for s in synonyms.descendants:
if isinstance(s, bs4.element.NavigableString):
if s.strip():
ls3.append(s.strip())
text3 = ''
tmp_flag = True
for i in ls3:
if '.' in i:
if tmp_flag:
tmp_flag = False
text3 = text3 + '\n' + i + '\n'
else:
text3 = text3 + '\n\n' + i + '\n'
else:
text3 = text3 + i
text3 = re.sub('(\")', '\'', text3)
expl = expl + '\n\n' + '【同近义词】\n'
expl += text3
# ------------------discriminate------------------
discriminate = soup.find('div', id='discriminate')
ls4 = []
if discriminate:
for s in discriminate.descendants:
if isinstance(s, bs4.element.NavigableString):
if s.strip():
ls4.append(s.strip())
expl = expl + '\n\n' + '【词语辨析】\n\n'
text4 = '-' * 40 + '\n' + format('↓ ' + ls4[0] + ' 的辨析 ↓', '^40s') + '\n' + '-' * 40 + '\n\n'
for x in ls4[1:]:
if x in '以上来源于':
break
if re.match(r'^[a-zA-Z]+$', x):
text4 = text4 + x + ' >> '
else:
text4 = text4 + x + '\n\n'
text4 = re.sub('(\")', '\'', text4)
expl += text4
# ------------------else------------------
# If no text found, then get other information
examples = soup.find('div', id='bilingual')
ls5 = []
if examples:
for s in examples.descendants:
if isinstance(s, bs4.element.NavigableString):
if s.strip():
ls5.append(s.strip())
text5 = '\n\n【双语例句】\n\n'
pt = re.compile(r'.*?\..*?\..*?|《.*》')
for word in ls5:
if not pt.match(word):
if word.endswith(('(', '。', '?', '!', '。”', ')')):
text5 = text5 + word + '\n\n'
continue
if u'\u4e00' <= word[0] <= u'\u9fa5':
if word != '更多双语例句':
text5 += word
else:
text5 = text5 + ' ' + word
text5 = re.sub('(\")', '\'', text5)
expl += text5
return expl
def colorful_print(raw):
'''print colorful text in terminal.'''
lines = raw.split('\n')
colorful = True
detail = False
for line in lines:
if line:
if colorful:
colorful = False
print(colored(line, 'white', 'on_green') + '\n')
continue
elif line.startswith('例'):
print(line + '\n')
continue
elif line.startswith('【'):
print(colored(line, 'white', 'on_green') + '\n')
detail = True
continue
if not detail:
print(colored(line + '\n', 'yellow'))
else:
print(colored(line, 'cyan') + '\n')
def normal_print(raw):
''' no colorful text, for output.'''
lines = raw.split('\n')
for line in lines:
if line:
print(line + '\n')
def search_online(word, printer=True):
'''search the word or phrase on http://dict.youdao.com.'''
url = 'http://dict.youdao.com/w/%s' % word
expl = get_text(url)
if printer:
colorful_print(expl)
return expl
def search_database(word):
'''offline search.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
curs.execute(r'SELECT expl, pr FROM Word WHERE name LIKE "%s%%"' % word)
res = curs.fetchall()
if res:
print(colored(word + ' 在数据库中存在', 'white', 'on_green'))
print()
print(colored('★ ' * res[0][1], 'red'), colored('☆ ' * (5 - res[0][1]), 'yellow'), sep='')
colorful_print(res[0][0])
else:
print(colored(word + ' 不在本地,从有道词典查询', 'white', 'on_red'))
search_online(word)
input_msg = '若存入本地,请输入优先级(1~5) ,否则 Enter 跳过\n>>> '
if sys.version_info[0] == 2:
add_in_db_pr = raw_input(input_msg)
else:
add_in_db_pr = input(input_msg)
if add_in_db_pr and add_in_db_pr.isdigit():
if(int(add_in_db_pr) >= 1 and int(add_in_db_pr) <= 5):
add_word(word, int(add_in_db_pr))
print(colored('单词 {word} 已加入数据库中'.format(word=word), 'white', 'on_red'))
curs.close()
conn.close()
def add_word(word, default_pr):
'''add the word or phrase to database.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
curs.execute('SELECT expl, pr FROM Word WHERE name = "%s"' % word)
res = curs.fetchall()
if res:
print(colored(word + ' 在数据库中已存在,不需要添加', 'white', 'on_red'))
sys.exit()
try:
expl = search_online(word, printer=False)
curs.execute('insert into word(name, expl, pr, aset) values ("%s", "%s", %d, "%s")' % (
word, expl, default_pr, word[0].upper()))
except Exception as e:
print(colored('something\'s wrong, you can\'t add the word', 'white', 'on_red'))
print(e)
else:
conn.commit()
print(colored('%s has been inserted into database' % word, 'green'))
finally:
curs.close()
conn.close()
def delete_word(word):
'''delete the word or phrase from database.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
# search first
curs.execute('SELECT expl, pr FROM Word WHERE name = "%s"' % word)
res = curs.fetchall()
if res:
try:
curs.execute('DELETE FROM Word WHERE name = "%s"' % word)
except Exception as e:
print(e)
else:
print(colored('%s has been deleted from database' % word, 'green'))
conn.commit()
finally:
curs.close()
conn.close()
else:
print(colored('%s does not exist in the database' % word, 'white', 'on_red'))
def set_priority(word, pr):
'''
set the priority of the word.
priority(from 1 to 5) is the importance of the word.
'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
curs.execute('SELECT expl, pr FROM Word WHERE name = "%s"' % word)
res = curs.fetchall()
if res:
try:
curs.execute('UPDATE Word SET pr= %d WHERE name = "%s"' % (pr, word))
except Exception as e:
print(colored('something\'s wrong, you can\'t reset priority', 'white', 'on_red'))
print(e)
else:
print(colored('the priority of %s has been reset to %s' % (word, pr), 'green'))
conn.commit()
finally:
curs.close()
conn.close()
else:
print(colored('%s does not exist in the database' % word, 'white', 'on_red'))
def list_letter(aset, vb=False, output=False):
'''list words by letter, from a-z (ignore case).'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
try:
if not vb:
curs.execute('SELECT name, pr FROM Word WHERE aset = "%s"' % aset)
else:
curs.execute('SELECT expl, pr FROM Word WHERE aset = "%s"' % aset)
except Exception as e:
print(colored('something\'s wrong, catalog is from A to Z', 'red'))
print(e)
else:
if not output:
print(colored(format(aset, '-^40s'), 'green'))
else:
print(format(aset, '-^40s'))
for line in curs.fetchall():
expl = line[0]
pr = line[1]
print('\n' + '=' * 40 + '\n')
if not output:
print(colored('★ ' * pr, 'red', ), colored('☆ ' * (5 - pr), 'yellow'), sep='')
colorful_print(expl)
else:
print('★ ' * pr + '☆ ' * (5 - pr))
normal_print(expl)
finally:
curs.close()
conn.close()
def list_priority(pr, vb=False, output=False):
'''
list words by priority, like this:
1   : list words whose priority is 1,
2+  : list words whose priority is 2 or higher,
3-4 : list words whose priority is from 3 to 4.
'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
try:
if not vb:
if len(pr) == 1:
curs.execute('SELECT name, pr FROM Word WHERE pr == %d ORDER by pr, name' % (int(pr[0])))
elif len(pr) == 2 and pr[1] == '+':
curs.execute('SELECT name, pr FROM Word WHERE pr >= %d ORDER by pr, name' % (int(pr[0])))
elif len(pr) == 3 and pr[1] == '-':
curs.execute('SELECT name, pr FROM Word WHERE pr >= %d AND pr<= % d ORDER by pr, name' % (
int(pr[0]), int(pr[2])))
else:
if len(pr) == 1:
curs.execute('SELECT expl, pr FROM Word WHERE pr == %d ORDER by pr, name' % (int(pr[0])))
elif len(pr) == 2 and pr[1] == '+':
curs.execute('SELECT expl, pr FROM Word WHERE pr >= %d ORDER by pr, name' % (int(pr[0])))
elif len(pr) == 3 and pr[1] == '-':
curs.execute('SELECT expl, pr FROM Word WHERE pr >= %d AND pr<= %d ORDER by pr, name' % (
int(pr[0]), int(pr[2])))
except Exception as e:
print(colored('something\'s wrong, priority must be 1-5', 'red'))
print(e)
else:
for line in curs.fetchall():
expl = line[0]
pr = line[1]
print('\n' + '=' * 40 + '\n')
if not output:
print(colored('★ ' * pr, 'red', ), colored('☆ ' * (5 - pr), 'yellow'), sep='')
colorful_print(expl)
else:
print('★ ' * pr + '☆ ' * (5 - pr))
normal_print(expl)
finally:
curs.close()
conn.close()
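# Example invocations of the priority filter above (an illustrative sketch,
# assuming the script is run directly as `python isearch.py`):
#   python isearch.py -p 3        # words whose priority is exactly 3
#   python isearch.py -p 3+ -v    # full entries for priority >= 3
#   python isearch.py -p 2-4 -o   # priority 2..4, plain output without colors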
def list_latest(limit, vb=False, output=False):
'''list words by latest time you add to database.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
try:
if not vb:
curs.execute('SELECT name, pr, addtime FROM Word ORDER by datetime(addtime) DESC LIMIT %d' % limit)
else:
curs.execute('SELECT expl, pr, addtime FROM Word ORDER by datetime(addtime) DESC LIMIT %d' % limit)
except Exception as e:
print(e)
print(colored('something\'s wrong, please set the limit', 'red'))
else:
for line in curs.fetchall():
expl = line[0]
pr = line[1]
print('\n' + '=' * 40 + '\n')
if not output:
print(colored('★ ' * pr, 'red'), colored('☆ ' * (5 - pr), 'yellow'), sep='')
colorful_print(expl)
else:
print('★ ' * pr + '☆ ' * (5 - pr))
normal_print(expl)
finally:
curs.close()
conn.close()
def super_insert(input_file_path):
log_file_path = os.path.join(DEFAULT_PATH, 'log.txt')
baseurl = 'http://dict.youdao.com/w/'
word_list = open(input_file_path, 'r', encoding='utf-8')
log_file = open(log_file_path, 'w', encoding='utf-8')
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
for line in word_list.readlines():
word = line.strip()
print(word)
url = baseurl + word
expl = get_text(url)
try:
# insert into database.
curs.execute("INSERT INTO Word(name, expl, pr, aset) VALUES (\"%s\", \"%s\", %d, \"%s\")" \
% (word, expl, 1, word[0].upper()))
except Exception as e:
print(word, "can't insert into database")
# save the error in log file.
print(e)
log_file.write(word + '\n')
conn.commit()
curs.close()
conn.close()
log_file.close()
word_list.close()
def count_word(arg):
'''count the number of words'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
if arg[0].isdigit():
if len(arg) == 1:
curs.execute('SELECT count(*) FROM Word WHERE pr == %d' % (int(arg[0])))
elif len(arg) == 2 and arg[1] == '+':
curs.execute('SELECT count(*) FROM Word WHERE pr >= %d' % (int(arg[0])))
elif len(arg) == 3 and arg[1] == '-':
curs.execute('SELECT count(*) FROM Word WHERE pr >= %d AND pr<= % d' % (int(arg[0]), int(arg[2])))
elif arg[0].isalpha():
if arg == 'all':
curs.execute('SELECT count(*) FROM Word')
elif len(arg) == 1:
curs.execute('SELECT count(*) FROM Word WHERE aset == "%s"' % arg.upper())
res = curs.fetchall()
print(res[0][0])
curs.close()
conn.close()
def main():
parser = argparse.ArgumentParser(description='Search words')
parser.add_argument(dest='word', help='the word you want to search.', nargs='*')
parser.add_argument('-f', '--file', dest='file',
action='store', help='add words list from text file.')
parser.add_argument('-a', '--add', dest='add',
action='store', nargs='+', help='insert word into database.')
parser.add_argument('-d', '--delete', dest='delete',
action='store', nargs='+', help='delete word from database.')
parser.add_argument('-s', '--set', dest='set',
action='store', help='set priority.')
parser.add_argument('-v', '--verbose', dest='verbose',
action='store_true', help='verbose mode.')
parser.add_argument('-o', '--output', dest='output',
action='store_true', help='output mode.')
parser.add_argument('-p', '--priority', dest='priority',
action='store', help='list words by priority.')
parser.add_argument('-t', '--time', dest='time',
action='store', help='list words by time.')
parser.add_argument('-l', '--letter', dest='letter',
action='store', help='list words by letter.')
parser.add_argument('-c', '--count', dest='count',
action='store', help='count the word.')
args = parser.parse_args()
is_verbose = args.verbose
is_output = args.output
if args.add:
default_pr = 1 if not args.set else int(args.set)
add_word(' '.join(args.add), default_pr)
elif args.delete:
delete_word(' '.join(args.delete))
elif args.set:
number = args.set
if not number.isdigit():
print(colored('you forgot to set the number', 'white', 'on_red'))
sys.exit()
priority = int(number)
if args.word:
set_priority(' '.join(args.word), priority)
else:
print(colored('please set the priority', 'white', 'on_red'))
elif args.letter:
list_letter(args.letter[0].upper(), is_verbose, is_output)
elif args.time:
limit = int(args.time)
list_latest(limit, is_verbose, is_output)
elif args.priority:
list_priority(args.priority, is_verbose, is_output)
elif args.file:
input_file_path = args.file
if input_file_path.endswith('.txt'):
super_insert(input_file_path)
elif input_file_path == 'default':
super_insert(os.path.join(DEFAULT_PATH, 'word_list.txt'))
else:
print(colored('please use a correct path of text file', 'white', 'on_red'))
elif args.count:
count_word(args.count)
elif args.word:
if not os.path.exists(os.path.join(DEFAULT_PATH, 'word.db')):
os.mkdir(DEFAULT_PATH)
with open(os.path.join(DEFAULT_PATH, 'word_list.txt'), 'w') as f:
pass
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
curs.execute(CREATE_TABLE_WORD)
conn.commit()
curs.close()
conn.close()
word = ' '.join(args.word)
search_database(word)
if __name__ == '__main__':
main()
| 32.834188
| 112
| 0.508434
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,633
| 0.289406
|
e3b0cc4b1724b29973099266d87959c7fe603cf2
| 108
|
wsgi
|
Python
|
mysite/auth.wsgi
|
biljiang/mysite
|
15c0a0d7bb6bd46587f4cf805ce43f4c570de1be
|
[
"BSD-3-Clause"
] | null | null | null |
mysite/auth.wsgi
|
biljiang/mysite
|
15c0a0d7bb6bd46587f4cf805ce43f4c570de1be
|
[
"BSD-3-Clause"
] | null | null | null |
mysite/auth.wsgi
|
biljiang/mysite
|
15c0a0d7bb6bd46587f4cf805ce43f4c570de1be
|
[
"BSD-3-Clause"
] | null | null | null |
def groups_for_user(environ, user):
if user == 'feng':
return ['secret-agents']
return ['']
| 21.6
| 35
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.212963
|
e3b1ad3f8a41b03310d872dbf885d93f88101fcf
| 4,925
|
py
|
Python
|
models/gcn.py
|
Louis-udm/Word-Grounded-Graph-Convolutional-Network
|
4c90bff0ec8bcdd8994154eead0efb5a3caefca7
|
[
"MIT"
] | null | null | null |
models/gcn.py
|
Louis-udm/Word-Grounded-Graph-Convolutional-Network
|
4c90bff0ec8bcdd8994154eead0efb5a3caefca7
|
[
"MIT"
] | null | null | null |
models/gcn.py
|
Louis-udm/Word-Grounded-Graph-Convolutional-Network
|
4c90bff0ec8bcdd8994154eead0efb5a3caefca7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Title: GCN models
Description:
The original Graph convolutional network model and GCN layer.
Refer to: https://arxiv.org/abs/1609.02907
"""
# =======================================
# @author Zhibin.Lu
# @email zhibin.lu@umontreal.ca
# =======================================
import collections
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class GraphConvolutionLayer(nn.Module):
"""Original Graph Convolutional Layer
Reference GCN equation:
F = A(relu(AW))W
"""
def __init__(
self,
input_dim,
output_dim,
support,
act_func=None,
featureless=False,
dropout_rate=0.0,
bias=False,
):
super().__init__()
self.support = support
self.featureless = featureless
for i in range(len(self.support)):
setattr(
self,
"W{}".format(i),
nn.Parameter(torch.randn(input_dim, output_dim)),
)
if bias:
self.b = nn.Parameter(torch.zeros(1, output_dim))
self.act_func = act_func
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x):
if not self.featureless:
x = self.dropout(x)
for i in range(len(self.support)):
if self.featureless:
pre_sup = getattr(self, "W{}".format(i))
else:
pre_sup = x.mm(getattr(self, "W{}".format(i)))
if i == 0:
out = self.support[i].mm(pre_sup)
else:
out += self.support[i].mm(pre_sup)
if self.act_func is not None:
out = self.act_func(out)
self.embedding = out
return out
class GraphConvolutionLayer_NoActBtwLayer(nn.Module):
""" GraphConvolution Layer without the activation
function between 2 graph convolution layers.
No-activation-func GCN equation:
F = (relu(A(AW)))W
"""
def __init__(
self,
input_dim,
output_dim,
support,
act_func=None,
featureless=False,
dropout_rate=0.0,
bias=False,
):
super().__init__()
self.support = support
self.featureless = featureless
for i in range(len(self.support)):
setattr(
self,
"W{}".format(i),
nn.Parameter(torch.randn(input_dim, output_dim)),
)
if bias:
self.b = nn.Parameter(torch.zeros(1, output_dim))
self.act_func = act_func
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x):
if not self.featureless:
x = self.dropout(x)
for i in range(len(self.support)):
if self.featureless:
pre_sup = self.support[i]
else:
pre_sup = self.support[i].mm(x)
if self.act_func is not None:
pre_sup = self.act_func(pre_sup)
if i == 0:
out = pre_sup.mm(getattr(self, "W{}".format(i)))
else:
out += pre_sup.mm(getattr(self, "W{}".format(i)))
self.embedding = out
return out
class GCN_2Layers(nn.Module):
""" The 2-layer GCN
1. Original GCN model when mode is "only_gcn_act",
equation is A(relu(AW))W
2. No act func btw graph layer when mode is "only_fc_act",
equation is (relu(A(AW)))W
"""
def __init__(
self,
input_dim,
support,
hid_dim=200,
dropout_rate=0.0,
num_classes=10,
act_func=None,
mode="only_gcn_act",
):
super().__init__()
# GraphConvolution
if mode == "only_gcn_act": # original Text_GCN
# A(relu(AW))W
self.layer1 = GraphConvolutionLayer(
input_dim,
hid_dim,
support,
act_func=act_func,
featureless=True,
dropout_rate=dropout_rate,
)
self.layer2 = GraphConvolutionLayer(
hid_dim, num_classes, support, dropout_rate=dropout_rate
)
elif mode == "only_fc_act":
# (relu(A(AW)))W
self.layer1 = GraphConvolutionLayer_NoActBtwLayer(
input_dim,
hid_dim,
support,
featureless=True,
dropout_rate=dropout_rate,
)
self.layer2 = GraphConvolutionLayer_NoActBtwLayer(
hid_dim,
num_classes,
support,
act_func=act_func,
dropout_rate=dropout_rate,
)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
return out
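# A minimal construction sketch (hypothetical sizes; `adj` stands for a
# pre-normalized support matrix such as D^-1/2 (A + I) D^-1/2 held as a
# torch tensor; only the names defined in this module are real):
#
#     adj = torch.eye(1000)                 # placeholder support matrix
#     model = GCN_2Layers(
#         input_dim=adj.shape[1],
#         support=[adj],
#         hid_dim=200,
#         num_classes=20,
#         act_func=nn.ReLU(),
#         mode="only_gcn_act",              # A(relu(AW))W, as documented above
#     )
#     logits = model(adj)  # the featureless first layer ignores its input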
| 24.502488
| 72
| 0.520406
| 4,437
| 0.900914
| 0
| 0
| 0
| 0
| 0
| 0
| 932
| 0.189239
|
e3b1ba519d604af495caccc117a36b3a9bff6079
| 2,513
|
py
|
Python
|
tabledefinition/generate_table_definitions_for_solana.py
|
blockchain-etl/evmchain-etl-table-definition-cli
|
033d7e8ddc33f47378547a304b2688df3a0a3746
|
[
"MIT"
] | 1
|
2022-03-04T11:24:31.000Z
|
2022-03-04T11:24:31.000Z
|
tabledefinition/generate_table_definitions_for_solana.py
|
blockchain-etl/evmchain-etl-table-definition-cli
|
033d7e8ddc33f47378547a304b2688df3a0a3746
|
[
"MIT"
] | null | null | null |
tabledefinition/generate_table_definitions_for_solana.py
|
blockchain-etl/evmchain-etl-table-definition-cli
|
033d7e8ddc33f47378547a304b2688df3a0a3746
|
[
"MIT"
] | null | null | null |
SOLIDITY_TO_BQ_TYPES = {
'address': 'STRING',
}
table_description = ''
def abi_to_table_definitions_for_solana(
abi,
dataset_name,
contract_name,
contract_address=None,
include_functions=False
):
result = {}
for a in abi.get('events') if abi.get('events') else []:
parser_type = 'log'
table_name = create_table_name(a, contract_name, parser_type)
result[table_name] = abi_to_table_definition(a, contract_address, dataset_name, contract_name, parser_type)
if include_functions:
for a in abi.get('instructions') if abi.get('instructions') else []:
parser_type = 'instruction'
table_name = create_table_name(a, contract_name, parser_type)
result[table_name] = abi_to_table_definition(a, contract_address, dataset_name, contract_name, parser_type)
return result
def abi_to_table_definition(abi, contract_address, dataset_name, contract_name, parser_type):
table_name = create_table_name(abi, contract_name, parser_type)
result = {}
result['parser'] = {
'type': parser_type,
'contract_address': contract_address,
'idl': abi,
'field_mapping': {}
}
inputs = abi.get('args') if parser_type == 'instruction' else abi.get('fields')
schema = [
{
'name': x.get('name'),
'description': '',
'type': 'STRING' # we sometimes get parsing errors, so safest to make all STRING
} for x in inputs
]
if parser_type == 'instruction' and abi.get('accounts'):
schema.append({
'name': 'accounts',
'description': 'accounts',
'type': 'RECORD',
'fields': [
{
'name': acc.get('name'),
'description': '',
'type': 'STRING'
} for acc in abi.get('accounts')
]
})
result['table'] = {
'dataset_name': dataset_name,
'table_name': table_name,
'table_description': table_description,
'schema': schema
}
return result
def create_table_name(abi, contract_name, parser_type):
if parser_type == 'log':
return contract_name + '_event_' + abi['name']
else:
return contract_name + '_call_' + abi['name']
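# For illustration (values are hypothetical): with contract_name 'spl_token',
# create_table_name({'name': 'Transfer'}, 'spl_token', 'log') returns
# 'spl_token_event_Transfer', while parser_type 'instruction' falls through to
# the else branch and yields 'spl_token_call_Transfer'.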
def get_columns_from_event_abi(event_abi):
return [a.get('name') for a in event_abi['inputs']]
| 32.217949
| 119
| 0.578591
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 507
| 0.201751
|
e3b286c18d71e706ee97d4e448587e741b1515a4
| 587
|
py
|
Python
|
number-guessing-game.py
|
DataSciPyCodes/Python-Projects
|
0c62477f2177d6ec7431875da6aa53778a790bf6
|
[
"MIT"
] | null | null | null |
number-guessing-game.py
|
DataSciPyCodes/Python-Projects
|
0c62477f2177d6ec7431875da6aa53778a790bf6
|
[
"MIT"
] | null | null | null |
number-guessing-game.py
|
DataSciPyCodes/Python-Projects
|
0c62477f2177d6ec7431875da6aa53778a790bf6
|
[
"MIT"
] | null | null | null |
#Method-1 guess the number game
import random
number = random.randint(1,10)
guess = 0
count = 0
print("You can exit the game anytime. Just enter 'exit'.")
while guess != number and guess != "exit":
guess = input("Guess a number between 1 to 10 :- ")
if guess == "exit":
print("Closing the game...")
break
guess = int(guess)
count += 1
if guess < number:
print("Too low!")
elif guess > number:
print("Too high!")
else:
print("\nCongratulation, You got it!")
print("You have tried ", count ," times")
| 23.48
| 58
| 0.577513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 228
| 0.388416
|
e3b312bcfe15753efff73463e7b650e5bc126303
| 10,014
|
py
|
Python
|
docking/dock_and_equilibrate.py
|
proteneer/timemachine
|
feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701
|
[
"Apache-2.0"
] | 91
|
2019-01-05T17:03:04.000Z
|
2022-03-11T09:08:46.000Z
|
docking/dock_and_equilibrate.py
|
proteneer/timemachine
|
feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701
|
[
"Apache-2.0"
] | 474
|
2019-01-07T14:33:15.000Z
|
2022-03-31T19:15:12.000Z
|
docking/dock_and_equilibrate.py
|
proteneer/timemachine
|
feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701
|
[
"Apache-2.0"
] | 12
|
2019-01-13T00:40:36.000Z
|
2022-01-14T10:23:54.000Z
|
"""Solvates a host, inserts guest(s) into solvated host, equilibrates
"""
import os
import time
import tempfile
import numpy as np
from rdkit import Chem
from md import builders, minimizer
from fe import pdb_writer, free_energy
from ff import Forcefield
from ff.handlers.deserialize import deserialize_handlers
from timemachine.lib import custom_ops, LangevinIntegrator
from docking import report
def dock_and_equilibrate(
host_pdbfile,
guests_sdfile,
max_lambda,
insertion_steps,
eq_steps,
outdir,
fewer_outfiles=False,
constant_atoms=[],
):
"""Solvates a host, inserts guest(s) into solvated host, equilibrates
Parameters
----------
host_pdbfile: path to host pdb file to dock into
guests_sdfile: path to input sdf with guests to pose/dock
max_lambda: lambda value the guest should insert from or delete to
(recommended: 1.0 for work calculation, 0.25 to stay close to original pose)
(must be =1 for work calculation to be applicable)
insertion_steps: how many steps to insert the guest over (recommended: 501)
eq_steps: how many steps of equilibration to do after insertion (recommended: 15001)
outdir: where to write output (will be created if it does not already exist)
fewer_outfiles: if True, will only write frames for the equilibration, not insertion
constant_atoms: atom numbers from the host_pdbfile to hold mostly fixed across the simulation
(1-indexed, like PDB files)
Output
------
A pdb & sdf file for the last step of insertion
(outdir/<guest_name>/<guest_name>_ins_<step>_[host.pdb/guest.sdf])
A pdb & sdf file every 1000 steps of equilibration
(outdir/<guest_name>/<guest_name>_eq_<step>_[host.pdb/guest.sdf])
stdout corresponding to the files written noting the lambda value and energy
stdout for each guest noting the work of transition, if applicable
stdout for each guest noting how long it took to run
Note
----
The work will not be calculated if the du_dl endpoints are not close to 0 or if any norm of
force per atom exceeds 20000 kJ/(mol*nm) [MAX_NORM_FORCE defined in docking/report.py]
"""
if not os.path.exists(outdir):
os.makedirs(outdir)
print(
f"""
HOST_PDBFILE = {host_pdbfile}
GUESTS_SDFILE = {guests_sdfile}
OUTDIR = {outdir}
MAX_LAMBDA = {max_lambda}
INSERTION_STEPS = {insertion_steps}
EQ_STEPS = {eq_steps}
"""
)
# Prepare host
# TODO: handle extra (non-transitioning) guests?
print("Solvating host...")
(
solvated_host_system,
solvated_host_coords,
_,
_,
host_box,
solvated_topology,
) = builders.build_protein_system(host_pdbfile)
_, solvated_host_pdb = tempfile.mkstemp(suffix=".pdb", text=True)
writer = pdb_writer.PDBWriter([solvated_topology], solvated_host_pdb)
writer.write_frame(solvated_host_coords)
writer.close()
solvated_host_mol = Chem.MolFromPDBFile(solvated_host_pdb, removeHs=False)
os.remove(solvated_host_pdb)
guest_ff_handlers = deserialize_handlers(
open(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"..",
"ff/params/smirnoff_1_1_0_ccc.py",
)
).read()
)
ff = Forcefield(guest_ff_handlers)
# Run the procedure
print("Getting guests...")
suppl = Chem.SDMolSupplier(guests_sdfile, removeHs=False)
for guest_mol in suppl:
start_time = time.time()
guest_name = guest_mol.GetProp("_Name")
guest_conformer = guest_mol.GetConformer(0)
orig_guest_coords = np.array(guest_conformer.GetPositions(), dtype=np.float64)
orig_guest_coords = orig_guest_coords / 10 # convert to md_units
minimized_coords = minimizer.minimize_host_4d(
[guest_mol], solvated_host_system, solvated_host_coords, ff, host_box
)
afe = free_energy.AbsoluteFreeEnergy(guest_mol, ff)
ups, sys_params, combined_masses, _ = afe.prepare_host_edge(
ff.get_ordered_params(), solvated_host_system, minimized_coords
)
combined_bps = []
for up, sp in zip(ups, sys_params):
combined_bps.append(up.bind(sp))
x0 = np.concatenate([minimized_coords, orig_guest_coords])
v0 = np.zeros_like(x0)
print(f"SYSTEM", f"guest_name: {guest_name}", f"num_atoms: {len(x0)}")
for atom_num in constant_atoms:
combined_masses[atom_num - 1] += 50000
seed = 2021
intg = LangevinIntegrator(300.0, 1.5e-3, 1.0, combined_masses, seed).impl()
u_impls = []
for bp in combined_bps:
bp_impl = bp.bound_impl(precision=np.float32)
u_impls.append(bp_impl)
ctxt = custom_ops.Context(x0, v0, host_box, intg, u_impls)
# insert guest
insertion_lambda_schedule = np.linspace(max_lambda, 0.0, insertion_steps)
calc_work = True
# collect a du_dl sample at every step (subsample_interval = 1)
subsample_interval = 1
full_du_dls, _, _ = ctxt.multiple_steps(insertion_lambda_schedule, subsample_interval)
step = len(insertion_lambda_schedule) - 1
lamb = insertion_lambda_schedule[-1]
ctxt.step(lamb)
report.report_step(
ctxt,
step,
lamb,
host_box,
combined_bps,
u_impls,
guest_name,
insertion_steps,
"INSERTION",
)
if not fewer_outfiles:
host_coords = ctxt.get_x_t()[: len(solvated_host_coords)] * 10
guest_coords = ctxt.get_x_t()[len(solvated_host_coords) :] * 10
report.write_frame(
host_coords,
solvated_host_mol,
guest_coords,
guest_mol,
guest_name,
outdir,
str(step).zfill(len(str(insertion_steps))),
"ins",
)
if report.too_much_force(ctxt, lamb, host_box, combined_bps, u_impls):
print("Not calculating work (too much force)")
calc_work = False
continue
# Note: this condition only applies for ABFE, not RBFE
if abs(full_du_dls[0]) > 0.001 or abs(full_du_dls[-1]) > 0.001:
print("Not calculating work (du_dl endpoints are not ~0)")
calc_work = False
if calc_work:
work = np.trapz(full_du_dls, insertion_lambda_schedule[::subsample_interval])
print(f"guest_name: {guest_name}\tinsertion_work: {work:.2f}")
# equilibrate
for step in range(eq_steps):
ctxt.step(0.00)
if step % 1000 == 0:
report.report_step(
ctxt,
step,
0.00,
host_box,
combined_bps,
u_impls,
guest_name,
eq_steps,
"EQUILIBRATION",
)
if (not fewer_outfiles) or (step == eq_steps - 1):
host_coords = ctxt.get_x_t()[: len(solvated_host_coords)] * 10
guest_coords = ctxt.get_x_t()[len(solvated_host_coords) :] * 10
report.write_frame(
host_coords,
solvated_host_mol,
guest_coords,
guest_mol,
guest_name,
outdir,
str(step).zfill(len(str(eq_steps))),
"eq",
)
if step in (0, int(eq_steps / 2), eq_steps - 1):
if report.too_much_force(ctxt, 0.00, host_box, combined_bps, u_impls):
break
end_time = time.time()
print(f"{guest_name} took {(end_time - start_time):.2f} seconds")
def main():
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"-p",
"--host_pdbfile",
default="tests/data/hif2a_nowater_min.pdb",
help="host to dock into",
)
parser.add_argument(
"-s",
"--guests_sdfile",
default="tests/data/ligands_40__first-two-ligs.sdf",
help="guests to pose",
)
parser.add_argument(
"--max_lambda",
type=float,
default=1.0,
help=(
"lambda value the guest should insert from or delete to "
"(must be =1 for the work calculation to be applicable)"
),
)
parser.add_argument(
"--insertion_steps",
type=int,
default=501,
help="how many steps to take while phasing in each guest",
)
parser.add_argument(
"--eq_steps",
type=int,
default=15001,
help="equilibration length (1 step = 1.5 femtoseconds)",
)
parser.add_argument("-o", "--outdir", default="dock_equil_out", help="where to write output")
parser.add_argument("--fewer_outfiles", action="store_true", help="write fewer output pdb/sdf files")
parser.add_argument(
"-c",
"--constant_atoms_file",
help="file containing comma-separated atom numbers to hold ~fixed (1-indexed)",
)
args = parser.parse_args()
constant_atoms_list = []
if args.constant_atoms_file:
with open(args.constant_atoms_file, "r") as rfile:
for line in rfile.readlines():
atoms = [int(x.strip()) for x in line.strip().split(",")]
constant_atoms_list += atoms
dock_and_equilibrate(
args.host_pdbfile,
args.guests_sdfile,
args.max_lambda,
args.insertion_steps,
args.eq_steps,
args.outdir,
args.fewer_outfiles,
constant_atoms_list,
)
if __name__ == "__main__":
main()
| 33.049505
| 105
| 0.603056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,142
| 0.313761
|
e3b3a2b9c400072459039396551edf7edb2673da
| 5,552
|
py
|
Python
|
Lessons/source/bases.py
|
ericanaglik/cs13
|
6dc2dd41e0b82a43999145b226509d8fc0adb366
|
[
"MIT"
] | null | null | null |
Lessons/source/bases.py
|
ericanaglik/cs13
|
6dc2dd41e0b82a43999145b226509d8fc0adb366
|
[
"MIT"
] | 8
|
2019-04-26T06:29:56.000Z
|
2019-08-17T01:48:07.000Z
|
Lessons/source/bases.py
|
ericanaglik/cs13
|
6dc2dd41e0b82a43999145b226509d8fc0adb366
|
[
"MIT"
] | null | null | null |
#!python
import string
# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace
digit_value = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15, 'g': 16, 'h': 17, 'i': 18, 'j': 19, 'k': 20, 'l': 21, 'm': 22, 'n': 23, 'o': 24, 'p': 25, 'q': 26, 'r': 27, 's': 28, 't': 29, 'u': 30, 'v': 31, 'w': 32, 'x': 33, 'y': 34, 'z': 35}
value_digit = {0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f', 16: 'g', 17: 'h', 18: 'i', 19: 'j', 20: 'k', 21: 'l', 22: 'm', 23: 'n', 24: 'o', 25: 'p', 26: 'q', 27: 'r', 28: 's', 29: 't', 30: 'u', 31: 'v', 32: 'w', 33: 'x', 34: 'y', 35: 'z'}
def decode(digits, base):
"""Decode given digits in given base to number in base 10.
digits: str -- string representation of number (in given base)
base: int -- base of given number
return: int -- integer representation of number (in base 10)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
# TODO: Decode digits from binary (base 2)
digits_list = list(digits.lower())
digits_list.reverse()
# print(digits_list)
# go through the array and figure out what each 1 and 0 mean
total = 0
for i, value in enumerate(digits_list):
place_value = base ** i
# print(place_value, value)
total += digit_value[value] * place_value
# print(place_value, digit_value[value], digit_value[value] * place_value, total)
return total
# ...
# TODO: Decode digits from hexadecimal (base 16)
# TODO: Decode digits from any base (2 up to 36)
# ...
def encode(number, base):
"""Encode given number in base 10 to digits in given base.
number: int -- integer representation of number (in base 10)
base: int -- base to convert to
return: str -- string representation of number (in given base)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
# Handle unsigned numbers only for now
assert number >= 0, 'number is negative: {}'.format(number)
# TODO: Encode number in binary (base 2)
numbers = []
while number > 0:
remainder = number % base
if number < base:
remainder = number
number = number//base
numbers.append(value_digit[remainder])
numbers.reverse()
numbers_string = ''.join(numbers)
return numbers_string
# TODO: Encode number in hexadecimal (base 16)
# ...
# TODO: Encode number in any base (2 up to 36)
# ...
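# Quick sanity check for the two helpers above (illustrative, not part of the
# original exercise): encode(10, 2) returns '1010', decode('1010', 2) returns
# 10, and therefore convert('1010', 2, 16) below returns 'a'.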
def convert(digits, base1, base2):
"""Convert given digits in base1 to digits in base2.
digits: str -- string representation of number (in base1)
base1: int -- base of given number
base2: int -- base to convert to
return: str -- string representation of number (in base2)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
decoded = decode(digits, base1)
encoded = encode(decoded, base2)
return encoded
# TODO: Convert digits from base 2 to base 16 (and vice versa)
# ...
# TODO: Convert digits from base 2 to base 10 (and vice versa)
# ...
# TODO: Convert digits from base 10 to base 16 (and vice versa)
# ...
# TODO: Convert digits from any base to any base (2 up to 36)
# ...
def convert_fractional(digits, base1, base2):
# begin with the decimal fraction and multiply by 2
# grab the whole number from the result and add to the right of the point
# convert to string
# string split at decimal
# create a var for everything right of the decimal and then multiply by 2
#convert a fractional num from base1 to decimal
#convert that decimal fraction to base2
# split string at decimal
digits = digits.split(".")
# convert the whole-number part
whole = convert(digits[0], base1, base2)
# cleaning up decimal so I can convert to binary
deci = "." + digits[1]
deci = float(deci)
to_binary = ""
while deci > 0:
deci *= base2
if deci >= 1:
to_binary += "1"
deci -= 1
else:
to_binary += "0"
return whole + "." + to_binary
def convert_negative(digits, base1, base2):
pass
def main():
"""Read command-line arguments and convert given digits between bases."""
import sys
args = sys.argv[1:] # Ignore script file name
if len(args) == 3:
digits = args[0]
base1 = int(args[1])
base2 = int(args[2])
# Convert given digits between bases
result = convert(digits, base1, base2)
print('{} in base {} is {} in base {}'.format(digits, base1, result, base2))
else:
print('Usage: {} digits base1 base2'.format(sys.argv[0]))
print('Converts digits from base1 to base2')
if __name__ == '__main__':
# main()
print(convert_fractional(".625", 10, 2))
| 36.287582
| 328
| 0.600865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,059
| 0.550973
|
e3b3eb4f092c715b7640f0a297086182d40badaa
| 3,667
|
py
|
Python
|
ecl/provider_connectivity/v2/address_assignment.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | null | null | null |
ecl/provider_connectivity/v2/address_assignment.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | null | null | null |
ecl/provider_connectivity/v2/address_assignment.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from ecl.provider_connectivity import provider_connectivity_service
from ecl import resource2
from ecl.network.v2 import network
from ecl.network.v2 import subnet
import hashlib
class AddressAssignment(resource2.Resource):
resources_key = "address_assignments"
resource_key = "address_assignment"
service = provider_connectivity_service.ProviderConnectivityService("v2.0")
base_path = '/' + service.version + \
'/tenant_connection_requests/' \
'%(tenant_connection_request_id)s/address_assignments'
# capabilities
allow_list = True
#: tenant_connection_request unique ID.
tenant_connection_request_id = resource2.URI(
"tenant_connection_request_id")
#: tenant_connection unique ID.
tenant_connection_id = resource2.Body("tenant_connection_id")
#: Network unique ID
network_id = resource2.Body("network_id")
#: mac address assigned with port
mac_address = resource2.Body("mac_address")
#: List of fixes IP addresses assign to port.
fixed_ips = resource2.Body("fixed_ips")
#: Allowed address pairs
allowed_address_pairs = resource2.Body("allowed_address_pairs")
@staticmethod
def _get_id(value):
if isinstance(value, resource2.Resource):
# Don't check _alternate_id unless we need to. It's an uncommon
# case and it involves looping through the class' dict.
id = value.id or getattr(
value, value._alternate_id(),
hashlib.new('md5', str(value)).hexdigest())
return id
else:
return value
def __getattribute__(self, name):
"""Return an attribute on this instance
This is mostly a pass-through except for a specialization on
the 'id' name, as this can exist under a different name via the
`alternate_id` argument to resource.Body.
"""
if name == "id":
if name in self._body:
return self._body[name]
elif self._alternate_id():
return self._body[self._alternate_id()]
else:
return hashlib.new('md5', str(self)).hexdigest()
else:
return object.__getattribute__(self, name)
class ICCNetwork(network.Network):
service = provider_connectivity_service.ProviderConnectivityService("v2.0")
base_path = '/' + service.version + \
'/tenant_connection_requests/' \
'%(tenant_connection_request_id)s/network'
# Capabilities
allow_list = False
allow_create = False
allow_delete = False
allow_update = False
allow_get = True
def get(self, session, tenant_connection_request_id):
uri = self.base_path % {
"tenant_connection_request_id": tenant_connection_request_id
}
resp = session.get(uri, endpoint_filter=self.service)
self._translate_response(resp, has_body=True)
return self
class ICCSubnet(subnet.Subnet):
service = provider_connectivity_service.ProviderConnectivityService("v2.0")
base_path = '/' + service.version + \
'/tenant_connection_requests/' \
'%(tenant_connection_request_id)s/subnets'
id = resource2.Body("id")
tenant_connection_request_id = resource2.URI(
"tenant_connection_request_id")
# Capabilities
allow_list = True
allow_create = False
allow_delete = False
allow_update = False
allow_get = True
dhcp_server_address = resource2.Body('dhcp_server_address')
| 32.166667
| 79
| 0.648487
| 3,454
| 0.941914
| 0
| 0
| 436
| 0.118898
| 0
| 0
| 1,124
| 0.306518
|
e3b455062720d39836f878d513bb8f75e9ad6e80
| 675
|
py
|
Python
|
tests/test_gifGenerator.py
|
wmokrogulski/gifGenerator
|
fa2b36d082e32f310583935a361d7b7a2bf29fe6
|
[
"MIT"
] | null | null | null |
tests/test_gifGenerator.py
|
wmokrogulski/gifGenerator
|
fa2b36d082e32f310583935a361d7b7a2bf29fe6
|
[
"MIT"
] | 2
|
2021-12-23T11:01:14.000Z
|
2022-03-12T01:01:15.000Z
|
tests/test_gifGenerator.py
|
wmokrogulski/gifGenerator
|
fa2b36d082e32f310583935a361d7b7a2bf29fe6
|
[
"MIT"
] | null | null | null |
import unittest
from unittest import TestCase
from src.gifGenerator import GifGenerator
class TestGifGenerator(TestCase):
def setUp(self) -> None:
self.gg = GifGenerator()
def test_set_text_position(self):
position = (50, 90)
self.gg.setTextPosition(position)
self.assertEqual(self.gg.text_position, position)
def test_set_font(self):
self.assertTrue(True)
def test_load_image(self):
# path='test.png'
self.assertTrue(True)
def test_crop_images(self):
self.assertTrue(True)
def test_generate(self):
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| 20.454545
| 57
| 0.666667
| 534
| 0.791111
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.04
|
e3b8997cfd0dae36bdb5f953799806c281136e2c
| 9,915
|
py
|
Python
|
PSP/GAME/Python/python/bsddb/test/test_dbshelve.py
|
TheMindVirus/pspy
|
e9d1bba4f6b7486c3010bede93d88afdfc036492
|
[
"MIT"
] | 7
|
2015-04-06T15:17:13.000Z
|
2020-10-21T04:57:00.000Z
|
PSP/GAME/Python/python/bsddb/test/test_dbshelve.py
|
TheMindVirus/pspy
|
e9d1bba4f6b7486c3010bede93d88afdfc036492
|
[
"MIT"
] | 1
|
2021-04-11T15:01:12.000Z
|
2021-04-11T15:01:12.000Z
|
PSP/GAME/Python/python/bsddb/test/test_dbshelve.py
|
TheMindVirus/pspy
|
e9d1bba4f6b7486c3010bede93d88afdfc036492
|
[
"MIT"
] | 4
|
2016-05-16T17:53:08.000Z
|
2020-11-28T17:18:50.000Z
|
"""
TestCases for checking dbShelve objects.
"""
import sys, os, string
import tempfile, random
from pprint import pprint
from types import *
import unittest
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbshelve
except ImportError:
# For Python 2.3
from bsddb import db, dbshelve
from test_all import verbose
#----------------------------------------------------------------------
# We want the objects to be comparable so we can test dbshelve.values
# later on.
class DataClass:
def __init__(self):
self.value = random.random()
def __cmp__(self, other):
return cmp(self.value, other)
class DBShelveTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
self.do_open()
def tearDown(self):
self.do_close()
try:
os.remove(self.filename)
except os.error:
pass
def mk(self, key):
"""Turn key into an appropriate key type for this db"""
# override in child class for RECNO
return key
def populateDB(self, d):
for x in string.letters:
d[self.mk('S' + x)] = 10 * x # add a string
d[self.mk('I' + x)] = ord(x) # add an integer
d[self.mk('L' + x)] = [x] * 10 # add a list
inst = DataClass() # add an instance
inst.S = 10 * x
inst.I = ord(x)
inst.L = [x] * 10
d[self.mk('O' + x)] = inst
# overridable in derived classes to affect how the shelf is created/opened
def do_open(self):
self.d = dbshelve.open(self.filename)
# and closed...
def do_close(self):
self.d.close()
def test01_basics(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_basics..." % self.__class__.__name__
self.populateDB(self.d)
self.d.sync()
self.do_close()
self.do_open()
d = self.d
l = len(d)
k = d.keys()
s = d.stat()
f = d.fd()
if verbose:
print "length:", l
print "keys:", k
print "stats:", s
assert 0 == d.has_key(self.mk('bad key'))
assert 1 == d.has_key(self.mk('IA'))
assert 1 == d.has_key(self.mk('OA'))
d.delete(self.mk('IA'))
del d[self.mk('OA')]
assert 0 == d.has_key(self.mk('IA'))
assert 0 == d.has_key(self.mk('OA'))
assert len(d) == l-2
values = []
for key in d.keys():
value = d[key]
values.append(value)
if verbose:
print "%s: %s" % (key, value)
self.checkrec(key, value)
dbvalues = d.values()
assert len(dbvalues) == len(d.keys())
values.sort()
dbvalues.sort()
assert values == dbvalues
items = d.items()
assert len(items) == len(values)
for key, value in items:
self.checkrec(key, value)
assert d.get(self.mk('bad key')) == None
assert d.get(self.mk('bad key'), None) == None
assert d.get(self.mk('bad key'), 'a string') == 'a string'
assert d.get(self.mk('bad key'), [1, 2, 3]) == [1, 2, 3]
d.set_get_returns_none(0)
self.assertRaises(db.DBNotFoundError, d.get, self.mk('bad key'))
d.set_get_returns_none(1)
d.put(self.mk('new key'), 'new data')
assert d.get(self.mk('new key')) == 'new data'
assert d[self.mk('new key')] == 'new data'
def test02_cursors(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_cursors..." % self.__class__.__name__
self.populateDB(self.d)
d = self.d
count = 0
c = d.cursor()
rec = c.first()
while rec is not None:
count = count + 1
if verbose:
print rec
key, value = rec
self.checkrec(key, value)
rec = c.next()
del c
assert count == len(d)
count = 0
c = d.cursor()
rec = c.last()
while rec is not None:
count = count + 1
if verbose:
print rec
key, value = rec
self.checkrec(key, value)
rec = c.prev()
assert count == len(d)
c.set(self.mk('SS'))
key, value = c.current()
self.checkrec(key, value)
del c
def test03_append(self):
# NOTE: this is overridden in RECNO subclass, don't change its name.
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_append..." % self.__class__.__name__
self.assertRaises(dbshelve.DBShelveError,
self.d.append, 'unit test was here')
def checkrec(self, key, value):
# override this in a subclass if the key type is different
x = key[1]
if key[0] == 'S':
assert type(value) == StringType
assert value == 10 * x
elif key[0] == 'I':
assert type(value) == IntType
assert value == ord(x)
elif key[0] == 'L':
assert type(value) == ListType
assert value == [x] * 10
elif key[0] == 'O':
assert type(value) == InstanceType
assert value.S == 10 * x
assert value.I == ord(x)
assert value.L == [x] * 10
else:
raise AssertionError, 'Unknown key type, fix the test'
#----------------------------------------------------------------------
class BasicShelveTestCase(DBShelveTestCase):
def do_open(self):
self.d = dbshelve.DBShelf()
self.d.open(self.filename, self.dbtype, self.dbflags)
def do_close(self):
self.d.close()
class BTreeShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE
class HashShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_HASH
dbflags = db.DB_CREATE
class ThreadBTreeShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE | db.DB_THREAD
class ThreadHashShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_HASH
dbflags = db.DB_CREATE | db.DB_THREAD
#----------------------------------------------------------------------
class BasicEnvShelveTestCase(DBShelveTestCase):
def do_open(self):
self.homeDir = homeDir = os.path.join(
os.path.dirname(sys.argv[0]), 'db_home')
try: os.mkdir(homeDir)
except os.error: pass
self.env = db.DBEnv()
self.env.open(homeDir, self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE)
self.filename = os.path.split(self.filename)[1]
self.d = dbshelve.DBShelf(self.env)
self.d.open(self.filename, self.dbtype, self.dbflags)
def do_close(self):
self.d.close()
self.env.close()
def tearDown(self):
self.do_close()
import glob
files = glob.glob(os.path.join(self.homeDir, '*'))
for file in files:
os.remove(file)
class EnvBTreeShelveTestCase(BasicEnvShelveTestCase):
envflags = 0
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE
class EnvHashShelveTestCase(BasicEnvShelveTestCase):
envflags = 0
dbtype = db.DB_HASH
dbflags = db.DB_CREATE
class EnvThreadBTreeShelveTestCase(BasicEnvShelveTestCase):
envflags = db.DB_THREAD
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE | db.DB_THREAD
class EnvThreadHashShelveTestCase(BasicEnvShelveTestCase):
envflags = db.DB_THREAD
dbtype = db.DB_HASH
dbflags = db.DB_CREATE | db.DB_THREAD
#----------------------------------------------------------------------
# test cases for a DBShelf in a RECNO DB.
class RecNoShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_RECNO
dbflags = db.DB_CREATE
def setUp(self):
BasicShelveTestCase.setUp(self)
# pool to assign integer key values out of
self.key_pool = list(range(1, 5000))
self.key_map = {} # map string keys to the number we gave them
self.intkey_map = {} # reverse map of above
def mk(self, key):
if key not in self.key_map:
self.key_map[key] = self.key_pool.pop(0)
self.intkey_map[self.key_map[key]] = key
return self.key_map[key]
def checkrec(self, intkey, value):
key = self.intkey_map[intkey]
BasicShelveTestCase.checkrec(self, key, value)
def test03_append(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_append..." % self.__class__.__name__
self.d[1] = 'spam'
self.d[5] = 'eggs'
self.assertEqual(6, self.d.append('spam'))
self.assertEqual(7, self.d.append('baked beans'))
self.assertEqual('spam', self.d.get(6))
self.assertEqual('spam', self.d.get(1))
self.assertEqual('baked beans', self.d.get(7))
self.assertEqual('eggs', self.d.get(5))
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBShelveTestCase))
suite.addTest(unittest.makeSuite(BTreeShelveTestCase))
suite.addTest(unittest.makeSuite(HashShelveTestCase))
suite.addTest(unittest.makeSuite(ThreadBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(ThreadHashShelveTestCase))
suite.addTest(unittest.makeSuite(EnvBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(EnvHashShelveTestCase))
suite.addTest(unittest.makeSuite(EnvThreadBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(EnvThreadHashShelveTestCase))
suite.addTest(unittest.makeSuite(RecNoShelveTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 27.618384
| 79
| 0.558548
| 8,279
| 0.834997
| 0
| 0
| 0
| 0
| 0
| 0
| 1,557
| 0.157035
|
e3b8e41843e13fa56ad91af90735c93477b63c0f
| 2,940
|
py
|
Python
|
lib/pyexcel/pyexcel/sources/file_source_output.py
|
logice/QQ-Groups-Spider
|
a161282c6832ed40183905e96205edb5a57e8a05
|
[
"MIT"
] | null | null | null |
lib/pyexcel/pyexcel/sources/file_source_output.py
|
logice/QQ-Groups-Spider
|
a161282c6832ed40183905e96205edb5a57e8a05
|
[
"MIT"
] | null | null | null |
lib/pyexcel/pyexcel/sources/file_source_output.py
|
logice/QQ-Groups-Spider
|
a161282c6832ed40183905e96205edb5a57e8a05
|
[
"MIT"
] | 1
|
2021-04-12T07:48:42.000Z
|
2021-04-12T07:48:42.000Z
|
"""
pyexcel.sources.file
    ~~~~~~~~~~~~~~~~~~~~
Representation of file sources
:copyright: (c) 2015-2016 by Onni Software Ltd.
:license: New BSD License
"""
from pyexcel import params
from pyexcel.factory import FileSource
from pyexcel.sources.rendererfactory import RendererFactory
from pyexcel.sources import renderer
RendererFactory.register_renderers(renderer.renderers)
try:
import pyexcel_text as text
RendererFactory.register_renderers(text.renderers)
except ImportError:
pass
file_types = tuple(RendererFactory.renderer_factories.keys())
class IOSource(FileSource):
"""
    Write excel data to a file source
"""
@classmethod
def can_i_handle(cls, action, file_type):
if action == params.WRITE_ACTION:
status = file_type in file_types
else:
status = False
return status
class SheetSource(IOSource):
"""Pick up 'file_name' field and do single sheet based read and write
"""
fields = [params.FILE_NAME]
targets = (params.SHEET,)
actions = (params.WRITE_ACTION,)
def __init__(self, file_name=None, **keywords):
self.file_name = file_name
self.keywords = keywords
self.file_type = file_name.split(".")[-1]
self.renderer = RendererFactory.get_renderer(self.file_type)
def write_data(self, sheet):
self.renderer.render_sheet_to_file(self.file_name,
sheet, **self.keywords)
class BookSource(SheetSource):
"""Pick up 'file_name' field and do multiple sheet based read and write
"""
targets = (params.BOOK,)
def write_data(self, book):
self.renderer.render_book_to_file(self.file_name, book,
**self.keywords)
class WriteOnlySheetSource(IOSource):
fields = [params.FILE_TYPE]
targets = (params.SHEET,)
actions = (params.WRITE_ACTION,)
def __init__(self, file_type=None, file_stream=None, **keywords):
self.renderer = RendererFactory.get_renderer(file_type)
if file_stream:
self.content = file_stream
else:
self.content = self.renderer.get_io()
self.file_type = file_type
self.keywords = keywords
def write_data(self, sheet):
self.renderer.render_sheet_to_stream(self.content,
sheet, **self.keywords)
class WriteOnlyBookSource(WriteOnlySheetSource):
"""
    Multiple sheet data source for writing back to memory
"""
targets = (params.BOOK,)
def write_data(self, book):
self.renderer.render_book_to_stream(self.content, book,
**self.keywords)
sources = (
WriteOnlySheetSource,
WriteOnlyBookSource,
SheetSource,
BookSource
)
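# Editor's sketch (not part of the original module): a minimal illustration of
# driving SheetSource directly.  It assumes a registered "csv" renderer and a
# pyexcel Sheet instance; neither assumption comes from this file, so the
# example is kept as comments rather than executable code.
#
#   from pyexcel import Sheet
#   sheet = Sheet([[1, 2], [3, 4]])
#   source = SheetSource(file_name="out.csv")   # "out.csv" is a hypothetical path
#   source.write_data(sheet)                    # delegates to the csv renderer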
| 27.735849
| 76
| 0.618707
| 2,194
| 0.746259
| 0
| 0
| 214
| 0.072789
| 0
| 0
| 465
| 0.158163
|
e3b964ad8299bef44ea12f1a518924e1fbba8289
| 920
|
py
|
Python
|
setup.py
|
vmyrgiotis/MDF_DALEC_Grass
|
fdd168ce7845c925f8e95fc792e2204b440cca2e
|
[
"CC0-1.0"
] | null | null | null |
setup.py
|
vmyrgiotis/MDF_DALEC_Grass
|
fdd168ce7845c925f8e95fc792e2204b440cca2e
|
[
"CC0-1.0"
] | null | null | null |
setup.py
|
vmyrgiotis/MDF_DALEC_Grass
|
fdd168ce7845c925f8e95fc792e2204b440cca2e
|
[
"CC0-1.0"
] | null | null | null |
import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
VERSION = '0.1.0'
PACKAGE_NAME = 'MDF_DALEC_GRASS'
AUTHOR = 'Vasilis Myrgiotis'
AUTHOR_EMAIL = 'v.myrgioti@ed.ac.uk'
URL = 'https://github.com/vmyrgiotis/MDF_DALEC_GRASS'
LICENSE = 'MIT'
DESCRIPTION = 'A Bayesian model-data fusion algorithm for simulating carbon dynamics in grassland ecosystems'
LONG_DESCRIPTION = (HERE / "README.md").read_text()
LONG_DESC_TYPE = "text/markdown"
INSTALL_REQUIRES = ["numpy", "pandas","spotpy","sklearn","sentinelhub", "shapely", "datetime", "geopandas", "cdsapi"]
PYTHON_REQUIRES = '>=3.8'
setup(name=PACKAGE_NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESC_TYPE,
author=AUTHOR,
license=LICENSE,
author_email=AUTHOR_EMAIL,
url=URL,
install_requires=INSTALL_REQUIRES,
packages=find_packages()
)
| 28.75
| 117
| 0.773913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 327
| 0.355435
|
e3ba2aa1467f1469e9c62d6360d6ba267f4c6b98
| 752
|
py
|
Python
|
setup.py
|
guma44/croo
|
5cddee4c3163698cd9f265638e76671fef415baa
|
[
"MIT"
] | null | null | null |
setup.py
|
guma44/croo
|
5cddee4c3163698cd9f265638e76671fef415baa
|
[
"MIT"
] | null | null | null |
setup.py
|
guma44/croo
|
5cddee4c3163698cd9f265638e76671fef415baa
|
[
"MIT"
] | null | null | null |
import setuptools
from croo import croo_args
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='croo',
version=croo_args.__version__,
scripts=['bin/croo'],
python_requires='>3.4.1',
author='Jin Lee',
author_email='leepc12@gmail.com',
description='CRomwell Output Organizer',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/ENCODE-DCC/croo',
packages=setuptools.find_packages(exclude=['examples', 'docs']),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
],
install_requires=['caper']
)
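# Editor's note (hedged): with this setup.py the package is normally installed
# from the repository root with `pip install .` (or `pip install -e .` during
# development), which also places the `bin/croo` script on PATH.  The exact
# command is a generic setuptools assumption, not taken from the project docs.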
| 28.923077
| 68
| 0.666223
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 280
| 0.37234
|
e3bc8d2fb6f6907f9468220745bf4d9d7f0ccd81
| 5,142
|
py
|
Python
|
source/estimators/estimator.py
|
mingweima/rldsge
|
ad40af982f455b65c5f407f6aa082e4caf7322a6
|
[
"MIT"
] | null | null | null |
source/estimators/estimator.py
|
mingweima/rldsge
|
ad40af982f455b65c5f407f6aa082e4caf7322a6
|
[
"MIT"
] | null | null | null |
source/estimators/estimator.py
|
mingweima/rldsge
|
ad40af982f455b65c5f407f6aa082e4caf7322a6
|
[
"MIT"
] | null | null | null |
from typing import Dict
import numpy as np
from ..envs.env import StructuralModel
from ..utils.lik_func import *
from ..utils.useful_class import ParameterGrid
class Estimator(ABC):
"""An Estimator takes in a (trained) solver and relevant params
and outputs estimated structural params
"""
def __init__(self, solver: Solver = None, estimator_params: dict = None):
self.solver = solver
self.env = solver.env
self.estimator_params = estimator_params
self.num_structural_params = self.env.env_params['num_structural_params']
self.estimated_params = None
@abstractmethod
def estimate(self) -> dict:
"""Outputs estimation using a dict (e.g. dict['k'] = 0.95)"""
"""How?"""
return self.estimator_params
class SMMEstimator(Estimator, ABC):
"""Estimator using Simulated Method of Moments"""
def __init__(self,
data: np.ndarray = None, # (nsamples, N, T) or (N, T); N: obs dim, T: eps length
solver: Solver = None,
env: StructuralModel = None,
estimator_params: dict = None):
        # NOTE: the Estimator base class does not accept an ``env`` argument,
        # so pass only the supported keywords and apply the explicit env afterwards.
        super().__init__(solver=solver, estimator_params=estimator_params)
        if env is not None:
            self.env = env
self.data = data
self.estimator_params.setdefault("verbose", True)
self.estimator_params.setdefault("weight_matrix", "identity") # weight matrix type for GMM
self.estimator_params.setdefault("sample_size", 1000)
assert "grid" in self.estimator_params
assert "num_moments" in self.estimator_params
self.estimator_params.setdefault("grid", ParameterGrid({'this_is_an_example': [0.1]}))
self.estimator_params.setdefault("n_moment", 1)
if self.estimator_params['weight_matrix'] not in ["identity"]:
raise ValueError(f"No weight matrix {self.estimator_params['weight_matrix']}")
if self.estimator_params['weight_matrix'] == 'identity':
self.weight_matrix = np.eye(self.estimator_params['n_moment'])
def estimate(self) -> Dict[str, float]:
"""Use SMM to estimate structural params
Returns a dict of estimated structural params"""
running_min_error = np.inf
running_best_param = None
for param_dict in self.estimator_params['grid']:
gmm_error = self._gmm_error(param_dict, self.data)
if gmm_error < running_min_error:
running_min_error = gmm_error
running_best_param = param_dict
return running_best_param
@staticmethod
def _data_moments(obs_vec: np.ndarray) -> np.ndarray:
moments = []
if obs_vec.ndim == 2: # (N, T)
for i in range(obs_vec.shape[0]):
mean = obs_vec[i, :].mean()
moments = np.append(moments, mean)
variance = obs_vec[i, :].var()
moments = np.append(moments, variance)
else:
assert obs_vec.ndim == 3 # (nsample, N, T)
for i in range(obs_vec.shape[1]):
mean = obs_vec[:, i, :].mean(axis=1).mean()
moments = np.append(moments, mean)
variance = obs_vec[:, i, :].var(axis=1).mean()
moments = np.append(moments, variance)
return moments
    def _gmm_error(self, param_dict: Dict[str, float], data_obs_vec: np.ndarray):
        """Perform GMM on a single parameter dict.
        :param param_dict: a dict like {'delta': 0.1, 'gamma': 1}
        :returns: a float measuring how much error this param_dict generates in simulated samples"""
sample_size = self.estimator_params['sample_size']
# use: param_dict, sample_size, self.weight_matrix, self.solver, self.env
sim_obs_vec = None
for n in range(sample_size):
obs_sample = self.solver.sample(
param_dict=param_dict) # np array of size (N, T); in WhitedBasicModel N=2 (k, i)
obs_sample = obs_sample.reshape(1, *obs_sample.shape) # obs_sample.shape = (1, N, T)
# some method to concat/aggregate samples
sim_obs_vec = obs_sample if sim_obs_vec is None else np.append(sim_obs_vec, obs_sample, axis=0)
moms_data = self._data_moments(data_obs_vec)
moms_model = self._data_moments(sim_obs_vec)
err = (moms_model - moms_data) / (moms_data + 1.e-9)
crit_val = err.T @ self.weight_matrix @ err
return crit_val
class LikelihoodEstimator(Estimator, ABC):
"""General likelihood estimator using some kind of given likelihood function"""
def __init__(self, solver: Solver = None, estimator_params: dict = None):
super().__init__(solver=solver, estimator_params=estimator_params)
assert "lik_func" in estimator_params # class LikFunc object (likelihood function) from utils.lik_func
self.lik_func = estimator_params['lik_func']
assert isinstance(self.lik_func, LikFunc)
# TODO: JZH
if __name__ == "__main__":
grid = {
'delta': [0.1, 0.2, 0.3],
'gamma': [1, 10]
}
pg = ParameterGrid(grid)
for g in pg:
print(g)
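    # Editor's sketch (not in the original module): a self-contained numpy
    # illustration of the identity-weighted SMM criterion computed by
    # SMMEstimator._data_moments / _gmm_error above.  Shapes follow the
    # (N, T) convention used there; the concrete numbers are made up.
    def _toy_moments(obs):
        # mean and variance of every observable row, stacked into one vector
        return np.array([stat for row in obs for stat in (row.mean(), row.var())])
    toy_data = np.random.default_rng(0).normal(size=(2, 100))   # "observed" (N, T) panel
    toy_sim = np.random.default_rng(1).normal(size=(2, 100))    # "simulated" panel for one parameter guess
    err = (_toy_moments(toy_sim) - _toy_moments(toy_data)) / (_toy_moments(toy_data) + 1.e-9)
    crit_val = err.T @ np.eye(err.size) @ err                   # identity weight matrix, as in _gmm_error
    print('toy SMM criterion:', crit_val)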
| 42.495868
| 112
| 0.632828
| 4,801
| 0.933683
| 0
| 0
| 916
| 0.178141
| 0
| 0
| 1,330
| 0.258654
|
e3bd47079e9b2036b424cb4e9c92e2174a230006
| 1,269
|
py
|
Python
|
Algorithm.Python/OptionDataNullReferenceRegressionAlgorithm.py
|
BlackBoxAM/Lean
|
5ea9f04b104d27f0fcfe3a383a3a60ca12206d99
|
[
"Apache-2.0"
] | 6,580
|
2015-01-12T16:48:44.000Z
|
2022-03-31T22:05:09.000Z
|
Algorithm.Python/OptionDataNullReferenceRegressionAlgorithm.py
|
BlackBoxAM/Lean
|
5ea9f04b104d27f0fcfe3a383a3a60ca12206d99
|
[
"Apache-2.0"
] | 3,392
|
2015-01-12T17:44:07.000Z
|
2022-03-30T20:34:03.000Z
|
Algorithm.Python/OptionDataNullReferenceRegressionAlgorithm.py
|
BlackBoxAM/Lean
|
5ea9f04b104d27f0fcfe3a383a3a60ca12206d99
|
[
"Apache-2.0"
] | 3,354
|
2015-01-12T16:58:31.000Z
|
2022-03-31T00:56:03.000Z
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### This algorithm is a regression test for issue #2018 and PR #2038.
### </summary>
class OptionDataNullReferenceRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2016, 12, 1)
self.SetEndDate(2017, 1, 1)
self.SetCash(500000)
self.AddEquity("DUST")
option = self.AddOption("DUST")
option.SetFilter(self.UniverseFunc)
def UniverseFunc(self, universe):
return universe.IncludeWeeklys().Strikes(-1, +1).Expiration(timedelta(25), timedelta(100))
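# Editor's note (hedged): the filter above keeps weekly option contracts,
# strikes within one strike step of the at-the-money strike (Strikes(-1, +1)),
# and expirations between 25 and 100 days out.  This reading follows the usual
# Lean OptionFilterUniverse semantics and is an interpretation, not something
# stated in the algorithm itself.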
| 37.323529
| 98
| 0.735225
| 449
| 0.353822
| 0
| 0
| 0
| 0
| 0
| 0
| 783
| 0.617021
|
e3bda12509b429c895c643f26b992aa471887764
| 1,371
|
py
|
Python
|
examples/sharedlinks/sharedlinks-backend/links/models.py
|
gcbirzan/django-rest-registration
|
1a9da937c283d03d1fce1a68322a702e14692c79
|
[
"MIT"
] | 329
|
2018-05-09T13:10:37.000Z
|
2022-03-25T11:05:20.000Z
|
examples/sharedlinks/sharedlinks-backend/links/models.py
|
gcbirzan/django-rest-registration
|
1a9da937c283d03d1fce1a68322a702e14692c79
|
[
"MIT"
] | 167
|
2018-04-21T00:28:17.000Z
|
2022-03-30T09:24:52.000Z
|
examples/sharedlinks/sharedlinks-backend/links/models.py
|
gcbirzan/django-rest-registration
|
1a9da937c283d03d1fce1a68322a702e14692c79
|
[
"MIT"
] | 97
|
2018-05-09T14:17:59.000Z
|
2022-02-23T08:46:30.000Z
|
from django.db import models
from django.contrib.auth.models import User
class Link(models.Model):
url = models.URLField()
title = models.CharField(max_length=255)
reporter = models.ForeignKey(
User,
on_delete=models.SET_NULL,
related_name='reported_links',
null=True,
blank=False,
)
def __str__(self):
return '{self.title} ({self.url})'.format(self=self)
def get_num_of_positive_votes(self):
return self.votes.filter(positive=True).count()
def get_num_of_negative_votes(self):
return self.votes.filter(negative=True).count()
class LinkVote(models.Model):
class Meta:
unique_together = (
('link', 'voter'),
)
link = models.ForeignKey(
Link,
on_delete=models.CASCADE,
related_name='votes',
)
voter = models.ForeignKey(
User,
on_delete=models.SET_NULL,
related_name='votes',
null=True,
blank=False,
)
positive = models.BooleanField()
negative = models.BooleanField()
def __str__(self):
if self.positive:
vote = 'positive'
elif self.negative:
vote = 'negative'
else:
vote = 'neutral'
return '{vote} vote for {self.link} by {self.voter}'.format(
vote=vote, self=self)
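# Editor's sketch (not part of the original module): typical ORM usage for the
# models above.  It assumes a configured Django project with migrations applied
# and an existing `user`; kept as comments since it cannot run stand-alone.
#
#   link = Link.objects.create(url="https://example.com", title="Example", reporter=user)
#   LinkVote.objects.create(link=link, voter=user, positive=True, negative=False)
#   link.get_num_of_positive_votes()   # -> 1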
| 23.637931
| 68
| 0.592268
| 1,292
| 0.942378
| 0
| 0
| 0
| 0
| 0
| 0
| 144
| 0.105033
|
e3bdcff4bd778ceff3ed0e2ca2a1821228f999c6
| 7,106
|
py
|
Python
|
hpc_rll/rl_utils/ppo.py
|
mingzhang96/DI-hpc
|
5431c283a91b77df7c6a86fb0affa60099d4bb31
|
[
"Apache-2.0"
] | 64
|
2021-07-08T02:18:08.000Z
|
2022-02-28T09:52:57.000Z
|
hpc_rll/rl_utils/ppo.py
|
mingzhang96/DI-hpc
|
5431c283a91b77df7c6a86fb0affa60099d4bb31
|
[
"Apache-2.0"
] | null | null | null |
hpc_rll/rl_utils/ppo.py
|
mingzhang96/DI-hpc
|
5431c283a91b77df7c6a86fb0affa60099d4bb31
|
[
"Apache-2.0"
] | 3
|
2021-07-14T08:58:45.000Z
|
2022-03-30T12:36:46.000Z
|
import torch
import torch.nn.functional as F
from typing import Optional
from collections import namedtuple
import hpc_rl_utils
# hpc version only supports cuda
hpc_ppo_loss = namedtuple('hpc_ppo_loss', ['policy_loss', 'value_loss', 'entropy_loss'])
hpc_ppo_info = namedtuple('hpc_ppo_info', ['approx_kl', 'clipfrac'])
class PPOFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, logits_new, logits_old, action, value_new, value_old, adv, return_, weight,
clip_ratio, use_value_clip, dual_clip, logits_new_prob, logits_new_entropy, logits_new_grad_logits,
logits_new_grad_prob, logits_new_grad_entropy, logit_old_prob,
grad_policy_loss_buf, grad_value_loss_buf, grad_entropy_loss_buf,
policy_loss, value_loss, entropy_loss, approx_kl, clipfrac, grad_value, grad_logits_new):
inputs = [logits_new, logits_old, action, value_new, value_old, adv, return_, weight]
outputs = [logits_new_prob, logits_new_entropy, logits_new_grad_logits,
logits_new_grad_prob, logits_new_grad_entropy, logit_old_prob,
grad_policy_loss_buf, grad_value_loss_buf, grad_entropy_loss_buf,
policy_loss, value_loss, entropy_loss, approx_kl, clipfrac]
hpc_rl_utils.PPOForward(inputs, outputs, use_value_clip, clip_ratio, dual_clip)
bp_inputs = [grad_policy_loss_buf, grad_value_loss_buf, grad_entropy_loss_buf,
logits_new_grad_logits, logits_new_grad_prob, logits_new_grad_entropy]
bp_outputs = [grad_value, grad_logits_new]
ctx.bp_inputs = bp_inputs
ctx.bp_outputs = bp_outputs
return policy_loss, value_loss, entropy_loss, approx_kl, clipfrac
@staticmethod
def backward(ctx, grad_policy_loss, grad_value_loss, grad_entropy_loss, grad_approx_kl, grad_clipfrac):
inputs = [grad_policy_loss, grad_value_loss, grad_entropy_loss]
for var in ctx.bp_inputs:
inputs.append(var)
outputs = ctx.bp_outputs
hpc_rl_utils.PPOBackward(inputs, outputs)
grad_value = outputs[0]
grad_logits_new = outputs[1]
return grad_logits_new, None, None, grad_value, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
class PPO(torch.nn.Module):
"""
    Overview:
Implementation of Proximal Policy Optimization (arXiv:1707.06347) with value_clip and dual_clip
Interface:
__init__, forward
"""
def __init__(self, B, N):
r"""
        Overview:
initialization of PPO
Arguments:
- B (:obj:`int`): batch size
- N (:obj:`int`): number of output
"""
super().__init__()
self.register_buffer('weight', torch.ones(B))
self.register_buffer('logits_new_prob', torch.zeros(B))
self.register_buffer('logits_new_entropy', torch.zeros(B))
self.register_buffer('logits_new_grad_logits', torch.zeros(B, N))
self.register_buffer('logits_new_grad_prob', torch.zeros(B, N))
self.register_buffer('logits_new_grad_entropy', torch.zeros(B, N))
self.register_buffer('logit_old_prob', torch.zeros(B))
self.register_buffer('grad_policy_loss_buf', torch.zeros(B))
self.register_buffer('grad_value_loss_buf', torch.zeros(B))
self.register_buffer('grad_entropy_loss_buf', torch.zeros(B))
self.register_buffer('policy_loss', torch.zeros(1))
self.register_buffer('value_loss', torch.zeros(1))
self.register_buffer('entropy_loss', torch.zeros(1))
self.register_buffer('approx_kl', torch.zeros(1))
self.register_buffer('clipfrac', torch.zeros(1))
self.register_buffer('grad_value', torch.zeros(B))
self.register_buffer('grad_logits_new', torch.zeros(B, N))
def forward(self, logits_new, logits_old, action, value_new, value_old, adv, return_,
weight = None,
clip_ratio: float = 0.2,
use_value_clip: bool = True,
dual_clip: Optional[float] = None
):
"""
Overview:
forward of PPO
Arguments:
            - logits_new (:obj:`torch.FloatTensor`): :math:`(B, N)`, where B is batch size and N is action dim
            - logits_old (:obj:`torch.FloatTensor`): :math:`(B, N)`
- action (:obj:`torch.LongTensor`): :math:`(B, )`
- value_new (:obj:`torch.FloatTensor`): :math:`(B, )`
- value_old (:obj:`torch.FloatTensor`): :math:`(B, )`
- adv (:obj:`torch.FloatTensor`): :math:`(B, )`
- return (:obj:`torch.FloatTensor`): :math:`(B, )`
- weight (:obj:`torch.FloatTensor` or :obj:`None`): :math:`(B, )`
- clip_ratio (:obj:`float`): the ppo clip ratio for the constraint of policy update, defaults to 0.2
- use_value_clip (:obj:`bool`): whether to use clip in value loss with the same ratio as policy
            - dual_clip (:obj:`float`): a parameter c mentioned in arXiv:1912.09729 Equ. 5, should be in [1, inf),\
defaults to 5.0, if you don't want to use it, set this parameter to None
Returns:
            - ppo_loss (:obj:`namedtuple`): the ppo loss item, all of them are differentiable 0-dim tensors
            - ppo_info (:obj:`namedtuple`): the ppo optim information for monitoring, all of them are Python scalars
.. note::
            adv is an already-normalized value, (adv - adv.mean()) / (adv.std() + 1e-8), and there are many
            ways to compute this mean and std, e.g. over the data buffer or over the train batch, so we don't
            couple this part into ppo_error; you can refer to our examples for the different ways.
"""
assert(logits_new.is_cuda)
assert(logits_old.is_cuda)
assert(action.is_cuda)
assert(value_new.is_cuda)
assert(value_old.is_cuda)
assert(adv.is_cuda)
assert(return_.is_cuda)
if weight is None:
weight = self.weight
else:
assert(weight.is_cuda)
assert dual_clip is None or dual_clip > 1.0, "dual_clip value must be greater than 1.0, but get value: {}".format(dual_clip)
if dual_clip is None:
            dual_clip = 0.0
policy_loss, value_loss, entropy_loss, approx_kl, clipfrac = PPOFunction.apply(
logits_new, logits_old, action, value_new, value_old, adv, return_, weight,
clip_ratio, use_value_clip, dual_clip,
self.logits_new_prob, self.logits_new_entropy, self.logits_new_grad_logits,
self.logits_new_grad_prob, self.logits_new_grad_entropy, self.logit_old_prob,
self.grad_policy_loss_buf, self.grad_value_loss_buf, self.grad_entropy_loss_buf,
self.policy_loss, self.value_loss, self.entropy_loss, self.approx_kl, self.clipfrac,
self.grad_value, self.grad_logits_new)
return hpc_ppo_loss(policy_loss, value_loss, entropy_loss), hpc_ppo_info(approx_kl.item(), clipfrac.item())
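# Editor's sketch (not part of the original module): intended tensor shapes for
# PPO.forward, following the docstring above.  It needs a CUDA device and the
# compiled hpc_rl_utils extension, so it is left as comments.
#
#   B, N = 64, 6                                   # batch size and action dim (made-up numbers)
#   ppo = PPO(B, N).cuda()
#   logits_new = torch.randn(B, N, device='cuda', requires_grad=True)
#   logits_old = torch.randn(B, N, device='cuda')
#   action = torch.randint(0, N, (B,), device='cuda')
#   value_new = torch.randn(B, device='cuda', requires_grad=True)
#   value_old = torch.randn(B, device='cuda')
#   adv = torch.randn(B, device='cuda')
#   return_ = torch.randn(B, device='cuda')
#   loss, info = ppo(logits_new, logits_old, action, value_new, value_old, adv, return_)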
| 47.373333
| 192
| 0.659161
| 6,781
| 0.954264
| 0
| 0
| 1,954
| 0.274979
| 0
| 0
| 2,450
| 0.344779
|
e3be7a53e508b992ad117b38ccc98afaeeef9017
| 1,069
|
py
|
Python
|
src/monitoring_service/metrics.py
|
netcriptus/raiden-services
|
3955d91852c616f6ba0a3a979757edbd852b2c6d
|
[
"MIT"
] | 13
|
2019-02-07T23:23:33.000Z
|
2021-07-03T16:00:53.000Z
|
src/monitoring_service/metrics.py
|
netcriptus/raiden-services
|
3955d91852c616f6ba0a3a979757edbd852b2c6d
|
[
"MIT"
] | 1,095
|
2019-01-21T09:30:57.000Z
|
2022-03-25T05:13:30.000Z
|
src/monitoring_service/metrics.py
|
netcriptus/raiden-services
|
3955d91852c616f6ba0a3a979757edbd852b2c6d
|
[
"MIT"
] | 18
|
2019-01-21T09:17:19.000Z
|
2022-02-23T15:53:17.000Z
|
from prometheus_client import Counter
from raiden.utils.typing import TokenAmount
from raiden_libs.metrics import ( # noqa: F401, pylint: disable=unused-import
ERRORS_LOGGED,
EVENTS_EXCEPTIONS_RAISED,
EVENTS_PROCESSING_TIME,
MESSAGES_EXCEPTIONS_RAISED,
MESSAGES_PROCESSING_TIME,
REGISTRY,
ErrorCategory,
MetricsEnum,
collect_event_metrics,
collect_message_metrics,
get_metrics_for_label,
)
class Who(MetricsEnum):
US = "us"
THEY = "they"
REWARD_CLAIMS = Counter(
"economics_reward_claims_successful_total",
"The number of overall successful reward claims",
labelnames=[Who.label_name()],
registry=REGISTRY,
)
REWARD_CLAIMS_TOKEN = Counter(
"economics_reward_claims_token_total",
"The amount of token earned by reward claims",
labelnames=[Who.label_name()],
registry=REGISTRY,
)
def report_increased_reward_claims(amount: TokenAmount, who: Who) -> None:
get_metrics_for_label(REWARD_CLAIMS, who).inc()
get_metrics_for_label(REWARD_CLAIMS_TOKEN, who).inc(float(amount))
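# Editor's sketch (not part of the original module): how the reporting helper
# above is typically called.  TokenAmount is, to the best of my knowledge, a
# NewType over int in raiden, so the literal below should be acceptable; this
# is an assumption, hence the example stays as comments.
#
#   report_increased_reward_claims(TokenAmount(10), Who.US)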
| 25.452381
| 78
| 0.750234
| 55
| 0.05145
| 0
| 0
| 0
| 0
| 0
| 0
| 225
| 0.210477
|
e3be9c37370787ab104874a6e05f24ddb94436e5
| 9,774
|
py
|
Python
|
helper/fetch_funcdata.py
|
SysSec-KAIST/FirmKit
|
6d8408e1336ed0b5d42d9722e0918888b3f3b424
|
[
"MIT"
] | 3
|
2022-01-05T22:04:09.000Z
|
2022-03-28T07:01:48.000Z
|
helper/fetch_funcdata.py
|
SysSec-KAIST/FirmKit
|
6d8408e1336ed0b5d42d9722e0918888b3f3b424
|
[
"MIT"
] | null | null | null |
helper/fetch_funcdata.py
|
SysSec-KAIST/FirmKit
|
6d8408e1336ed0b5d42d9722e0918888b3f3b424
|
[
"MIT"
] | null | null | null |
# modified from TikNib/tiknib/ida/fetch_funcdata_v7.5.py
import os
import sys
import string
from hashlib import sha1
from collections import defaultdict
import time
import pprint as pp
import idautils
import idc
import idaapi
import ida_pro
import ida_nalt
import ida_bytes
sys.path.append(os.path.abspath("./TikNib"))
from tiknib.utils import demangle, get_arch, init_idc, parse_fname, store_func_data
printset = set(string.printable)
isprintable = lambda x: set(x).issubset(printset)
# find consts
def get_consts(start_addr, end_addr):
consts = []
for h in idautils.Heads(start_addr, end_addr):
insn = DecodeInstruction(h)
if insn:
for op in insn.ops:
if op.type == idaapi.o_imm:
# get operand value
imm_value = op.value
                    # check if the address is loaded in the idb
if not ida_bytes.is_loaded(imm_value):
consts.append(imm_value)
return consts
# find strings
def get_strings(start_addr, end_addr):
strings = []
for h in idautils.Heads(start_addr, end_addr):
for ref in idautils.DataRefsFrom(h):
t = idc.get_str_type(ref)
if isinstance(t, int) and t >= 0:
s = idc.get_strlit_contents(ref)
if isinstance(s, bytes):
s = s.decode()
if s and isprintable(s):
strings.append([h, s, t, ref])
return strings
# This function returns a caller map and a callee map for each function.
def get_call_graph():
callee_map = defaultdict(list)
caller_map = defaultdict(list)
for callee_ea in idautils.Functions():
callee = idaapi.get_func(callee_ea)
        # TODO: Sometimes IDA returns a false result, so we need to check this
if not callee:
continue
callee_name = idc.get_func_name(callee_ea)
# TODO: check flow boolean 1
for caller_ea in CodeRefsTo(callee_ea, 1):
caller = idaapi.get_func(caller_ea)
            # TODO: Sometimes IDA returns a false result, so we need to check
if not caller:
continue
caller_name = idc.get_func_name(caller_ea)
# TODO: check the correction - caller_ea -> callee_ea
callee_map[caller_name].append([callee_name, callee_ea])
caller_map[callee_name].append([caller_name, caller_ea])
return caller_map, callee_map
# This function returns edges and updates caller_map and callee_map
def get_bb_graph(caller_map, callee_map):
edge_map = {}
bb_callee_map = {}
for func_ea in idautils.Functions():
func = idaapi.get_func(func_ea)
if not func or func.start_ea == idaapi.BADADDR or func.end_ea == idaapi.BADADDR:
continue
# TODO: study how to use flags
graph = idaapi.FlowChart(func, flags=idaapi.FC_PREDS)
func_name = idc.get_func_name(func.start_ea)
edge_map[func_name] = []
bb_callee_map[func_name] = []
for bb in graph:
if bb.start_ea == idaapi.BADADDR or bb.end_ea == idaapi.BADADDR:
continue
for succbb in bb.succs():
edge_map[func_name].append((bb.id, succbb.id))
for callee_name, callee_ea in callee_map[func_name]:
# Get address where current function calls a callee.
if bb.start_ea <= callee_ea < bb.end_ea:
bb_callee_map[func_name].append((bb.id, callee_name, callee_ea))
return edge_map, bb_callee_map
def get_type(addr):
tif = idaapi.tinfo_t()
ida_nalt.get_tinfo(tif, addr)
funcdata = idaapi.func_type_data_t()
tif.get_func_details(funcdata)
func_type = idaapi.print_tinfo("", 0, 0, PRTYPE_1LINE, tif, "", "")
ret_type = idaapi.print_tinfo("", 0, 0, PRTYPE_1LINE, funcdata.rettype, "", "")
args = []
for i in range(funcdata.size()):
arg_type = idaapi.print_tinfo("", 0, 0, PRTYPE_1LINE, funcdata[i].type, "", "")
args.append([i, funcdata[i].name, arg_type, funcdata[i].argloc.atype()])
return [func_type, ret_type, args]
def get_bin_path():
bin_path = ida_nalt.get_input_file_path()
if not os.path.exists(bin_path):
bin_path = idc.get_idb_path().replace(".idb", "")
return bin_path
def main():
# Get IDA default information
bin_path = get_bin_path()
with open(bin_path, "rb") as f:
bin_hash = sha1(f.read()).hexdigest()
img_base = idaapi.get_imagebase()
info = idaapi.get_inf_structure()
if info.is_64bit():
bits = 64
elif info.is_32bit():
bits = 32
else:
bits = 16
endian = "little"
if info.is_be():
endian = "big"
arch = "_".join([info.procName, str(bits), endian])
arch = get_arch(arch)
package = ""
compiler = ""
opti = ""
other_option = ""
bin_name = os.path.basename(bin_path)
# Parse option information
# package, compiler, arch, opti, bin_name = parse_fname(bin_path)
# if "_noinline" in bin_path:
# other_option = "noinline"
# elif "_pie" in bin_path:
# other_option = "pie"
# elif "_lto" in bin_path:
# other_option = "lto"
# else:
# other_option = "normal"
# Prepare default information for processing
caller_map, callee_map = get_call_graph()
edge_map, bb_callee_map = get_bb_graph(caller_map, callee_map)
# Now extract function information
func_data = []
for idx, addr in enumerate(list(idautils.Functions())):
function = idaapi.get_func(addr)
if (
not function
or function.start_ea == idaapi.BADADDR
or function.end_ea == idaapi.BADADDR
):
continue
# IDA's default function information
func_name = get_func_name(addr).strip()
demangled_name, demangled_full_name = demangle(func_name)
graph = idaapi.FlowChart(function, flags=idaapi.FC_PREDS)
data = idc.get_bytes(addr, function.size()) or ""
data_hash = sha1(data).hexdigest()
stack_size = get_frame_size(addr)
# Get imported callees. Note that the segment name is used because
# idaapi.get_import_module_name() sometimes returns bad results ...
imported_callees = []
if func_name in callee_map:
imported_callees = list(
filter(
lambda x: get_segm_name(x[1]) != get_segm_name(addr),
callee_map[func_name],
)
)
# Get type information from IDA
func_type, ret_type, args = get_type(addr)
# Prepare basic block information for feature extraction
func_strings = []
func_consts = []
bb_data = []
for bb in graph:
if bb.start_ea == idaapi.BADADDR or bb.end_ea == idaapi.BADADDR:
continue
bb_size = bb.end_ea - bb.start_ea
block_data = idc.get_bytes(bb.start_ea, bb_size) or b""
block_data_hash = sha1(block_data).hexdigest()
bb_strings = get_strings(bb.start_ea, bb.end_ea)
bb_consts = get_consts(bb.start_ea, bb.end_ea)
bb_callees = list(filter(lambda x: x[0] == bb.id, bb_callee_map[func_name]))
bb_data.append(
{
"size": bb_size,
"block_id": bb.id,
"startEA": bb.start_ea,
"endEA": bb.end_ea,
"type": bb.type,
"is_ret": idaapi.is_ret_block(bb.type),
"hash": block_data_hash,
"callees": bb_callees,
"strings": bb_strings,
"consts": bb_consts,
}
)
func_strings.extend(bb_strings)
func_consts.extend(bb_consts)
func_data.append(
{
"ida_idx": idx,
"seg_name": get_segm_name(addr),
"name": func_name,
"demangled_name": demangled_name,
"demangled_full_name": demangled_full_name,
"hash": data_hash,
"size": function.size(),
"startEA": function.start_ea,
"endEA": function.end_ea,
"cfg_size": graph.size,
"img_base": img_base,
"bin_path": bin_path,
"bin_hash": bin_hash,
"bin_offset": addr - img_base,
"stack_size": stack_size,
"package": package,
"compiler": compiler,
"arch": arch,
"opti": opti,
"others": other_option,
"bin_name": bin_name,
"func_type": func_type,
"ret_type": ret_type,
"args": args,
"callers": caller_map[func_name],
"callees": callee_map[func_name],
"imported_callees": imported_callees,
"cfg": edge_map[func_name],
"strings": func_strings,
"consts": func_consts,
"bb_data": bb_data,
}
)
return func_data
init_idc()
try:
func_data = main()
except:
import traceback
traceback.print_exc()
ida_pro.qexit(1)
else:
bin_path = get_bin_path()
store_func_data(bin_path, func_data)
ida_pro.qexit(0)
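# Editor's note (hedged): this script is written to run inside IDA Pro, e.g. in
# a headless batch invocation along the lines of
#   idat64 -A -S"helper/fetch_funcdata.py" target_binary
# with the TikNib checkout available at ./TikNib (see the sys.path.append at the
# top).  The exact command line is an assumption based on common IDA batch
# usage, not taken from the repository.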
| 34.294737
| 89
| 0.561797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,697
| 0.173624
|
e3c3ec76a20176afe22ba2e37b489b70bdc6e8aa
| 20,109
|
py
|
Python
|
AwesomeService/coveo-blitz-thrift/src/main/python/awesome/AwesomeService.py
|
coveord/Blitz-2015
|
9d8a0fbc3b4ca7cfdce9a3aea0efec205070e946
|
[
"Apache-2.0"
] | 4
|
2015-01-13T00:27:20.000Z
|
2015-01-19T21:21:18.000Z
|
AwesomeService/coveo-blitz-thrift/src/main/python/awesome/AwesomeService.py
|
Coveo/Blitz-2015
|
9d8a0fbc3b4ca7cfdce9a3aea0efec205070e946
|
[
"Apache-2.0"
] | null | null | null |
AwesomeService/coveo-blitz-thrift/src/main/python/awesome/AwesomeService.py
|
Coveo/Blitz-2015
|
9d8a0fbc3b4ca7cfdce9a3aea0efec205070e946
|
[
"Apache-2.0"
] | 1
|
2016-03-11T18:35:02.000Z
|
2016-03-11T18:35:02.000Z
|
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def getData(self, request):
"""
Gets data from your service. The type and format of the requests are defined in the documentation.
Parameters:
- request
"""
pass
def reset(self):
pass
def ping(self):
pass
def handleMapReduceResult(self, name, data):
"""
Parameters:
- name
- data
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def getData(self, request):
"""
Gets data from your service. The type and format of the requests are defined in the documentation.
Parameters:
- request
"""
self.send_getData(request)
return self.recv_getData()
def send_getData(self, request):
self._oprot.writeMessageBegin('getData', TMessageType.CALL, self._seqid)
args = getData_args()
args.request = request
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getData(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getData_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getData failed: unknown result");
def reset(self):
self.send_reset()
self.recv_reset()
def send_reset(self):
self._oprot.writeMessageBegin('reset', TMessageType.CALL, self._seqid)
args = reset_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_reset(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = reset_result()
result.read(iprot)
iprot.readMessageEnd()
return
def ping(self):
self.send_ping()
return self.recv_ping()
def send_ping(self):
self._oprot.writeMessageBegin('ping', TMessageType.CALL, self._seqid)
args = ping_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_ping(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = ping_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "ping failed: unknown result");
def handleMapReduceResult(self, name, data):
"""
Parameters:
- name
- data
"""
self.send_handleMapReduceResult(name, data)
self.recv_handleMapReduceResult()
def send_handleMapReduceResult(self, name, data):
self._oprot.writeMessageBegin('handleMapReduceResult', TMessageType.CALL, self._seqid)
args = handleMapReduceResult_args()
args.name = name
args.data = data
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_handleMapReduceResult(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = handleMapReduceResult_result()
result.read(iprot)
iprot.readMessageEnd()
return
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["getData"] = Processor.process_getData
self._processMap["reset"] = Processor.process_reset
self._processMap["ping"] = Processor.process_ping
self._processMap["handleMapReduceResult"] = Processor.process_handleMapReduceResult
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_getData(self, seqid, iprot, oprot):
args = getData_args()
args.read(iprot)
iprot.readMessageEnd()
result = getData_result()
result.success = self._handler.getData(args.request)
oprot.writeMessageBegin("getData", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_reset(self, seqid, iprot, oprot):
args = reset_args()
args.read(iprot)
iprot.readMessageEnd()
result = reset_result()
self._handler.reset()
oprot.writeMessageBegin("reset", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_ping(self, seqid, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
result.success = self._handler.ping()
oprot.writeMessageBegin("ping", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_handleMapReduceResult(self, seqid, iprot, oprot):
args = handleMapReduceResult_args()
args.read(iprot)
iprot.readMessageEnd()
result = handleMapReduceResult_result()
self._handler.handleMapReduceResult(args.name, args.data)
oprot.writeMessageBegin("handleMapReduceResult", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class getData_args:
"""
Attributes:
- request
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'request', (Request, Request.thrift_spec), None, ), # 1
)
def __init__(self, request=None,):
self.request = request
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.request = Request()
self.request.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getData_args')
if self.request is not None:
oprot.writeFieldBegin('request', TType.STRUCT, 1)
self.request.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.request)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getData_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (Response, Response.thrift_spec), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = Response()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getData_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class reset_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('reset_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class reset_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('reset_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ping_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ping_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ping_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ping_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class handleMapReduceResult_args:
"""
Attributes:
- name
- data
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'data', None, None, ), # 2
)
def __init__(self, name=None, data=None,):
self.name = name
self.data = data
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.data = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('handleMapReduceResult_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.data is not None:
oprot.writeFieldBegin('data', TType.STRING, 2)
oprot.writeString(self.data)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.name)
value = (value * 31) ^ hash(self.data)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class handleMapReduceResult_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('handleMapReduceResult_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
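# Editor's sketch (not part of the generated module): the conventional Thrift
# client wiring for this service.  Host, port and the import path are
# assumptions, so the example is left as comments.
#
#   from thrift.transport import TSocket, TTransport
#   from thrift.protocol import TBinaryProtocol
#   import AwesomeService  # this generated module
#   transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9090))
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)
#   client = AwesomeService.Client(protocol)
#   transport.open()
#   client.ping()
#   transport.close()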
| 28.892241
| 188
| 0.675369
| 19,599
| 0.974638
| 0
| 0
| 0
| 0
| 0
| 0
| 1,319
| 0.065593
|
e3c455dcd759b47e6ff022d0f28b6d8b03f6c49a
| 10,382
|
py
|
Python
|
src/org_setup/resources/organizations.py
|
gilyas/aws-control-tower-org-setup-sample
|
65c1a1a0c7b7bb362dff1924f38f63bd8c3a8e41
|
[
"MIT-0"
] | null | null | null |
src/org_setup/resources/organizations.py
|
gilyas/aws-control-tower-org-setup-sample
|
65c1a1a0c7b7bb362dff1924f38f63bd8c3a8e41
|
[
"MIT-0"
] | null | null | null |
src/org_setup/resources/organizations.py
|
gilyas/aws-control-tower-org-setup-sample
|
65c1a1a0c7b7bb362dff1924f38f63bd8c3a8e41
|
[
"MIT-0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: MIT-0
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this
* software and associated documentation files (the "Software"), to deal in the Software
* without restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from functools import lru_cache
import json
from typing import List, Dict, Optional, Iterable, Any
from aws_lambda_powertools import Logger
import boto3
import botocore
from ..constants import AI_OPT_OUT_POLICY_NAME, AI_OPT_OUT_POLICY
from ..exceptions import OrganizationNotFoundException
logger = Logger(child=True)
__all__ = ["Organizations"]
class Organizations:
def __init__(self, session: boto3.Session, region: str) -> None:
self.client = session.client("organizations", region_name=region)
self.region = region
self._roots = []
self._accounts = []
def describe_organization(self) -> Dict[str, Any]:
"""
Describe the organization the account belongs to
"""
try:
response = self.client.describe_organization()
except self.client.exceptions.AWSOrganizationsNotInUseException:
raise OrganizationNotFoundException("Organization Not Found")
except botocore.exceptions.ClientError:
logger.exception(f"[{self.region} Unable to describe organization")
raise
return response["Organization"]
def list_accounts(self) -> List[Dict[str, str]]:
"""
List all of the accounts in an organization
"""
if self._accounts:
return self._accounts
accounts = []
paginator = self.client.get_paginator("list_accounts")
page_iterator = paginator.paginate(PaginationConfig={"PageSize": 20})
for page in page_iterator:
for account in page.get("Accounts", []):
if account.get("Status") != "ACTIVE":
continue
accounts.append(account)
self._accounts = accounts
return accounts
def list_policies(self, policy_type: str) -> List[Dict[str, str]]:
"""
List all of the policies in an organization
"""
policies = []
paginator = self.client.get_paginator("list_policies")
page_iterator = paginator.paginate(Filter=policy_type)
for page in page_iterator:
policies.extend(page.get("Policies", []))
return policies
def list_roots(self) -> List[Dict[str, str]]:
"""
List all the roots in an organization
"""
if self._roots:
return self._roots
roots = []
paginator = self.client.get_paginator("list_roots")
page_iterator = paginator.paginate()
for page in page_iterator:
roots.extend(page.get("Roots", []))
self._roots = roots
return roots
def enable_all_features(self) -> None:
"""
Enable all features in an organization
"""
logger.info(f"[{self.region}] Enabling all features in the organization")
try:
self.client.enable_all_features()
logger.debug(f"[{self.region}] Enabled all features in organization")
except botocore.exceptions.ClientError as error:
if (
error.response["Error"]["Code"]
!= "HandshakeConstraintViolationException"
):
logger.exception(
f"[{self.region}] Unable to enable all features in organization"
)
raise
def enable_aws_service_access(self, principals: Iterable[str]) -> None:
"""
Enable AWS service access in organization
"""
for principal in principals:
logger.info(f"[{self.region}] Enabling AWS service access for {principal}")
try:
self.client.enable_aws_service_access(ServicePrincipal=principal)
logger.debug(
f"[{self.region}] Enabled AWS service access for {principal}"
)
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] != "ServiceException":
logger.exception(
f"[{self.region}] Unable enable AWS service access for {principal}"
)
raise error
def enable_all_policy_types(self) -> None:
"""
Enables all policy types in an organization
"""
logger.info(f"[{self.region}] Enabling all policy types in organization")
for root in self.list_roots():
root_id = root["Id"]
disabled_types = [
policy_type.get("Type")
for policy_type in root.get("PolicyTypes", [])
if policy_type.get("Status") != "ENABLED"
]
for disabled_type in disabled_types:
logger.info(
f"[{self.region}] Enabling policy type {disabled_type} on root {root_id}"
)
try:
self.client.enable_policy_type(
RootId=root_id, PolicyType=disabled_type
)
logger.debug(
f"[{self.region}] Enabled policy type {disabled_type} on root {root_id}"
)
except botocore.exceptions.ClientError as error:
if (
error.response["Error"]["Code"]
!= "PolicyTypeAlreadyEnabledException"
):
logger.exception(
f"[{self.region}] Unable to enable policy type"
)
raise error
logger.debug(f"[{self.region}] Enabled all policy types in organization")
def get_ai_optout_policy(self) -> str:
"""
Return the AI opt-out policy ID
"""
for policy in self.list_policies("AISERVICES_OPT_OUT_POLICY"):
if policy["Name"] == AI_OPT_OUT_POLICY_NAME:
logger.info(f"Found existing {AI_OPT_OUT_POLICY_NAME} policy")
return policy["Id"]
logger.info(
f"[{self.region}] {AI_OPT_OUT_POLICY_NAME} policy not found, creating"
)
try:
response = self.client.create_policy(
Content=json.dumps(AI_OPT_OUT_POLICY),
Description="Opt-out of all AI services",
Name=AI_OPT_OUT_POLICY_NAME,
Type="AISERVICES_OPT_OUT_POLICY",
)
policy_id = response.get("Policy", {}).get("PolicySummary", {}).get("Id")
logger.debug(
f"[{self.region}] Created policy {AI_OPT_OUT_POLICY_NAME} ({policy_id})"
)
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] == "DuplicatePolicyException":
return self.get_ai_optout_policy()
raise error
return policy_id
def attach_ai_optout_policy(self) -> None:
"""
Attach the AI opt-out policy to the root
"""
policy_id = self.get_ai_optout_policy()
if not policy_id:
            logger.warning(
f"[{self.region}] Unable to find {AI_OPT_OUT_POLICY_NAME} policy"
)
return
for root in self.list_roots():
root_id = root["Id"]
logger.info(
f"[{self.region}] Attaching {AI_OPT_OUT_POLICY_NAME} ({policy_id}) to root {root_id}"
)
try:
self.client.attach_policy(PolicyId=policy_id, TargetId=root_id)
logger.debug(
f"[{self.region}] Attached {AI_OPT_OUT_POLICY_NAME} ({policy_id}) to root {root_id}"
)
except botocore.exceptions.ClientError as error:
if (
error.response["Error"]["Code"]
!= "DuplicatePolicyAttachmentException"
):
logger.exception(f"[{self.region}] Unable to attach policy")
raise error
def register_delegated_administrator(
self, account_id: str, principals: Iterable[str]
) -> None:
"""
Register a delegated administrator
"""
for principal in principals:
logger.info(
f"[{self.region}] Registering {account_id} as a delegated administrator for {principal}"
)
try:
self.client.register_delegated_administrator(
AccountId=account_id, ServicePrincipal=principal
)
logger.debug(
f"[{self.region}] Registered {account_id} as a delegated administrator for {principal}"
)
except botocore.exceptions.ClientError as error:
if (
error.response["Error"]["Code"]
!= "AccountAlreadyRegisteredException"
):
logger.exception(
f"[{self.region}] Unable to register {account_id} as a delegated administrator for {principal}"
)
raise error
@lru_cache
def get_account_id(self, name: str) -> Optional[str]:
"""
Return the Account ID for an account
"""
for account in self.list_accounts():
if account.get("Name") == name:
return account["Id"]
return None
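# Illustrative sketch: a rough idea of how the helpers above could be chained.
# "OrganizationsClient" is only a stand-in name for whichever wrapper class
# defines these methods, and the region, principal and account names below
# are placeholders, not values taken from the original module.
#
#     org = OrganizationsClient(region="us-east-1")
#     org.enable_all_features()
#     org.enable_all_policy_types()
#     org.enable_aws_service_access(["config.amazonaws.com"])
#     org.attach_ai_optout_policy()
#     audit_id = org.get_account_id("Audit")
#     if audit_id:
#         org.register_delegated_administrator(audit_id, ["config.amazonaws.com"])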
| 37.345324
| 119
| 0.573974
| 8,987
| 0.865633
| 0
| 0
| 283
| 0.027259
| 0
| 0
| 3,803
| 0.366307
|
e3c4a465fcdd23d8bc979d0237347d3db4337947
| 2,095
|
py
|
Python
|
clint/textui/core.py
|
mpmman/clint
|
9d3693d644b8587d985972b6075d970096f6439e
|
[
"0BSD"
] | 1,230
|
2015-01-03T05:39:25.000Z
|
2020-02-18T12:36:03.000Z
|
clint/textui/core.py
|
not-kennethreitz/clint
|
9d3693d644b8587d985972b6075d970096f6439e
|
[
"0BSD"
] | 50
|
2015-01-06T17:58:20.000Z
|
2018-03-19T13:25:22.000Z
|
clint/textui/core.py
|
not-kennethreitz/clint
|
9d3693d644b8587d985972b6075d970096f6439e
|
[
"0BSD"
] | 153
|
2015-01-03T03:56:25.000Z
|
2020-02-13T20:59:03.000Z
|
# -*- coding: utf-8 -*-
"""
clint.textui.core
~~~~~~~~~~~~~~~~~
Core TextUI functionality for Puts/Indent/Writer.
"""
from __future__ import absolute_import
import sys
from contextlib import contextmanager
from .formatters import max_width, min_width, _get_max_width_context
from .cols import columns
from ..utils import tsplit
__all__ = ('puts', 'puts_err', 'indent', 'dedent', 'columns', 'max_width',
'min_width', 'STDOUT', 'STDERR')
STDOUT = sys.stdout.write
STDERR = sys.stderr.write
NEWLINES = ('\n', '\r', '\r\n')
INDENT_STRINGS = []
# Private
def _indent(indent=0, quote='', indent_char=' '):
"""Indent util function, compute new indent_string"""
if indent > 0:
indent_string = ''.join((
str(quote),
(indent_char * (indent - len(quote)))
))
else:
indent_string = ''.join((
('\x08' * (-1 * (indent - len(quote)))),
str(quote))
)
if len(indent_string):
INDENT_STRINGS.append(indent_string)
# Public
def puts(s='', newline=True, stream=STDOUT):
"""Prints given string to stdout."""
max_width_ctx = _get_max_width_context()
if max_width_ctx:
cols, separator = max_width_ctx[-1]
s = max_width(s, cols, separator)
if newline:
s = tsplit(s, NEWLINES)
s = map(str, s)
indent = ''.join(INDENT_STRINGS)
s = (str('\n' + indent)).join(s)
_str = ''.join((
''.join(INDENT_STRINGS),
str(s),
'\n' if newline else ''
))
stream(_str)
def puts_err(s='', newline=True, stream=STDERR):
"""Prints given string to stderr."""
puts(s, newline, stream)
def dedent():
"""Dedent next strings, use only if you use indent otherwise than as a
context."""
INDENT_STRINGS.pop()
@contextmanager
def _indent_context():
"""Indentation context manager."""
try:
yield
finally:
dedent()
def indent(indent=4, quote=''):
"""Indentation manager, return an indentation context manager."""
_indent(indent, quote)
return _indent_context()
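# Illustrative sketch: puts() combined with the indent() context manager
# defined above; the printed strings are placeholders.
if __name__ == '__main__':
    with indent(4, quote='> '):
        puts('indented and quoted')
    puts('back at column zero')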
| 21.822917
| 74
| 0.602864
| 0
| 0
| 114
| 0.054415
| 130
| 0.062053
| 0
| 0
| 575
| 0.274463
|
e3c6cfd741e8bd9adaeac0bf0d16ec2396131aa6
| 636
|
py
|
Python
|
setup.py
|
mluciarr/McComplex
|
e83b5d11ab772a6bdc6233d318f7da5f67b3c5ce
|
[
"MIT"
] | null | null | null |
setup.py
|
mluciarr/McComplex
|
e83b5d11ab772a6bdc6233d318f7da5f67b3c5ce
|
[
"MIT"
] | null | null | null |
setup.py
|
mluciarr/McComplex
|
e83b5d11ab772a6bdc6233d318f7da5f67b3c5ce
|
[
"MIT"
] | 1
|
2021-04-14T22:43:33.000Z
|
2021-04-14T22:43:33.000Z
|
#!/usr/bin/env python
from distutils.core import setup
import setuptools
setup(name='McComplex',
version='1.0',
description="""This program reconstructs macrocomplexes of protein-protein
and protein-(DNA/RNA) from a list of files of binary interactions of its chains""",
author='Maria Lucía Romero, Ferran Pegenaute, Ipek Yaren',
author_email='ferran.pegenaute01@estudiant.upf.edu',
long_description=open('README.md').read(),
install_requires=['biopython >= 1.73.0','argparse >= 1.1.0', 'pysimplelog'],
packages=['McComplex', 'McComplex.functions'],
license='LICENSE.txt',
url='https://github.com/ferranpgp/McCrocomplex')
| 37.411765
| 84
| 0.748428
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 425
| 0.66719
|
e3c83e726d786e7b9f87a1f14f06ff2aa47d4a9b
| 1,277
|
py
|
Python
|
pymitools/girder/metadataPresets.py
|
chapmanbe/pymitools
|
be0f4a3f56dd6c8bb89678368c49e09b3333232c
|
[
"Apache-2.0"
] | null | null | null |
pymitools/girder/metadataPresets.py
|
chapmanbe/pymitools
|
be0f4a3f56dd6c8bb89678368c49e09b3333232c
|
[
"Apache-2.0"
] | null | null | null |
pymitools/girder/metadataPresets.py
|
chapmanbe/pymitools
|
be0f4a3f56dd6c8bb89678368c49e09b3333232c
|
[
"Apache-2.0"
] | null | null | null |
"""Metadata presets for commonly used keywords."""
presets = {
    "chest": {"Anatomical Region":
{"ID": "0001443",
"Name": "chest",
"Ontology Acronym": "UBERON",
"Ontology Name": "Uber Anatomy Ontology",
"Resource URL":
"http://purl.obolibrary.org/obo/UBERON_0001443"}},
    "abdomen": {"Anatomical Region":
{"ID": "0000916",
"Name": "abdomen",
"Ontology Acronym": "UBERON",
"Ontology Name": "Uber Anatomy Ontology",
"Resource URL":
"http://purl.obolibrary.org/obo/UBERON_0000916"}},
    "neck": {"Anatomical Region":
{"ID": "0000974",
"Name": "neck",
"Ontology Acronym": "UBERON",
"Ontology Name": "Uber Anatomy Ontology",
"Resource URL":
"http://purl.obolibrary.org/obo/UBERON_0000974"}},
    "head": {"Anatomical Region":
{"ID": "0000033",
"Name": "head",
"Ontology Acronym": "UBERON",
"Ontology Name": "Uber Anatomy Ontology",
"Resource URL":
"http://purl.obolibrary.org/obo/UBERON_0000033"}}}
| 29.697674
| 66
| 0.473767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 730
| 0.571652
|
e3c99e6c753a7603c000c4cebf8d2e0f312189b5
| 901
|
py
|
Python
|
data/colors-extractor.py
|
imlolman/Flat-UI-Colors-Chrome-App
|
d12010360dfeb1d38e8923dbe0fa5c51640b7314
|
[
"BSD-Source-Code"
] | 1
|
2021-04-23T13:33:16.000Z
|
2021-04-23T13:33:16.000Z
|
data/colors-extractor.py
|
imlolman/Flat-UI-Colors-Chrome-App
|
d12010360dfeb1d38e8923dbe0fa5c51640b7314
|
[
"BSD-Source-Code"
] | null | null | null |
data/colors-extractor.py
|
imlolman/Flat-UI-Colors-Chrome-App
|
d12010360dfeb1d38e8923dbe0fa5c51640b7314
|
[
"BSD-Source-Code"
] | null | null | null |
from bs4 import BeautifulSoup
import json
source = open('html-source.html', encoding="utf8").read()
soup = BeautifulSoup(source, 'html.parser')
# Prettified to inspect and understand the structure of the code
# prittified = soup.prettify().encode("utf-8")
# open('prettified.html', 'wb').write(prittified)
color_sets = []
for sets in soup.find_all("a", {"class": "smallpalette-container"}):
set = {}
set['name'] = sets.find(
'div', {"class": "name"}).contents[0].replace('\n ', '')
set['emoji'] = sets.find('span', {"class": "emoji"}).string
set['colors'] = []
for color in sets.find_all("div", {"class": "color"}):
set['colors'].append(color['style'].replace(
'background: ', "").replace(';', ""))
color_sets.append(set)
open('colors_data.json', 'w+').write(json.dumps(color_sets))
print('Check file `colors_data.json` for the updated color sets.')
| 32.178571
| 71
| 0.627081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 424
| 0.470588
|
e3ca5e822898bbe283aa33628cdf89af94b959cf
| 2,005
|
py
|
Python
|
preprocessing.py
|
Y-greatigr/Covid19_Model
|
30fc0af1ac6c7f68bf072607ee0db194f8c8093a
|
[
"MIT"
] | null | null | null |
preprocessing.py
|
Y-greatigr/Covid19_Model
|
30fc0af1ac6c7f68bf072607ee0db194f8c8093a
|
[
"MIT"
] | null | null | null |
preprocessing.py
|
Y-greatigr/Covid19_Model
|
30fc0af1ac6c7f68bf072607ee0db194f8c8093a
|
[
"MIT"
] | null | null | null |
# Loads the data.
# Handles preparing the data right before training.
import numpy as np
import os
import options as opt
import datetime
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Normalizer
import joblib
def date_to_number(date):
    # Convert a date written with month names into numbers.
# ex) 'Dec/1' -> '12/1'
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
for i, month in enumerate(months):
date = date.replace(month, str(i+1))
return date
def load_one(path):
    # Load the data from the txt file at path.
with open(path, 'r', encoding='utf8') as f:
d1 = f.readlines()[0].split(' ')
d1 = d1[:-1]
d1 = list(map(float, d1))
return d1
def load(directory='Data'):
    # Load the data from all txt files in directory.
data = []
for file in os.listdir(directory):
path = directory + '/' + file
with open(path, 'r', encoding='utf8') as f:
d1, d2 = [i.split(' ') for i in f.read().split('\n')]
d1, d2 = d1[:-1], d2[:-1]
d1 = list(map(float, d1))
d2 = list(map(float, d2))
data += d1
data += d2
return np.array(data)
def labeling(data, sight=25, y_n=1):
    # Build pairs of (sight x values, y_n y values).
# ex) f([1,2,3,4,5,6,7,8,9], sight=3, y_n=1) -> [ [[1,2,3],[4]], [[2,3,4],[5]], ..]
x, y = [], []
for i in range(len(data) - sight - y_n + 1):
x.append(data[i:sight+i])
y.append(data[sight+i:sight+i+y_n])
return np.array(x), np.array(y)
if __name__ == "__main__":
    # Load the data
data = load()
    # Build x and y
x, y = labeling(data, sight=opt.SIGHT, y_n=opt.Y_N)
x = x.reshape((-1, opt.SIGHT, 1))
    # Split into train and test data
xtrain, xtest, ytrain ,ytest = train_test_split(x, y, test_size=opt.TEST_SIZE)
    joblib.dump([xtrain, xtest, ytrain, ytest], 'traintest.joblib')  # save
print(xtrain.shape)
print(ytrain.shape)
print(xtest.shape)
print(ytest.shape)
| 28.642857
| 98
| 0.551122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 616
| 0.281664
|
e3cac54ed59276bd1cf21b47cfa19280c29a0b7c
| 20,168
|
py
|
Python
|
colorpy/colorpy-0.1.0/illuminants.py
|
gmweir/QuasiOptics
|
0974178984f845597c5209217613c26edf931ed0
|
[
"MIT"
] | 1
|
2020-11-06T18:16:00.000Z
|
2020-11-06T18:16:00.000Z
|
colorpy/colorpy-0.1.1/illuminants.py
|
gmweir/QuasiOptics
|
0974178984f845597c5209217613c26edf931ed0
|
[
"MIT"
] | null | null | null |
colorpy/colorpy-0.1.1/illuminants.py
|
gmweir/QuasiOptics
|
0974178984f845597c5209217613c26edf931ed0
|
[
"MIT"
] | null | null | null |
'''
illuminants.py - Definitions of some standard illuminants.
Description:
Illuminants are spectrums, normalized so that Y = 1.0.
Spectrums are 2D numpy arrays, with one row for each wavelength,
with the first column holding the wavelength in nm, and the
second column the intensity.
The spectrums have a wavelength increment of 1 nm.
Functions:
init () -
Initialize CIE Illuminant D65. This runs on module startup.
get_illuminant_D65 () -
Get CIE Illuminant D65, as a spectrum, normalized to Y = 1.0.
CIE standard illuminant D65 represents a phase of natural daylight
with a correlated color temperature of approximately 6504 K. (Wyszecki, p. 144)
In the interest of standardization the CIE recommends that D65 be used
whenever possible. Otherwise, D55 or D75 are recommended. (Wyszecki, p. 145)
(ColorPy does not currently provide D55 or D75, however.)
get_illuminant_A () -
Get CIE Illuminant A, as a spectrum, normalized to Y = 1.0.
This is actually a blackbody illuminant for T = 2856 K. (Wyszecki, p. 143)
get_blackbody_illuminant (T_K) -
Get the spectrum of a blackbody at the given temperature, normalized to Y = 1.0.
get_constant_illuminant () -
Get an illuminant, with spectrum constant over wavelength, normalized to Y = 1.0.
scale_illuminant (illuminant, scaling) -
    Scale the illuminant intensity by the specified factor.
References:
Wyszecki and Stiles, Color Science: Concepts and Methods, Quantitative Data and Formulae,
2nd edition, John Wiley, 1982. Wiley Classics Library Edition 2000. ISBN 0-471-39918-3.
CVRL Color and Vision Database - http://cvrl.ioo.ucl.ac.uk/index.htm - (accessed 17 Sep 2008)
Color and Vision Research Laboratories.
Provides a set of data sets related to color vision.
ColorPy uses the tables from this site for the 1931 CIE XYZ matching functions,
and for Illuminant D65, both at 1 nm wavelength increments.
CIE Standards - http://cvrl.ioo.ucl.ac.uk/cie.htm - (accessed 17 Sep 2008)
CIE standards as maintained by CVRL.
The 1931 CIE XYZ and D65 tables that ColorPy uses were obtained from the following files, linked here:
http://cvrl.ioo.ucl.ac.uk/database/data/cmfs/ciexyz31_1.txt
http://cvrl.ioo.ucl.ac.uk/database/data/cie/Illuminantd65.txt
CIE International Commission on Illumination - http://www.cie.co.at/ - (accessed 17 Sep 2008)
Official website of the CIE.
There are tables of the standard functions (matching functions, illuminants) here:
http://www.cie.co.at/main/freepubs.html
http://www.cie.co.at/publ/abst/datatables15_2004/x2.txt
http://www.cie.co.at/publ/abst/datatables15_2004/y2.txt
http://www.cie.co.at/publ/abst/datatables15_2004/z2.txt
http://www.cie.co.at/publ/abst/datatables15_2004/sid65.txt
ColorPy does not use these specific files.
License:
Copyright (C) 2008 Mark Kness
Author - Mark Kness - mkness@alumni.utexas.net
This file is part of ColorPy.
ColorPy is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ColorPy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with ColorPy. If not, see <http://www.gnu.org/licenses/>.
'''
import math, numpy
import colormodels
import ciexyz
import blackbody
import plots
# table of CIE Illuminant D65 spectrum.
# data from: http://cvrl.ioo.ucl.ac.uk/database/data/cie/Illuminantd65.txt
# massaged into this format.
_Illuminant_D65_table = [
[ 300, 0.034100 ],
[ 301, 0.360140 ],
[ 302, 0.686180 ],
[ 303, 1.012220 ],
[ 304, 1.338260 ],
[ 305, 1.664300 ],
[ 306, 1.990340 ],
[ 307, 2.316380 ],
[ 308, 2.642420 ],
[ 309, 2.968460 ],
[ 310, 3.294500 ],
[ 311, 4.988650 ],
[ 312, 6.682800 ],
[ 313, 8.376950 ],
[ 314, 10.071100 ],
[ 315, 11.765200 ],
[ 316, 13.459400 ],
[ 317, 15.153500 ],
[ 318, 16.847700 ],
[ 319, 18.541800 ],
[ 320, 20.236000 ],
[ 321, 21.917700 ],
[ 322, 23.599500 ],
[ 323, 25.281200 ],
[ 324, 26.963000 ],
[ 325, 28.644700 ],
[ 326, 30.326500 ],
[ 327, 32.008200 ],
[ 328, 33.690000 ],
[ 329, 35.371700 ],
[ 330, 37.053500 ],
[ 331, 37.343000 ],
[ 332, 37.632600 ],
[ 333, 37.922100 ],
[ 334, 38.211600 ],
[ 335, 38.501100 ],
[ 336, 38.790700 ],
[ 337, 39.080200 ],
[ 338, 39.369700 ],
[ 339, 39.659300 ],
[ 340, 39.948800 ],
[ 341, 40.445100 ],
[ 342, 40.941400 ],
[ 343, 41.437700 ],
[ 344, 41.934000 ],
[ 345, 42.430200 ],
[ 346, 42.926500 ],
[ 347, 43.422800 ],
[ 348, 43.919100 ],
[ 349, 44.415400 ],
[ 350, 44.911700 ],
[ 351, 45.084400 ],
[ 352, 45.257000 ],
[ 353, 45.429700 ],
[ 354, 45.602300 ],
[ 355, 45.775000 ],
[ 356, 45.947700 ],
[ 357, 46.120300 ],
[ 358, 46.293000 ],
[ 359, 46.465600 ],
[ 360, 46.638300 ],
[ 361, 47.183400 ],
[ 362, 47.728500 ],
[ 363, 48.273500 ],
[ 364, 48.818600 ],
[ 365, 49.363700 ],
[ 366, 49.908800 ],
[ 367, 50.453900 ],
[ 368, 50.998900 ],
[ 369, 51.544000 ],
[ 370, 52.089100 ],
[ 371, 51.877700 ],
[ 372, 51.666400 ],
[ 373, 51.455000 ],
[ 374, 51.243700 ],
[ 375, 51.032300 ],
[ 376, 50.820900 ],
[ 377, 50.609600 ],
[ 378, 50.398200 ],
[ 379, 50.186900 ],
[ 380, 49.975500 ],
[ 381, 50.442800 ],
[ 382, 50.910000 ],
[ 383, 51.377300 ],
[ 384, 51.844600 ],
[ 385, 52.311800 ],
[ 386, 52.779100 ],
[ 387, 53.246400 ],
[ 388, 53.713700 ],
[ 389, 54.180900 ],
[ 390, 54.648200 ],
[ 391, 57.458900 ],
[ 392, 60.269500 ],
[ 393, 63.080200 ],
[ 394, 65.890900 ],
[ 395, 68.701500 ],
[ 396, 71.512200 ],
[ 397, 74.322900 ],
[ 398, 77.133600 ],
[ 399, 79.944200 ],
[ 400, 82.754900 ],
[ 401, 83.628000 ],
[ 402, 84.501100 ],
[ 403, 85.374200 ],
[ 404, 86.247300 ],
[ 405, 87.120400 ],
[ 406, 87.993600 ],
[ 407, 88.866700 ],
[ 408, 89.739800 ],
[ 409, 90.612900 ],
[ 410, 91.486000 ],
[ 411, 91.680600 ],
[ 412, 91.875200 ],
[ 413, 92.069700 ],
[ 414, 92.264300 ],
[ 415, 92.458900 ],
[ 416, 92.653500 ],
[ 417, 92.848100 ],
[ 418, 93.042600 ],
[ 419, 93.237200 ],
[ 420, 93.431800 ],
[ 421, 92.756800 ],
[ 422, 92.081900 ],
[ 423, 91.406900 ],
[ 424, 90.732000 ],
[ 425, 90.057000 ],
[ 426, 89.382100 ],
[ 427, 88.707100 ],
[ 428, 88.032200 ],
[ 429, 87.357200 ],
[ 430, 86.682300 ],
[ 431, 88.500600 ],
[ 432, 90.318800 ],
[ 433, 92.137100 ],
[ 434, 93.955400 ],
[ 435, 95.773600 ],
[ 436, 97.591900 ],
[ 437, 99.410200 ],
[ 438, 101.228000 ],
[ 439, 103.047000 ],
[ 440, 104.865000 ],
[ 441, 106.079000 ],
[ 442, 107.294000 ],
[ 443, 108.508000 ],
[ 444, 109.722000 ],
[ 445, 110.936000 ],
[ 446, 112.151000 ],
[ 447, 113.365000 ],
[ 448, 114.579000 ],
[ 449, 115.794000 ],
[ 450, 117.008000 ],
[ 451, 117.088000 ],
[ 452, 117.169000 ],
[ 453, 117.249000 ],
[ 454, 117.330000 ],
[ 455, 117.410000 ],
[ 456, 117.490000 ],
[ 457, 117.571000 ],
[ 458, 117.651000 ],
[ 459, 117.732000 ],
[ 460, 117.812000 ],
[ 461, 117.517000 ],
[ 462, 117.222000 ],
[ 463, 116.927000 ],
[ 464, 116.632000 ],
[ 465, 116.336000 ],
[ 466, 116.041000 ],
[ 467, 115.746000 ],
[ 468, 115.451000 ],
[ 469, 115.156000 ],
[ 470, 114.861000 ],
[ 471, 114.967000 ],
[ 472, 115.073000 ],
[ 473, 115.180000 ],
[ 474, 115.286000 ],
[ 475, 115.392000 ],
[ 476, 115.498000 ],
[ 477, 115.604000 ],
[ 478, 115.711000 ],
[ 479, 115.817000 ],
[ 480, 115.923000 ],
[ 481, 115.212000 ],
[ 482, 114.501000 ],
[ 483, 113.789000 ],
[ 484, 113.078000 ],
[ 485, 112.367000 ],
[ 486, 111.656000 ],
[ 487, 110.945000 ],
[ 488, 110.233000 ],
[ 489, 109.522000 ],
[ 490, 108.811000 ],
[ 491, 108.865000 ],
[ 492, 108.920000 ],
[ 493, 108.974000 ],
[ 494, 109.028000 ],
[ 495, 109.082000 ],
[ 496, 109.137000 ],
[ 497, 109.191000 ],
[ 498, 109.245000 ],
[ 499, 109.300000 ],
[ 500, 109.354000 ],
[ 501, 109.199000 ],
[ 502, 109.044000 ],
[ 503, 108.888000 ],
[ 504, 108.733000 ],
[ 505, 108.578000 ],
[ 506, 108.423000 ],
[ 507, 108.268000 ],
[ 508, 108.112000 ],
[ 509, 107.957000 ],
[ 510, 107.802000 ],
[ 511, 107.501000 ],
[ 512, 107.200000 ],
[ 513, 106.898000 ],
[ 514, 106.597000 ],
[ 515, 106.296000 ],
[ 516, 105.995000 ],
[ 517, 105.694000 ],
[ 518, 105.392000 ],
[ 519, 105.091000 ],
[ 520, 104.790000 ],
[ 521, 105.080000 ],
[ 522, 105.370000 ],
[ 523, 105.660000 ],
[ 524, 105.950000 ],
[ 525, 106.239000 ],
[ 526, 106.529000 ],
[ 527, 106.819000 ],
[ 528, 107.109000 ],
[ 529, 107.399000 ],
[ 530, 107.689000 ],
[ 531, 107.361000 ],
[ 532, 107.032000 ],
[ 533, 106.704000 ],
[ 534, 106.375000 ],
[ 535, 106.047000 ],
[ 536, 105.719000 ],
[ 537, 105.390000 ],
[ 538, 105.062000 ],
[ 539, 104.733000 ],
[ 540, 104.405000 ],
[ 541, 104.369000 ],
[ 542, 104.333000 ],
[ 543, 104.297000 ],
[ 544, 104.261000 ],
[ 545, 104.225000 ],
[ 546, 104.190000 ],
[ 547, 104.154000 ],
[ 548, 104.118000 ],
[ 549, 104.082000 ],
[ 550, 104.046000 ],
[ 551, 103.641000 ],
[ 552, 103.237000 ],
[ 553, 102.832000 ],
[ 554, 102.428000 ],
[ 555, 102.023000 ],
[ 556, 101.618000 ],
[ 557, 101.214000 ],
[ 558, 100.809000 ],
[ 559, 100.405000 ],
[ 560, 100.000000 ],
[ 561, 99.633400 ],
[ 562, 99.266800 ],
[ 563, 98.900300 ],
[ 564, 98.533700 ],
[ 565, 98.167100 ],
[ 566, 97.800500 ],
[ 567, 97.433900 ],
[ 568, 97.067400 ],
[ 569, 96.700800 ],
[ 570, 96.334200 ],
[ 571, 96.279600 ],
[ 572, 96.225000 ],
[ 573, 96.170300 ],
[ 574, 96.115700 ],
[ 575, 96.061100 ],
[ 576, 96.006500 ],
[ 577, 95.951900 ],
[ 578, 95.897200 ],
[ 579, 95.842600 ],
[ 580, 95.788000 ],
[ 581, 95.077800 ],
[ 582, 94.367500 ],
[ 583, 93.657300 ],
[ 584, 92.947000 ],
[ 585, 92.236800 ],
[ 586, 91.526600 ],
[ 587, 90.816300 ],
[ 588, 90.106100 ],
[ 589, 89.395800 ],
[ 590, 88.685600 ],
[ 591, 88.817700 ],
[ 592, 88.949700 ],
[ 593, 89.081800 ],
[ 594, 89.213800 ],
[ 595, 89.345900 ],
[ 596, 89.478000 ],
[ 597, 89.610000 ],
[ 598, 89.742100 ],
[ 599, 89.874100 ],
[ 600, 90.006200 ],
[ 601, 89.965500 ],
[ 602, 89.924800 ],
[ 603, 89.884100 ],
[ 604, 89.843400 ],
[ 605, 89.802600 ],
[ 606, 89.761900 ],
[ 607, 89.721200 ],
[ 608, 89.680500 ],
[ 609, 89.639800 ],
[ 610, 89.599100 ],
[ 611, 89.409100 ],
[ 612, 89.219000 ],
[ 613, 89.029000 ],
[ 614, 88.838900 ],
[ 615, 88.648900 ],
[ 616, 88.458900 ],
[ 617, 88.268800 ],
[ 618, 88.078800 ],
[ 619, 87.888700 ],
[ 620, 87.698700 ],
[ 621, 87.257700 ],
[ 622, 86.816700 ],
[ 623, 86.375700 ],
[ 624, 85.934700 ],
[ 625, 85.493600 ],
[ 626, 85.052600 ],
[ 627, 84.611600 ],
[ 628, 84.170600 ],
[ 629, 83.729600 ],
[ 630, 83.288600 ],
[ 631, 83.329700 ],
[ 632, 83.370700 ],
[ 633, 83.411800 ],
[ 634, 83.452800 ],
[ 635, 83.493900 ],
[ 636, 83.535000 ],
[ 637, 83.576000 ],
[ 638, 83.617100 ],
[ 639, 83.658100 ],
[ 640, 83.699200 ],
[ 641, 83.332000 ],
[ 642, 82.964700 ],
[ 643, 82.597500 ],
[ 644, 82.230200 ],
[ 645, 81.863000 ],
[ 646, 81.495800 ],
[ 647, 81.128500 ],
[ 648, 80.761300 ],
[ 649, 80.394000 ],
[ 650, 80.026800 ],
[ 651, 80.045600 ],
[ 652, 80.064400 ],
[ 653, 80.083100 ],
[ 654, 80.101900 ],
[ 655, 80.120700 ],
[ 656, 80.139500 ],
[ 657, 80.158300 ],
[ 658, 80.177000 ],
[ 659, 80.195800 ],
[ 660, 80.214600 ],
[ 661, 80.420900 ],
[ 662, 80.627200 ],
[ 663, 80.833600 ],
[ 664, 81.039900 ],
[ 665, 81.246200 ],
[ 666, 81.452500 ],
[ 667, 81.658800 ],
[ 668, 81.865200 ],
[ 669, 82.071500 ],
[ 670, 82.277800 ],
[ 671, 81.878400 ],
[ 672, 81.479100 ],
[ 673, 81.079700 ],
[ 674, 80.680400 ],
[ 675, 80.281000 ],
[ 676, 79.881600 ],
[ 677, 79.482300 ],
[ 678, 79.082900 ],
[ 679, 78.683600 ],
[ 680, 78.284200 ],
[ 681, 77.427900 ],
[ 682, 76.571600 ],
[ 683, 75.715300 ],
[ 684, 74.859000 ],
[ 685, 74.002700 ],
[ 686, 73.146500 ],
[ 687, 72.290200 ],
[ 688, 71.433900 ],
[ 689, 70.577600 ],
[ 690, 69.721300 ],
[ 691, 69.910100 ],
[ 692, 70.098900 ],
[ 693, 70.287600 ],
[ 694, 70.476400 ],
[ 695, 70.665200 ],
[ 696, 70.854000 ],
[ 697, 71.042800 ],
[ 698, 71.231500 ],
[ 699, 71.420300 ],
[ 700, 71.609100 ],
[ 701, 71.883100 ],
[ 702, 72.157100 ],
[ 703, 72.431100 ],
[ 704, 72.705100 ],
[ 705, 72.979000 ],
[ 706, 73.253000 ],
[ 707, 73.527000 ],
[ 708, 73.801000 ],
[ 709, 74.075000 ],
[ 710, 74.349000 ],
[ 711, 73.074500 ],
[ 712, 71.800000 ],
[ 713, 70.525500 ],
[ 714, 69.251000 ],
[ 715, 67.976500 ],
[ 716, 66.702000 ],
[ 717, 65.427500 ],
[ 718, 64.153000 ],
[ 719, 62.878500 ],
[ 720, 61.604000 ],
[ 721, 62.432200 ],
[ 722, 63.260300 ],
[ 723, 64.088500 ],
[ 724, 64.916600 ],
[ 725, 65.744800 ],
[ 726, 66.573000 ],
[ 727, 67.401100 ],
[ 728, 68.229300 ],
[ 729, 69.057400 ],
[ 730, 69.885600 ],
[ 731, 70.405700 ],
[ 732, 70.925900 ],
[ 733, 71.446000 ],
[ 734, 71.966200 ],
[ 735, 72.486300 ],
[ 736, 73.006400 ],
[ 737, 73.526600 ],
[ 738, 74.046700 ],
[ 739, 74.566900 ],
[ 740, 75.087000 ],
[ 741, 73.937600 ],
[ 742, 72.788100 ],
[ 743, 71.638700 ],
[ 744, 70.489300 ],
[ 745, 69.339800 ],
[ 746, 68.190400 ],
[ 747, 67.041000 ],
[ 748, 65.891600 ],
[ 749, 64.742100 ],
[ 750, 63.592700 ],
[ 751, 61.875200 ],
[ 752, 60.157800 ],
[ 753, 58.440300 ],
[ 754, 56.722900 ],
[ 755, 55.005400 ],
[ 756, 53.288000 ],
[ 757, 51.570500 ],
[ 758, 49.853100 ],
[ 759, 48.135600 ],
[ 760, 46.418200 ],
[ 761, 48.456900 ],
[ 762, 50.495600 ],
[ 763, 52.534400 ],
[ 764, 54.573100 ],
[ 765, 56.611800 ],
[ 766, 58.650500 ],
[ 767, 60.689200 ],
[ 768, 62.728000 ],
[ 769, 64.766700 ],
[ 770, 66.805400 ],
[ 771, 66.463100 ],
[ 772, 66.120900 ],
[ 773, 65.778600 ],
[ 774, 65.436400 ],
[ 775, 65.094100 ],
[ 776, 64.751800 ],
[ 777, 64.409600 ],
[ 778, 64.067300 ],
[ 779, 63.725100 ],
[ 780, 63.382800 ],
[ 781, 63.474900 ],
[ 782, 63.567000 ],
[ 783, 63.659200 ],
[ 784, 63.751300 ],
[ 785, 63.843400 ],
[ 786, 63.935500 ],
[ 787, 64.027600 ],
[ 788, 64.119800 ],
[ 789, 64.211900 ],
[ 790, 64.304000 ],
[ 791, 63.818800 ],
[ 792, 63.333600 ],
[ 793, 62.848400 ],
[ 794, 62.363200 ],
[ 795, 61.877900 ],
[ 796, 61.392700 ],
[ 797, 60.907500 ],
[ 798, 60.422300 ],
[ 799, 59.937100 ],
[ 800, 59.451900 ],
[ 801, 58.702600 ],
[ 802, 57.953300 ],
[ 803, 57.204000 ],
[ 804, 56.454700 ],
[ 805, 55.705400 ],
[ 806, 54.956200 ],
[ 807, 54.206900 ],
[ 808, 53.457600 ],
[ 809, 52.708300 ],
[ 810, 51.959000 ],
[ 811, 52.507200 ],
[ 812, 53.055300 ],
[ 813, 53.603500 ],
[ 814, 54.151600 ],
[ 815, 54.699800 ],
[ 816, 55.248000 ],
[ 817, 55.796100 ],
[ 818, 56.344300 ],
[ 819, 56.892400 ],
[ 820, 57.440600 ],
[ 821, 57.727800 ],
[ 822, 58.015000 ],
[ 823, 58.302200 ],
[ 824, 58.589400 ],
[ 825, 58.876500 ],
[ 826, 59.163700 ],
[ 827, 59.450900 ],
[ 828, 59.738100 ],
[ 829, 60.025300 ],
[ 830, 60.312500 ]
]
_Illuminant_D65 = None
def init ():
'''Initialize CIE Illuminant D65. This runs on module startup.'''
table_size = len (_Illuminant_D65_table)
first_wl = _Illuminant_D65_table [0][0]
last_wl = _Illuminant_D65_table [-1][0]
# for now, only consider the part in the normal visible range (360-830 nm)
first_index = ciexyz.start_wl_nm - first_wl
table_first = _Illuminant_D65_table [first_index][0]
assert (table_first == 360), 'Mismatch finding 360 nm entry in D65 table'
global _Illuminant_D65
_Illuminant_D65 = ciexyz.empty_spectrum()
(num_wl, num_cols) = _Illuminant_D65.shape
for i in xrange (0, num_wl):
_Illuminant_D65 [i][1] = _Illuminant_D65_table [first_index + i][1]
# normalization - illuminant is scaled so that Y = 1.0
xyz = ciexyz.xyz_from_spectrum (_Illuminant_D65)
scaling = 1.0 / xyz [1]
_Illuminant_D65 [:,1] *= scaling
#
# Get any of the available illuminants - D65, A, any blackbody, or a constant spectrum.
# ColorPy does not currently provide D55 or D75.
#
def get_illuminant_D65 ():
'''Get CIE Illuminant D65, as a spectrum, normalized to Y = 1.0.
CIE standard illuminant D65 represents a phase of natural daylight
with a correlated color temperature of approximately 6504 K. (Wyszecki, p. 144)
In the interest of standardization the CIE recommends that D65 be used
whenever possible. Otherwise, D55 or D75 are recommended. (Wyszecki, p. 145)
(ColorPy does not currently provide D55 or D75, however.)'''
illuminant = _Illuminant_D65.copy()
return illuminant
def get_illuminant_A ():
'''Get CIE Illuminant A, as a spectrum, normalized to Y = 1.0.
This is actually a blackbody illuminant for T = 2856 K. (Wyszecki, p. 143)'''
illuminant = get_blackbody_illuminant (2856.0)
return illuminant
def get_blackbody_illuminant (T_K):
'''Get the spectrum of a blackbody at the given temperature, normalized to Y = 1.0.'''
illuminant = blackbody.blackbody_spectrum (T_K)
xyz = ciexyz.xyz_from_spectrum (illuminant)
if xyz [1] != 0.0:
scaling = 1.0 / xyz [1]
illuminant [:,1] *= scaling
return illuminant
def get_constant_illuminant ():
'''Get an illuminant, with spectrum constant over wavelength, normalized to Y = 1.0.'''
illuminant = ciexyz.empty_spectrum()
(num_wl, num_cols) = illuminant.shape
for i in xrange (0, num_wl):
illuminant [i][1] = 1.0
xyz = ciexyz.xyz_from_spectrum (illuminant)
if xyz [1] != 0.0:
scaling = 1.0 / xyz [1]
illuminant [:,1] *= scaling
return illuminant
# Scale an illuminant by an arbitrary factor
def scale_illuminant (illuminant, scaling):
    '''Scale the illuminant intensity by the specified factor.'''
illuminant [:,1] *= scaling
return illuminant
# Initialize at module startup
init()
# Figures - Plot some of the illuminants
def figures ():
'''Plot spectra for several illuminants.'''
# D65
plots.spectrum_plot (
get_illuminant_D65(), 'CIE Illuminant D65', 'Illuminant-D65')
# A
plots.spectrum_plot (
get_illuminant_A(), 'CIE Illuminant A', 'Illuminant-A')
# Constant
plots.spectrum_plot (
get_constant_illuminant(), 'Constant Illuminant', 'Illuminant-Const')
# Blackbody (5778)
plots.spectrum_plot (
get_blackbody_illuminant (5778.0), '5778 K Illuminant', 'Illuminant-5778')
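# Illustrative sketch: a quick check that an illuminant returned above is
# normalized to Y = 1.0, reusing the same ciexyz helper the module relies on.
if __name__ == '__main__':
    d65 = get_illuminant_D65 ()
    xyz = ciexyz.xyz_from_spectrum (d65)
    print ('D65 XYZ: %s (Y should be 1.0)' % (xyz,))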
| 27.741403
| 106
| 0.555881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,306
| 0.26309
|
e3cb6664659c1efec8fe41651c43927d133e5bf2
| 10,046
|
py
|
Python
|
tests/unit/test_functions.py
|
noahsa/scikit-hts
|
db067f416172d18f7d0127150c45419883260d54
|
[
"MIT"
] | null | null | null |
tests/unit/test_functions.py
|
noahsa/scikit-hts
|
db067f416172d18f7d0127150c45419883260d54
|
[
"MIT"
] | null | null | null |
tests/unit/test_functions.py
|
noahsa/scikit-hts
|
db067f416172d18f7d0127150c45419883260d54
|
[
"MIT"
] | null | null | null |
import numpy
import pandas
import hts.hierarchy
from hts.functions import (
_create_bl_str_col,
get_agg_series,
get_hierarchichal_df,
to_sum_mat,
)
def test_sum_mat_uv(uv_tree):
mat, sum_mat_labels = to_sum_mat(uv_tree)
assert isinstance(mat, numpy.ndarray)
shp = mat.shape
assert shp[0] == uv_tree.num_nodes() + 1
assert shp[1] == uv_tree.leaf_sum()
def test_sum_mat_mv(mv_tree):
mat, sum_mat_labels = to_sum_mat(mv_tree)
assert isinstance(mat, numpy.ndarray)
shp = mat.shape
assert shp[0] == mv_tree.num_nodes() + 1
assert shp[1] == mv_tree.leaf_sum()
def test_sum_mat_hierarchical():
hierarchy = {"total": ["A", "B"], "A": ["A_X", "A_Y", "A_Z"], "B": ["B_X", "B_Y"]}
hier_df = pandas.DataFrame(
data={
"total": [],
"A": [],
"B": [],
"A_X": [],
"A_Y": [],
"A_Z": [],
"B_X": [],
"B_Y": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(hierarchy, hier_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1, 1], # total
[0, 0, 0, 1, 1], # B
[1, 1, 1, 0, 0], # A
[1, 0, 0, 0, 0], # A_X
[0, 1, 0, 0, 0], # A_Y
[0, 0, 1, 0, 0], # A_Z
[0, 0, 0, 1, 0], # B_X
            [0, 0, 0, 0, 1], # B_Y
        ]
    )
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
assert sum_mat_labels == ["total", "B", "A", "A_X", "A_Y", "A_Z", "B_X", "B_Y"]
def test_sum_mat_grouped():
hierarchy = {
"total": ["A", "B", "X", "Y"],
"A": ["A_X", "A_Y"],
"B": ["B_X", "B_Y"],
}
grouped_df = pandas.DataFrame(
data={
"total": [],
"A": [],
"B": [],
"X": [],
"Y": [],
"A_X": [],
"A_Y": [],
"B_X": [],
"B_Y": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(hierarchy, grouped_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1], # total
[0, 1, 0, 1], # Y
[1, 0, 1, 0], # X
[0, 0, 1, 1], # B
[1, 1, 0, 0], # A
[1, 0, 0, 0], # A_X
[0, 1, 0, 0], # A_Y
[0, 0, 1, 0], # B_X
[0, 0, 0, 1], # B_Y
]
)
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
assert sum_mat_labels == ["total", "Y", "X", "B", "A", "A_X", "A_Y", "B_X", "B_Y"]
def test_sum_mat_visnights_hier(visnights_hier):
hier_df = pandas.DataFrame(
data={
"total": [],
"VIC": [],
"QLD": [],
"SAU": [],
"WAU": [],
"OTH": [],
"NSW": [],
"NSW_Metro": [],
"NSW_NthCo": [],
"NSW_NthIn": [],
"NSW_SthCo": [],
"NSW_SthIn": [],
"OTH_Metro": [],
"OTH_NoMet": [],
"QLD_Cntrl": [],
"QLD_Metro": [],
"QLD_NthCo": [],
"SAU_Coast": [],
"SAU_Inner": [],
"SAU_Metro": [],
"VIC_EstCo": [],
"VIC_Inner": [],
"VIC_Metro": [],
"VIC_WstCo": [],
"WAU_Coast": [],
"WAU_Inner": [],
"WAU_Metro": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(visnights_hier, hier_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], # total
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], # VIC
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], # QLD
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], # SAU
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # OTH
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_Metro
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_NthCo
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_NthIn
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_SthCo
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_SthIn
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # OTH_Metro
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # OTH_NoMet
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU_Coast
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU_Inner
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # SAU_Coast
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], # SAU_Inner
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], # SAU_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], # QLD_Cntrl
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], # QLD_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], # QLD_NthCo
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], # VIC_EstCo
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], # VIC_Inner
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], # VIC_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], # VIC_WstCo
]
)
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
def test_demo_unique_constraint():
# Example https://otexts.com/fpp2/hts.html
# Does not work when you have elements that are named the same, but represent
# different levels in the hierarchy. See expected_sum_mat below for example.
hierarchy = {"total": ["A", "B"], "A": ["AA", "AB", "AC"], "B": ["BA", "BB"]}
hier_df = pandas.DataFrame(
data={
"total": [],
"A": [],
"B": [],
"AA": [],
"AB": [],
"AC": [],
"BA": [],
"BB": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(hierarchy, hier_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1, 1], # total
[0, 1, 0, 1, 1], # B, Incorrectly finds B in AB
[1, 1, 1, 1, 0], # A, Incorrectly finds A in BA
[1, 0, 0, 0, 0], # AA
[0, 1, 0, 0, 0], # AB
[0, 0, 1, 0, 0], # AC
[0, 0, 0, 1, 0], # BA
[0, 0, 0, 0, 1], # BB
]
)
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
def test_1lev():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B", "B"], "lev2": ["X", "Y", "X", "Y"],}
)
levels = get_agg_series(grouped_df, [["lev1"]])
expected_levels = ["A", "B"]
assert sorted(levels) == sorted(expected_levels)
levels = get_agg_series(grouped_df, [["lev2"]])
expected_levels = ["X", "Y"]
assert sorted(levels) == sorted(expected_levels)
def test_2lev():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B", "B"], "lev2": ["X", "Y", "X", "Y"],}
)
levels = get_agg_series(grouped_df, [["lev1", "lev2"]])
expected_levels = ["A_X", "A_Y", "B_X", "B_Y"]
assert sorted(levels) == sorted(expected_levels)
def test_hierarchichal():
hier_df = pandas.DataFrame(
data={"lev1": ["A", "A", "A", "B", "B"], "lev2": ["X", "Y", "Z", "X", "Y"],}
)
levels = get_agg_series(hier_df, [["lev1"], ["lev1", "lev2"]])
expected_levels = ["A", "B", "A_X", "A_Y", "A_Z", "B_X", "B_Y"]
assert sorted(levels) == sorted(expected_levels)
def test_grouped():
hier_df = pandas.DataFrame(
data={"lev1": ["A", "A", "A", "B", "B"], "lev2": ["X", "Y", "Z", "X", "Y"],}
)
hierarchy = [["lev1"], ["lev2"], ["lev1", "lev2"]]
levels = get_agg_series(hier_df, hierarchy)
expected_levels = ["A", "B", "X", "Y", "Z", "A_X", "A_Y", "A_Z", "B_X", "B_Y"]
assert sorted(levels) == sorted(expected_levels)
def test_grouped_create_df():
hier_df = pandas.DataFrame(
data={
"ds": ["2020-01", "2020-02"] * 5,
"lev1": ["A", "A", "A", "A", "A", "A", "B", "B", "B", "B"],
"lev2": ["X", "X", "Y", "Y", "Z", "Z", "X", "X", "Y", "Y"],
"val": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
}
)
level_names = ["lev1", "lev2"]
hierarchy = [["lev1"], ["lev2"]]
gts_df, sum_mat, sum_mat_labels = get_hierarchichal_df(
hier_df,
level_names=level_names,
hierarchy=hierarchy,
date_colname="ds",
val_colname="val",
)
expected_columns = [
"A_X",
"A_Y",
"A_Z",
"B_X",
"B_Y",
"A",
"B",
"X",
"Y",
"Z",
"total",
]
assert sorted(list(gts_df.columns)) == sorted(expected_columns)
def test_parent_child():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B"], "lev2": ["X", "Y", "Z"],}
)
levels = get_agg_series(grouped_df, [["lev1", "lev2"]])
expected_levels = ["A_X", "A_Y", "B_Z"]
assert sorted(levels) == sorted(expected_levels)
def test_create_bl_str_col():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B"], "lev2": ["X", "Y", "Z"],}
)
col = _create_bl_str_col(grouped_df, ["lev1", "lev2"])
assert col == ["A_X", "A_Y", "B_Z"]
| 31.993631
| 86
| 0.429723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,806
| 0.179773
|
e3cbbca95424c00d63673acba3c061a2db999558
| 644
|
py
|
Python
|
tests/settings.py
|
team23/django_t10e
|
f25e8ac6507e05968d2dbf1003ec4cb9f35b627e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/settings.py
|
team23/django_t10e
|
f25e8ac6507e05968d2dbf1003ec4cb9f35b627e
|
[
"BSD-3-Clause"
] | 2
|
2016-03-22T15:31:38.000Z
|
2016-04-05T08:59:39.000Z
|
tests/settings.py
|
team23/django_t10e
|
f25e8ac6507e05968d2dbf1003ec4cb9f35b627e
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import warnings
warnings.simplefilter('always')
test_dir = os.path.dirname(os.path.abspath(__file__))
DATABASES = {
'default': {
'NAME': os.path.join(test_dir, 'db.sqlite'),
'ENGINE': 'django.db.backends.sqlite3',
},
}
USE_I18N = True
USE_L10N = True
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django_t10e',
'tests',
]
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
MIDDLEWARE_CLASSES = ()
TEMPLATE_DIRS = (
os.path.join(test_dir, 'templates'),
)
STATIC_URL = '/static/'
SECRET_KEY = '0'
SITE_ID = 1
| 16.512821
| 62
| 0.669255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 228
| 0.354037
|
e3cc45c059a23522906c2bbff40ce8bfec753ce5
| 3,101
|
py
|
Python
|
medium/380-Insert Delete GetRandom O(1).py
|
Davidxswang/leetcode
|
d554b7f5228f14c646f726ddb91014a612673e06
|
[
"Apache-2.0"
] | 2
|
2020-05-08T02:17:17.000Z
|
2020-05-17T04:55:56.000Z
|
medium/380-Insert Delete GetRandom O(1).py
|
Davidxswang/leetcode
|
d554b7f5228f14c646f726ddb91014a612673e06
|
[
"Apache-2.0"
] | null | null | null |
medium/380-Insert Delete GetRandom O(1).py
|
Davidxswang/leetcode
|
d554b7f5228f14c646f726ddb91014a612673e06
|
[
"Apache-2.0"
] | null | null | null |
"""
https://leetcode.com/problems/insert-delete-getrandom-o1/
Implement the RandomizedSet class:
bool insert(int val) Inserts an item val into the set if not present. Returns true if the item was not present, false otherwise.
bool remove(int val) Removes an item val from the set if present. Returns true if the item was present, false otherwise.
int getRandom() Returns a random element from the current set of elements (it's guaranteed that at least one element exists when this method is called). Each element must have the same probability of being returned.
Follow up: Could you implement the functions of the class such that each function works in average O(1) time?
Example 1:
Input
["RandomizedSet", "insert", "remove", "insert", "getRandom", "remove", "insert", "getRandom"]
[[], [1], [2], [2], [], [1], [2], []]
Output
[null, true, false, true, 2, true, false, 2]
Explanation
RandomizedSet randomizedSet = new RandomizedSet();
randomizedSet.insert(1); // Inserts 1 to the set. Returns true as 1 was inserted successfully.
randomizedSet.remove(2); // Returns false as 2 does not exist in the set.
randomizedSet.insert(2); // Inserts 2 to the set, returns true. Set now contains [1,2].
randomizedSet.getRandom(); // getRandom() should return either 1 or 2 randomly.
randomizedSet.remove(1); // Removes 1 from the set, returns true. Set now contains [2].
randomizedSet.insert(2); // 2 was already in the set, so return false.
randomizedSet.getRandom(); // Since 2 is the only number in the set, getRandom() will always return 2.
Constraints:
-2^31 <= val <= 2^31 - 1
At most 10^5 calls will be made to insert, remove, and getRandom.
There will be at least one element in the data structure when getRandom is called.
"""
# time complexity: O(1), space complexity: O(n)
class RandomizedSet:
def __init__(self):
"""
Initialize your data structure here.
"""
self.nums = []
self.index = dict()
def insert(self, val: int) -> bool:
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
"""
if val in self.index:
return False
self.nums.append(val)
self.index[val] = len(self.nums) - 1
return True
def remove(self, val: int) -> bool:
"""
Removes a value from the set. Returns true if the set contained the specified element.
"""
if val in self.index:
val_index = self.index[val]
last_num = self.nums[-1]
self.nums[val_index] = last_num
self.index[last_num] = val_index
self.nums.pop()
self.index.pop(val)
return True
return False
def getRandom(self) -> int:
"""
Get a random element from the set.
"""
import random
return self.nums[random.randint(0, len(self.nums)-1)]
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
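# Illustrative sketch: replaying Example 1 from the problem statement against
# the class above (expected outputs noted in the comments).
if __name__ == "__main__":
    rs = RandomizedSet()
    print(rs.insert(1))    # True
    print(rs.remove(2))    # False
    print(rs.insert(2))    # True
    print(rs.getRandom())  # 1 or 2
    print(rs.remove(1))    # True
    print(rs.insert(2))    # False
    print(rs.getRandom())  # 2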
| 35.643678
| 215
| 0.660755
| 1,141
| 0.367946
| 0
| 0
| 0
| 0
| 0
| 0
| 2,299
| 0.741374
|
e3cd17e1ce16cc51bbf2c4408a071cf80ad1dcea
| 851
|
py
|
Python
|
src/main/generic_cpu/test3/generic_cpu.py
|
cicerone/kosim
|
a9f718a19019c11fd6e6c6fc0164d4d214bbb5e2
|
[
"BSL-1.0"
] | 2
|
2019-11-15T19:15:36.000Z
|
2022-03-14T12:53:18.000Z
|
src/main/generic_cpu/test3/generic_cpu.py
|
cicerone/kosim
|
a9f718a19019c11fd6e6c6fc0164d4d214bbb5e2
|
[
"BSL-1.0"
] | null | null | null |
src/main/generic_cpu/test3/generic_cpu.py
|
cicerone/kosim
|
a9f718a19019c11fd6e6c6fc0164d4d214bbb5e2
|
[
"BSL-1.0"
] | null | null | null |
#!/usr/bin/env python
#==============================================================================================
# Copyright (c) 2009 Kotys LLC. Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# Author: Cicerone Mihalache
# Support: kosim@kotys.biz
#==============================================================================================
import sys
import libkosim_generic_cpu_test3 as kosim_generic_cpu
#print len(sys.argv)
#for arg in sys.argv:
# print "arg(%s)\n" % (arg)
opt_builder = kosim_generic_cpu.OptionsBuilder()
for arg in sys.argv:
opt_builder.SetArgument(arg)
opt_builder.BuildArgv()
opt_builder.InitProgramOptions()
kosim_generic_cpu.run_sim()
print "--- Test DONE ---"
| 31.518519
| 95
| 0.551116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 572
| 0.67215
|
e3cfcef261416f1b7213e8dce2b540fc137ab1f5
| 7,491
|
py
|
Python
|
smartools/patches/sheets.py
|
davocarli/smartools
|
57e6233efe8da6b34557f99e8d7c24eef77cfd9d
|
[
"MIT"
] | 2
|
2021-01-01T17:34:02.000Z
|
2021-01-07T13:23:00.000Z
|
smartools/patches/sheets.py
|
davocarli/smartools
|
57e6233efe8da6b34557f99e8d7c24eef77cfd9d
|
[
"MIT"
] | null | null | null |
smartools/patches/sheets.py
|
davocarli/smartools
|
57e6233efe8da6b34557f99e8d7c24eef77cfd9d
|
[
"MIT"
] | null | null | null |
import time
import smartsheet
# from smartsheet.smartsheet import fresh_operation
from .__smartools import SmartoolsObject, access_levels, RequirementError
from .typed_list import SmartoolsTypedList
smart = smartsheet.Smartsheet("INIT")
smart.Sheets
class SmartoolsSheets(smartsheet.sheets.Sheets):
def smartools(self):
return 'smartools methods are available!'
# Gets the sheet and sets index references for columns, rows, and cells
def get_sheet(self, *args, **kwargs):
sheet = super().get_sheet(*args, **kwargs)
if 'exclude' in kwargs and 'dicts' in kwargs['exclude']:
return sheet
try:
coldict = {}
primary_index = None
for column in sheet.columns:
coldict[column.title] = column.index
coldict[column.id] = column.index
if column.primary:
coldict[''] = column.index
primary_index = column.index
sheet.columns.index_reference = coldict
rowdict = {}
for i in range(len(sheet.rows)):
sheet.rows[i].cells.index_reference = coldict
primary_value = str(sheet.rows[i].cells[primary_index].value or '')
if primary_value not in rowdict:
rowdict[primary_value] = i
sheet.rows.index_reference = rowdict
sheet.primary_index = primary_index
except:
pass
return sheet
# Adds rows to a sheet. Allows you to pass a list of more than 500 rows, and automatically handles timeout errors using exponentially smaller requests
def bulk_add_rows(self,
sheet_id, # The ID of the sheet the rows should be added to
rows, # The list of rows that should be added to the sheet
n=500, # The number of rows per request to begin with. Will usually be 500, but if working with a large sheet where timeouts are expected you can start smaller
retries=5, # The number of consecutive errors adding rows before the operation is cancelled
sleep=60, # The amount of time to sleep in case of rate limiting error
**kwargs):
result = {
'responses': [],
'rows': [],
'data': [],
'status': '',
'error_message': None
}
current_retries = retries
if n > 500:
n = 500
if not isinstance(rows, list):
rows = [rows]
while len(rows) > 0:
response = self.add_rows(sheet_id, rows[:n], **kwargs)
if hasattr(response.result, 'error_code'):
current_retries -= 1
if response.result.error_code == 4002:
n = n//2
elif response.result.error_code in [4003, 4004]:
time.sleep(sleep)
else:
if current_retries <= 0:
result['responses'].append(response)
result['status'] = 'ERROR'
result['error_message'] = 'See last response for detailed error.'
result['last_response'] = response
return SmartoolsObject(result)
else:
result['data'].extend(response.data)
rows = rows[n:]
current_retries = retries
result['responses'].append(response)
result['rows'].extend(response.result)
result['last_response'] = result['responses'][-1]
result['status'] = 'SUCCESS'
return SmartoolsObject(result)
# Updates rows on a sheet. Allows you to pass a list of more than 500 rows, and automatically handles timeout errors using exponentially smaller requests
def bulk_update_rows(self,
sheet_id, # The ID of the sheet whose rows should be updated
rows, # The list of rows that should be updated
n=500, # The number of rows per request to begin with. Will usually be 500, but if working with a large sheet where timeouts are expected you can start smaller
retries=5, # The number of consecutive errors adding rows before the operation is cancelled
sleep=60, # The amount of time to sleep in case of rate limiting error
**kwargs):
result = {
'responses': [],
'rows': [],
'data': [],
'status': '',
'error_message': None
}
current_retries = retries
if n > 500:
n = 500
if not isinstance(rows, list):
rows = [rows]
while len(rows) > 0:
response = self.update_rows(sheet_id, rows[:n], **kwargs)
if hasattr(response.result, 'error_code'):
current_retries -= 1
if response.result.error_code == 4002:
n = n//2
elif response.result.error_code in [4003, 4004]:
time.sleep(sleep)
else:
if current_retries <= 0:
result['responses'].append(response)
result['status'] = 'ERROR'
result['error_message'] = 'See last response for detailed error.'
result['last_response'] = response
return SmartoolsObject(result)
else:
result['data'].extend(response.data)
rows = rows[n:]
current_retries = retries
result['responses'].append(response)
result['rows'].extend(response.result)
result['last_response'] = result['responses'][-1]
result['status'] = 'SUCCESS'
return SmartoolsObject(result)
# Takes a sheet ID and minimum permission level as arguments, then returns an object including a confirmation of whether the permission level is met
def check_sheet_permissions(self,
sheet_id, # The ID of the sheet to check for permission requirements
permission_level=None # The minimum permission level required. Can be a number from 1-5, or a String. If None, method will just return the sheet permission level
):
try:
sheet_id = int(sheet_id)
except:
return SmartoolsObject({'status': 'ERROR', 'access_met': False, 'Reason': 'Sheet ID is invalid'})
sheet = self.get_sheet(sheet_id, column_ids=[0], row_numbers=[0], level=1, exclude='dicts')
if hasattr(sheet, 'result') and hasattr(sheet.result, 'error_code'):
return SmartoolsObject({'status': 'ERROR', 'access_met': False, 'sheet': sheet})
if isinstance(permission_level, str):
permission_level = access_levels[permission_level]
if permission_level is None:
return SmartoolsObject({'status': 'ERROR', 'access_met': False, 'access_level': sheet.access_level})
else:
permission_met = permission_level <= access_levels[str(sheet.access_level)]
return SmartoolsObject({'status': 'SUCCESS', 'access_met': permission_met, 'access_level': sheet.access_level, 'sheet_response': sheet})
# Retrieves a sheet then returns a DataFrame of the sheet's data
def get_sheet_as_pandas_dataframe(self,
sheet, # The ID of the sheet to be returned OR the existing Sheet object to be processed
label_column=None, # The column to be used for row labels of the DataFrame.
):
try:
import pandas as pd
except ImportError:
raise RequirementError({'message': 'Import Error: This method requires the pandas module', 'recommended_action': 'Install pandas by using "pip install pandas"'})
if isinstance(sheet, int):
			sheet = self.get_sheet(sheet)
elif not isinstance(sheet, smartsheet.models.Sheet):
raise Exception('sheet must be either an int or a sheet object.')
pd_row_data = []
pd_row_labels = []
pd_columns = []
# Prep df columns
for column in sheet.columns:
if (label_column is None and column.primary == True):
label_column = column.id
elif column.id == label_column or column.title == label_column:
label_column = column.id
else:
pd_columns.append(column.title)
# Prep row data
for row in sheet.rows:
row_list = []
for cell in row.cells:
if cell.column_id == label_column:
pd_row_labels.append(cell.value)
else:
row_list.append(cell.value)
pd_row_data.append(row_list)
return pd.DataFrame(pd_row_data, columns=pd_columns, index=pd_row_labels)
# Perform Monkey Patch
smartsheet.sheets.Sheets = SmartoolsSheets
smartsheet.models.sheet.TypedList = SmartoolsTypedList
smartsheet.models.row.TypedList = SmartoolsTypedList
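# Illustrative sketch: one way the patched client and bulk_add_rows might be
# used; the token, sheet id, column id and values are placeholders, and the
# cell/row construction follows the standard smartsheet SDK pattern.
#
#     client = smartsheet.Smartsheet('YOUR_ACCESS_TOKEN')
#     new_rows = []
#     for value in range(2000):
#         cell = smartsheet.models.Cell()
#         cell.column_id = 123456789
#         cell.value = value
#         row = smartsheet.models.Row()
#         row.to_top = True
#         row.cells.append(cell)
#         new_rows.append(row)
#     result = client.Sheets.bulk_add_rows(987654321, new_rows, n=500)
#     print(result.status, len(result.rows))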
| 34.84186
| 165
| 0.707916
| 7,075
| 0.944467
| 0
| 0
| 0
| 0
| 0
| 0
| 2,707
| 0.361367
|
e3cfd1eba8567bcfd38dbc01b741198461b5c024
| 3,119
|
py
|
Python
|
modules/persons/application/controllers/v1/phone/create_phone_controller.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 6
|
2020-08-09T23:41:08.000Z
|
2021-03-16T22:05:40.000Z
|
modules/persons/application/controllers/v1/phone/create_phone_controller.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 1
|
2020-10-02T02:59:38.000Z
|
2020-10-02T02:59:38.000Z
|
modules/persons/application/controllers/v1/phone/create_phone_controller.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 2
|
2021-03-16T22:05:43.000Z
|
2021-04-30T06:35:25.000Z
|
# -*- coding: utf-8 -*-
# Infra
from modules.shared.infrastructure.log import LoggerDecorator, PyLoggerService
# Application
from modules.persons.application.create import PhoneCreator
from modules.persons.application.create.command import CreatePhoneCommand
# Domain
from modules.shared.domain.http import status as http_status
from modules.shared.domain.requests import Request
from modules.shared.domain.responses import Response
from modules.shared.domain.serializers import SerializerManager
from modules.shared.domain.repository import UnitOfWork
from modules.shared.domain.bus.event import EventBus
from modules.persons.domain.repository import PhoneRepository
@LoggerDecorator(logger=PyLoggerService(file_path=__file__))
class CreatePhoneController:
"""
CreatePhoneController
"""
def __init__(self,
request: Request,
response: Response,
address_serializer_manager: SerializerManager,
address_repository: PhoneRepository,
unit_of_work: UnitOfWork,
event_bus: EventBus):
if not isinstance(address_repository, PhoneRepository):
raise ValueError(f"Parameter address_repository: {address_repository} "
f"is not instance of PhoneRepository")
if not isinstance(unit_of_work, UnitOfWork):
raise ValueError(f"Paramter unit_of_work:{unit_of_work} "
f"is not instance of UnitOfWork")
if not isinstance(event_bus, EventBus):
raise ValueError(f"Parameter unit_of_work:{event_bus} "
f"is not instance of MessageBus")
self.__request = request
self.__response = response
self.__serializer_manager = address_serializer_manager
self.__repository = address_repository
self.__unit_of_work = unit_of_work
self.__event_bus = event_bus
def __call__(self) -> Response:
try:
phone_data = self.__request.get_body()
create_phone_command = CreatePhoneCommand(
id=phone_data.get('id'),
number=phone_data.get('number'),
extension=phone_data.get('extension'))
phone_creator = PhoneCreator(
self.__repository,
self.__unit_of_work,
self.__event_bus)
phone_creator(create_phone_command)
response_data = dict(
success=True,
message='All ok',
)
return self.__response(response_data,
status=http_status.HTTP_201_CREATED)
except Exception as err:
self.log.exception(f"Error in {__class__}::post, err:{err}")
response_data = dict(
success=False,
message=f"{err}"
)
if hasattr(err, 'errors'):
response_data.update(errors=err.errors)
return self.__response(response_data,
status=http_status.HTTP_400_BAD_REQUEST)
| 36.267442
| 83
| 0.631613
| 2,385
| 0.764668
| 0
| 0
| 2,446
| 0.784226
| 0
| 0
| 408
| 0.130811
|
e3cfd93cdd0841ab2b66bf374189846ddaaf186d
| 5,651
|
py
|
Python
|
tests/test_dispatch_sparql_query_model_catalog.py
|
KnowledgeCaptureAndDiscovery/OBA_sparql
|
9c1c28902ab3d6561b3b6a0f8a7d284688d86326
|
[
"Apache-2.0"
] | 5
|
2020-05-12T22:00:16.000Z
|
2021-11-08T22:16:11.000Z
|
tests/test_dispatch_sparql_query_model_catalog.py
|
KnowledgeCaptureAndDiscovery/OBA_sparql
|
9c1c28902ab3d6561b3b6a0f8a7d284688d86326
|
[
"Apache-2.0"
] | 24
|
2019-09-26T23:20:11.000Z
|
2022-01-14T14:19:14.000Z
|
tests/test_dispatch_sparql_query_model_catalog.py
|
KnowledgeCaptureAndDiscovery/OBA_sparql
|
9c1c28902ab3d6561b3b6a0f8a7d284688d86326
|
[
"Apache-2.0"
] | 1
|
2021-12-01T14:56:09.000Z
|
2021-12-01T14:56:09.000Z
|
import json
import logging
import unittest
from typing import Dict
from SPARQLWrapper import JSONLD
from obasparql.query_manager import QueryManager, QUERIES_TYPES, QUERY_TYPE_GET_ONE_USER
from obasparql.utils import generate_uri
from tests.settings import *
logger = logging.getLogger('testing')
graph_user = generate_uri(model_catalog_graph_base, "mint@isi.edu")
class TestQueryManager(unittest.TestCase):
def setUp(self):
self.query_manager = QueryManager(queries_dir=model_catalog_queries,
context_dir=model_catalog_context,
endpoint=model_catalog_endpoint,
named_graph_base=model_catalog_graph_base,
uri_prefix=model_catalog_prefix)
def test_dispatch_sparqlquery(self):
endpoint = "http://dbpedia.org/sparql"
query_template = '''
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
CONSTRUCT {
<http://dbpedia.org/resource/Indemnity_Act_1717> ?predicate ?prop .
?prop a ?type .
?prop rdfs:label ?label
}
WHERE {
<http://dbpedia.org/resource/Indemnity_Act_1717> ?predicate ?prop
OPTIONAL {
?prop a ?type
OPTIONAL {
?prop rdfs:label ?label
}
}
}
'''
results = self.query_manager.dispatch_sparql_query(raw_sparql_query=query_template,
request_args={},
return_format=JSONLD)
self.assertIsNotNone(json.loads(results))
def test_dispatch_sparqlquery_model_catalog(self):
"""
        Testing to get the resource United_States
        United_States is a Region
Returns:
"""
owl_class_name = "Region"
owl_resource_iri = "https://w3id.org/okn/i/mint/United_States"
query_directory = owl_class_name
query_type = QUERY_TYPE_GET_ONE_USER
request_args: Dict[str, str] = {
"resource": owl_resource_iri,
"g": graph_user
}
query_template = getattr(self.query_manager, query_directory)[query_type]
results = self.query_manager.dispatch_sparql_query(raw_sparql_query=query_template,
request_args=request_args,
return_format=JSONLD)
self.assertIsNotNone(json.loads(results))
def test_framed_get_one(self):
owl_class_uri = "https://w3id.org/okn/o/sdm#Region"
owl_resource_uri = "https://w3id.org/okn/i/mint/Travis"
response = '''{
"@graph" : [ {
"@id" : "https://w3id.org/okn/i/mint/Texas",
"@type" : "https://w3id.org/okn/o/sdm#Region",
"label" : "Texas (USA)"
}, {
"@id" : "https://w3id.org/okn/i/mint/Travis",
"@type" : "https://w3id.org/okn/o/sdm#Region",
"label" : "Travis",
"description" : "Travis (Texas)",
"partOf" : "https://w3id.org/okn/i/mint/Texas"
} ],
"@context" : {
"label" : {
"@id" : "http://www.w3.org/2000/01/rdf-schema#label"
},
"partOf" : {
"@id" : "https://w3id.org/okn/o/sdm#partOf",
"@type" : "@id"
},
"description" : {
"@id" : "https://w3id.org/okn/o/sd#description"
},
"sd" : "https://w3id.org/okn/o/sd#",
"rdfs" : "http://www.w3.org/2000/01/rdf-schema#"
}
}'''
framed = self.query_manager.frame_results(response, owl_class_uri, owl_resource_uri)
self.assertEqual(owl_resource_uri, framed[0]["id"])
def test_framed_get_one_reflexive(self):
owl_class_uri = "https://w3id.org/okn/o/sdm#Region"
owl_resource_uri = "https://w3id.org/okn/i/mint/United_States"
response = '''{
"@graph" : [ {
"@id" : "https://w3id.org/okn/i/mint/Texas",
"@type" : "https://w3id.org/okn/o/sdm#Region",
"label" : "Texas (USA)",
"description" : "Texas is the second largest state in the United States by area (after Alaska) and population (after California). Located in the South Central region, Texas shares borders with the states of Louisiana to the east, Arkansas to the northeast, Oklahoma to the north, New Mexico to the west, and the Mexican states of Chihuahua, Coahuila, Nuevo Leon, and Tamaulipas to the southwest, and has a coastline with the Gulf of Mexico to the southeast.",
"geo" : "https://w3id.org/okn/i/mint/Texas_Shape",
"partOf" : "https://w3id.org/okn/i/mint/United_States"
}, {
"@id" : "https://w3id.org/okn/i/mint/Texas_Shape",
"@type" : "https://w3id.org/okn/o/sdm#GeoShape",
"label" : "Bounding box for Texas region"
}, {
"@id" : "https://w3id.org/okn/i/mint/United_States",
"@type" : "https://w3id.org/okn/o/sdm#Region",
"label" : "United States of America"
}, {
"@id" : "https://w3id.org/okn/o/sdm#Region",
"@type" : "http://www.w3.org/2002/07/owl#Class"
} ],
"@context" : {
"partOf" : {
"@id" : "https://w3id.org/okn/o/sdm#partOf",
"@type" : "@id"
},
"geo" : {
"@id" : "https://w3id.org/okn/o/sdm#geo",
"@type" : "@id"
},
"description" : {
"@id" : "https://w3id.org/okn/o/sd#description"
},
"label" : {
"@id" : "http://www.w3.org/2000/01/rdf-schema#label"
},
"rdfs" : "http://www.w3.org/2000/01/rdf-schema#"
}
}
'''
framed = self.query_manager.frame_results(response, owl_class_uri, owl_resource_uri)
self.assertEqual(owl_resource_uri, framed[0]["id"])
if __name__ == '__main__':
unittest.main()
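For illustration only, a stdlib sketch of the selection idea these tests exercise through frame_results: pick the node in a JSON-LD '@graph' whose identifier matches the requested resource URI. The helper name and behaviour are assumptions, not the OBA_sparql implementation (which frames against the class context).
import json

def pick_resource(jsonld_text, resource_uri):
    # Return the '@graph' node whose '@id' matches resource_uri (None if absent).
    doc = json.loads(jsonld_text)
    return next((node for node in doc.get("@graph", []) if node.get("@id") == resource_uri), None)

sample = '{"@graph": [{"@id": "https://w3id.org/okn/i/mint/Travis", "label": "Travis"}]}'
print(pick_resource(sample, "https://w3id.org/okn/i/mint/Travis"))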
| 36.694805
| 463
| 0.585914
| 5,231
| 0.925677
| 0
| 0
| 0
| 0
| 0
| 0
| 3,190
| 0.564502
|
e3d014948574aa9afc4263cc074b784b2bb1665c
| 1,538
|
py
|
Python
|
cogs/ObjectCache.py
|
Deivedux/Shiramine
|
bbaf651a4ccd5f65c8aef1eb09ba8899bb2958db
|
[
"MIT"
] | 6
|
2019-03-20T15:15:31.000Z
|
2022-02-23T20:11:24.000Z
|
cogs/ObjectCache.py
|
Deivedux/Shiramine
|
bbaf651a4ccd5f65c8aef1eb09ba8899bb2958db
|
[
"MIT"
] | 1
|
2021-11-20T00:25:48.000Z
|
2021-11-20T00:25:48.000Z
|
cogs/ObjectCache.py
|
Deivedux/Shiramine
|
bbaf651a4ccd5f65c8aef1eb09ba8899bb2958db
|
[
"MIT"
] | 8
|
2019-11-22T05:56:40.000Z
|
2021-12-04T17:38:38.000Z
|
import time
import json
import sqlite3
import os
conn = sqlite3.connect('configs/Database.db')
c = conn.cursor()
start_time = time.time()
with open('configs/config.json') as json_data:
config = json.load(json_data)
server_config_raw = c.execute("SELECT * FROM ServerConfig").fetchall()
server_config = {}
def server_cache(db_response):
server_config[int(db_response[0])] = {}
if db_response[1]:
server_config[int(db_response[0])]['prefix'] = db_response[1]
server_config[int(db_response[0])]['language'] = db_response[2]
if db_response[3]:
server_config[int(db_response[0])]['img_filter'] = int(db_response[3])
server_config[int(db_response[0])]['member_persistence'] = int(db_response[12])
if db_response[13]:
server_config[int(db_response[0])]['server_log'] = int(db_response[13])
for i in server_config_raw:
server_cache(i)
del server_config_raw
db_response = c.execute("SELECT * FROM URLFilters").fetchall()
url_filters = dict()
def url_filter_cache(db_response):
try:
url_filters[db_response[0]].append(db_response[1])
except KeyError:
url_filters[db_response[0]] = [db_response[1]]
for i in db_response:
url_filter_cache(i)
response_string = {}
for i in os.listdir('./languages'):
if i.endswith('.json'):
with open(os.path.join('./languages', i)) as file:
response = json.load(file)
            # splitext removes the '.json' suffix; str.strip('.json') would strip characters, not a suffix
            response_string[os.path.splitext(i)[0]] = response
def get_lang(guild, response):
try:
return response_string[server_config[guild.id]['language']][response]
except:
return response_string['english'][response]
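A short standalone check (stdlib only) of why the language key above is derived with os.path.splitext rather than str.strip('.json'); the file names are illustrative.
import os.path

# str.strip('.json') removes a character set from both ends, so 'json.json' would become ''.
names = ['english.json', 'json.json', 'espanol.json']
print([os.path.splitext(n)[0] for n in names])   # ['english', 'json', 'espanol']
print('json.json'.strip('.json'))                # '' - the pitfall splitext avoids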
| 27.464286
| 80
| 0.737321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 217
| 0.141092
|
e3d072cf82df30c9642a147eb2b4e745f7865fe4
| 643
|
py
|
Python
|
Python/valid-palindrome-ii.py
|
coolryze/LeetCode
|
03876232521a20d32f8fa4e7d6d19cf208739a79
|
[
"MIT"
] | 2
|
2018-07-18T01:33:07.000Z
|
2018-11-16T03:17:03.000Z
|
Python/valid-palindrome-ii.py
|
coolryze/LeetCode
|
03876232521a20d32f8fa4e7d6d19cf208739a79
|
[
"MIT"
] | null | null | null |
Python/valid-palindrome-ii.py
|
coolryze/LeetCode
|
03876232521a20d32f8fa4e7d6d19cf208739a79
|
[
"MIT"
] | null | null | null |
class Solution:
def validPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
left = 0
right = len(s)-1
while left < right:
if s[left] != s[right]:
return self.isPalindrome(s, left, right-1) or self.isPalindrome(s, left+1, right)
else:
left += 1
right -= 1
return True
def isPalindrome(self, s, left, right):
while left < right:
if s[left] != s[right]:
return False
else:
left += 1
right -= 1
return True
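A quick usage check with hypothetical inputs: at most one deletion is allowed, so "abca" passes (drop 'b' or 'c') while "abc" does not.
sol = Solution()
print(sol.validPalindrome("abca"))  # True
print(sol.validPalindrome("abc"))   # False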
| 22.964286
| 97
| 0.426128
| 642
| 0.998445
| 0
| 0
| 0
| 0
| 0
| 0
| 57
| 0.088647
|
e3d079b0ac366654644d7bfe8c3c51abdf0bef18
| 308
|
py
|
Python
|
Afvaldienst/__init__.py
|
xirixiz/python-afvalwijzer-afvalstoffendienst
|
ef76b07033848a6f7092e941c6c4a3ec214f2842
|
[
"MIT"
] | 1
|
2019-10-28T12:26:14.000Z
|
2019-10-28T12:26:14.000Z
|
Afvaldienst/__init__.py
|
xirixiz/afvaldienst
|
ef76b07033848a6f7092e941c6c4a3ec214f2842
|
[
"MIT"
] | 3
|
2020-09-11T08:38:50.000Z
|
2020-09-23T07:08:44.000Z
|
Afvaldienst/__init__.py
|
xirixiz/python-afvalwijzer-afvalstoffendienst
|
ef76b07033848a6f7092e941c6c4a3ec214f2842
|
[
"MIT"
] | null | null | null |
__author__ = 'Bram van Dartel - xirixiz'
__author_email__ = 'spam@rootrulez.com'
__license__ = 'MIT'
__maintainer_email__ = 'spam@rootrulez.com'
__url__ = 'https://github.com/xirixiz/afvaldienst'
__version__ = '1.1.4'
from .Afvaldienst import Afvaldienst
from .AfvaldienstScraper import AfvaldienstScraper
| 30.8
| 51
| 0.788961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 119
| 0.386364
|
e3d0ea8dfddd487de8fd53ee32a9b4f750e83af2
| 4,749
|
py
|
Python
|
src/python_lib_for_me/date.py
|
silverag-corgi/python-lib-for-me
|
ed30c7b879396ca6af53c762d7c919b0ea44bea7
|
[
"MIT"
] | null | null | null |
src/python_lib_for_me/date.py
|
silverag-corgi/python-lib-for-me
|
ed30c7b879396ca6af53c762d7c919b0ea44bea7
|
[
"MIT"
] | 1
|
2022-02-06T08:21:56.000Z
|
2022-02-06T15:48:26.000Z
|
src/python_lib_for_me/date.py
|
silverag-corgi/python-lib-for-me
|
ed30c7b879396ca6af53c762d7c919b0ea44bea7
|
[
"MIT"
] | null | null | null |
'''
日付モジュール
'''
import calendar
from datetime import date, datetime, timedelta
from typing import Iterator
from zoneinfo import ZoneInfo
from dateutil.relativedelta import relativedelta
def get_first_date_of_this_month(base_date: date) -> date:
'''
今月初日取得
Args:
base_date (date): 基底日付
Returns:
date: 基底日付から算出した今月初日
'''
base_date_by_month: date = base_date + relativedelta(months=0)
base_date_by_month_day: date = base_date_by_month.replace(
day=1
)
return base_date_by_month_day
def get_last_date_of_this_month(base_date: date) -> date:
'''
今月末日取得
Args:
base_date (date): 基底日付
Returns:
date: 基底日付から算出した今月末日
'''
base_date_by_month: date = base_date + relativedelta(months=0)
base_date_by_month_day: date = base_date_by_month.replace(
day=calendar.monthrange(base_date_by_month.year, base_date_by_month.month)[1]
)
return base_date_by_month_day
def get_first_date_of_next_month(base_date: date) -> date:
'''
来月初日取得
Args:
base_date (date): 基底日付
Returns:
date: 基底日付から算出した来月初日
'''
base_date_by_month: date = base_date + relativedelta(months=1)
base_date_by_month_day: date = base_date_by_month.replace(
day=1
)
return base_date_by_month_day
def get_last_date_of_next_month(base_date: date) -> date:
'''
来月末日取得
Args:
base_date (date): 基底日付
Returns:
date: 基底日付から算出した来月末日
'''
base_date_by_month: date = base_date + relativedelta(months=1)
base_date_by_month_day: date = base_date_by_month.replace(
day=calendar.monthrange(base_date_by_month.year, base_date_by_month.month)[1]
)
return base_date_by_month_day
def get_first_date_of_last_month(base_date: date) -> date:
'''
先月初日取得
Args:
base_date (date): 基底日付
Returns:
date: 基底日付から算出した先月初日
'''
base_date_by_month: date = base_date + relativedelta(months=-1)
base_date_by_month_day: date = base_date_by_month.replace(
day=1
)
return base_date_by_month_day
def get_last_date_of_last_month(base_date: date) -> date:
'''
先月末日取得
Args:
base_date (date): 基底日付
Returns:
date: 基底日付から算出した先月末日
'''
base_date_by_month: date = base_date + relativedelta(months=-1)
base_date_by_month_day: date = base_date_by_month.replace(
day=calendar.monthrange(base_date_by_month.year, base_date_by_month.month)[1]
)
return base_date_by_month_day
def gen_date_range(start_date: date, end_date: date) -> Iterator[date]:
'''
日付範囲生成
Args:
start_date (date) : 開始日付
end_date (date) : 終了日付
Yields:
Iterator[date]: 日付範囲
'''
for count in range((end_date - start_date).days + 1):
yield start_date + timedelta(days=count)
def convert_timestamp_to_jst(
src_timestamp: str,
src_timestamp_format: str = '%Y-%m-%d %H:%M:%S%z',
jst_timestamp_format: str = '%Y-%m-%d %H:%M:%S'
) -> str:
'''
タイムスタンプJST変換
Args:
src_timestamp (str) : 変換元タイムスタンプ
src_timestamp_format (str, optional) : 変換元タイムスタンプのフォーマット
jst_timestamp_format (str, optional) : 変換先タイムスタンプ(JST)のフォーマット
Returns:
str: タイムスタンプ(JST)
'''
src_datetime: datetime = datetime.strptime(src_timestamp, src_timestamp_format)
jst_datetime: datetime = src_datetime.astimezone(ZoneInfo('Japan'))
jst_timestamp: str = datetime.strftime(jst_datetime, jst_timestamp_format)
return jst_timestamp
def convert_timestamp_to_utc(
src_timestamp: str,
src_timestamp_format: str = '%Y-%m-%d %H:%M:%S%z',
utc_timestamp_format: str = '%Y-%m-%d %H:%M:%S'
) -> str:
'''
タイムスタンプUTC変換
Args:
src_timestamp (str) : 変換元タイムスタンプ
src_timestamp_format (str, optional) : 変換元タイムスタンプのフォーマット
utc_timestamp_format (str, optional) : 変換先タイムスタンプ(UTC)のフォーマット
Returns:
str: タイムスタンプ(UTC)
'''
src_datetime: datetime = datetime.strptime(src_timestamp, src_timestamp_format)
utc_datetime: datetime = src_datetime.astimezone(ZoneInfo('UTC'))
utc_timestamp: str = datetime.strftime(utc_datetime, utc_timestamp_format)
return utc_timestamp
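A small usage sketch of the helpers above; it uses only the functions defined in this module, and the commented import path mirrors this repository layout (an assumption about how the package is installed).
from datetime import date

# from python_lib_for_me.date import get_first_date_of_this_month, get_last_date_of_this_month, gen_date_range

base = date(2022, 2, 15)
print(get_first_date_of_this_month(base))  # 2022-02-01
print(get_last_date_of_this_month(base))   # 2022-02-28
print(list(gen_date_range(date(2022, 2, 27), date(2022, 3, 1))))
# [datetime.date(2022, 2, 27), datetime.date(2022, 2, 28), datetime.date(2022, 3, 1)]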
| 24.863874
| 90
| 0.606443
| 0
| 0
| 390
| 0.073212
| 0
| 0
| 0
| 0
| 2,185
| 0.410175
|
e3d1710232166bf85532195c15df881b2381f79f
| 267
|
py
|
Python
|
tpi/main.py
|
karajan1001/tpi
|
c7259a8fea023797058deaf487700645df5fe210
|
[
"Apache-2.0"
] | 5
|
2021-09-04T05:02:59.000Z
|
2021-09-30T18:23:42.000Z
|
tpi/main.py
|
karajan1001/tpi
|
c7259a8fea023797058deaf487700645df5fe210
|
[
"Apache-2.0"
] | 14
|
2021-09-07T15:17:27.000Z
|
2021-10-08T01:09:41.000Z
|
tpi/main.py
|
karajan1001/tpi
|
c7259a8fea023797058deaf487700645df5fe210
|
[
"Apache-2.0"
] | 6
|
2021-09-06T08:52:04.000Z
|
2022-02-07T21:43:48.000Z
|
import argparse
import logging
log = logging.getLogger(__name__)
def get_main_parser():
parser = argparse.ArgumentParser(prog="tpi")
return parser
def main(argv=None):
parser = get_main_parser()
args = parser.parse_args(argv)
log.debug(args)
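An illustrative-only exercise of the parser built above; main() remains the real entry point.
parser = get_main_parser()
print(parser.parse_args([]))  # Namespace()
parser.print_usage()          # prints: usage: tpi [-h]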
| 16.6875
| 48
| 0.715356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0.018727
|
e3d176bb8a4ef4588c81f92f7a9d84251d18fd27
| 2,948
|
py
|
Python
|
catkin_ws/src/easter_egg_hunt/scripts/waypoint_states.py
|
pdscraml/bunny-hunter
|
7d6951f5cbcc46ec31c8b17dc66a6297cc4d7536
|
[
"Apache-2.0"
] | null | null | null |
catkin_ws/src/easter_egg_hunt/scripts/waypoint_states.py
|
pdscraml/bunny-hunter
|
7d6951f5cbcc46ec31c8b17dc66a6297cc4d7536
|
[
"Apache-2.0"
] | null | null | null |
catkin_ws/src/easter_egg_hunt/scripts/waypoint_states.py
|
pdscraml/bunny-hunter
|
7d6951f5cbcc46ec31c8b17dc66a6297cc4d7536
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Intro to Robotics - EE5900 - Spring 2017
# Final Project
# Philip (Team Lead)
# Ian
# Akhil
#
# Revision: v1.2
# imports
import rospy
import smach
import smach_ros
import time
import actionlib
from easter_egg_hunt.srv import EnableDiscovery
from move_base_msgs.msg import MoveBaseGoal, MoveBaseAction
from ar_track_alvar_msgs.msg._AlvarMarkers import AlvarMarkers
from easter_egg_hunt.msg import DiscoveredWaypoints
class EnableWaypointDiscovery(smach.State):
def __init__(self):
super(EnableWaypointDiscovery, self).__init__(outcomes=['WAYPOINTS_ENABLED'])
self.enabler = rospy.ServiceProxy('WaypointManager/enable_discovery', EnableDiscovery)
def execute(self, userdata):
was_enabled = self.enabler(True)
return 'WAYPOINTS_ENABLED'
class DisableWaypointDiscovery(smach.State):
def __init__(self):
super(DisableWaypointDiscovery, self).__init__(outcomes=['WAYPOINTS_DISABLED'])
self.enabler = rospy.ServiceProxy('WaypointManager/enable_discovery', EnableDiscovery)
def execute(self, userdata):
was_enabled = self.enabler(False)
return 'WAYPOINTS_DISABLED'
class WaypointSelect(smach.State):
def __init__(self):
super(WaypointSelect, self).__init__(outcomes=['WAYPOINT_SELECTED', 'WAYPOINT_UNAVAILABLE'], output_keys=["marker_dest", "marker_ID"])
def execute(self, userdata):
selected_marker = None
while not selected_marker:
try:
selected_marker = rospy.wait_for_message('/ar_pose_marker', AlvarMarkers, timeout=0.2).markers
except (rospy.ROSException, rospy.ROSInterruptException) as e:
rospy.logwarn(e)
continue
try:
waypoints = rospy.wait_for_message('WaypointManager/waypoints', DiscoveredWaypoints, timeout=3)
waypoints = {x.ID:x.pose for x in waypoints.waypoints}
waypoint = waypoints[selected_marker[0].id]
except (rospy.ROSException, rospy.ROSInterruptException) as e:
return "WAYPOINT_UNAVAILABLE"
dest = MoveBaseGoal()
dest.target_pose.header.frame_id = 'map'
dest.target_pose.pose = waypoint
userdata.marker_dest = dest
userdata.marker_ID = selected_marker[0].id
rospy.loginfo(dest)
time.sleep(5)
return 'WAYPOINT_SELECTED'
class WaypointNav(smach.State):
def __init__(self):
super(WaypointNav, self).__init__(outcomes=["WAYPOINT_REACHED", "FAILED_WAYPOINT"], input_keys=["marker_dest"])
    def execute(self, userdata):
        # Try to reach the selected waypoint; report failure via the declared outcome.
        try:
            mvbs = actionlib.SimpleActionClient('move_base', MoveBaseAction)
            mvbs.wait_for_server()
            mvbs.send_goal(userdata.marker_dest)
            mvbs.wait_for_result()
            return 'WAYPOINT_REACHED'
        except Exception as e:
            rospy.logwarn(e)
            return 'FAILED_WAYPOINT'
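For illustration, a minimal sketch of how these states might be composed into a smach container. The container outcomes, state labels, and transition targets below are assumptions (the actual wiring lives elsewhere in this package), and running it requires a ROS environment with smach available.
def build_hunt_state_machine():
    # Userdata keys (marker_dest, marker_ID) flow between states by name inside the container.
    sm = smach.StateMachine(outcomes=['HUNT_DONE', 'HUNT_FAILED'])
    with sm:
        smach.StateMachine.add('ENABLE_DISCOVERY', EnableWaypointDiscovery(),
                               transitions={'WAYPOINTS_ENABLED': 'SELECT_WAYPOINT'})
        smach.StateMachine.add('SELECT_WAYPOINT', WaypointSelect(),
                               transitions={'WAYPOINT_SELECTED': 'GO_TO_WAYPOINT',
                                            'WAYPOINT_UNAVAILABLE': 'HUNT_FAILED'})
        smach.StateMachine.add('GO_TO_WAYPOINT', WaypointNav(),
                               transitions={'WAYPOINT_REACHED': 'HUNT_DONE',
                                            'FAILED_WAYPOINT': 'HUNT_FAILED'})
    return sm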
| 33.123596
| 142
| 0.684871
| 2,451
| 0.831411
| 0
| 0
| 0
| 0
| 0
| 0
| 621
| 0.210651
|
e3d20c80e3fd93f5b987a741bdb20323be97f451
| 209
|
py
|
Python
|
templates/hello/views.py
|
cesarau04/python-react-webapp
|
305f69693313065a9ebbe116a34fd27111c86851
|
[
"0BSD"
] | null | null | null |
templates/hello/views.py
|
cesarau04/python-react-webapp
|
305f69693313065a9ebbe116a34fd27111c86851
|
[
"0BSD"
] | 1
|
2021-03-10T10:17:52.000Z
|
2021-03-10T10:17:52.000Z
|
templates/hello/views.py
|
cesarau04/python-react-webapp
|
305f69693313065a9ebbe116a34fd27111c86851
|
[
"0BSD"
] | null | null | null |
from flask import render_template, Blueprint
hello_blueprint = Blueprint('hello', __name__)
@hello_blueprint.route('/')
@hello_blueprint.route('/hello')
def index():
return render_template('index.html')
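An illustrative way to mount the blueprint on an application object; the single-module setup is an assumption, and an index.html template must exist for the route to render.
from flask import Flask

app = Flask(__name__)
app.register_blueprint(hello_blueprint)

if __name__ == '__main__':
    # Serves '/' and '/hello' using templates/index.html
    app.run(debug=True)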
| 23.222222
| 46
| 0.760766
| 0
| 0
| 0
| 0
| 114
| 0.545455
| 0
| 0
| 30
| 0.143541
|
e3d2b660c79791266d30c8a38f66f8ca7ec0c0c0
| 682
|
py
|
Python
|
project/api/views.py
|
akxen/pyomo-drf-docker
|
9299561e61ce0cc6b40968e078aea84bded1228b
|
[
"Apache-2.0"
] | null | null | null |
project/api/views.py
|
akxen/pyomo-drf-docker
|
9299561e61ce0cc6b40968e078aea84bded1228b
|
[
"Apache-2.0"
] | null | null | null |
project/api/views.py
|
akxen/pyomo-drf-docker
|
9299561e61ce0cc6b40968e078aea84bded1228b
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from .serializers import ModelDataSerializer
from .optimisation.model import run_model
class RunModel(APIView):
"""Construct, run, and solve model with data posted by user"""
def post(self, request, format=None):
serializer = ModelDataSerializer(data=request.data)
if serializer.is_valid():
result = run_model(data=serializer.data)
return Response(result)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
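An illustrative URLconf entry for exposing the view; the route and name are assumptions, not taken from the project.
from django.urls import path

urlpatterns = [
    path('api/run-model/', RunModel.as_view(), name='run-model'),
]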
| 31
| 78
| 0.758065
| 397
| 0.582111
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.090909
|
e3d5b6ff47680e0205ffd2a767cb7c6b5cf84622
| 1,456
|
py
|
Python
|
icees_api/features/qgraph_utils.py
|
xu-hao/ddcr-api
|
f69c80a84d413078bd36985b6579d2bc32329b8f
|
[
"MIT"
] | 2
|
2018-10-03T16:58:57.000Z
|
2021-10-04T22:10:48.000Z
|
icees_api/features/qgraph_utils.py
|
xu-hao/ddcr-api
|
f69c80a84d413078bd36985b6579d2bc32329b8f
|
[
"MIT"
] | 195
|
2019-06-26T17:56:33.000Z
|
2022-03-30T20:46:05.000Z
|
icees_api/features/qgraph_utils.py
|
xu-hao/ddcr-api
|
f69c80a84d413078bd36985b6579d2bc32329b8f
|
[
"MIT"
] | 5
|
2018-09-10T19:45:29.000Z
|
2020-10-26T17:59:05.000Z
|
"""Query graph utilities."""
import re
from bmt import Toolkit
BMT = Toolkit()
def get_subcategories(category):
"""Get sub-categories, according to the Biolink model."""
return [
descendant.replace("_", "")
for descendant in BMT.get_descendants(category, formatted=True, reflexive=True)
]
def camelcase_to_snakecase(string):
"""Convert CamelCase to snake_case."""
return re.sub(r"(?<!^)(?=[A-Z])", "_", string).lower()
def get_subpredicates(predicate):
"""Get sub-predicates, according to the Biolink model."""
curies = BMT.get_descendants(predicate, formatted=True, reflexive=True)
return [
"biolink:" + camelcase_to_snakecase(curie[8:])
for curie in curies
]
def normalize_qgraph(qgraph):
"""Normalize query graph."""
for node in qgraph["nodes"].values():
node["categories"] = [
descendant
for category in node.get("categories", None) or ["biolink:NamedThing"]
for descendant in get_subcategories(category)
]
if "biolink:SmallMolecule" in node["categories"]:
node["categories"].append("biolink:ChemicalSubstance")
node.pop("is_set", None)
for edge in qgraph["edges"].values():
edge["predicates"] = [
descendant
for predicate in edge.get("predicates", None) or ["biolink:related_to"]
for descendant in get_subpredicates(predicate)
]
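The regex helper can be checked standalone as below; get_subcategories, get_subpredicates and normalize_qgraph additionally require the bmt package (and its copy of the Biolink model) to be available.
# Standalone check of the CamelCase -> snake_case conversion used for predicates:
print(camelcase_to_snakecase("SmallMolecule"))      # small_molecule
print(camelcase_to_snakecase("ChemicalSubstance"))  # chemical_substance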
| 30.333333
| 87
| 0.632555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 428
| 0.293956
|
e3d5f08a740b483f1653463909ea2ce9beb6acde
| 3,493
|
py
|
Python
|
toy-evolve/weno.py
|
IanHawke/toy-evolve
|
a1490327dd19492e2c0bb0d9c6909abe8b167135
|
[
"MIT"
] | null | null | null |
toy-evolve/weno.py
|
IanHawke/toy-evolve
|
a1490327dd19492e2c0bb0d9c6909abe8b167135
|
[
"MIT"
] | null | null | null |
toy-evolve/weno.py
|
IanHawke/toy-evolve
|
a1490327dd19492e2c0bb0d9c6909abe8b167135
|
[
"MIT"
] | null | null | null |
import numpy
C_3 = numpy.array([1, 2]) / 3
a_3 = numpy.array([[3, -1], [1, 1]]) / 2
sigma_3 = numpy.array([[[1, 0], [-2, 1]], [[1, 0], [-2, 1]]])
C_5 = numpy.array([1, 6, 3]) / 10
a_5 = numpy.array([[11, -7, 2], [2, 5, -1], [-1, 5, 2]]) / 6
sigma_5 = numpy.array([[[40, 0, 0],
[-124, 100, 0],
[44, -76, 16] ],
[[16, 0, 0],
[-52, 52, 0],
[20, -52, 16] ],
[[16, 0, 0],
[-76, 44, 0],
[100, -124, 40] ] ]) / 12
C_all = { 2 : C_3,
3 : C_5 }
a_all = { 2 : a_3,
3 : a_5 }
sigma_all = { 2 : sigma_3,
3 : sigma_5 }
def weno3_upwind(q):
order = 2
epsilon = 1e-16
alpha = numpy.zeros(order)
beta = numpy.zeros(order)
q_stencils = numpy.zeros(order)
for k in range(order):
for l in range(order):
            for m in range(l+1):  # include the diagonal (m == l) sigma term in the smoothness indicator
beta[k] += sigma_3[k, l, m] * q[1 + k - l] * q[1 + k - m]
alpha[k] = C_3[k] / (epsilon + beta[k]**2)
for l in range(order):
q_stencils[k] += a_3[k, l] * q[1 + k - l]
w = alpha / numpy.sum(alpha)
return numpy.dot(w, q_stencils)
def weno3(q, simulation):
Nvars, Npoints = q.shape
q_minus = numpy.zeros_like(q)
q_plus = numpy.zeros_like(q)
for i in range(2, Npoints-2):
for Nv in range(Nvars):
q_plus [Nv, i] = weno3_upwind(q[Nv, i-1:i+2])
q_minus[Nv, i] = weno3_upwind(q[Nv, i+1:i-2:-1])
return q_minus, q_plus
def weno5_upwind(q):
order = 3
epsilon = 1e-16
alpha = numpy.zeros(order)
beta = numpy.zeros(order)
q_stencils = numpy.zeros(order)
for k in range(order):
for l in range(order):
            for m in range(l+1):  # include the diagonal (m == l) sigma term in the smoothness indicator
beta[k] += sigma_5[k, l, m] * q[2 + k - l] * q[2 + k - m]
alpha[k] = C_5[k] / (epsilon + beta[k]**2)
for l in range(order):
q_stencils[k] += a_5[k, l] * q[2 + k - l]
w = alpha / numpy.sum(alpha)
return numpy.dot(w, q_stencils)
def weno5(q, simulation):
Nvars, Npoints = q.shape
q_minus = numpy.zeros_like(q)
q_plus = numpy.zeros_like(q)
for i in range(3, Npoints-3):
for Nv in range(Nvars):
q_plus [Nv, i] = weno5_upwind(q[Nv, i-2:i+3])
q_minus[Nv, i] = weno5_upwind(q[Nv, i+2:i-3:-1])
return q_minus, q_plus
def weno_upwind(q, order):
a = a_all[order]
C = C_all[order]
sigma = sigma_all[order]
epsilon = 1e-16
alpha = numpy.zeros(order)
beta = numpy.zeros(order)
q_stencils = numpy.zeros(order)
for k in range(order):
for l in range(order):
            for m in range(l+1):  # include the diagonal (m == l) sigma term in the smoothness indicator
beta[k] += sigma[k, l, m] * q[order-1+k-l] * q[order-1+k-m]
alpha[k] = C[k] / (epsilon + beta[k]**2)
for l in range(order):
q_stencils[k] += a[k, l] * q[order-1+k-l]
w = alpha / numpy.sum(alpha)
return numpy.dot(w, q_stencils)
def weno(q, simulation, order):
Nvars, Npoints = q.shape
q_minus = numpy.zeros_like(q)
q_plus = numpy.zeros_like(q)
for i in range(order, Npoints-order):
for Nv in range(Nvars):
q_plus [Nv, i] = weno_upwind(q[Nv, i+1-order:i+order], order)
q_minus[Nv, i] = weno_upwind(q[Nv, i+order-1:i-order:-1], order)
return q_minus, q_plus
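A small usage sketch of the generic interface above with illustrative data; the 'simulation' argument is unused by weno and passed as None. With the inner loop running up to and including l, the order-2 smoothness indicators reduce to beta_k = (q[k+1] - q[k])**2, so on smooth data the weights approach the optimal values C_3 = [1/3, 2/3].
# Reconstruct left/right interface states of a smooth profile.
x = numpy.linspace(0.0, 1.0, 20)
q = numpy.sin(2.0 * numpy.pi * x).reshape(1, -1)   # shape (Nvars, Npoints)
q_minus, q_plus = weno(q, None, 2)
print(q_plus[0, 2:-2])   # interior reconstructions; the 2-cell boundary zones stay zero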
| 32.342593
| 80
| 0.488405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e3d6ee49185d1368971f9d3c026c6acc53822813
| 2,832
|
py
|
Python
|
tests/test_optimize.py
|
ricosjp/siml
|
8fc07d798cdedd77622c16221ee44a575d36bad0
|
[
"Apache-2.0"
] | 11
|
2020-12-28T16:22:33.000Z
|
2021-11-14T17:09:27.000Z
|
tests/test_optimize.py
|
ricosjp/siml
|
8fc07d798cdedd77622c16221ee44a575d36bad0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_optimize.py
|
ricosjp/siml
|
8fc07d798cdedd77622c16221ee44a575d36bad0
|
[
"Apache-2.0"
] | 2
|
2021-04-28T09:41:47.000Z
|
2021-07-01T21:18:51.000Z
|
from pathlib import Path
import shutil
import unittest
import numpy as np
import siml.optimize as optimize
import siml.setting as setting
class TestOptimize(unittest.TestCase):
def test_generate_dict(self):
main_setting = setting.MainSetting.read_settings_yaml(
Path('tests/data/deform/optuna.yml'))
objective = optimize.Objective(main_setting, None)
dict_replace_1 = {
'inputs': [{'name': 'abc', 'dim': 6}],
'n_node': 35,
'hidden_layers': 11,
'dropout': 0.01}
replaced_setting_1 = objective._generate_dict(
main_setting.optuna.setting, dict_replace_1)
dict_replace_2 = {
'inputs': [
{'name': 'elemental_strain', 'dim': 6},
{'name': 'something', 'dim': 100}],
'n_node': 135,
'hidden_layers': 111,
'dropout': 0.11}
replaced_setting_2 = objective._generate_dict(
main_setting.optuna.setting, dict_replace_2)
self.assertEqual(
replaced_setting_1['trainer']['inputs'][0]['name'],
'abc')
self.assertEqual(
replaced_setting_2['trainer']['inputs'][0]['name'],
'elemental_strain')
self.assertEqual(
replaced_setting_2['trainer']['inputs'][1]['name'],
'something')
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_nodes'], 135)
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_layers'], 111)
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_dropout'], 0.11)
def test_perform_study(self):
main_setting = setting.MainSetting.read_settings_yaml(
Path('tests/data/deform/optuna.yml'))
if main_setting.optuna.output_base_directory.exists():
shutil.rmtree(main_setting.optuna.output_base_directory)
study = optimize.Study(main_setting)
study.perform_study()
self.assertLess(
study.study.best_trial.value,
np.max([t.value for t in study.study.trials]))
def test_perform_study_step_by_step(self):
main_setting_yml = Path('tests/data/deform/optuna.yml')
main_setting = setting.MainSetting.read_settings_yaml(
main_setting_yml)
if main_setting.optuna.output_base_directory.exists():
shutil.rmtree(main_setting.optuna.output_base_directory)
db_setting = setting.DBSetting(use_sqlite=True)
study = optimize.Study(main_setting, db_setting, step_by_step=True)
for _ in range(3):
try:
study.perform_study()
except SystemExit:
continue
self.assertEqual(len(study.study.get_trials()), 3)
| 35.848101
| 77
| 0.611935
| 2,689
| 0.949506
| 0
| 0
| 0
| 0
| 0
| 0
| 430
| 0.151836
|
e3d7620b8331f1df9da2a2562c6b4d96e926fba0
| 1,773
|
py
|
Python
|
demo.py
|
natekspencer/vivintpy
|
ea65b05871b3f13326ba370112357a6696793bf6
|
[
"MIT"
] | 3
|
2022-02-10T14:08:59.000Z
|
2022-03-30T18:55:25.000Z
|
demo.py
|
natekspencer/pyvivint
|
ea65b05871b3f13326ba370112357a6696793bf6
|
[
"MIT"
] | null | null | null |
demo.py
|
natekspencer/pyvivint
|
ea65b05871b3f13326ba370112357a6696793bf6
|
[
"MIT"
] | 2
|
2021-10-31T01:43:26.000Z
|
2021-11-21T13:33:55.000Z
|
import asyncio
import logging
import os
import pubnub
from vivintpy.account import Account
from vivintpy.devices import VivintDevice
from vivintpy.devices.camera import MOTION_DETECTED, Camera
from vivintpy.exceptions import VivintSkyApiMfaRequiredError
pubnub.set_stream_logger(name="pubnub", level=logging.ERROR)
async def main():
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("Demo started")
def camera_motion_callback(device: VivintDevice) -> None:
logging.debug("Motion detected from camera: %s", device)
account = Account(username=os.environ["username"], password=os.environ["password"])
try:
await account.connect(load_devices=True, subscribe_for_realtime_updates=True)
except VivintSkyApiMfaRequiredError:
code = input("Enter MFA Code: ")
await account.verify_mfa(code)
logging.debug("MFA verified")
logging.debug("Discovered systems & devices:")
for system in account.systems:
logging.debug(f"\tSystem {system.id}")
for alarm_panel in system.alarm_panels:
logging.debug(
f"\t\tAlarm panel {alarm_panel.id}:{alarm_panel.partition_id}"
)
for device in alarm_panel.devices:
logging.debug(f"\t\t\tDevice: {device}")
if isinstance(device, Camera):
device.on(
MOTION_DETECTED,
lambda event: camera_motion_callback(event["device"]),
)
try:
while True:
await asyncio.sleep(300)
await account.refresh()
except Exception as e:
logging.debug(e)
finally:
await account.disconnect()
if __name__ == "__main__":
asyncio.run(main())
| 31.105263
| 87
| 0.648054
| 0
| 0
| 0
| 0
| 0
| 0
| 1,399
| 0.789058
| 266
| 0.150028
|
e3db1939642019da218fde1bd068b8be2f4606ff
| 3,811
|
py
|
Python
|
qanda/views.py
|
Fnechz/StakeOverflow-Clone
|
7f17bdb80ebc23a2a5210eb31db6121c5d41e70c
|
[
"MIT"
] | null | null | null |
qanda/views.py
|
Fnechz/StakeOverflow-Clone
|
7f17bdb80ebc23a2a5210eb31db6121c5d41e70c
|
[
"MIT"
] | null | null | null |
qanda/views.py
|
Fnechz/StakeOverflow-Clone
|
7f17bdb80ebc23a2a5210eb31db6121c5d41e70c
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http.response import HttpResponseRedirect, HttpResponseBadRequest
from django.urls.base import reverse
from django.utils import timezone
from django.views.generic import (
CreateView,
DayArchiveView,
DetailView,
RedirectView,
TemplateView,
UpdateView,
)
from qanda.forms import QuestionForm, AnswerForm, AnswerAcceptanceForm
from qanda.models import Question, Answer
from qanda.service.elasticsearch import search_for_questions
from django.shortcuts import render
# Creating my views here.
class SearchView(TemplateView):
template_name = 'qanda/search.html'
def get_context_data(self, **kwargs):
query = self.request.GET.get('q', None)
ctx = super().get_context_data(query=query, **kwargs)
if query:
results = search_for_questions(query)
ctx['hits'] = results
return ctx
class TodaysQuestionList(RedirectView):
def get_redirect_url(self, *args, **kwargs):
today = timezone.now()
return reverse(
'questions:daily_questions',
kwargs={
'day': today.day,
'month': today.month,
'year': today.year,
}
)
class DailyQuestionList(DayArchiveView):
queryset = Question.objects.all()
date_field = 'created'
month_format = '%m'
allow_empty = True
class UpdateAnswerAcceptanceView(LoginRequiredMixin, UpdateView):
form_class = AnswerAcceptanceForm
queryset = Answer.objects.all()
def get_success_url(self):
return self.object.question.get_absolute_url()
def form_invalid(self, form):
return HttpResponseRedirect(
redirect_to=self.object.question.get_absolute_url())
class AskQuestionView(LoginRequiredMixin, CreateView):
form_class = QuestionForm
template_name = 'qanda/ask.html'
def get_initial(self):
return {
'user': self.request.user.id
}
def form_valid(self, form):
action = self.request.POST.get('action')
        if action == 'SAVE':
            # save and redirect as usual
return super().form_valid(form)
elif action == 'PREVIEW':
preview = Question(
question=form.cleaned_data['question'],
title=form.cleaned_data['title'])
ctx = self.get_context_data(preview=preview)
return self.render_to_response(context=ctx)
return HttpResponseBadRequest()
class QuestionDetailView(DetailView):
model = Question
ACCEPT_FORM = AnswerAcceptanceForm(initial={'accepted': True})
REJECT_FORM = AnswerAcceptanceForm(initial={'accepted': False})
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx.update({
'answer_form': AnswerForm(initial={
'user': self.request.user.id,
'question': self.object.id,
})
})
if self.object.can_accept_answers(self.request.user):
ctx.update({
'accept_form': self.ACCEPT_FORM,
'reject_form': self.REJECT_FORM,
})
return ctx
class CreateAnswerView(LoginRequiredMixin, CreateView):
form_class = AnswerForm
template_name = 'qanda/create_answer.html'
def get_initial(self):
return {
'question': self.get_question().id,
'user': self.request.user.id,
}
def get_context_data(self, **kwargs):
return super().get_context_data(question=self.get_question(),
**kwargs)
def get_success_url(self):
return self.object.question.get_absolute_url()
def form_valid(self, form):
action = self.request.POST.get('action')
        if action == 'SAVE':
            # save and redirect as usual
return super().form_valid(form)
elif action == 'PREVIEW':
ctx = self.get_context_data(preview=form.cleaned_data['answer'])
return self.render_to_response(context=ctx)
return HttpResponseBadRequest()
def get_question(self):
return Question.objects.get(pk=self.kwargs['pk'])
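An illustrative URLconf sketch for wiring these views. The 'questions' namespace, the 'daily_questions' name, and the year/month/day kwargs follow the reverse() call in TodaysQuestionList; the remaining paths and names are assumptions for illustration only.
from django.urls import path

app_name = 'questions'
urlpatterns = [
    path('ask/', AskQuestionView.as_view(), name='ask'),
    path('q/<int:pk>/', QuestionDetailView.as_view(), name='question_detail'),
    path('q/<int:pk>/answer/', CreateAnswerView.as_view(), name='answer_question'),
    path('daily/<int:year>/<int:month>/<int:day>/', DailyQuestionList.as_view(), name='daily_questions'),
    path('daily/today/', TodaysQuestionList.as_view(), name='todays_questions'),
]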
| 26.282759
| 78
| 0.708738
| 3,172
| 0.832327
| 0
| 0
| 0
| 0
| 0
| 0
| 382
| 0.100236
|
e3ddafddccd1aee845e95e14e1da8e8b355c53a4
| 92,214
|
py
|
Python
|
incubator/bootstrap_cli/__main__.py
|
cognitedata/inso-bootstrap-cli
|
d2ed0e575703acc7af2a11212357b6fd439f5279
|
[
"Apache-2.0"
] | null | null | null |
incubator/bootstrap_cli/__main__.py
|
cognitedata/inso-bootstrap-cli
|
d2ed0e575703acc7af2a11212357b6fd439f5279
|
[
"Apache-2.0"
] | 7
|
2022-02-16T12:46:33.000Z
|
2022-03-30T15:58:45.000Z
|
incubator/bootstrap_cli/__main__.py
|
cognitedata/inso-bootstrap-cli
|
d2ed0e575703acc7af2a11212357b6fd439f5279
|
[
"Apache-2.0"
] | null | null | null |
# 888 888
# 888 888
# 888 888
# .d8888b 88888b. 8888b. 88888b. .d88b. .d88b. 888 .d88b. .d88b.
# d88P" 888 "88b "88b 888 "88b d88P"88b d8P Y8b 888 d88""88b d88P"88b
# 888 888 888 .d888888 888 888 888 888 88888888 888 888 888 888 888
# Y88b. 888 888 888 888 888 888 Y88b 888 Y8b. 888 Y88..88P Y88b 888
# "Y8888P 888 888 "Y888888 888 888 "Y88888 "Y8888 888 "Y88P" "Y88888
# 888 888
# Y8b d88P Y8b d88P
# "Y88P" "Y88P"
#
# 210504 mh:
# * Adding support for minimum groups and project capabilities for read and owner Groups
# * Exception handling for root-groups to avoid duplicate groups and projects capabilities
# 210610 mh:
# * Adding RAW DBs and Datasets for Groups {env}:allprojects:{owner/read} and {env}:{group}:allprojects:{owner/read}
# * Adding functionality for updating dataset details (external id, description, etc) based on the config.yml
# 210910 pa:
# * extended acl_default_types by labels, relationships, functions
# * removed labels from acl_admin_types
# * functions don't have dataset scope
# 211013 pa:
# * renamed "adfs" to "aad" terminology => aad_mappings
# * for AAD 'root:client' and 'root:user' can be merged into 'root'
# 211014 pa:
# * adding new capabilities
# extractionpipelinesAcl
# extractionrunsAcl
# 211108 pa:
# * adding new capabilities
# entitymatchingAcl
# * refactor list of acl types which only support "all" scope
# acl_all_scope_only_types
# * support "labels" for non admin groups
# 211110 pa:
# * adding new capabilities
# sessionsAcl
# 220202 pa:
# * adding new capabilities
# typesAcl
# 220216 pa:
# * adding 'generate_special_groups()' to handle
# 'extractors' and 'transformations' and their 'aad_mappings'
# * configurable through `deploy --with-special-groups=[yes|no]` parameter
# * adding new capabilities:
# transformationsAcl (replacing the need for magic "transformations" CDF Group)
# 220404 pa:
# * v1.4.0 limited datasets for 'owner' that they cannot edit or create datasets
# * removed `datasets:write` capability
# * moved that capability to action_dimensions['admin']
# 220405 sd:
# * v1.5.0 added dry-run mode as global parameter for all commands
# 220405 pa:
# * v1.6.0
# * removed 'transformation' acl from 'acl_all_scope_only_types'
# as it now supports dataset scopes too!
# * refactor variable names to match the new documentation
# 1. group_types_dimensions > group_bootstrap_hierarchy
# 2. group_type > ns_name (namespace: src, ca, uc)
# 3. group_prefix > node_name (src:001:sap)
# 220406 pa/sd:
# * v1.7.0
# * added 'diagram' command which creates a Mermaid (diagram as code) output
# 220406 pa:
# * v1.7.1
# * started to use '# fmt:skip' to save intended multiline formatted and indented code
# from black auto-format
# 220420 pa:
# * v.1.9.2
# * fixed Poetry on Windows issues
# 220422 pa:
# * v1.10.0
# * issue #28 possibility to skip creation of RAW DBs
# * added '--with-raw-capability' parameter for 'deploy' and 'diagram' commands
# 220424 pa:
# * introduced CommandMode enums to support more detailed BootstrapCore initialization
# * started with validation-functions ('validate_config_is_cdf_project_in_mappings')
# * for 'diagram' command
# - made 'cognite' section optional
# - added support for parameter '--cdf-project' to explicit diagram a specific CDF Project
# - Added cdf-project name to diagram "IdP Groups for CDF: <>" subgraph title
# - renamed mermaid properties from 'name/short' to 'id_name/display'
# * documented config-deploy-example-v2.yml
# 220511 pa: v2.0.0 release :)
import logging
import time
# from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from itertools import islice
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, TypeVar
import click
import pandas as pd
import yaml
from click import Context
from cognite.client.data_classes import DataSet, Group
from cognite.client.data_classes.data_sets import DataSetUpdate
from cognite.extractorutils.configtools import CogniteClient
from dotenv import load_dotenv
# cli internal
from incubator.bootstrap_cli import __version__
from incubator.bootstrap_cli.configuration import (
BootstrapConfigError,
BootstrapDeleteConfig,
BootstrapDeployConfig,
BootstrapValidationError,
CommandMode,
SharedAccess,
YesNoType,
)
from incubator.bootstrap_cli.mermaid_generator.mermaid import (
AssymetricNode,
DottedEdge,
Edge,
GraphRegistry,
Node,
RoundedNode,
SubroutineNode,
TrapezNode,
)
# '''
# 888 888 888 .d888 d8b
# 888 888 888 d88P" Y8P
# 888 888 888 888
# .d88b. 888 .d88b. 88888b. 8888b. 888 .d8888b .d88b. 88888b. 888888 888 .d88b. .d8888b
# d88P"88b 888 d88""88b 888 "88b "88b 888 d88P" d88""88b 888 "88b 888 888 d88P"88b 88K
# 888 888 888 888 888 888 888 .d888888 888 888 888 888 888 888 888 888 888 888 "Y8888b.
# Y88b 888 888 Y88..88P 888 d88P 888 888 888 Y88b. Y88..88P 888 888 888 888 Y88b 888 X88
# "Y88888 888 "Y88P" 88888P" "Y888888 888 "Y8888P "Y88P" 888 888 888 888 "Y88888 88888P'
# 888 888
# Y8b d88P Y8b d88P
# "Y88P" "Y88P"
# '''
_logger = logging.getLogger(__name__)
# because within f'' strings no backslash-character is allowed
NEWLINE = "\n"
# capabilities (acl) which only support scope: {"all":{}}
acl_all_scope_only_types = set(
[
"projects",
"sessions",
"functions",
"entitymatching",
"types",
"threed",
]
)
# lookup of non-default actions per capability (acl) and role (owner/read/admin)
action_dimensions = {
# owner datasets might only need READ and OWNER
"owner": { # else ["READ","WRITE"]
"raw": ["READ", "WRITE", "LIST"],
"datasets": ["READ", "OWNER"],
"groups": ["LIST"],
"projects": ["LIST"],
"sessions": ["LIST", "CREATE"],
"threed": ["READ", "CREATE", "UPDATE", "DELETE"],
},
"read": { # else ["READ"]
"raw": ["READ", "LIST"],
"groups": ["LIST"],
"projects": ["LIST"],
"sessions": ["LIST"],
},
"admin": {
"datasets": ["READ", "WRITE", "OWNER"],
"groups": ["LIST", "READ", "CREATE", "UPDATE", "DELETE"],
"projects": ["READ", "UPDATE", "LIST"],
},
}
#
# GENERIC configurations
# extend when new capability (acl) is available
# check if action_dimensions must be extended with non-default capabilities:
# which are owner: ["READ","WRITE"]
# and read: ["READ"])
#
acl_default_types = [
"assets",
"datasets",
"entitymatching",
"events",
"extractionPipelines",
"extractionRuns",
"files",
"functions",
"groups",
"labels",
"projects",
"raw",
"relationships",
"sequences",
"sessions",
"timeSeries",
"transformations",
"types",
"threed",
]
# give precedence when merging over acl_default_types
acl_admin_types = list(action_dimensions["admin"].keys())
# '''
# 888888b. 888 888 .d8888b.
# 888 "88b 888 888 d88P Y88b
# 888 .88P 888 888 888 888
# 8888888K. .d88b. .d88b. 888888 .d8888b 888888 888d888 8888b. 88888b. 888 .d88b. 888d888 .d88b.
# 888 "Y88b d88""88b d88""88b 888 88K 888 888P" "88b 888 "88b 888 d88""88b 888P" d8P Y8b
# 888 888 888 888 888 888 888 "Y8888b. 888 888 .d888888 888 888 888 888 888 888 888 88888888
# 888 d88P Y88..88P Y88..88P Y88b. X88 Y88b. 888 888 888 888 d88P Y88b d88P Y88..88P 888 Y8b.
# 8888888P" "Y88P" "Y88P" "Y888 88888P' "Y888 888 "Y888888 88888P" "Y8888P" "Y88P" 888 "Y8888
# 888
# 888
# 888
# '''
# type-hint for ExtpipesCore instance response
T_BootstrapCore = TypeVar("T_BootstrapCore", bound="BootstrapCore")
class BootstrapCore:
# CDF Group prefix, i.e. "cdf:", to make bootstrap created CDF Groups easy recognizable in Fusion
GROUP_NAME_PREFIX = ""
# mandatory for hierarchical-namespace
AGGREGATED_LEVEL_NAME = ""
# rawdbs creation support additional variants, for special purposes (like saving statestores)
# - default-suffix is ':rawdb' with no variant-suffix (represented by "")
# - additional variant-suffixes can be added like this ["", ":state"]
RAW_VARIANTS = [""]
def __init__(self, configpath: str, command: CommandMode):
if command == CommandMode.DELETE:
self.config: BootstrapDeleteConfig = BootstrapDeleteConfig.from_yaml(configpath)
self.delete_or_deprecate: Dict[str, Any] = self.config.delete_or_deprecate
if not self.config.cognite:
                raise BootstrapConfigError("'cognite' section required in configuration")
elif command in (CommandMode.DEPLOY, CommandMode.DIAGRAM, CommandMode.PREPARE):
self.config: BootstrapDeployConfig = BootstrapDeployConfig.from_yaml(configpath)
self.bootstrap_config: BootstrapDeployConfig = self.config.bootstrap
self.idp_cdf_mappings = self.bootstrap_config.idp_cdf_mappings
# CogniteClient is optional for diagram
if command != CommandMode.DIAGRAM:
# mandatory section
if not self.config.cognite:
                    raise BootstrapConfigError("'cognite' section required in configuration")
#
# load 'bootstrap.features'
#
# unpack and process features
features = self.bootstrap_config.features
# [OPTIONAL] default: False
self.with_special_groups: bool = features.with_special_groups
# [OPTIONAL] default: True
self.with_raw_capability: bool = features.with_raw_capability
# [OPTIONAL] default: "allprojects"
BootstrapCore.AGGREGATED_LEVEL_NAME = features.aggregated_level_name
# [OPTIONAL] default: "cdf:"
# support for '' empty string
BootstrapCore.GROUP_NAME_PREFIX = f"{features.group_prefix}:" if features.group_prefix else ""
# [OPTIONAL] default: "dataset"
# support for '' empty string
BootstrapCore.DATASET_SUFFIX = f":{features.dataset_suffix}" if features.dataset_suffix else ""
# [OPTIONAL] default: "rawdb"
# support for '' empty string
BootstrapCore.RAW_SUFFIX = f":{features.rawdb_suffix}" if features.rawdb_suffix else ""
# [OPTIONAL] default: ["", ":"state"]
BootstrapCore.RAW_VARIANTS = [""] + [f":{suffix}" for suffix in features.rawdb_additional_variants]
self.deployed: Dict[str, Any] = {}
self.all_scope_ctx: Dict[str, Any] = {}
self.is_dry_run: bool = False
self.client: CogniteClient = None
self.cdf_project = None
# TODO debug
# print(f"self.config= {self.config}")
# TODO: support 'logger' section optional, provide default config for logger with console only
#
# Logger initialisation
#
# make sure the optional folders in logger.file.path exists
# to avoid: FileNotFoundError: [Errno 2] No such file or directory: '/github/workspace/logs/test-deploy.log'
if self.config.logger.file:
(Path.cwd() / self.config.logger.file.path).parent.mkdir(parents=True, exist_ok=True)
self.config.logger.setup_logging()
_logger.info("Starting CDF Bootstrap configuration")
# debug new features
if getattr(self, "bootstrap_config", False):
            # TODO: not available for 'delete' but there must be a smarter solution
_logger.debug(
"Features from yaml-config or defaults (can be overridden by cli-parameters!): "
f"{self.bootstrap_config.features=}"
)
#
# Cognite initialisation (optional for 'diagram')
#
if self.config.cognite:
self.client: CogniteClient = self.config.cognite.get_cognite_client( # noqa
client_name="inso-bootstrap-cli", token_custom_args=self.config.token_custom_args
)
self.cdf_project = self.client.config.project
_logger.info("Successful connection to CDF client")
@staticmethod
def acl_template(actions, scope):
return {"actions": actions, "scope": scope}
@staticmethod
def get_allprojects_name_template(ns_name=None):
return f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}" if ns_name else BootstrapCore.AGGREGATED_LEVEL_NAME
@staticmethod
def get_dataset_name_template():
return "{node_name}" + BootstrapCore.DATASET_SUFFIX
@staticmethod
def get_raw_dbs_name_template():
return "{node_name}" + BootstrapCore.RAW_SUFFIX + "{raw_variant}"
@staticmethod
def get_timestamp():
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def validate_config_length_limits(self):
"""
Validate features in config
"""
#
# CHECK 1 (availability)
#
if not self.AGGREGATED_LEVEL_NAME:
raise BootstrapValidationError(
"Features validation error: 'features.aggregated-level-name' is required, "
f"but provided as <{self.AGGREGATED_LEVEL_NAME}>"
)
#
# CHECK 2 (length limits)
#
# TODO: GROUP_NAME_LENGTH_LIMIT = ??
RAWDB_NAME_LENGTH_LIMIT = 32
DATASET_NAME_LENGTH_LIMIT = 50
DATASET_EXTERNALID_LENGTH_LIMIT = 255
# create all required scopes to check name lengths
all_scopes = {
# generate_target_raw_dbs -> returns a Set[str]
"raw": self.generate_target_raw_dbs(), # all raw_dbs
# generate_target_datasets -> returns a Dict[str, Any]
"datasets": self.generate_target_datasets(), # all datasets
}
errors = []
if self.with_raw_capability:
errors.extend(
[
("RAW DB", rawdb_name, len(rawdb_name), RAWDB_NAME_LENGTH_LIMIT)
for rawdb_name in all_scopes["raw"]
if len(rawdb_name) > RAWDB_NAME_LENGTH_LIMIT
]
)
errors.extend(
[
("DATA SET name", dataset_name, len(dataset_name), DATASET_NAME_LENGTH_LIMIT)
for dataset_name, dataset_details in all_scopes["datasets"].items()
if len(dataset_name) > DATASET_NAME_LENGTH_LIMIT
]
)
errors.extend(
[
(
"DATA SET external_id",
dataset_details["external_id"],
len(dataset_name),
DATASET_EXTERNALID_LENGTH_LIMIT,
)
for dataset_name, dataset_details in all_scopes["datasets"].items()
if len(dataset_details["external_id"]) > DATASET_EXTERNALID_LENGTH_LIMIT
]
)
if errors:
raise BootstrapValidationError(
"Features validation error(s):\n"
# RAW DB src:002:weather:rawdbiswaytoolongtofit : len(38) > 32
f"""{NEWLINE.join(
[
f'{scope_type} {scope_error} : len({scope_length}) > {max_length}'
for (scope_type, scope_error, scope_length, max_length) in errors
])}"""
)
# return self for chaining
return self
def validate_config_is_cdf_project_in_mappings(self):
# check if mapping exists for configured cdf-project
is_cdf_project_in_mappings = self.cdf_project in [mapping.cdf_project for mapping in self.idp_cdf_mappings]
if not is_cdf_project_in_mappings:
_logger.warning(f"No 'idp-cdf-mapping' found for CDF Project <{self.cdf_project}>")
# log or raise?
# raise ValueError(f'No mapping for CDF project {self.cdf_project}')
# return self for chaining
return self
def generate_default_action(self, action, acl_type):
return action_dimensions[action].get(acl_type, ["READ", "WRITE"] if action == "owner" else ["READ"])
def generate_admin_action(self, acl_admin_type):
return action_dimensions["admin"][acl_admin_type]
def get_ns_node_shared_access_by_name(self, node_name) -> SharedAccess:
for ns in self.bootstrap_config.namespaces:
for ns_node in ns.ns_nodes:
if node_name == ns_node.node_name:
return ns_node.shared_access
return SharedAccess([], [])
def get_group_raw_dbs_groupedby_action(self, action, ns_name, node_name=None):
raw_db_names: Dict[str, Any] = {"owner": [], "read": []}
if node_name:
raw_db_names[action].extend(
# the dataset which belongs directly to this node_name
[
self.get_raw_dbs_name_template().format(node_name=node_name, raw_variant=raw_variant)
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
# for owner groups add "shared_owner_access" raw_dbs too
if action == "owner":
raw_db_names["owner"].extend(
[
self.get_raw_dbs_name_template().format(
node_name=shared_node.node_name, raw_variant=raw_variant
)
# find the group_config which matches the name,
# and check the "shared_access" groups list (else [])
for shared_node in self.get_ns_node_shared_access_by_name(node_name).owner
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
raw_db_names["read"].extend(
[
self.get_raw_dbs_name_template().format(
node_name=shared_node.node_name, raw_variant=raw_variant
)
# find the group_config which matches the name,
# and check the "shared_access" groups list (else [])
for shared_node in self.get_ns_node_shared_access_by_name(node_name).read
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
else: # handling the {ns_name}:{BootstrapCore.AGGREGATED_GROUP_NAME}
raw_db_names[action].extend(
[
self.get_raw_dbs_name_template().format(node_name=ns_node.node_name, raw_variant=raw_variant)
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
for raw_variant in BootstrapCore.RAW_VARIANTS
]
# adding the {ns_name}:{BootstrapCore.AGGREGATED_GROUP_NAME} rawdbs
+ [ # noqa
self.get_raw_dbs_name_template().format(
node_name=self.get_allprojects_name_template(ns_name=ns_name), raw_variant=raw_variant
)
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
# only owner-groups support "shared_access" rawdbs
if action == "owner":
raw_db_names["owner"].extend(
[
self.get_raw_dbs_name_template().format(
node_name=shared_access_node.node_name, raw_variant=raw_variant
)
# and check the "shared_access" groups list (else [])
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
for shared_access_node in ns_node.shared_access.owner
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
raw_db_names["read"].extend(
[
self.get_raw_dbs_name_template().format(
node_name=shared_access_node.node_name, raw_variant=raw_variant
)
# and check the "shared_access" groups list (else [])
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
for shared_access_node in ns_node.shared_access.read
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
# returns clear names grouped by action
return raw_db_names
def get_group_datasets_groupedby_action(self, action, ns_name, node_name=None):
dataset_names: Dict[str, Any] = {"owner": [], "read": []}
# for example fac:001:wasit, uc:002:meg, etc.
if node_name:
dataset_names[action].extend(
# the dataset which belongs directly to this node_name
[self.get_dataset_name_template().format(node_name=node_name)]
)
# for owner groups add "shared_access" datasets too
if action == "owner":
dataset_names["owner"].extend(
[
self.get_dataset_name_template().format(node_name=shared_node.node_name)
# find the group_config which matches the id,
# and check the "shared_access" groups list (else [])
for shared_node in self.get_ns_node_shared_access_by_name(node_name).owner
]
)
dataset_names["read"].extend(
[
self.get_dataset_name_template().format(node_name=shared_node.node_name)
# find the group_config which matches the id,
# and check the "shared_access" groups list (else [])
for shared_node in self.get_ns_node_shared_access_by_name(node_name).read
]
)
# for example src, fac, uc, ca
else: # handling the {ns_name}:{BootstrapCore.AGGREGATED_GROUP_NAME}
dataset_names[action].extend(
[
# all datasets for each of the nodes of the given namespace
self.get_dataset_name_template().format(node_name=ns_node.node_name)
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
]
# adding the {ns_name}:{BootstrapCore.AGGREGATED_GROUP_NAME} dataset
+ [ # noqa
self.get_dataset_name_template().format(
node_name=self.get_allprojects_name_template(ns_name=ns_name)
)
]
)
# only owner-groups support "shared_access" datasets
if action == "owner":
dataset_names["owner"].extend(
[
self.get_dataset_name_template().format(node_name=shared_access_node.node_name)
# and check the "shared_access" groups list (else [])
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
for shared_access_node in ns_node.shared_access.owner
]
)
dataset_names["read"].extend(
[
self.get_dataset_name_template().format(node_name=shared_access_node.node_name)
# and check the "shared_access" groups list (else [])
for ns in self.bootstrap_config.namespaces
if ns.ns_name == ns_name
for ns_node in ns.ns_nodes
for shared_access_node in ns_node.shared_access.read
]
)
# returns clear names
return dataset_names
def dataset_names_to_ids(self, dataset_names):
return self.deployed["datasets"].query("name in @dataset_names")["id"].tolist()
def get_scope_ctx_groupedby_action(self, action, ns_name, node_name=None):
ds_by_action = self.get_group_datasets_groupedby_action(action, ns_name, node_name)
rawdbs_by_action = self.get_group_raw_dbs_groupedby_action(action, ns_name, node_name)
return {
action: {"raw": rawdbs_by_action[action], "datasets": ds_by_action[action]}
for action in ["owner", "read"]
} # fmt: skip
def generate_scope(self, acl_type, scope_ctx):
if acl_type == "raw":
# { "tableScope": { "dbsToTables": { "foo:db": {}, "bar:db": {} } }
return {"tableScope": {"dbsToTables": {raw: {} for raw in scope_ctx["raw"]}}}
elif acl_type == "datasets":
# { "idScope": { "ids": [ 2695894113527579, 4254268848874387 ] } }
return {"idScope": {"ids": self.dataset_names_to_ids(scope_ctx["datasets"])}}
# adding minimum projects and groups scopes for non-root groups
# TODO: adding documentation link
elif acl_type in acl_all_scope_only_types:
return {"all": {}}
elif acl_type == "groups":
return {"currentuserscope": {}}
else: # like 'assets', 'events', 'files', 'sequences', 'timeSeries', ..
# { "datasetScope": { "ids": [ 2695894113527579, 4254268848874387 ] } }
return {"datasetScope": {"ids": self.dataset_names_to_ids(scope_ctx["datasets"])}}
def generate_group_name_and_capabilities(
self, action: str = None, ns_name: str = None, node_name: str = None, root_account: str = None
) -> Tuple[str, List[Dict[str, Any]]]:
"""Create the group-name and its capabilities.
The function supports following levels expressed by parameter combinations:
- core: {action} + {ns_name} + {node_name}
- namespace: {action} + {ns_name}
- top-level: {action}
- root: {root_account}
Args:
action (str, optional):
One of the action_dimensions ["read", "owner"].
Defaults to None.
ns_name (str, optional):
Namespace like "src" or "uc".
Defaults to None.
node_name (str, optional):
Core group like "src:001:sap" or "uc:003:demand".
Defaults to None.
root_account (str, optional):
Name of the root-account.
Defaults to None.
Returns:
Tuple[str, List[Dict[str, Any]]]: group-name and list of capabilities
"""
capabilities = []
# detail level like cdf:src:001:public:read
if action and ns_name and node_name:
# group for each dedicated group-core id
group_name_full_qualified = f"{BootstrapCore.GROUP_NAME_PREFIX}{node_name}:{action}"
[
capabilities.append( # type: ignore
{
f"{acl_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_default_action(shared_action, acl_type),
scope=self.generate_scope(acl_type, scope_ctx),
)
}
)
for acl_type in acl_default_types
for shared_action, scope_ctx in self.get_scope_ctx_groupedby_action(action, ns_name, node_name).items()
# don't create empty scopes
# enough to check one as they have both same length, but that's more explicit
if scope_ctx["raw"] and scope_ctx["datasets"]
]
# group-type level like cdf:src:all:read
elif action and ns_name:
# 'all' groups on group-type level
# (access to all datasets/ raw-dbs which belong to this group-type)
group_name_full_qualified = (
f"{BootstrapCore.GROUP_NAME_PREFIX}{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}"
)
[
capabilities.append( # type: ignore
{
f"{acl_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_default_action(shared_action, acl_type),
scope=self.generate_scope(acl_type, scope_ctx),
)
}
)
for acl_type in acl_default_types
for shared_action, scope_ctx in self.get_scope_ctx_groupedby_action(action, ns_name).items()
# don't create empty scopes
# enough to check one as they have both same length, but that's more explicit
if scope_ctx["raw"] and scope_ctx["datasets"]
]
# top level like cdf:all:read
elif action:
# 'all' groups on action level (no limits to datasets or raw-dbs)
group_name_full_qualified = (
f"{BootstrapCore.GROUP_NAME_PREFIX}{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}"
)
[
capabilities.append( # type: ignore
{
f"{acl_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_default_action(action, acl_type),
# scope = { "all": {} }
# create scope for all raw_dbs and datasets
scope=self.generate_scope(acl_type, self.all_scope_ctx),
)
}
)
for acl_type in acl_default_types
]
# root level like cdf:root
elif root_account: # no parameters
# all (no limits)
group_name_full_qualified = f"{BootstrapCore.GROUP_NAME_PREFIX}{root_account}"
# all default ACLs
[
capabilities.append( # type: ignore
{
f"{acl_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_default_action("owner", acl_type),
scope={"all": {}},
)
}
)
# skipping admin types from default types to avoid duplicates
for acl_type in (set(acl_default_types) - set(acl_admin_types))
]
# plus admin ACLs
[
capabilities.append( # type: ignore
{
f"{acl_admin_type}Acl": self.acl_template(
# check for acl specific owner actions, else default
actions=self.generate_admin_action(acl_admin_type),
scope={"all": {}},
)
}
)
for acl_admin_type in acl_admin_types
]
return group_name_full_qualified, capabilities
def get_group_ids_by_name(self, group_name: str) -> List[int]:
"""Lookup if CDF Group name exists (could be more than one!)
and return list of all CDF Group IDs
Args:
group_name (str): CDF Group name to check
Returns:
List[int]: of CDF Group IDs
"""
return self.deployed["groups"].query("name == @group_name")["id"].tolist()
# return self.deployed["groups"].query("name == @group_payload['name']")["id"].tolist()
# TODO 220203 pa: explicit providing 'group_name'
# to bypass a strange bug under Docker which throws a
# pandas.core.computation.ops.UndefinedVariableError:
# name 'str_0_0x900xd80x90xec0x870x7f0x00x0' is not defined
def create_group(
self,
group_name: str,
group_capabilities: Dict[str, Any] = None,
idp_mapping: Tuple[str] = None,
) -> Group:
"""Creating a CDF Group
- with upsert support the same way Fusion updates CDF Groups
if a group with the same name exists:
1. a new group with the same name will be created
2. then the old group will be deleted (by its 'id')
- with support of explicit given aad-mapping or internal lookup from config
Args:
group_name (str): name of the CDF Group (always prefixed with GROUP_NAME_PREFIX)
group_capabilities (List[Dict[str, Any]], optional): Defining the CDF Group capabilities.
            idp_mapping (Tuple[str, str], optional):
Tuple of ({AAD SourceID}, {AAD SourceName})
to link the CDF Group to
Returns:
Group: the new created CDF Group
"""
idp_source_id, idp_source_name = None, None
if idp_mapping:
# explicit given
# TODO: change from tuple to dataclass
if len(idp_mapping) != 2:
raise ValueError(f"Expected a tuple of length 2, got {idp_mapping=} instead")
idp_source_id, idp_source_name = idp_mapping
else:
# check lookup from provided config
mapping = self.bootstrap_config.get_idp_cdf_mapping_for_group(
cdf_project=self.cdf_project, cdf_group=group_name
)
# unpack
idp_source_id, idp_source_name = mapping.idp_source_id, mapping.idp_source_name
# check if group already exists, if yes it will be deleted after a new one is created
old_group_ids = self.get_group_ids_by_name(group_name)
new_group = Group(name=group_name, capabilities=group_capabilities)
if idp_source_id:
# inject (both will be pushed through the API call!)
new_group.source_id = idp_source_id # 'S-314159-1234'
new_group.source = idp_source_name # type: ignore
# print(f"group_create_object:<{group_create_object}>")
# overwrite new_group as it now contains id too
if self.is_dry_run:
_logger.info(f"Dry run - Creating group with name: <{new_group.name}>")
_logger.debug(f"Dry run - Creating group details: <{new_group}>")
else:
new_group = self.client.iam.groups.create(new_group)
# if the group name existed before, delete those groups now
# same upsert approach Fusion is using to update a CDF Group: create new with changes => then delete old one
if old_group_ids:
if self.is_dry_run:
_logger.info(f"Dry run - Deleting groups with ids: <{old_group_ids}>")
else:
self.client.iam.groups.delete(old_group_ids)
return new_group
def process_group(
self, action: str = None, ns_name: str = None, node_name: str = None, root_account: str = None
) -> Group:
# to avoid complex upsert logic, all groups will be recreated and then the old ones deleted
# to be merged with existing code
# print(f"=== START: action<{action}> | ns_name<{ns_name}> | node_name<{node_name}> ===")
group_name, group_capabilities = self.generate_group_name_and_capabilities(
action, ns_name, node_name, root_account
)
group: Group = self.create_group(group_name, group_capabilities)
return group
def generate_target_datasets(self) -> Dict[str, Any]:
# list of all targets: autogenerated dataset names
target_datasets = {
# dictionary generator
# dataset_name : {Optional[dataset_description], Optional[dataset_metadata], ..}
# key:
(fq_ns_name := self.get_dataset_name_template().format(node_name=ns_node.node_name)):
# value
{
"description": ns_node.description,
"metadata": ns_node.metadata,
# if not explicit provided, same template as name
"external_id": ns_node.external_id or fq_ns_name,
}
for ns in self.bootstrap_config.namespaces
for ns_node in ns.ns_nodes
}
# update target datasets to include 'allproject' and '{ns_name}:{BootstrapCore.AGGREGATED_GROUP_NAME}' datasets
target_datasets.update(
{ # dictionary generator
# key:
self.get_dataset_name_template().format(
node_name=f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}"
if ns_name
else BootstrapCore.AGGREGATED_LEVEL_NAME
):
# value
{
"description": f"Dataset for '{BootstrapCore.AGGREGATED_LEVEL_NAME}' Owner Groups",
# "metadata": "",
"external_id": f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}"
if ns_name
else BootstrapCore.AGGREGATED_LEVEL_NAME,
}
# creating 'all' at group type level + top-level
for ns_name in list([ns.ns_name for ns in self.bootstrap_config.namespaces]) + [""]
}
)
return target_datasets
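# Illustrative result (dataset names follow the '<node>:dataset' template used in the
# examples elsewhere in this file; descriptions and metadata are hypothetical):
#   {"src:002:weather:dataset": {"description": "...", "metadata": {...},
#                                "external_id": "src:002:weather:dataset"},
#    "src:all:dataset": {"description": "Dataset for 'all' Owner Groups",
#                        "external_id": "src:all:dataset"},
#    "all:dataset": {"description": "Dataset for 'all' Owner Groups",
#                    "external_id": "all:dataset"}}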
def generate_missing_datasets(self) -> Tuple[List[str], List[str]]:
target_datasets = self.generate_target_datasets()
# TODO: SDK should do this fine, that was an older me still learning :)
def chunks(data, SIZE=10000):
it = iter(data)
for i in range(0, len(data), SIZE):
yield {k: data[k] for k in islice(it, SIZE)}
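# Example of the chunking behaviour (illustrative values):
#   list(chunks({"a": 1, "b": 2, "c": 3}, SIZE=2)) == [{"a": 1, "b": 2}, {"c": 3}]
# 'islice' consumes the shared iterator, so keys are never repeated across chunks.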
# which targets are not already deployed?
missing_datasets = {
name: payload
for name, payload in target_datasets.items()
if name not in self.deployed["datasets"]["name"].tolist()
}
if missing_datasets:
# create all datasets which are not already deployed
# https://docs.cognite.com/api/v1/#operation/createDataSets
for chunked_missing_datasets in chunks(missing_datasets, 10):
datasets_to_be_created = [
DataSet(
name=name,
description=payload.get("description"),
external_id=payload.get("external_id"),
metadata=payload.get("metadata"),
write_protected=True,
)
for name, payload in chunked_missing_datasets.items()
]
if self.is_dry_run:
for data_set_to_be_created in datasets_to_be_created:
_logger.info(f"Dry run - Creating dataset with name: <{data_set_to_be_created.name}>")
_logger.debug(f"Dry run - Creating dataset: <{data_set_to_be_created}>")
else:
self.client.data_sets.create(datasets_to_be_created)
# which targets are already deployed?
existing_datasets = {
# dictionary generator
# key:
dataset_columns["name"]:
# value
# Merge dataset 'id' from CDF with dataset arguments from config.yml
dict(id=dataset_columns["id"], **target_datasets[dataset_columns["name"]])
for row_id, dataset_columns in self.deployed["datasets"].iterrows() # iterating pd dataframe
if dataset_columns["name"] in target_datasets.keys()
}
if existing_datasets:
# update datasets which are already deployed
# https://docs.cognite.com/api/v1/#operation/createDataSets
# TODO: description, metadata, externalId
for chunked_existing_datasets in chunks(existing_datasets, 10):
datasets_to_be_updated = [
DataSetUpdate(id=dataset["id"])
.name.set(name)
.description.set(dataset.get("description"))
.external_id.set(dataset.get("external_id"))
.metadata.set(dataset.get("metadata"))
for name, dataset in chunked_existing_datasets.items()
]
if self.is_dry_run:
for data_set_to_be_updated in datasets_to_be_updated:
_logger.info(f"Dry run - Updating dataset with name: <{data_set_to_be_updated.name}>")
_logger.debug(f"Dry run - Updating dataset: <{data_set_to_be_updated}>")
# _logger.info(f"Dry run - Updating dataset: <{data_set_to_be_updated}>")
else:
self.client.data_sets.update(datasets_to_be_updated)
return list(target_datasets.keys()), list(missing_datasets.keys())
def generate_target_raw_dbs(self) -> Set[str]:
# list of all targets: autogenerated raw_db names
target_raw_db_names = set(
[
self.get_raw_dbs_name_template().format(node_name=ns_node.node_name, raw_variant=raw_variant)
for ns in self.bootstrap_config.namespaces
for ns_node in ns.ns_nodes
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
target_raw_db_names.update(
# add RAW DBs for 'all' users
[
self.get_raw_dbs_name_template().format(
node_name=f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}"
if ns_name
else BootstrapCore.AGGREGATED_LEVEL_NAME,
raw_variant=raw_variant,
)
# creating allprojects at group type level + top-level
for ns_name in list([ns.ns_name for ns in self.bootstrap_config.namespaces]) + [""]
for raw_variant in BootstrapCore.RAW_VARIANTS
]
)
return target_raw_db_names
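# Illustrative result (names follow the raw-db templates; ':state' is the extra
# RAW_VARIANTS suffix seen in the examples elsewhere in this file):
#   {"src:002:weather:rawdb", "src:002:weather:rawdb:state",
#    "src:all:rawdb", "src:all:rawdb:state", "all:rawdb", "all:rawdb:state"}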
def generate_missing_raw_dbs(self) -> Tuple[List[str], List[str]]:
target_raw_db_names = self.generate_target_raw_dbs()
try:
# which targets are not already deployed?
missing_rawdbs = target_raw_db_names - set(self.deployed["raw_dbs"]["name"])
except Exception as exc:
_logger.info(f"Raw databases do not exist in CDF:\n{exc}")
missing_rawdbs = target_raw_db_names
if missing_rawdbs:
# create all raw_dbs which are not already deployed
if self.is_dry_run:
for raw_db in list(missing_rawdbs):
_logger.info(f"Dry run - Creating rawdb: <{raw_db}>")
else:
self.client.raw.databases.create(list(missing_rawdbs))
return target_raw_db_names, missing_rawdbs
"""
"Special CDF Groups" are groups which don't have capabilities but have an effect by their name only.
1. 'transformations' group: grants access to "Fusion > Integrate > Transformations"
2. 'extractors' group: grants access to "Fusion > Integrate > Extract Data" which allows dowload of extractors
Both of them are about getting deprecated in the near future (time of writing: Q4 '21).
- 'transformations' can already be replaced with dedicated 'transformationsAcl' capabilities
- 'extractors' only used to grant access to extractor-download page
"""
def generate_special_groups(self):
special_group_names = ["extractors", "transformations"]
_logger.info(f"Generating special groups:\n{special_group_names}")
for special_group_name in special_group_names:
self.create_group(group_name=special_group_name)
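# Note: the special groups above are created by name only — create_group() is called
# without 'group_capabilities' — so the resulting CDF Groups carry no ACLs and act
# purely as feature switches for the Fusion UI.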
# generate all groups - iterating through the 3-level hierarchy
def generate_groups(self):
# permutate the combinations
for action in ["read", "owner"]: # action_dimensions w/o 'admin'
for ns in self.bootstrap_config.namespaces:
for ns_node in ns.ns_nodes:
# group for each dedicated group-type id
self.process_group(action, ns.ns_name, ns_node.node_name)
# 'all' groups on group-type level
# (access to all datasets/ raw-dbs which belong to this group-type)
self.process_group(action, ns.ns_name)
# 'all' groups on action level (no limits to datasets or raw-dbs)
self.process_group(action)
# creating CDF Group for root_account (highest admin-level)
for root_account in ["root"]:
self.process_group(root_account=root_account)
def load_deployed_config_from_cdf(self, groups_only=False) -> None:
"""Load CDF Groups, Datasets and RAW DBs as pd.DataFrames
and store them in 'self.deployed' dictionary.
Args:
groups_only (bool, optional): Limit to CDF Groups only (used by 'prepare' command). Defaults to False.
"""
NOLIMIT = -1
#
# Groups
#
groups_df = self.client.iam.groups.list(all=True).to_pandas()
available_group_columns = [
column
for column in groups_df.columns
if column in ["name", "id", "sourceId", "capabilities"]
] # fmt: skip
if groups_only:
#
# early exit
#
self.deployed = {"groups": groups_df[available_group_columns]}
return
#
# Data Sets
#
datasets_df = self.client.data_sets.list(limit=NOLIMIT).to_pandas()
if len(datasets_df) == 0:
# create an empty dataframe with columns, as SDK responded with no columns
datasets_df = pd.DataFrame(columns=["name", "id"])
else:
datasets_df = datasets_df[["name", "id"]]
#
# RAW DBs
#
rawdbs_df = self.client.raw.databases.list(limit=NOLIMIT).to_pandas()
if len(rawdbs_df) == 0:
# create an empty dataframe with columns, as SDK responded with no columns
rawdbs_df = pd.DataFrame(columns=["name"])
else:
rawdbs_df = rawdbs_df[["name"]]
# store DataFrames
# deployed: Dict[str, pd.DataFrame]
self.deployed = {
"groups": groups_df[available_group_columns],
"datasets": datasets_df,
"raw_dbs": rawdbs_df,
}
# prepare a yaml for "delete" job
def dump_delete_template_to_yaml(self) -> None:
# and reload again now with latest group config too
time.sleep(5) # wait for groups to be created!
self.load_deployed_config_from_cdf()
delete_template = yaml.dump(
{
"delete_or_deprecate": {
"raw_dbs": [],
"datasets": [],
"groups": [],
},
"latest_deployment": {
"raw_dbs": sorted(self.deployed["raw_dbs"].sort_values(["name"])["name"].tolist()),
# fillna('') because dataset names can be empty (NaN value)
"datasets": sorted(self.deployed["datasets"].fillna("").sort_values(["name"])["name"].tolist()),
# fillna('') because group names can be empty (NaN value)
"groups": sorted(self.deployed["groups"].fillna("").sort_values(["name"])["name"].tolist()),
},
# TODO: 220509 pa: this dict cannot support (possible) duplicate dataset names
# and why is this dumped anyway? Is this just for info?
"dataset_ids": {
row["name"]: row["id"] for i, row in sorted(self.deployed["datasets"][["name", "id"]].iterrows())
},
}
)
_logger.info(f"Delete template:\n{delete_template}")
# return delete_template
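# Illustrative shape of the dumped template (names and the dataset id are hypothetical):
#   delete_or_deprecate:
#     raw_dbs: []
#     datasets: []
#     groups: []
#   latest_deployment:
#     raw_dbs: [src:002:weather:rawdb, src:002:weather:rawdb:state]
#     datasets: [src:002:weather:dataset]
#     groups: [cdf:src:002:weather:owner]
#   dataset_ids:
#     src:002:weather:dataset: 1234567890123456
# Entries copied from 'latest_deployment' into 'delete_or_deprecate' are picked up by
# the 'delete' command.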
"""
### create / delete
* new in config
* delete removed from config
"""
def dry_run(self, dry_run: YesNoType) -> T_BootstrapCore:
self.is_dry_run = dry_run == YesNoType.yes
# return self for command chaining
return self
# '''
# oo.ooooo. oooo d8b .ooooo. oo.ooooo. .oooo. oooo d8b .ooooo.
# 888' `88b `888""8P d88' `88b 888' `88b `P )88b `888""8P d88' `88b
# 888 888 888 888ooo888 888 888 .oP"888 888 888ooo888
# 888 888 888 888 .o 888 888 d8( 888 888 888 .o
# 888bod8P' d888b `Y8bod8P' 888bod8P' `Y888""8o d888b `Y8bod8P'
# 888 888
# o888o o888o
# '''
def prepare(self, idp_source_id: str) -> None:
group_name = "cdf:bootstrap"
# group_name = f"{create_config.environment}:bootstrap"
group_capabilities = [
{"datasetsAcl": {"actions": ["READ", "WRITE", "OWNER"], "scope": {"all": {}}}},
{"rawAcl": {"actions": ["READ", "WRITE", "LIST"], "scope": {"all": {}}}},
{"groupsAcl": {"actions": ["LIST", "READ", "CREATE", "UPDATE", "DELETE"], "scope": {"all": {}}}},
{"projectsAcl": {"actions": ["READ", "UPDATE"], "scope": {"all": {}}}},
]
# TODO: replace with dataclass
idp_mapping = [
# sourceId
idp_source_id,
# sourceName
f"IdP Group ID: {idp_source_id}",
]
# load deployed groups with their ids and metadata
self.load_deployed_config_from_cdf(groups_only=True)
_logger.debug(f"GROUPS in CDF:\n{self.deployed['groups']}")
# allows idempotent creates, as it cleans up old groups with same names after creation
self.create_group(group_name=group_name, group_capabilities=group_capabilities, idp_mapping=idp_mapping)
if not self.is_dry_run:
_logger.info(f"Created CDF Group {group_name}")
_logger.info("Finished CDF Project Bootstrapper in 'prepare' mode ")
# '''
# .o8 oooo .
# "888 `888 .o8
# .oooo888 .ooooo. 888 .ooooo. .o888oo .ooooo.
# d88' `888 d88' `88b 888 d88' `88b 888 d88' `88b
# 888 888 888ooo888 888 888ooo888 888 888ooo888
# 888 888 888 .o 888 888 .o 888 . 888 .o
# `Y8bod88P" `Y8bod8P' o888o `Y8bod8P' "888" `Y8bod8P'
# '''
def delete(self):
# load deployed groups, datasets, raw_dbs with their ids and metadata
self.load_deployed_config_from_cdf()
# groups
group_names = self.delete_or_deprecate["groups"]
if group_names:
delete_group_ids = self.deployed["groups"].query("name in @group_names")["id"].tolist()
if delete_group_ids:
# only delete groups which exist
_logger.info(f"DELETE groups: {group_names}")
if not self.is_dry_run:
self.client.iam.groups.delete(delete_group_ids)
else:
_logger.info(f"Groups already deleted: {group_names}")
else:
_logger.info("No Groups to delete")
# raw_dbs
raw_db_names = self.delete_or_deprecate["raw_dbs"]
if raw_db_names:
delete_raw_db_names = list(set(raw_db_names).intersection(set(self.deployed["raw_dbs"]["name"])))
if delete_raw_db_names:
# only delete dbs which exist
# print("DELETE raw_dbs recursive with tables: ", raw_db_names)
_logger.info(f"DELETE raw_dbs recursive with tables: {raw_db_names}")
if not self.is_dry_run:
self.client.raw.databases.delete(delete_raw_db_names, recursive=True)
else:
# print(f"RAW DBs already deleted: {raw_db_names}")
_logger.info(f"RAW DBs already deleted: {raw_db_names}")
else:
_logger.info("No RAW Databases to delete")
# datasets cannot be deleted by design
# deprecate/archive them by prefixing their name with "_DEPR_", setting
# "archive=true" and a "description" with timestamp of deprecation
dataset_names = self.delete_or_deprecate["datasets"]
if dataset_names:
# get datasets which exists by name
delete_datasets_df = self.deployed["datasets"].query("name in @dataset_names")
if not delete_datasets_df.empty:
for i, row in delete_datasets_df.iterrows():
_logger.info(f"DEPRECATE dataset: {row['name']}")
update_dataset = self.client.data_sets.retrieve(id=row["id"])
update_dataset.name = (
f"_DEPR_{update_dataset.name}"
if not update_dataset.name.startswith("_DEPR_")
else f"{update_dataset.name}"
) # don't stack the DEPR prefixes
update_dataset.description = "Deprecated {}".format(self.get_timestamp())
update_dataset.metadata = dict(update_dataset.metadata, archived=True) # or dict(a, **b)
update_dataset.external_id = f"_DEPR_{update_dataset.external_id}_[{self.get_timestamp()}]"
if self.is_dry_run:
_logger.info(f"Dry run - Deprecating dataset: <{update_dataset}>")
else:
self.client.data_sets.update(update_dataset)
else:
_logger.info("No Datasets to archive (and mark as deprecated)")
# dump all configs to yaml, as copy/paste template for delete_or_deprecate step
self.dump_delete_template_to_yaml()
# TODO: write to file or standard output
_logger.info("Finished deleting CDF Groups, Datasets and RAW Databases")
# '''
# .o8 oooo
# "888 `888
# .oooo888 .ooooo. oo.ooooo. 888 .ooooo. oooo ooo
# d88' `888 d88' `88b 888' `88b 888 d88' `88b `88. .8'
# 888 888 888ooo888 888 888 888 888 888 `88..8'
# 888 888 888 .o 888 888 888 888 888 `888'
# `Y8bod88P" `Y8bod8P' 888bod8P' o888o `Y8bod8P' .8'
# 888 .o..P'
# o888o `Y8P'
# '''
def deploy(self, with_special_groups: YesNoType, with_raw_capability: YesNoType) -> None:
# store parameter as bool
# if provided they override configuration or defaults from yaml-config
if with_special_groups:
self.with_special_groups = with_special_groups == YesNoType.yes
if with_raw_capability:
self.with_raw_capability = with_raw_capability == YesNoType.yes
# debug new features and override with cli-parameters
_logger.info(f"From cli: {with_special_groups=} / {with_raw_capability=}")
_logger.info(f"Effective: {self.with_special_groups=} / {self.with_raw_capability=}")
# load deployed groups, datasets, raw_dbs with their ids and metadata
self.load_deployed_config_from_cdf()
_logger.debug(f"RAW_DBS in CDF:\n{self.deployed['raw_dbs']}")
_logger.debug(f"DATASETS in CDF:\n{self.deployed['datasets']}")
_logger.debug(f"GROUPS in CDF:\n{self.deployed['groups']}")
# run generate steps (only print results atm)
target_raw_dbs: List[str] = []
new_created_raw_dbs: List[str] = []
if self.with_raw_capability:
target_raw_dbs, new_created_raw_dbs = self.generate_missing_raw_dbs()
_logger.info(f"All RAW_DBS from config:\n{target_raw_dbs}")
_logger.info(f"New RAW_DBS to CDF:\n{new_created_raw_dbs}")
else:
# no RAW DBs means no access to RAW at all
# which means no 'rawAcl' capability to create
# remove it from the default types
_logger.info("Creating no RAW_DBS and no 'rawAcl' capability")
acl_default_types.remove("raw")
target_datasets, new_created_datasets = self.generate_missing_datasets()
_logger.info(f"All DATASETS from config:\n{target_datasets}")
_logger.info(f"New DATASETS to CDF:\n{new_created_datasets}")
# store all raw_dbs and datasets in scope of this configuration
self.all_scope_ctx = {
"raw": target_raw_dbs, # all raw_dbs
"datasets": target_datasets, # all datasets
}
# reload deployed configs to be used as reference for group creation
time.sleep(5) # wait for datasets and raw_dbs to be created!
self.load_deployed_config_from_cdf()
# Special CDF Groups and their aad_mappings
if with_special_groups == YesNoType.yes:
self.generate_special_groups()
# CDF Groups from configuration
self.generate_groups()
if not self.is_dry_run:
_logger.info("Created new CDF Groups")
# and reload again now with latest group config too
# dump all configs to yaml, as copy/paste template for delete_or_deprecate step
self.dump_delete_template_to_yaml()
_logger.info("Finished creating CDF Groups, Datasets and RAW Databases")
# _logger.info(f'Bootstrap Pipelines: created: {len(created)}, deleted: {len(delete_ids)}')
# '''
# .o8 o8o
# "888 `"'
# .oooo888 oooo .oooo. .oooooooo oooo d8b .oooo. ooo. .oo. .oo.
# d88' `888 `888 `P )88b 888' `88b `888""8P `P )88b `888P"Y88bP"Y88b
# 888 888 888 .oP"888 888 888 888 .oP"888 888 888 888
# 888 888 888 d8( 888 `88bod8P' 888 d8( 888 888 888 888
# `Y8bod88P" o888o `Y888""8o `8oooooo. d888b `Y888""8o o888o o888o o888o
# d" YD
# "Y88888P'
# '''
def diagram(
self,
to_markdown: YesNoType = YesNoType.no,
with_raw_capability: YesNoType = YesNoType.yes,
cdf_project: str = None,
) -> None:
"""Diagram mode used to document the given configuration as a Mermaid diagram.
Args:
to_markdown (YesNoType, optional):
- Encapsulate Mermaid diagram in Markdown syntax.
- Defaults to 'YesNoType.no'.
with_raw_capability (YesNoType, optional):
- Create RAW DBs and 'rawAcl' capability. Defaults to 'YesNoType.yes'.
cdf_project (str, optional):
- Provide the CDF Project to use for the diagram 'idp-cdf-mappings'.
Example:
# requires a 'cognite' configuration section
➟ poetry run bootstrap-cli diagram configs/config-deploy-example-v2.yml | clip.exe
# precedence over 'cognite.project' which CDF Project to diagram 'bootstrap.idp-cdf-mappings'
# making a 'cognite' section optional
➟ poetry run bootstrap-cli diagram --cdf-project shiny-dev configs/config-deploy-example-v2.yml | clip.exe
# precedence over configuration 'bootstrap.features.with-raw-capability'
➟ poetry run bootstrap-cli diagram --with-raw-capability no --cdf-project shiny-prod configs/config-deploy-example-v2.yml
""" # noqa
diagram_cdf_project = cdf_project if cdf_project else self.cdf_project
# same handling as in 'deploy' command
# store parameter as bool
# if available it overrides configuration or defaults from yaml-config
if with_raw_capability:
self.with_raw_capability = with_raw_capability == YesNoType.yes
# debug new features and override with cli-parameters
_logger.info(f"From cli: {with_raw_capability=}")
_logger.info(f"Effective: {self.with_raw_capability=}")
# store all raw_dbs and datasets in scope of this configuration
self.all_scope_ctx = {
"owner": (
all_scopes := {
# generate_target_raw_dbs -> returns a Set[str]
"raw": list(self.generate_target_raw_dbs()), # all raw_dbs
# generate_target_datasets -> returns a Dict[str, Any]
"datasets": list(self.generate_target_datasets().keys()), # all datasets
}
),
# and copy the same to 'read'
"read": all_scopes,
}
def get_group_name_and_scopes(
action: str = None, ns_name: str = None, node_name: str = None, root_account: str = None
) -> Tuple[str, Dict[str, Any]]:
"""Adopted generate_group_name_and_capabilities() and get_scope_ctx_groupedby_action()
to respond with
- the full-qualified CDF Group name and
- all scopes sorted by action [read|owner] and [raw|datasets]
TODO: support 'root'
Args:
action (str, optional):
One of the action_dimensions ["read", "owner"].
Defaults to None.
ns_name (str, optional):
Namespace like "src" or "uc".
Defaults to None.
node_name (str, optional):
Core group like "src:001:sap" or "uc:003:demand".
Defaults to None.
root_account (str, optional):
Name of the root-account.
Defaults to None.
Returns:
Tuple[str, Dict[str, Any]]: (group_name, scope_ctx_by_action)
scope_ctx_by_action is a dictionary with the following structure:
{'owner': {
'raw': ['src:002:weather:rawdb', 'src:002:weather:rawdb:state'],
'datasets': ['src:002:weather:dataset']
},
'read': {
'raw': [],
'datasets': []
}}
"""
group_name_full_qualified, scope_ctx_by_action = None, None
# detail level like cdf:src:001:public:read
if action and ns_name and node_name:
group_name_full_qualified = f"{BootstrapCore.GROUP_NAME_PREFIX}{node_name}:{action}"
scope_ctx_by_action = self.get_scope_ctx_groupedby_action(action, ns_name, node_name)
# group-type level like cdf:src:all:read
elif action and ns_name:
# 'all' groups on group-type level
# (access to all datasets/ raw-dbs which belong to this group-type)
group_name_full_qualified = (
f"{BootstrapCore.GROUP_NAME_PREFIX}{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}"
)
scope_ctx_by_action = self.get_scope_ctx_groupedby_action(action, ns_name)
# top level like cdf:all:read
elif action:
# 'all' groups on action level (no limits to datasets or raw-dbs)
group_name_full_qualified = (
f"{BootstrapCore.GROUP_NAME_PREFIX}{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}"
)
# limit all_scopes to 'action'
scope_ctx_by_action = {action: self.all_scope_ctx[action]}
# root level like cdf:root
elif root_account: # no parameters
# all (no limits)
group_name_full_qualified = f"{BootstrapCore.GROUP_NAME_PREFIX}{root_account}"
return group_name_full_qualified, scope_ctx_by_action
class SubgraphTypes(str, Enum):
idp = "IdP Groups"
owner = "'Owner' Groups"
read = "'Read' Groups"
# OWNER
core_cdf_owner = "Node Level (Owner)"
ns_cdf_owner = "Namespace Level (Owner)"
scope_owner = "Scopes (Owner)"
# READ
core_cdf_read = "Node Level (Read)"
ns_cdf_read = "Namespace Level (Read)"
scope_read = "Scopes (Read)"
# TODO: refactoring required
def group_to_graph(
graph: GraphRegistry,
action: str = None,
ns_name: str = None,
node_name: str = None,
root_account: str = None,
) -> None:
if root_account:
return
group_name, scope_ctx_by_action = get_group_name_and_scopes(action, ns_name, node_name, root_account)
# check lookup from provided config
mapping = self.bootstrap_config.get_idp_cdf_mapping_for_group(
# diagram explicit given cdf_project, or configured in 'cognite' configuration section
cdf_project=diagram_cdf_project,
cdf_group=group_name,
)
# unpack
# idp_source_id, idp_source_name = self.aad_mapping_lookup.get(node_name, [None, None])
idp_source_id, idp_source_name = mapping.idp_source_id, mapping.idp_source_name
_logger.info(f"{ns_name=} : {group_name=} : {scope_ctx_by_action=} [{idp_source_name=}]")
# preload master subgraphs
core_cdf = graph.get_or_create(getattr(SubgraphTypes, f"core_cdf_{action}"))
ns_cdf_graph = graph.get_or_create(getattr(SubgraphTypes, f"ns_cdf_{action}"))
scope_graph = graph.get_or_create(getattr(SubgraphTypes, f"scope_{action}"))
#
# NODE - IDP GROUP
#
idp = graph.get_or_create(SubgraphTypes.idp)
if idp_source_name and (idp_source_name not in idp):
idp.elements.append(
TrapezNode(
id_name=idp_source_name,
display=idp_source_name,
comments=[f'IdP objectId: {idp_source_id}']
)
) # fmt: skip
graph.edges.append(
Edge(
id_name=idp_source_name,
dest=group_name,
annotation=None,
comments=[]
)
) # fmt: skip
# {'owner': {'raw': ['src:002:weather:rawdb', 'src:002:weather:rawdb:state'],
# 'datasets': ['src:002:weather:dataset']},
# 'read': {'raw': [], 'datasets': []}}
#
# NODE - CORE LEVEL
# 'cdf:src:001:public:read'
#
if action and ns_name and node_name:
core_cdf.elements.append(
RoundedNode(
id_name=group_name,
display=group_name,
comments=""
)
) # fmt: skip
#
# EDGE FROM PARENT 'src:all' to 'src:001:sap'
#
edge_type_cls = Edge if action == "owner" else DottedEdge
graph.edges.append(
edge_type_cls(
# link from all:{ns}
# multiline f-string split as it got too long
# TODO: refactor into string-templates
id_name=f"{BootstrapCore.GROUP_NAME_PREFIX}{ns_name}:"
f"{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}",
dest=group_name,
annotation="",
comments=[],
)
) # fmt: skip
# add core and all scopes
# shared_action: [read|owner]
for shared_action, scope_ctx in scope_ctx_by_action.items():
# scope_type: [raw|datasets]
# scopes: List[str]
for scope_type, scopes in scope_ctx.items():
if not self.with_raw_capability and scope_type == "raw":
continue # SKIP RAW
for scope_name in scopes:
#
# NODE DATASET or RAW scope
# 'src:001:sap:rawdb'
#
if scope_name not in scope_graph:
node_type_cls = SubroutineNode if scope_type == "raw" else AssymetricNode
scope_graph.elements.append(
node_type_cls(
id_name=f"{scope_name}__{action}__{scope_type}",
display=scope_name,
comments=""
)
) # fmt: skip
#
# EDGE FROM actual processed group-node to added scope
# cdf:src:001:sap:read to 'src:001:sap:rawdb'
#
edge_type_cls = Edge if shared_action == "owner" else DottedEdge
graph.edges.append(
edge_type_cls(
id_name=group_name,
dest=f"{scope_name}__{action}__{scope_type}",
annotation=shared_action,
comments=[],
)
) # fmt: skip
#
# NODE - NAMESPACE LEVEL
# 'src:all:read' or 'src:all:owner'
elif action and ns_name:
ns_cdf_graph.elements.append(
Node(
id_name=group_name,
display=group_name,
comments=""
)
) # fmt: skip
#
# EDGE FROM PARENT top LEVEL to NAMESPACE LEVEL
# 'all' to 'src:all'
#
edge_type_cls = Edge if action == "owner" else DottedEdge
graph.edges.append(
edge_type_cls(
id_name=f"{BootstrapCore.GROUP_NAME_PREFIX}{BootstrapCore.AGGREGATED_LEVEL_NAME}:{action}",
dest=group_name,
annotation="",
comments=[],
)
) # fmt: skip
# add namespace-node and all scopes
# shared_action: [read|owner]
for shared_action, scope_ctx in scope_ctx_by_action.items():
# scope_type: [raw|datasets]
# scopes: List[str]
for scope_type, scopes in scope_ctx.items():
if not self.with_raw_capability and scope_type == "raw":
continue # SKIP RAW
for scope_name in scopes:
# LIMIT only to direct scopes for readability
# which have for example 'src:all:' as prefix
if not scope_name.startswith(f"{ns_name}:{BootstrapCore.AGGREGATED_LEVEL_NAME}:"):
continue
#
# NODE DATASET or RAW scope
# 'src:all:rawdb'
#
if scope_name not in scope_graph:
node_type_cls = SubroutineNode if scope_type == "raw" else AssymetricNode
scope_graph.elements.append(
node_type_cls(
id_name=f"{scope_name}__{action}__{scope_type}",
display=scope_name,
comments=""
)
) # fmt: skip
#
# EDGE FROM actual processed group-node to added scope
# cdf:src:all:read to 'src:all:rawdb'
#
edge_type_cls = Edge if shared_action == "owner" else DottedEdge
graph.edges.append(
edge_type_cls(
id_name=group_name,
dest=f"{scope_name}__{action}__{scope_type}",
annotation=shared_action,
comments=[],
)
) # fmt: skip
#
# NODE - TOP LEVEL
# like `cdf:all:read`
#
elif action:
ns_cdf_graph.elements.append(
Node(
id_name=group_name,
display=group_name,
comments=""
)
) # fmt: skip
# add namespace-node and all scopes
# shared_action: [read|owner]
for shared_action, scope_ctx in scope_ctx_by_action.items():
# scope_type: [raw|datasets]
# scopes: List[str]
for scope_type, scopes in scope_ctx.items():
if not self.with_raw_capability and scope_type == "raw":
continue # SKIP RAW
for scope_name in scopes:
# LIMIT only to direct scopes for readability
# which have for example 'src:all:' as prefix
if not scope_name.startswith(f"{BootstrapCore.AGGREGATED_LEVEL_NAME}:"):
continue
# _logger.info(f"> {action=} {shared_action=} process {scope_name=} : all {scopes=}")
#
# NODE DATASET or RAW scope
# 'all:rawdb'
#
if scope_name not in scope_graph:
# _logger.info(f">> add {scope_name=}__{action=}")
node_type_cls = SubroutineNode if scope_type == "raw" else AssymetricNode
scope_graph.elements.append(
node_type_cls(
id_name=f"{scope_name}__{action}__{scope_type}",
display=scope_name,
comments=""
)
) # fmt: skip
#
# EDGE FROM actual processed group-node to added scope
# cdf:all:read to 'all:rawdb'
#
edge_type_cls = Edge if shared_action == "owner" else DottedEdge
graph.edges.append(
edge_type_cls(
id_name=group_name,
dest=f"{scope_name}__{action}__{scope_type}",
annotation=shared_action,
comments=[],
)
) # fmt: skip
#
# finished inline helper-methods
# starting diagram logic
#
if not self.with_raw_capability:
# no RAW DBs means no access to RAW at all
# which means no 'rawAcl' capability to create
# remove it from the default types
_logger.info("Without RAW_DBS and 'rawAcl' capability")
acl_default_types.remove("raw")
# sorting relationship output into potential subgraphs
graph = GraphRegistry()
# top subgraphs (three columns layout)
# provide Subgraphs with a 'subgraph_name' and a 'subgraph_short_name'
# using the SubgraphTypes enum 'name' (default) and 'value' properties
idp_group = graph.get_or_create(
SubgraphTypes.idp, f"{SubgraphTypes.idp.value} for CDF: '{diagram_cdf_project}'"
)
owner = graph.get_or_create(SubgraphTypes.owner, SubgraphTypes.owner.value)
read = graph.get_or_create(SubgraphTypes.read, SubgraphTypes.read.value)
# nested subgraphs
core_cdf_owner = graph.get_or_create(SubgraphTypes.core_cdf_owner, SubgraphTypes.core_cdf_owner.value)
ns_cdf_owner = graph.get_or_create(SubgraphTypes.ns_cdf_owner, SubgraphTypes.ns_cdf_owner.value)
core_cdf_read = graph.get_or_create(SubgraphTypes.core_cdf_read, SubgraphTypes.core_cdf_read.value)
ns_cdf_read = graph.get_or_create(SubgraphTypes.ns_cdf_read, SubgraphTypes.ns_cdf_read.value)
scope_owner = graph.get_or_create(SubgraphTypes.scope_owner, SubgraphTypes.scope_owner.value)
scope_read = graph.get_or_create(SubgraphTypes.scope_read, SubgraphTypes.scope_read.value)
# add the three top level groups to our graph
graph.elements.extend(
[
idp_group,
owner,
read,
# doc_group
]
)
# add/nest the owner-subgraphs to its parent subgraph
owner.elements.extend(
[
core_cdf_owner,
ns_cdf_owner,
scope_owner,
]
)
# add/nest the read-subgraphs to its parent subgraph
read.elements.extend(
[
core_cdf_read,
ns_cdf_read,
scope_read,
]
)
# permutate the combinations
for action in ["read", "owner"]: # action_dimensions w/o 'admin'
for ns in self.bootstrap_config.namespaces:
for ns_node in ns.ns_nodes:
# group for each dedicated group-type id
group_to_graph(graph, action, ns.ns_name, ns_node.node_name)
# 'all' groups on group-type level
# (access to all datasets/ raw-dbs which belong to this group-type)
group_to_graph(graph, action, ns.ns_name)
# 'all' groups on action level (no limits to datasets or raw-dbs)
group_to_graph(graph, action)
# all (no limits + admin)
# 211013 pa: for AAD root:client and root:user can be merged into 'root'
# for root_account in ["root:client", "root:user"]:
for root_account in ["root"]:
group_to_graph(graph, root_account=root_account)
mermaid_code = graph.to_mermaid()
_logger.info(f"Generated {len(mermaid_code)} characters")
markdown_wrapper_template = """
## auto-generated by bootstrap-cli
```mermaid
{mermaid_code}
```"""
# print to stdout so that the diagram alone can be piped to clipboard or a file
print(
markdown_wrapper_template.format(mermaid_code=mermaid_code)
if to_markdown == YesNoType.yes
else mermaid_code
)
# '''
# 888 d8b 888
# 888 Y8P 888
# 888 888
# .d8888b 888 888 .d8888b 888 888
# d88P" 888 888 d88P" 888 .88P
# 888 888 888 888 888888K
# Y88b. 888 888 Y88b. 888 "88b
# "Y8888P 888 888 "Y8888P 888 888
# '''
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option(prog_name="bootstrap_cli", version=__version__)
@click.option(
"--cdf-project-name",
help="CDF Project to interact with CDF API, the 'BOOTSTRAP_CDF_PROJECT',"
"environment variable can be used instead. Required for OAuth2 and optional for api-keys.",
envvar="BOOTSTRAP_CDF_PROJECT",
)
# TODO: is cluster an alternative for host?
@click.option(
"--cluster",
default="westeurope-1",
help="The CDF cluster where CDF Project is hosted (e.g. greenfield, europe-west1-1),"
"Provide this or make sure to set the 'BOOTSTRAP_CDF_CLUSTER' environment variable. "
"Default: westeurope-1",
envvar="BOOTSTRAP_CDF_CLUSTER",
)
@click.option(
"--host",
default="https://bluefield.cognitedata.com/",
help="The CDF host where CDF Project is hosted (e.g. https://bluefield.cognitedata.com),"
"Provide this or make sure to set the 'BOOTSTRAP_CDF_HOST' environment variable."
"Default: https://bluefield.cognitedata.com/",
envvar="BOOTSTRAP_CDF_HOST",
)
@click.option(
"--api-key",
help="API key to interact with CDF API. Provide this or make sure to set the 'BOOTSTRAP_CDF_API_KEY',"
"environment variable if you want to authenticate with API keys.",
envvar="BOOTSTRAP_CDF_API_KEY",
)
@click.option(
"--client-id",
help="IdP Client ID to interact with CDF API. Provide this or make sure to set the "
"'BOOTSTRAP_IDP_CLIENT_ID' environment variable if you want to authenticate with OAuth2.",
envvar="BOOTSTRAP_IDP_CLIENT_ID",
)
@click.option(
"--client-secret",
help="IdP Client secret to interact with CDF API. Provide this or make sure to set the "
"'BOOTSTRAP_IDP_CLIENT_SECRET' environment variable if you want to authenticate with OAuth2.",
envvar="BOOTSTRAP_IDP_CLIENT_SECRET",
)
@click.option(
"--token-url",
help="IdP Token URL to interact with CDF API. Provide this or make sure to set the "
"'BOOTSTRAP_IDP_TOKEN_URL' environment variable if you want to authenticate with OAuth2.",
envvar="BOOTSTRAP_IDP_TOKEN_URL",
)
@click.option(
"--scopes",
help="IdP Scopes to interact with CDF API, relevant for OAuth2 authentication method. "
"The 'BOOTSTRAP_IDP_SCOPES' environment variable can be used instead.",
envvar="BOOTSTRAP_IDP_SCOPES",
)
@click.option(
"--audience",
help="IdP Audience to interact with CDF API, relevant for OAuth2 authentication method. "
"The 'BOOTSTRAP_IDP_AUDIENCE' environment variable can be used instead.",
envvar="BOOTSTRAP_IDP_AUDIENCE",
)
@click.option(
"--dotenv-path",
help="Provide a relative or absolute path to an .env file (for commandline usage only)",
)
@click.option(
"--debug",
is_flag=True,
help="Print debug information",
)
@click.option(
"--dry-run",
default="no",
type=click.Choice(["yes", "no"], case_sensitive=False),
help="Only logging planned CDF API action while doing nothing." " Defaults to 'no'",
)
@click.pass_context
def bootstrap_cli(
# click.core.Context
context: Context,
# cdf
cluster: str = "westeurope-1",
cdf_project_name: Optional[str] = None,
host: str = None,
api_key: Optional[str] = None,
# cdf idp
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
scopes: Optional[str] = None,
token_url: Optional[str] = None,
audience: Optional[str] = None,
# cli
# TODO: dotenv_path: Optional[click.Path] = None,
dotenv_path: Optional[str] = None,
debug: bool = False,
dry_run: str = "no",
) -> None:
# load .env from file if exists, use given dotenv_path if provided
load_dotenv(dotenv_path=dotenv_path)
context.obj = {
# cdf
"cluster": cluster,
"cdf_project_name": cdf_project_name,
"host": host,
"api_key": api_key,
# cdf idp
"client_id": client_id,
"client_secret": client_secret,
"scopes": scopes,
"token_url": token_url,
"audience": audience,
# cli
"dotenv_path": dotenv_path,
"debug": debug,
"dry_run": dry_run,
}
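# Example invocation (illustrative values), relying on the env-vars declared above:
#   BOOTSTRAP_CDF_PROJECT=shiny-dev BOOTSTRAP_IDP_CLIENT_ID=... BOOTSTRAP_IDP_CLIENT_SECRET=... \
#     poetry run bootstrap-cli --dry-run yes deploy configs/config-deploy-example-v2.yml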
@click.command(help="Deploy a set of bootstrap from a config-file")
@click.argument(
"config_file",
default="./config-bootstrap.yml",
)
@click.option(
"--with-special-groups",
# having this as a flag does not work for the gh-action 'actions.yml' manifest
# instead using explicit choice options
# is_flag=True,
# default="no",
type=click.Choice(["yes", "no"], case_sensitive=False),
help="Create special CDF Groups, which don't have capabilities (extractions, transformations). Defaults to 'no'",
)
@click.option(
"--with-raw-capability",
# default="yes", # default defined in 'configuration.BootstrapFeatures'
type=click.Choice(["yes", "no"], case_sensitive=False),
help="Create RAW DBs and 'rawAcl' capability. Defaults to 'yes'",
)
@click.pass_obj
def deploy(
# click.core.Context obj
obj: Dict,
config_file: str,
with_special_groups: YesNoType,
with_raw_capability: YesNoType,
) -> None:
click.echo(click.style("Deploying CDF Project bootstrap...", fg="red"))
if obj["debug"]:
# TODO not working yet :/
_logger.setLevel("DEBUG") # INFO/DEBUG
try:
(
BootstrapCore(config_file, command=CommandMode.DEPLOY)
.validate_config_length_limits()
.validate_config_is_cdf_project_in_mappings()
.dry_run(obj["dry_run"])
.deploy(
with_special_groups=with_special_groups,
with_raw_capability=with_raw_capability,
)
) # fmt:skip
click.echo(click.style("CDF Project bootstrap deployed", fg="blue"))
except BootstrapConfigError as e:
exit(e.message)
@click.command(
help="Prepare an elevated CDF Group 'cdf:bootstrap', using the same AAD Group link "
"as your initially provided 'oidc-admin-group'. "
"With additional capabilities to run the 'deploy' and 'delete' commands next. "
"The 'prepare' command is only required once per CDF Project."
)
@click.argument(
"config_file",
default="./config-bootstrap.yml",
)
# TODO: support '--idp-source-id' as an option too, to match v2 naming changes?
@click.option(
"--aad-source-id",
"--idp-source-id",
"idp_source_id", # explicit named variable for alternatives
required=True,
help="Provide the IdP Source ID to use for the 'cdf:bootstrap' Group. "
"Typically for a new project its the same configured for the initial provided "
"CDF Group named 'oidc-admin-group'. "
"The parameter option '--aad-source-id' will be deprecated in next major release",
)
@click.pass_obj
def prepare(
# click.core.Context obj
obj: Dict,
config_file: str,
idp_source_id: str,
dry_run: YesNoType = YesNoType.no,
) -> None:
click.echo(click.style("Prepare CDF Project ...", fg="red"))
if obj["debug"]:
# TODO not working yet :/
_logger.setLevel("DEBUG") # INFO/DEBUG
try:
(
BootstrapCore(config_file, command=CommandMode.PREPARE)
# .validate_config() # TODO
.dry_run(obj["dry_run"])
.prepare(idp_source_id=idp_source_id)
) # fmt:skip
click.echo(click.style("CDF Project bootstrap prepared for running 'deploy' command next.", fg="blue"))
except BootstrapConfigError as e:
exit(e.message)
@click.command(
help="Delete mode used to delete CDF Groups, Datasets and Raw Databases, "
"CDF Groups and RAW Databases will be deleted, while Datasets will be archived "
"and deprecated (as they cannot be deleted)."
)
@click.argument(
"config_file",
default="./config-bootstrap.yml",
)
@click.pass_obj
def delete(
# click.core.Context obj
obj: Dict,
config_file: str,
) -> None:
click.echo(click.style("Delete CDF Project ...", fg="red"))
if obj["debug"]:
# TODO not working yet :/
_logger.setLevel("DEBUG") # INFO/DEBUG
try:
(
BootstrapCore(config_file, command=CommandMode.DELETE)
# .validate_config() # TODO
.dry_run(obj["dry_run"]).delete()
)
click.echo(
click.style(
"CDF Project relevant groups and raw_dbs are deleted and/or datasets are archived and deprecated ",
fg="blue",
)
)
except BootstrapConfigError as e:
exit(e.message)
@click.command(help="Diagram mode used to document the given configuration as a Mermaid diagram")
@click.argument(
"config_file",
default="./config-bootstrap.yml",
)
@click.option(
"--markdown",
default="no",
type=click.Choice(["yes", "no"], case_sensitive=False),
help="Encapsulate Mermaid diagram in Markdown syntax. " "Defaults to 'no'",
)
@click.option(
"--with-raw-capability",
type=click.Choice(["yes", "no"], case_sensitive=False),
help="Create RAW DBs and 'rawAcl' capability. " "Defaults to 'yes'",
)
@click.option(
"--cdf-project",
help="[optional] Provide the CDF Project name to use for the diagram 'idp-cdf-mappings'.",
)
@click.pass_obj
def diagram(
# click.core.Context obj
obj: Dict,
config_file: str,
markdown: YesNoType,
with_raw_capability: YesNoType,
cdf_project: str,
) -> None:
# click.echo(click.style("Diagram CDF Project ...", fg="red"))
if obj["debug"]:
# TODO not working yet :/
_logger.setLevel("DEBUG") # INFO/DEBUG
try:
(
BootstrapCore(config_file, command=CommandMode.DIAGRAM)
.validate_config_length_limits()
.validate_config_is_cdf_project_in_mappings()
# .dry_run(obj['dry_run'])
.diagram(
to_markdown=markdown,
with_raw_capability=with_raw_capability,
cdf_project=cdf_project,
)
) # fmt:skip
# click.echo(
# click.style(
# "CDF Project relevant groups and raw_dbs are documented as Mermaid",
# fg="blue",
# )
# )
except BootstrapConfigError as e:
exit(e.message)
bootstrap_cli.add_command(deploy)
bootstrap_cli.add_command(prepare)
bootstrap_cli.add_command(delete)
bootstrap_cli.add_command(diagram)
def main() -> None:
# call click.pass_context
bootstrap_cli()
if __name__ == "__main__":
main()
| 42.241869
| 134
| 0.560934
| 72,307
| 0.784071
| 3,557
| 0.038571
| 10,902
| 0.118217
| 0
| 0
| 40,718
| 0.441531
|
e3e011a21c49b5509fea872c5fc1398a8616f542
| 4,440
|
py
|
Python
|
pyhcl/passes/expand_memory.py
|
raybdzhou/PyChip-py-hcl
|
08edc6ad4d2978eb417482f6f92678f8f9a1e3c7
|
[
"MIT"
] | null | null | null |
pyhcl/passes/expand_memory.py
|
raybdzhou/PyChip-py-hcl
|
08edc6ad4d2978eb417482f6f92678f8f9a1e3c7
|
[
"MIT"
] | null | null | null |
pyhcl/passes/expand_memory.py
|
raybdzhou/PyChip-py-hcl
|
08edc6ad4d2978eb417482f6f92678f8f9a1e3c7
|
[
"MIT"
] | null | null | null |
from typing import List, Dict
from pyhcl.ir.low_ir import *
from pyhcl.ir.low_prim import *
from pyhcl.passes._pass import Pass
from pyhcl.passes.utils import get_binary_width
DEFAULT_READ_LATENCY = 0
DEFAULT_WRITE_LATENCY = 1
@dataclass
class ExpandMemory(Pass):
def run(self, c: Circuit):
def get_mem_ports(stmts: List[Statement], writes: Dict[str, List[Statement]], reads: Dict[str, List[Statement]]):
for stmt in stmts:
if isinstance(stmt, DefMemPort):
if stmt.rw is True:
if stmt.mem.name in reads:
reads[stmt.mem.name] = reads[stmt.mem.name] + [stmt.name]
else:
reads[stmt.mem.name] = [stmt.name]
else:
if stmt.mem.name in writes:
writes[stmt.mem.name] = writes[stmt.mem.name] + [stmt.name]
else:
writes[stmt.mem.name] = [stmt.name]
def expand_mem_port(stmts: List[Statement], target: Statement):
addr_width = IntWidth(get_binary_width(target.mem.typ.size))
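# 'get_binary_width' is assumed here to return the number of address bits needed for a
# memory of the given size (roughly ceil(log2(size))), so 'addr_width' matches the width
# of the generated 'addr' field below.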
# addr
stmts.append(Connect(
SubField(SubField(Reference(target.mem.name, UIntType(addr_width)),target.name, UIntType(addr_width)), 'addr', UIntType(addr_width)),
UIntLiteral(target.index.value, addr_width)))
# en
stmts.append(Connect(
SubField(SubField(Reference(target.mem.name, UIntType(IntWidth(1))),target.name, UIntType(IntWidth(1))), 'en', UIntType(IntWidth(1))),
UIntLiteral(1, IntWidth(1))))
# clk
stmts.append(Connect(
SubField(SubField(Reference(target.mem.name, ClockType()),target.name, ClockType()), 'clk', ClockType()),
target.clk))
# mask
if target.rw is False:
stmts.append(Connect(
SubField(SubField(Reference(target.mem.name, UIntType(IntWidth(1))),target.name, UIntType(IntWidth(1))), 'mask', UIntType(IntWidth(1))),
UIntLiteral(1, IntWidth(1))))
def expand_memory_e(s: Statement, ports: Dict[str, Statement]) -> Statement:
loc, expr = s.loc, s.expr
if isinstance(loc, Reference) and loc.name in ports:
loc = SubField(SubField(Reference(ports[loc.name].mem.name, loc.typ), loc.name, loc.typ), 'data', loc.typ)
elif isinstance(expr, Reference) and expr.name in ports:
expr = SubField(SubField(Reference(ports[expr.name].mem.name, expr.typ), expr.name, expr.typ), 'data', expr.typ)
return Connect(loc, expr, s.info, s.blocking, s.bidirection, s.mem)
def expand_memory_s(stmts: List[Statement]) -> List[Statement]:
new_stmts: List[Statement] = []
writes: Dict[str, List[Statement]] = {}
reads: Dict[str, List[Statement]] = {}
ports: Dict[str, Statement] = {}
get_mem_ports(stmts, writes, reads)
for stmt in stmts:
if isinstance(stmt, DefMemory):
new_stmts.append(WDefMemory(
stmt.name,
stmt.memType,
stmt.memType.typ,
stmt.memType.size,
DEFAULT_READ_LATENCY,
DEFAULT_WRITE_LATENCY,
reads.get(stmt.name, []),
writes.get(stmt.name, [])))
elif isinstance(stmt, DefMemPort):
expand_mem_port(new_stmts, stmt)
ports[stmt.name] = stmt
elif isinstance(stmt, Connect):
new_stmts.append(expand_memory_e(stmt, ports))
else:
new_stmts.append(stmt)
return new_stmts
def expand_memory_m(m: DefModule) -> DefModule:
return Module(
m.name,
m.ports,
Block(expand_memory_s(m.body.stmts)),
m.typ,
m.info
)
new_modules = []
for m in c.modules:
if isinstance(m, Module):
new_modules.append(expand_memory_m(m))
else:
new_modules.append(m)
return Circuit(new_modules, c.main, c.info)
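# Minimal usage sketch (assuming a low-IR 'Circuit' object named 'circuit' has already
# been built by the PyHCL frontend / earlier passes):
#   expanded = ExpandMemory().run(circuit)
# DefMemory statements become WDefMemory, and each DefMemPort is replaced by explicit
# addr/en/clk (and mask for write ports) connects plus '.data' sub-field accesses.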
| 45.773196
| 156
| 0.533333
| 4,200
| 0.945946
| 0
| 0
| 4,211
| 0.948423
| 0
| 0
| 54
| 0.012162
|
e3e0c634baf400be713a2f06ce7ace7a4e212de8
| 2,071
|
py
|
Python
|
ClydeLog.py
|
bnadeau/open-test-jig
|
99891aa96740eac267352d76a45b9dd5e1f55e0e
|
[
"Apache-2.0"
] | null | null | null |
ClydeLog.py
|
bnadeau/open-test-jig
|
99891aa96740eac267352d76a45b9dd5e1f55e0e
|
[
"Apache-2.0"
] | null | null | null |
ClydeLog.py
|
bnadeau/open-test-jig
|
99891aa96740eac267352d76a45b9dd5e1f55e0e
|
[
"Apache-2.0"
] | null | null | null |
import logging
import time
import os
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
format = "%(asctime)s %(levelname)-10s %(message)s"
id = time.strftime("%Y%m%d-%H%M%S")
# These are the sequences needed to get colored output
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def formatter_message(message, use_color = True):
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': YELLOW,
'ERROR': RED,
'PASS': GREEN
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color = True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format(self, record)
PASS_LEVEL_NUM = 45
logging.addLevelName(PASS_LEVEL_NUM, 'PASS')
def success(self, message, *args, **kws):
# Yes, logger takes its '*args' as 'args'.
self._log(PASS_LEVEL_NUM, message, args, **kws)
logging.Logger.success = success
def getLogger(name = 'clyde_log'):
return logging.getLogger()
log = getLogger()
log.setLevel(logging.DEBUG)
# Make sure log directory exists
if not os.path.exists('log'):
os.makedirs('log')
# Log to file
formatter = logging.Formatter(format)
filehandler = logging.FileHandler("log/clyde_%s.log" % id, "w")
filehandler.setLevel(logging.INFO)
filehandler.setFormatter(formatter)
log.addHandler(filehandler)
COLOR_FORMAT = formatter_message(format, True)
color_formatter = ColoredFormatter(COLOR_FORMAT)
# Log to stdout too
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.DEBUG)
streamhandler.setFormatter(color_formatter)
log.addHandler(streamhandler)
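# Minimal self-test (illustrative, not part of the original jig workflow): writes one
# line per level to the colored console output and, for INFO and above, to the log file.
if __name__ == "__main__":
    log.debug("debug goes to the console only (file handler is INFO+)")
    log.info("info goes to the console and to log/clyde_%s.log" % id)
    log.warning("warnings show up in yellow")
    log.success("PASS entries use the custom level added above")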
| 27.25
| 84
| 0.713182
| 445
| 0.214872
| 0
| 0
| 0
| 0
| 0
| 0
| 370
| 0.178658
|
e3e15e7f00bea2796ee5bd52b11a09a192eae24f
| 4,485
|
py
|
Python
|
private_market/test.py
|
sigmoid3/Dapper
|
469ddca6de3b5e977bcba05de57b9e07bf46dd13
|
[
"MIT"
] | 974
|
2015-01-01T08:37:37.000Z
|
2022-03-29T16:41:11.000Z
|
private_market/test.py
|
sigmoid3/Dapper
|
469ddca6de3b5e977bcba05de57b9e07bf46dd13
|
[
"MIT"
] | 45
|
2015-05-04T15:57:26.000Z
|
2022-03-22T14:40:24.000Z
|
private_market/test.py
|
sigmoid3/Dapper
|
469ddca6de3b5e977bcba05de57b9e07bf46dd13
|
[
"MIT"
] | 414
|
2015-01-05T14:43:01.000Z
|
2022-03-28T18:30:58.000Z
|
from ethereum import tester as t
from ethereum import utils
def test():
s = t.state()
test_company = s.abi_contract('company.se', ADMIN_ACCOUNT=utils.decode_int(t.a0))
order_book = s.abi_contract('orders.se')
test_currency = s.abi_contract('currency.se', sender=t.k0)
assert test_company.getAdmin() == t.a0.encode('hex')
# Issue 1000 shares to user a1
test_company.issueShares(1000, t.a1, sender=t.k0)
# Issue 50000 coins to users a2 and a3
test_currency.sendCoin(50000, t.a2, sender=t.k0)
test_currency.sendCoin(50000, t.a3, sender=t.k0)
# User a1 can have as many shares as he wants, but must retain at
# least 800
test_company.setShareholderMaxShares(t.a1, 2**100, sender=t.k0)
test_company.setShareholderMinShares(t.a1, 800, sender=t.k0)
# User a2 can have up to 500 shares
test_company.setShareholderMaxShares(t.a2, 500, sender=t.k0)
# User a2 tries to give himself the right to unlimited shares,
# fails because he is not the admin
test_company.setShareholderMaxShares(t.a2, 2**100, sender=t.k2)
# A few sanity checks
assert test_company.getCurrentShareholdingsOf(t.a1) == 1000
assert test_company.getShareholderMinShares(t.a1) == 800
assert test_company.getShareholderMaxShares(t.a2) == 500
# User a1 transfers 150 shares to a2
assert test_company.sendCoin(150, t.a2, sender=t.k1) is True
# User a1 tries to transfer 150 shares to a2 again, fails because
# such a transaction would result a1 having 700 shares, which is
# below his limit
assert test_company.sendCoin(150, t.a2, sender=t.k1) is False
# Check shareholdings
assert test_company.getCurrentShareholdingsOf(t.a1) == 850
assert test_company.getCurrentShareholdingsOf(t.a2) == 150
# Authorize the order book contract to accept lockups
test_company.setContractAuthorized(order_book.address, True)
# User a1 puts up 50 shares for sale; however, he tries to do
# this without first authorizing the order book to withdraw so
# the operation fails
assert order_book.mkSellOrder(test_company.address, 50,
test_currency.address, 10000,
sender=t.k1) == -1
# Now, try to create the order properly
test_company.authorizeLockup(order_book.address, 50, sender=t.k1)
_id = order_book.mkSellOrder(test_company.address, 50,
test_currency.address, 10000, sender=t.k1)
assert _id >= 0
assert test_company.getLockedShareholdingsOf(t.a1) == 50
# Accept the order by a3. This should fail because a3 has not
# authorized the order_book to withdraw coins
assert order_book.claimSellOrder(_id, sender=t.k3) is False
# Do the authorization
test_currency.approveOnce(order_book.address, 10000, sender=t.k3)
# It should still fail because a3 is not authorized to hold shares
assert order_book.claimSellOrder(_id, sender=t.k3) is False
# Now do it properly
test_currency.approveOnce(order_book.address, 10000, sender=t.k2)
assert order_book.claimSellOrder(_id, sender=t.k2) is True
# Check shareholdings and balances
assert test_company.getCurrentShareholdingsOf(t.a1) == 800
assert test_company.getCurrentShareholdingsOf(t.a2) == 200
assert test_company.getLockedShareholdingsOf(t.a1) == 0
assert test_currency.coinBalanceOf(t.a1) == 10000
assert test_currency.coinBalanceOf(t.a2) == 40000
assert test_currency.coinBalanceOf(t.a3) == 50000
# Authorize a3 to hold shares
test_company.setShareholderMaxShares(t.a3, 500)
# A3 buys shares
test_currency.approveOnce(order_book.address, 20000, sender=t.k3)
_id2 = order_book.mkBuyOrder(test_company.address, 100,
test_currency.address, 20000, sender=t.k3)
assert _id2 >= 0, _id2
test_company.authorizeLockup(order_book.address, 100, sender=t.k2)
assert order_book.claimBuyOrder(_id2, sender=t.k2) is True
# Check shareholdings and balances
assert test_company.getCurrentShareholdingsOf(t.a1) == 800
assert test_company.getCurrentShareholdingsOf(t.a2) == 100
assert test_company.getCurrentShareholdingsOf(t.a3) == 100
assert test_company.getLockedShareholdingsOf(t.a1) == 0
assert test_currency.coinBalanceOf(t.a1) == 10000
assert test_currency.coinBalanceOf(t.a2) == 60000
assert test_currency.coinBalanceOf(t.a3) == 30000
if __name__ == '__main__':
test()
| 50.965909
| 85
| 0.716611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,114
| 0.248384
|
e3e284f2bcaf4183ceaa0d76915531a74b397b67
| 14,338
|
py
|
Python
|
meshreg/visualize/samplevis.py
|
jonashein/handobjectnet_baseline
|
29175be4528f68b8a2aa6dc6aa37ee0a042f93ab
|
[
"MIT"
] | 2
|
2021-07-09T15:10:44.000Z
|
2021-07-11T12:42:13.000Z
|
meshreg/visualize/samplevis.py
|
jonashein/handobjectnet_baseline
|
29175be4528f68b8a2aa6dc6aa37ee0a042f93ab
|
[
"MIT"
] | null | null | null |
meshreg/visualize/samplevis.py
|
jonashein/handobjectnet_baseline
|
29175be4528f68b8a2aa6dc6aa37ee0a042f93ab
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from libyana.visutils.viz2d import visualize_joints_2d
from meshreg.datasets.queries import BaseQueries, TransQueries
from meshreg.visualize import consistdisplay
def get_check_none(data, key, cpu=True):
if key in data and data[key] is not None:
if cpu:
return data[key].cpu().detach()
else:
return data[key].detach().cuda()
else:
return None
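# Usage pattern below (illustrative): fetch an optional tensor from either the batch
# sample or the network results, detached (and on CPU by default), or None if the
# query was not filled.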
def sample_vis(sample, results, save_img_path, fig=None, max_rows=5, display_centered=False):
fig.clf()
images = sample[TransQueries.IMAGE].permute(0, 2, 3, 1).cpu() + 0.5
batch_size = images.shape[0]
# pred_handverts2d = get_check_none(results, "verts2d")
gt_objverts2d = get_check_none(sample, TransQueries.OBJVERTS2D)
pred_objverts2d = get_check_none(results, "obj_verts2d")
gt_objcorners2d = None #get_check_none(sample, TransQueries.OBJCORNERS2D)
pred_objcorners2d = None #get_check_none(results, "obj_corners2d")
gt_objcorners3dw = None #get_check_none(sample, BaseQueries.OBJCORNERS3D)
pred_objcorners3d = None #get_check_none(results, "obj_corners3d")
gt_objverts3d = get_check_none(sample, TransQueries.OBJVERTS3D)
pred_objverts3d = get_check_none(results, "obj_verts3d")
gt_canobjverts3d = get_check_none(sample, TransQueries.OBJCANROTVERTS)
pred_objverts3dw = get_check_none(results, "recov_objverts3d")
gt_canobjcorners3d = get_check_none(sample, TransQueries.OBJCANROTCORNERS)
pred_objcorners3dw = None #get_check_none(results, "recov_objcorners3d")
gt_handjoints2d = get_check_none(sample, TransQueries.JOINTS2D)
pred_handjoints2d = get_check_none(results, "joints2d")
gt_handjoints3d = get_check_none(sample, TransQueries.JOINTS3D)
pred_handjoints3d = get_check_none(results, "joints3d")
gt_handverts3d = get_check_none(sample, TransQueries.HANDVERTS3D)
pred_handverts3d = get_check_none(results, "verts3d")
gt_objverts3dw = get_check_none(sample, BaseQueries.OBJVERTS3D)
gt_handjoints3dw = get_check_none(sample, BaseQueries.JOINTS3D)
pred_handjoints3dw = get_check_none(results, "recov_joints3d")
row_nb = min(max_rows, batch_size)
if display_centered:
col_nb = 7
else:
col_nb = 4
axes = fig.subplots(row_nb, col_nb)
for row_idx in range(row_nb):
# Column 0
axes[row_idx, 0].imshow(images[row_idx])
axes[row_idx, 0].axis("off")
# Visualize 2D hand joints
if pred_handjoints2d is not None:
visualize_joints_2d(axes[row_idx, 0], pred_handjoints2d[row_idx], alpha=1, joint_idxs=False)
if gt_handjoints2d is not None:
visualize_joints_2d(axes[row_idx, 0], gt_handjoints2d[row_idx], alpha=0.5, joint_idxs=False)
# Column 1
axes[row_idx, 1].imshow(images[row_idx])
axes[row_idx, 1].axis("off")
# Visualize 2D object vertices
if pred_objverts2d is not None:
axes[row_idx, 1].scatter(
pred_objverts2d[row_idx, :, 0], pred_objverts2d[row_idx, :, 1], c="r", s=1, alpha=0.2
)
if gt_objverts2d is not None:
axes[row_idx, 1].scatter(
gt_objverts2d[row_idx, :, 0], gt_objverts2d[row_idx, :, 1], c="b", s=1, alpha=0.02
)
# Visualize 2D object bounding box
if pred_objcorners2d is not None:
visualize_joints_2d(
axes[row_idx, 1],
pred_objcorners2d[row_idx],
alpha=1,
joint_idxs=False,
links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
)
if gt_objcorners2d is not None:
visualize_joints_2d(
axes[row_idx, 1],
gt_objcorners2d[row_idx],
alpha=0.5,
joint_idxs=False,
links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
)
# Visualize some (vertex position) errors for the 2D object vertices
if gt_objverts2d is not None and pred_objverts2d is not None:
idxs = list(range(6))
arrow_nb = len(idxs)
arrows = torch.cat([gt_objverts2d[:, idxs].float(), pred_objverts2d[:, idxs].float()], 1)
links = [[i, i + arrow_nb] for i in range(arrow_nb)]
visualize_joints_2d(
axes[row_idx, 1],
arrows[row_idx],
alpha=0.5,
joint_idxs=False,
links=links,
color=["k"] * arrow_nb,
)
# Column 2
# view from the top
col_idx = 2
# axes[row_idx, col_idx].set_title("rotY: {:.1f}".format(gt_drill_angle_Y[row_idx]))
if gt_objverts3dw is not None:
axes[row_idx, col_idx].scatter(
gt_objverts3dw[row_idx, :, 2], gt_objverts3dw[row_idx, :, 0], c="b", s=1, alpha=0.02
)
if pred_objverts3dw is not None:
axes[row_idx, col_idx].scatter(
pred_objverts3dw[row_idx, :, 2], pred_objverts3dw[row_idx, :, 0], c="r", s=1, alpha=0.02
)
if pred_handjoints3dw is not None:
visualize_joints_2d(
axes[row_idx, col_idx], pred_handjoints3dw[row_idx, :, [2, 0]], alpha=1, joint_idxs=False
)
if gt_handjoints3dw is not None:
visualize_joints_2d(
axes[row_idx, col_idx], gt_handjoints3dw[row_idx, :, [2, 0]], alpha=0.5, joint_idxs=False
)
axes[row_idx, col_idx].invert_yaxis()
# if pred_objcorners3dw is not None:
# visualize_joints_2d(
# axes[row_idx, col_idx],
# pred_objcorners3dw[row_idx],
# alpha=1,
# joint_idxs=False,
# links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
# )
# if gt_objcorners3dw is not None:
# visualize_joints_2d(
# axes[row_idx, col_idx],
# gt_objcorners3dw[row_idx],
# alpha=0.5,
# joint_idxs=False,
# links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
# )
# if pred_objverts3dw is not None and gt_objverts3dw is not None:
# arrow_nb = 6
# arrows = torch.cat([gt_objverts3dw[:, :arrow_nb], pred_objverts3dw[:, :arrow_nb]], 1)
# links = [[i, i + arrow_nb] for i in range(arrow_nb)]
# visualize_joints_2d(
# axes[row_idx, col_idx],
# arrows[row_idx],
# alpha=0.5,
# joint_idxs=False,
# links=links,
# color=["k"] * arrow_nb,
# )
# Column 3
# view from the right
col_idx = 3
# axes[row_idx, col_idx].set_title("rotX: {:.1f}".format(gt_drill_angle_X[row_idx]))
# invert second axis here for more consistent viewpoints
if gt_objverts3dw is not None:
axes[row_idx, col_idx].scatter(
gt_objverts3dw[row_idx, :, 2], -gt_objverts3dw[row_idx, :, 1], c="b", s=1, alpha=0.02
)
if pred_objverts3dw is not None:
axes[row_idx, col_idx].scatter(
pred_objverts3dw[row_idx, :, 2], -pred_objverts3dw[row_idx, :, 1], c="r", s=1, alpha=0.02
)
if pred_handjoints3dw is not None:
pred_handjoints3dw_inv = np.stack([pred_handjoints3dw[:, :, 2], -pred_handjoints3dw[:, :, 1]], axis=-1)
visualize_joints_2d(
axes[row_idx, col_idx], pred_handjoints3dw_inv[row_idx, :, :], alpha=1, joint_idxs=False
)
if gt_handjoints3dw is not None:
gt_handjoints3dw_inv = np.stack([gt_handjoints3dw[:, :, 2], -gt_handjoints3dw[:, :, 1]], axis=-1)
visualize_joints_2d(
axes[row_idx, col_idx], gt_handjoints3dw_inv[row_idx, :, :], alpha=0.5, joint_idxs=False
)
# if pred_objcorners3dw is not None:
# visualize_joints_2d(
# axes[row_idx, col_idx],
# pred_objcorners3dw[row_idx, :, 1:],
# alpha=1,
# joint_idxs=False,
# links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
# )
# if gt_objcorners3dw is not None:
# visualize_joints_2d(
# axes[row_idx, col_idx],
# gt_objcorners3dw[row_idx, :, 1:],
# alpha=0.5,
# joint_idxs=False,
# links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
# )
# if pred_objverts3dw is not None and gt_objverts3dw is not None:
# arrow_nb = 6
# arrows = torch.cat([gt_objverts3dw[:, :arrow_nb, 1:], pred_objverts3dw[:, :arrow_nb, 1:]], 1)
# links = [[i, i + arrow_nb] for i in range(arrow_nb)]
# visualize_joints_2d(
# axes[row_idx, col_idx],
# arrows[row_idx],
# alpha=0.5,
# joint_idxs=False,
# links=links,
# color=["k"] * arrow_nb,
# )
if display_centered:
# Column 4
col_idx = 4
if gt_canobjverts3d is not None:
axes[row_idx, col_idx].scatter(
gt_canobjverts3d[row_idx, :, 0], gt_canobjverts3d[row_idx, :, 1], c="b", s=1, alpha=0.02
)
if pred_objverts3d is not None:
axes[row_idx, col_idx].scatter(
pred_objverts3d[row_idx, :, 0], pred_objverts3d[row_idx, :, 1], c="r", s=1, alpha=0.02
)
if pred_objcorners3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx],
pred_objcorners3d[row_idx],
alpha=1,
joint_idxs=False,
links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
)
if gt_canobjcorners3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx],
gt_canobjcorners3d[row_idx],
alpha=0.5,
joint_idxs=False,
links=[[0, 1, 3, 2], [4, 5, 7, 6], [1, 5], [3, 7], [4, 0], [0, 2, 6, 4]],
)
if pred_objcorners3d is not None and gt_canobjcorners3d is not None:
arrow_nb = 6
arrows = torch.cat([gt_canobjcorners3d[:, :arrow_nb], pred_objcorners3d[:, :arrow_nb]], 1)
links = [[i, i + arrow_nb] for i in range(arrow_nb)]
visualize_joints_2d(
axes[row_idx, col_idx],
arrows[row_idx],
alpha=0.5,
joint_idxs=False,
links=links,
color=["k"] * arrow_nb,
)
axes[row_idx, col_idx].set_aspect("equal")
axes[row_idx, col_idx].invert_yaxis()
# Column 5
col_idx = 5
if gt_objverts3d is not None:
axes[row_idx, col_idx].scatter(
gt_objverts3d[row_idx, :, 0], gt_objverts3d[row_idx, :, 1], c="b", s=1, alpha=0.02
)
# if pred_objverts3d is not None:
# axes[row_idx, 2].scatter(
# pred_objverts3d[row_idx, :, 0], pred_objverts3d[row_idx, :, 1], c="r", s=1, alpha=0.02
# )
if gt_handverts3d is not None:
axes[row_idx, col_idx].scatter(
gt_handverts3d[row_idx, :, 0], gt_handverts3d[row_idx, :, 1], c="g", s=1, alpha=0.2
)
if pred_handverts3d is not None:
axes[row_idx, col_idx].scatter(
pred_handverts3d[row_idx, :, 0], pred_handverts3d[row_idx, :, 1], c="c", s=1, alpha=0.2
)
if pred_handjoints3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx], pred_handjoints3d[row_idx], alpha=1, joint_idxs=False
)
if gt_handjoints3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx], gt_handjoints3d[row_idx], alpha=0.5, joint_idxs=False
)
axes[row_idx, col_idx].invert_yaxis()
# Column 6
col_idx = 6
if gt_objverts3d is not None:
axes[row_idx, col_idx].scatter(
gt_objverts3d[row_idx, :, 1], gt_objverts3d[row_idx, :, 2], c="b", s=1, alpha=0.02
)
# if pred_objverts3d is not None:
# axes[row_idx, 3].scatter(
# pred_objverts3d[row_idx, :, 1], pred_objverts3d[row_idx, :, 2], c="r", s=1, alpha=0.02
# )
if gt_handverts3d is not None:
axes[row_idx, col_idx].scatter(
gt_handverts3d[row_idx, :, 1], gt_handverts3d[row_idx, :, 2], c="g", s=1, alpha=0.2
)
if pred_handverts3d is not None:
axes[row_idx, col_idx].scatter(
pred_handverts3d[row_idx, :, 1], pred_handverts3d[row_idx, :, 2], c="c", s=1, alpha=0.2
)
if pred_handjoints3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx], pred_handjoints3d[row_idx][:, 1:], alpha=1, joint_idxs=False
)
if gt_handjoints3d is not None:
visualize_joints_2d(
axes[row_idx, col_idx], gt_handjoints3d[row_idx][:, 1:], alpha=0.5, joint_idxs=False
)
consistdisplay.squashfig(fig)
fig.savefig(save_img_path, dpi=300)
| 46.401294
| 119
| 0.522597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,204
| 0.223462
|
e3e49196e82b3c1f79806bdd2aeb6e1bcf532ba4
| 3,185
|
py
|
Python
|
api_app/api/dependencies/workspaces.py
|
gauravagrwal/AzureTRE
|
f3cb1e40e4926f8b196add807b05abec46bb36fc
|
[
"MIT"
] | 71
|
2021-03-04T15:10:18.000Z
|
2022-03-29T16:37:37.000Z
|
api_app/api/dependencies/workspaces.py
|
gauravagrwal/AzureTRE
|
f3cb1e40e4926f8b196add807b05abec46bb36fc
|
[
"MIT"
] | 1,498
|
2021-03-05T07:28:00.000Z
|
2022-03-31T16:28:06.000Z
|
api_app/api/dependencies/workspaces.py
|
gauravagrwal/AzureTRE
|
f3cb1e40e4926f8b196add807b05abec46bb36fc
|
[
"MIT"
] | 60
|
2021-04-30T10:09:26.000Z
|
2022-03-30T12:39:27.000Z
|
from fastapi import Depends, HTTPException, Path, status
from pydantic import UUID4
from api.dependencies.database import get_repository
from db.errors import EntityDoesNotExist, ResourceIsNotDeployed
from db.repositories.user_resources import UserResourceRepository
from db.repositories.workspace_services import WorkspaceServiceRepository
from db.repositories.workspaces import WorkspaceRepository
from models.domain.user_resource import UserResource
from models.domain.workspace import Workspace
from models.domain.workspace_service import WorkspaceService
from resources import strings
def get_workspace_by_id(workspace_id: UUID4, workspaces_repo) -> Workspace:
try:
return workspaces_repo.get_workspace_by_id(workspace_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_DOES_NOT_EXIST)
async def get_workspace_by_id_from_path(workspace_id: UUID4 = Path(...), workspaces_repo=Depends(get_repository(WorkspaceRepository))) -> Workspace:
return get_workspace_by_id(workspace_id, workspaces_repo)
async def get_deployed_workspace_by_id_from_path(workspace_id: UUID4 = Path(...), workspaces_repo=Depends(get_repository(WorkspaceRepository))) -> Workspace:
try:
return workspaces_repo.get_deployed_workspace_by_id(workspace_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_DOES_NOT_EXIST)
except ResourceIsNotDeployed:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=strings.WORKSPACE_IS_NOT_DEPLOYED)
async def get_workspace_service_by_id_from_path(workspace_id: UUID4 = Path(...), service_id: UUID4 = Path(...), workspace_services_repo=Depends(get_repository(WorkspaceServiceRepository))) -> WorkspaceService:
try:
return workspace_services_repo.get_workspace_service_by_id(workspace_id, service_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_SERVICE_DOES_NOT_EXIST)
async def get_deployed_workspace_service_by_id_from_path(workspace_id: UUID4 = Path(...), service_id: UUID4 = Path(...), workspace_services_repo=Depends(get_repository(WorkspaceServiceRepository))) -> WorkspaceService:
try:
return workspace_services_repo.get_deployed_workspace_service_by_id(workspace_id, service_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_SERVICE_DOES_NOT_EXIST)
except ResourceIsNotDeployed:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=strings.WORKSPACE_SERVICE_IS_NOT_DEPLOYED)
async def get_user_resource_by_id_from_path(workspace_id: UUID4 = Path(...), service_id: UUID4 = Path(...), resource_id: UUID4 = Path(...), user_resource_repo=Depends(get_repository(UserResourceRepository))) -> UserResource:
try:
return user_resource_repo.get_user_resource_by_id(workspace_id, service_id, resource_id)
except EntityDoesNotExist:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.USER_RESOURCE_DOES_NOT_EXIST)
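# --- Usage sketch (illustrative only, not part of the original module) ---
# Shows how the path dependencies above are typically consumed from a router
# via Depends(); the router, route path and response shape are placeholders.
#
# from fastapi import APIRouter
#
# router = APIRouter()
#
# @router.get("/workspaces/{workspace_id}")
# async def retrieve_workspace(workspace: Workspace = Depends(get_workspace_by_id_from_path)):
#     return {"workspace": workspace.id}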
| 56.875
| 224
| 0.82292
| 0
| 0
| 0
| 0
| 0
| 0
| 2,288
| 0.718367
| 0
| 0
|
e3e5cb6c2267ca3e81be3aad88376455fe125b55
| 14,828
|
py
|
Python
|
nnef_tools/conversion/tensorflow/tf_pb_to_tf_py.py
|
rgiduthuri/NNEF-Tools
|
8a9971f897fb5a110dd254e0c20077213f257700
|
[
"Apache-2.0"
] | null | null | null |
nnef_tools/conversion/tensorflow/tf_pb_to_tf_py.py
|
rgiduthuri/NNEF-Tools
|
8a9971f897fb5a110dd254e0c20077213f257700
|
[
"Apache-2.0"
] | null | null | null |
nnef_tools/conversion/tensorflow/tf_pb_to_tf_py.py
|
rgiduthuri/NNEF-Tools
|
8a9971f897fb5a110dd254e0c20077213f257700
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
import typing
from functools import partial
import numpy as np
import six
from nnef_tools.conversion import shape_fixer
from nnef_tools.core import utils
from nnef_tools.io.tensorflow.tf_graph import *
from nnef_tools.io.tensorflow.tf_pb import tf_pb_eval, tf_pb_shape_inference
_tf_py_dtype_by_tf_pb_dtype = {
'DT_INVALID': None,
'DT_HALF': 'float16',
'DT_FLOAT': 'float32',
'DT_DOUBLE': 'float64',
'DT_INT8': 'int8',
'DT_INT16': 'int16',
'DT_INT32': 'int32',
'DT_INT64': 'int64',
'DT_UINT8': 'uint8',
'DT_UINT16': 'uint16',
'DT_UINT32': 'uint32',
'DT_UINT64': 'uint64',
'DT_BOOL': 'bool',
'DT_STRING': 'string',
'DT_COMPLEX64': 'complex64',
'DT_COMPLEX128': 'complex128',
}
def _evaluate_constant(tf_tensor):
# type: (TFTensor)->np.ndarray
# noinspection PySimplifyBooleanCheck
if tf_tensor.data == []:
return np.array([], dtype=np.dtype(tf_tensor.dtype))
value = np.array(tf_tensor.data, dtype=np.dtype(tf_tensor.dtype))
last_val = value.flat[-1]
value2 = np.full(shape=tf_tensor.shape, fill_value=last_val, dtype=np.dtype(tf_tensor.dtype))
value2.flat[:value.size] = value.flat
return value2
# noinspection PyProtectedMember
def evaluate_and_convert(tf_graph, source_shapes=None):
# type: (TFGraph, typing.Union[typing.Dict[str, typing.List[int]], typing.List[int], int, None])->None
tf_graph.sort()
if isinstance(source_shapes, dict):
source_shapes = {(k + ':0' if ':' not in k else k): v for k, v in six.iteritems(source_shapes)}
shape_fixer.fix_input_shapes(tf_graph, source_shapes)
const_value_by_tensor = {}
for tensor in tf_graph.tensors:
if tensor.is_constant:
const_value_by_tensor[tensor] = tf_pb_eval._evaluate_constant(tensor)
elif tensor.is_variable:
const_value_by_tensor[tensor] = tensor.data
for op in tf_graph.operations:
# Shape prop
if op.name not in tf_pb_shape_inference._DefaultPropagators:
raise utils.NNEFToolsException("Operation '{}' is not supported".format(op.name))
propagated_shapes, propagated_dtypes = \
tf_pb_shape_inference._DefaultPropagators[op.name](op, const_value_by_tensor)
assert not utils.has_le_0(propagated_shapes)
assert len(propagated_shapes) == len(propagated_dtypes) == len(op.outputs)
for new_shape, new_dtype, tensor in zip(propagated_shapes, propagated_dtypes, op.outputs):
assert utils.compatible_shapes(tensor.shape, new_shape)
tensor.shape = new_shape
assert tensor.dtype is None or tensor.dtype == new_dtype
tensor.dtype = new_dtype
# Evaluation
if op.name in tf_pb_eval._DefaultOpEvaluators:
tf_pb_eval._DefaultOpEvaluators[op.name](op, const_value_by_tensor)
# Conversion
assert op.name in DefaultConverters, "No tf_pb_to_tf_py converter for {}".format(op.name)
DefaultConverters[op.name](op, const_value_by_tensor)
for tensor in tf_graph.tensors:
tensor.dtype = _tf_py_dtype_by_tf_pb_dtype.get(tensor.dtype, None)
for tensor in tf_graph.tensors:
if tensor.is_variable:
label = tensor.name
if label is not None:
if label.endswith(':0'):
label = label[:-2]
label = label.replace(':', '_')
tensor.label = label
def fix_types(list_):
# type: (typing.Any)->typing.Any
if isinstance(list_, list) and len(list_) >= 1 and utils.is_anyint(list_[0]):
list_ = [utils.anyint_to_int(i) for i in list_]
return list_
def generic_converter(op, # type: TFOperation
const_value_by_tensor, # type: typing.Dict[TFTensor, np.ndarray]
target_name, # type: str
attrib_name_dict=None, # type: typing.Optional[typing.Dict[str, str]]
input_to_attrib_dict=None, # type: typing.Optional[typing.Dict[int, str]]
revert_inputs=False, # type: bool
new_attribs=None, # type: typing.Optional[typing.Dict[str, typing.Any]]
list_attribs=None, # type: typing.List[str]
):
# type: (...)->None
op.name = target_name
if attrib_name_dict:
attribs = {}
for k, v in six.iteritems(op.attribs):
if k in attrib_name_dict:
attribs[attrib_name_dict[k]] = v
else:
attribs[k] = v
op.attribs = attribs
if input_to_attrib_dict:
inputs = []
for i in range(len(op.inputs)):
if i in input_to_attrib_dict:
assert "{}.{} not evaluated to constant".format(op.name, input_to_attrib_dict[i])
op.attribs[input_to_attrib_dict[i]] = fix_types(const_value_by_tensor[op.inputs[i]].tolist())
elif (i - len(op.inputs)) in input_to_attrib_dict:
assert "{}.{} not evaluated to constant".format(op.name, input_to_attrib_dict[i - len(op.inputs)])
op.attribs[input_to_attrib_dict[i - len(op.inputs)]] = fix_types(
const_value_by_tensor[op.inputs[i]].tolist())
else:
inputs.append(op.inputs[i])
op.inputs = tuple(inputs)
if revert_inputs:
op.inputs = tuple(reversed(op.inputs))
if new_attribs:
op.attribs.update(new_attribs)
if list_attribs:
op.attribs = {k: [v] if k in list_attribs and not isinstance(v, (list, tuple)) else v
for k, v in six.iteritems(op.attribs)}
def convert_cast(op, const_value_by_tensor):
# type: (TFOperation, typing.Dict[TFTensor, np.ndarray])->None
op.name = "tf.cast"
op.attribs['dtype'] = _tf_py_dtype_by_tf_pb_dtype[op.attribs['DstT']]
# See: https://www.tensorflow.org/api_docs/cc/
DefaultConverters = {
# attribless:
"Abs": partial(generic_converter, target_name="tf.abs"),
"Add": partial(generic_converter, target_name="tf.add"),
"BatchToSpaceND": partial(generic_converter, target_name="tf.batch_to_space"),
"BiasAdd": partial(generic_converter, target_name="tf.nn.bias_add"),
"Ceil": partial(generic_converter, target_name="tf.ceil"),
"Elu": partial(generic_converter, target_name="tf.nn.elu"),
"Equal": partial(generic_converter, target_name="tf.equal"),
"Exp": partial(generic_converter, target_name="tf.exp"),
"Floor": partial(generic_converter, target_name="tf.floor"),
"Greater": partial(generic_converter, target_name="tf.greater"),
"GreaterEqual": partial(generic_converter, target_name="tf.greater_equal"),
"Identity": partial(generic_converter, target_name="tf.identity"),
"LeakyRelu": partial(generic_converter, target_name="tf.nn.leaky_relu"),
"Less": partial(generic_converter, target_name="tf.less"),
"LessEqual": partial(generic_converter, target_name="tf.less_equal"),
"Log": partial(generic_converter, target_name="tf.log"),
"LogicalAnd": partial(generic_converter, target_name="tf.logical_and"),
"LogicalNot": partial(generic_converter, target_name="tf.logical_not"),
"LogicalOr": partial(generic_converter, target_name="tf.logical_or"),
"Maximum": partial(generic_converter, target_name="tf.maximum"),
"Minimum": partial(generic_converter, target_name="tf.minimum"),
"Mul": partial(generic_converter, target_name="tf.multiply"),
"Neg": partial(generic_converter, target_name="tf.negative"),
"NotEqual": partial(generic_converter, target_name="tf.not_equal"),
"Pow": partial(generic_converter, target_name="tf.pow"),
"RealDiv": partial(generic_converter, target_name="tf.divide"),
"Relu": partial(generic_converter, target_name="tf.nn.relu"),
"Relu6": partial(generic_converter, target_name="tf.nn.relu6"),
"Round": partial(generic_converter, target_name="tf.round"),
"Rsqrt": partial(generic_converter, target_name="tf.rsqrt"),
"Sigmoid": partial(generic_converter, target_name="tf.nn.sigmoid"),
"Sign": partial(generic_converter, target_name="tf.sign"),
"Softmax": partial(generic_converter, target_name="tf.nn.softmax", new_attribs={'axis': -1}),
"Softplus": partial(generic_converter, target_name="tf.nn.softplus"),
"Softsign": partial(generic_converter, target_name="tf.nn.softsign"),
"Sqrt": partial(generic_converter, target_name="tf.sqrt"),
"Square": partial(generic_converter, target_name="tf.square"),
"Sub": partial(generic_converter, target_name="tf.subtract"),
"Tanh": partial(generic_converter, target_name="tf.nn.tanh"),
"Select": partial(generic_converter, target_name="tf.where"),
'ClipByValue': partial(generic_converter, target_name='tf.clip_by_value'),
# more complex:
"AvgPool": partial(generic_converter, target_name="tf.nn.avg_pool"),
"Conv2D": partial(generic_converter, target_name="tf.nn.conv2d"),
"Conv3D": partial(generic_converter, target_name="tf.nn.conv3d"),
"Conv2DBackpropInput": partial(generic_converter,
target_name="tf.nn.conv2d_transpose",
input_to_attrib_dict={0: "output_shape"},
revert_inputs=True),
"Conv3DBackpropInputV2": partial(generic_converter,
target_name="tf.nn.conv3d_transpose",
input_to_attrib_dict={0: "output_shape"},
revert_inputs=True),
# "CudnnRNN": None,
"DepthwiseConv2dNative": partial(generic_converter, target_name="tf.nn.depthwise_conv2d_native"),
"FusedBatchNorm": partial(generic_converter, target_name="tf.nn.fused_batch_norm"),
"LRN": partial(generic_converter, target_name="tf.nn.lrn"),
"MatMul": partial(generic_converter, target_name="tf.matmul"),
"MaxPool": partial(generic_converter, target_name="tf.nn.max_pool"),
"MaxPoolWithArgmax": partial(generic_converter, target_name="tf.nn.max_pool_with_argmax"),
"Pack": partial(generic_converter, target_name="tf.stack"),
# "Placeholder": None,
# "PlaceholderWithDefault": None,
"Shape": partial(generic_converter, target_name="tf.shape"),
"Squeeze": partial(generic_converter, target_name="tf.squeeze", attrib_name_dict={"squeeze_dims": "axis"}),
# even more complex:
"ExpandDims": partial(generic_converter, target_name="tf.expand_dims", input_to_attrib_dict={1: "axis"}),
"ArgMin": partial(generic_converter, target_name="tf.argmin", input_to_attrib_dict={1: "axis"}),
"ArgMax": partial(generic_converter, target_name="tf.argmax", input_to_attrib_dict={1: "axis"}),
"Max": partial(generic_converter, target_name="tf.reduce_max", attrib_name_dict={"keep_dims": "keepdims"},
input_to_attrib_dict={1: "axis"}, list_attribs=['axis']),
"Min": partial(generic_converter, target_name="tf.reduce_min", attrib_name_dict={"keep_dims": "keepdims"},
input_to_attrib_dict={1: "axis"}, list_attribs=['axis']),
"Mean": partial(generic_converter, target_name="tf.reduce_mean", attrib_name_dict={"keep_dims": "keepdims"},
input_to_attrib_dict={1: "axis"}, list_attribs=['axis']),
"ConcatV2": partial(generic_converter, target_name="tf.concat", input_to_attrib_dict={-1: "axis"}),
"Pad": partial(generic_converter,
target_name="tf.pad",
input_to_attrib_dict={1: "paddings"},
new_attribs={'mode': 'CONSTANT',
'constant_values': 0.0}),
"MirrorPad": partial(generic_converter,
target_name="tf.pad",
input_to_attrib_dict={1: "paddings"},
new_attribs={'constant_values': 0.0}),
"Reshape": partial(generic_converter, target_name="tf.reshape", input_to_attrib_dict={1: "shape"}),
"ResizeArea": partial(generic_converter, target_name="tf.image.resize_area", input_to_attrib_dict={1: "size"}),
"ResizeBilinear": partial(generic_converter,
target_name="tf.image.resize_bilinear",
input_to_attrib_dict={1: "size"}),
"ResizeNearestNeighbor": partial(generic_converter,
target_name="tf.image.resize_nearest_neighbor",
input_to_attrib_dict={1: "size"}),
"Slice": partial(generic_converter, target_name="tf.slice", input_to_attrib_dict={1: "begin", 2: "size"}),
"SpaceToBatchND": partial(generic_converter, target_name="tf.space_to_batch"),
"Split": partial(generic_converter,
target_name="tf.split",
attrib_name_dict={'num_split': 'num_or_size_splits'},
input_to_attrib_dict={0: "axis"}),
"SplitV": partial(generic_converter,
target_name="tf.split",
input_to_attrib_dict={1: "num_or_size_splits", 2: "axis"}),
"StridedSlice": partial(generic_converter,
target_name="tf.strided_slice",
input_to_attrib_dict={1: "begin", 2: "end", 3: "strides"}),
"Sum": partial(generic_converter, target_name="tf.reduce_sum", input_to_attrib_dict={1: "axis"},
attrib_name_dict={"keep_dims": "keepdims"}, list_attribs=['axis']),
"Transpose": partial(generic_converter, target_name="tf.transpose", input_to_attrib_dict={1: "perm"}),
"Tile": partial(generic_converter, target_name="tf.tile", input_to_attrib_dict={1: "multiples"}),
"Cast": convert_cast,
"Sin": partial(generic_converter, target_name="tf.sin"),
"Cos": partial(generic_converter, target_name="tf.cos"),
"Any": partial(generic_converter, target_name="tf.reduce_any", attrib_name_dict={"keep_dims": "keepdims"},
input_to_attrib_dict={1: "axis"}, list_attribs=['axis']),
"All": partial(generic_converter, target_name="tf.reduce_all", attrib_name_dict={"keep_dims": "keepdims"},
input_to_attrib_dict={1: "axis"}, list_attribs=['axis']),
}
| 50.435374
| 115
| 0.658956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,149
| 0.279808
|
e3e5d7bb420ff6920778e91d10161cbdad69e4fa
| 1,015
|
py
|
Python
|
CS1410/p5test.py
|
Davidjbennett/DavidBennett.github.io
|
09a2652b7ace8741bf23c6432abd58ee790b9f0c
|
[
"MIT"
] | 3
|
2021-05-18T16:17:29.000Z
|
2022-01-20T15:46:59.000Z
|
CS1410/p5test.py
|
Davidjbennett/DavidBennett
|
09a2652b7ace8741bf23c6432abd58ee790b9f0c
|
[
"MIT"
] | null | null | null |
CS1410/p5test.py
|
Davidjbennett/DavidBennett
|
09a2652b7ace8741bf23c6432abd58ee790b9f0c
|
[
"MIT"
] | null | null | null |
import unittest
import payroll
class P2Test(unittest.TestCase):
def setUp(self):
self.emp = payroll.Employee('12-3456789', 'John', 'Doe', '123 Anystreet', 'Anytown', 'Anystate', '98765')
def testHourly(self):
rate = 35.5
self.emp.make_hourly(rate)
for d in range(10):
self.emp.classification.add_timecard(4.0 + d*0.5)
self.assertEqual(self.emp.classification.compute_pay(), 62.5*rate)
# def testSalaried(self):
# salary = 10100.0
# self.emp.make_salaried(salary)
# self.assertEqual(self.emp.classification.compute_pay(), round(salary/24, 2))
# def testCommissioned(self):
# salary = 50000.0
# rate = 25
# self.emp.make_commissioned(salary, rate)
# for d in range(5):
# self.emp.classification.add_receipt(400.0 + d*25)
# self.assertEqual(self.emp.classification.compute_pay(), round(salary/24+2250.0*rate/100.0, 2))
if __name__ == '__main__':
unittest.main()
| 39.038462
| 113
| 0.629557
| 928
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 534
| 0.526108
|
e3e6303d7750f26636e0532318d99a61631c9c10
| 17,884
|
py
|
Python
|
EXOSIMS/Completeness/BrownCompleteness.py
|
dgarrett622/EXOSIMS
|
ce41adc8c162b6330eb9cefee83f3a395bcff614
|
[
"BSD-3-Clause"
] | null | null | null |
EXOSIMS/Completeness/BrownCompleteness.py
|
dgarrett622/EXOSIMS
|
ce41adc8c162b6330eb9cefee83f3a395bcff614
|
[
"BSD-3-Clause"
] | 2
|
2016-08-13T18:39:39.000Z
|
2020-06-26T00:18:37.000Z
|
EXOSIMS/Completeness/BrownCompleteness.py
|
douglase/EXOSIMS
|
ce41adc8c162b6330eb9cefee83f3a395bcff614
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import time
import numpy as np
from scipy import interpolate
import astropy.units as u
import astropy.constants as const
import os, inspect
try:
import cPickle as pickle
except:
import pickle
import hashlib
from EXOSIMS.Prototypes.Completeness import Completeness
from EXOSIMS.util.eccanom import eccanom
from EXOSIMS.util.deltaMag import deltaMag
class BrownCompleteness(Completeness):
"""Completeness class template
This class contains all variables and methods necessary to perform
Completeness Module calculations in exoplanet mission simulation.
Args:
\*\*specs:
user specified values
Attributes:
minComp (float):
Minimum completeness level for detection
Nplanets (integer):
Number of planets for initial completeness Monte Carlo simulation
classpath (string):
Path on disk to Brown Completeness
filename (string):
Name of file where completeness interpolant is stored
visits (ndarray):
Number of observations corresponding to each star in the target list
(initialized in gen_update)
updates (nx5 ndarray):
Completeness values of successive observations of each star in the
target list (initialized in gen_update)
"""
def __init__(self, Nplanets=1e8, **specs):
# bring in inherited Completeness prototype __init__ values
Completeness.__init__(self, **specs)
# Number of planets to sample
self.Nplanets = int(Nplanets)
# get path to completeness interpolant stored in a pickled .comp file
self.classpath = os.path.split(inspect.getfile(self.__class__))[0]
self.filename = specs['modules']['PlanetPopulation']
atts = ['arange','erange','prange','Rprange','Mprange','scaleOrbits','constrainOrbits']
extstr = ''
for att in atts:
extstr += '%s: ' % att + str(getattr(self.PlanetPopulation, att)) + ' '
ext = hashlib.md5(extstr).hexdigest()
self.filename += ext
def target_completeness(self, targlist):
"""Generates completeness values for target stars
This method is called from TargetList __init__ method.
Args:
targlist (TargetList):
TargetList class object
Returns:
comp0 (ndarray):
1D numpy array of completeness values for each target star
"""
# set up "ensemble visit photometric and obscurational completeness"
# interpolant for initial completeness values
# bins for interpolant
bins = 1000
# xedges is array of separation values for interpolant
xedges = np.linspace(0., self.PlanetPopulation.rrange[1].value, bins)*\
self.PlanetPopulation.arange.unit
xedges = xedges.to('AU').value
# yedges is array of delta magnitude values for interpolant
ymin = np.round((-2.5*np.log10(self.PlanetPopulation.prange[1]*\
(self.PlanetPopulation.Rprange[1]/(self.PlanetPopulation.rrange[0]))\
.decompose().value**2)))
ymax = np.round((-2.5*np.log10(self.PlanetPopulation.prange[0]*\
(self.PlanetPopulation.Rprange[0]/(self.PlanetPopulation.rrange[1]))\
.decompose().value**2*1e-11)))
yedges = np.linspace(ymin, ymax, bins)
# number of planets for each Monte Carlo simulation
nplan = int(np.min([1e6,self.Nplanets]))
# number of simulations to perform (must be integer)
steps = int(self.Nplanets/nplan)
# path to 2D completeness pdf array for interpolation
Cpath = os.path.join(self.classpath, self.filename+'.comp')
Cpdf, xedges2, yedges2 = self.genC(Cpath, nplan, xedges, yedges, steps)
EVPOCpdf = interpolate.RectBivariateSpline(xedges, yedges, Cpdf.T)
EVPOC = np.vectorize(EVPOCpdf.integral)
# calculate separations based on IWA
smin = np.tan(targlist.OpticalSystem.IWA)*targlist.dist
if np.isinf(targlist.OpticalSystem.OWA):
smax = xedges[-1]*u.AU
else:
smax = np.tan(targlist.OpticalSystem.OWA)*targlist.dist
# calculate dMags based on limiting dMag
dMagmax = targlist.OpticalSystem.dMagLim #np.array([targlist.OpticalSystem.dMagLim]*targlist.nStars)
dMagmin = ymin
if self.PlanetPopulation.scaleOrbits:
L = np.where(targlist.L>0, targlist.L, 1e-10) #take care of zero/negative values
smin = smin/np.sqrt(L)
smax = smax/np.sqrt(L)
dMagmin -= 2.5*np.log10(L)
dMagmax -= 2.5*np.log10(L)
comp0 = EVPOC(smin.to('AU').value, smax.to('AU').value, dMagmin, dMagmax)
return comp0
def gen_update(self, targlist):
"""Generates dynamic completeness values for multiple visits of each
star in the target list
Args:
targlist (TargetList):
TargetList module
"""
print 'Beginning completeness update calculations'
self.visits = np.array([0]*targlist.nStars)
self.updates = []
# number of planets to simulate
nplan = int(2e4)
# normalization time
dt = 1e9*u.day
# sample quantities which do not change in time
a = self.PlanetPopulation.gen_sma(nplan) # AU
e = self.PlanetPopulation.gen_eccen(nplan)
I = self.PlanetPopulation.gen_I(nplan) # deg
O = self.PlanetPopulation.gen_O(nplan) # deg
w = self.PlanetPopulation.gen_w(nplan) # deg
p = self.PlanetPopulation.gen_albedo(nplan)
Rp = self.PlanetPopulation.gen_radius(nplan) # km
Mp = self.PlanetPopulation.gen_mass(nplan) # kg
rmax = a*(1.+e)
rmin = a*(1.-e)
# sample quantity which will be updated
M = np.random.uniform(high=2.*np.pi,size=nplan)
newM = np.zeros((nplan,))
# population values
smin = (np.tan(targlist.OpticalSystem.IWA)*targlist.dist).to('AU')
if np.isfinite(targlist.OpticalSystem.OWA):
smax = (np.tan(targlist.OpticalSystem.OWA)*targlist.dist).to('AU')
else:
smax = np.array([np.max(self.PlanetPopulation.arange.to('AU').value)*\
(1.+np.max(self.PlanetPopulation.erange))]*targlist.nStars)*u.AU
# fill dynamic completeness values
for sInd in xrange(targlist.nStars):
Mstar = targlist.MsTrue[sInd]*const.M_sun
# remove rmax < smin and rmin > smax
inside = np.where(rmax > smin[sInd])[0]
outside = np.where(rmin < smax[sInd])[0]
pInds = np.intersect1d(inside,outside)
dynamic = []
# calculate for 5 successive observations
for num in xrange(5):
if not pInds.any():
dynamic.append(0.)
break
# find Eccentric anomaly
if num == 0:
E = eccanom(M[pInds],e[pInds])
newM[pInds] = M[pInds]
else:
E = eccanom(newM[pInds],e[pInds])
r = a[pInds]*(1.-e[pInds]*np.cos(E))
r1 = r*(np.cos(E) - e[pInds])
r1 = np.hstack((r1.reshape(len(r1),1), r1.reshape(len(r1),1), r1.reshape(len(r1),1)))
r2 = (r*np.sin(E)*np.sqrt(1. - e[pInds]**2))
r2 = np.hstack((r2.reshape(len(r2),1), r2.reshape(len(r2),1), r2.reshape(len(r2),1)))
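                # A and B below are the direction-cosine vectors of the orbital-plane
                # x and y axes, built from the Euler angles O, w, I, so that the
                # reference-frame position is A*r1 + B*r2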
a1 = np.cos(O[pInds])*np.cos(w[pInds]) - np.sin(O[pInds])*np.sin(w[pInds])*np.cos(I[pInds])
a2 = np.sin(O[pInds])*np.cos(w[pInds]) + np.cos(O[pInds])*np.sin(w[pInds])*np.cos(I[pInds])
a3 = np.sin(w[pInds])*np.sin(I[pInds])
A = np.hstack((a1.reshape(len(a1),1), a2.reshape(len(a2),1), a3.reshape(len(a3),1)))
b1 = -np.cos(O[pInds])*np.sin(w[pInds]) - np.sin(O[pInds])*np.cos(w[pInds])*np.cos(I[pInds])
b2 = -np.sin(O[pInds])*np.sin(w[pInds]) + np.cos(O[pInds])*np.cos(w[pInds])*np.cos(I[pInds])
b3 = np.cos(w[pInds])*np.sin(I[pInds])
B = np.hstack((b1.reshape(len(b1),1), b2.reshape(len(b2),1), b3.reshape(len(b3),1)))
# planet position, planet-star distance, apparent separation
r = (A*r1 + B*r2)*u.AU # position vector
d = np.sqrt(np.sum(r**2, axis=1)) # planet-star distance
s = np.sqrt(np.sum(r[:,0:2]**2, axis=1)) # apparent separation
beta = np.arccos(r[:,2]/d) # phase angle
Phi = self.PlanetPhysicalModel.calc_Phi(beta) # phase function
dMag = deltaMag(p[pInds],Rp[pInds],d,Phi) # difference in magnitude
toremoves = np.where((s > smin[sInd]) & (s < smax[sInd]))[0]
toremovedmag = np.where(dMag < targlist.OpticalSystem.dMagLim)[0]
toremove = np.intersect1d(toremoves, toremovedmag)
pInds = np.delete(pInds, toremove)
if num == 0:
dynamic.append(targlist.comp0[sInd])
else:
dynamic.append(float(len(toremove))/nplan)
# update M
mu = const.G*(Mstar+Mp[pInds])
n = np.sqrt(mu/a[pInds]**3)
newM[pInds] = (newM[pInds] + n*dt)/(2*np.pi) % 1 * 2.*np.pi
self.updates.append(dynamic)
if (sInd+1) % 50 == 0:
print 'stars: %r / %r' % (sInd+1,targlist.nStars)
self.updates = np.array(self.updates)
print 'Completeness update calculations finished'
def completeness_update(self, sInd, targlist, obsbegin, obsend, nexttime):
"""Updates completeness value for stars previously observed
Args:
sInd (integer):
Index of star just observed
targlist (TargetList):
TargetList class module
obsbegin (astropy Quantity):
Time of observation begin in units of day
obsend (astropy Quantity):
Time of observation end in units of day
nexttime (astropy Quantity):
Time of next observational period in units of day
Returns:
comp0 (ndarray):
Completeness values for each star in the target list
"""
self.visits[sInd] += 1
if self.visits[sInd] > len(self.updates[sInd])-1:
targlist.comp0[sInd] = self.updates[sInd][-1]
else:
targlist.comp0[sInd] = self.updates[sInd][self.visits[sInd]]
return targlist.comp0
def genC(self, Cpath, nplan, xedges, yedges, steps):
"""Gets completeness interpolant for initial completeness
This function either loads a completeness .comp file based on specified
Planet Population module or performs Monte Carlo simulations to get
the 2D completeness values needed for interpolation.
Args:
Cpath (string):
path to 2D completeness value array
nplan (float):
number of planets used in each simulation
xedges (ndarray):
1D numpy ndarray of x edge of 2d histogram (separation)
yedges (ndarray):
1D numpy ndarray of y edge of 2d histogram (dMag)
steps (integer):
number of simulations to perform
        Returns:
            H (ndarray):
                2D numpy ndarray of completeness probability density values
            xedges (ndarray):
                1D numpy ndarray of x edge of 2d histogram (separation)
            yedges (ndarray):
                1D numpy ndarray of y edge of 2d histogram (dMag)
"""
# if the 2D completeness pdf array exists as a .comp file load it
if os.path.exists(Cpath):
print 'Loading cached completeness file from "%s".' % Cpath
H = pickle.load(open(Cpath, 'rb'))
print 'Completeness loaded from cache.'
#h, xedges, yedges = self.hist(nplan, xedges, yedges)
else:
# run Monte Carlo simulation and pickle the resulting array
print 'Cached completeness file not found at "%s".' % Cpath
print 'Beginning Monte Carlo completeness calculations.'
t0, t1 = None, None # keep track of per-iteration time
for i in xrange(steps):
t0, t1 = t1, time.time()
if t0 is None:
delta_t_msg = '' # no message
else:
delta_t_msg = '[%.3f s/iteration]' % (t1 - t0)
print 'Completeness iteration: %5d / %5d %s' % (i+1, steps, delta_t_msg)
# get completeness histogram
h, xedges, yedges = self.hist(nplan, xedges, yedges)
if i == 0:
H = h
else:
H += h
H = H/(self.Nplanets*(xedges[1]-xedges[0])*(yedges[1]-yedges[0]))
# store 2D completeness pdf array as .comp file
pickle.dump(H, open(Cpath, 'wb'))
print 'Monte Carlo completeness calculations finished'
print '2D completeness array stored in %r' % Cpath
return H, xedges, yedges
def hist(self, nplan, xedges, yedges):
"""Returns completeness histogram for Monte Carlo simulation
This function uses the inherited Planet Population module.
Args:
nplan (float):
Number of planets used
xedges (ndarray):
1D numpy ndarray of x edge of 2d histogram (separation)
yedges (ndarray):
1D numpy ndarray of y edge of 2d histogram (dMag)
        Returns:
            h (ndarray):
                2D numpy ndarray containing completeness histogram
            xedges (ndarray):
                1D numpy ndarray of x edge of 2d histogram (separation)
            yedges (ndarray):
                1D numpy ndarray of y edge of 2d histogram (dMag)
"""
s, dMag = self.genplans(nplan)
# get histogram
h, yedges, xedges = np.histogram2d(dMag, s.to('AU').value, bins=1000, \
range=[[yedges.min(), yedges.max()], [xedges.min(), xedges.max()]])
return h, xedges, yedges
def genplans(self, nplan):
"""Generates planet data needed for Monte Carlo simulation
Args:
nplan (integer):
Number of planets
Returns:
s (astropy Quantity array):
Planet apparent separations in units of AU
dMag (ndarray):
Difference in brightness
"""
nplan = int(nplan)
# sample uniform distribution of mean anomaly
M = np.random.uniform(high=2.*np.pi,size=nplan)
# sample semi-major axis
a = self.PlanetPopulation.gen_sma(nplan).to('AU').value
# sample other necessary orbital parameters
if np.sum(self.PlanetPopulation.erange) == 0:
# all circular orbits
r = a
e = 0.
E = M
else:
# sample eccentricity
if self.PlanetPopulation.constrainOrbits:
e = self.PlanetPopulation.gen_eccen_from_sma(nplan,a*u.AU)
else:
e = self.PlanetPopulation.gen_eccen(nplan)
# Newton-Raphson to find E
E = eccanom(M,e)
# orbital radius
r = a*(1-e*np.cos(E))
# orbit angle sampling
O = self.PlanetPopulation.gen_O(nplan).to('rad').value
w = self.PlanetPopulation.gen_w(nplan).to('rad').value
I = self.PlanetPopulation.gen_I(nplan).to('rad').value
r1 = r*(np.cos(E) - e)
r1 = np.hstack((r1.reshape(len(r1),1), r1.reshape(len(r1),1), r1.reshape(len(r1),1)))
r2 = r*np.sin(E)*np.sqrt(1. - e**2)
r2 = np.hstack((r2.reshape(len(r2),1), r2.reshape(len(r2),1), r2.reshape(len(r2),1)))
a1 = np.cos(O)*np.cos(w) - np.sin(O)*np.sin(w)*np.cos(I)
a2 = np.sin(O)*np.cos(w) + np.cos(O)*np.sin(w)*np.cos(I)
a3 = np.sin(w)*np.sin(I)
A = np.hstack((a1.reshape(len(a1),1), a2.reshape(len(a2),1), a3.reshape(len(a3),1)))
b1 = -np.cos(O)*np.sin(w) - np.sin(O)*np.cos(w)*np.cos(I)
b2 = -np.sin(O)*np.sin(w) + np.cos(O)*np.cos(w)*np.cos(I)
b3 = np.cos(w)*np.sin(I)
B = np.hstack((b1.reshape(len(b1),1), b2.reshape(len(b2),1), b3.reshape(len(b3),1)))
# planet position, planet-star distance, apparent separation
r = (A*r1 + B*r2)*u.AU
d = np.sqrt(np.sum(r**2, axis=1))
s = np.sqrt(np.sum(r[:,0:2]**2, axis=1))
# sample albedo, planetary radius, phase function
p = self.PlanetPopulation.gen_albedo(nplan)
Rp = self.PlanetPopulation.gen_radius(nplan)
beta = np.arccos(r[:,2]/d)
Phi = self.PlanetPhysicalModel.calc_Phi(beta)
# calculate dMag
dMag = deltaMag(p,Rp,d,Phi)
return s, dMag
| 42.08
| 109
| 0.537575
| 17,485
| 0.97769
| 0
| 0
| 0
| 0
| 0
| 0
| 6,724
| 0.375979
|
e3e6feda3445e87c646510a9a3a710d5ae1d2df6
| 1,418
|
py
|
Python
|
pylsp/plugins/hover.py
|
nemethf/python-lsp-server
|
34be02a6ce37bab7fb9ba1845006c0af16fb7efc
|
[
"MIT"
] | 1
|
2021-07-08T01:27:25.000Z
|
2021-07-08T01:27:25.000Z
|
pylsp/plugins/hover.py
|
nemethf/python-lsp-server
|
34be02a6ce37bab7fb9ba1845006c0af16fb7efc
|
[
"MIT"
] | null | null | null |
pylsp/plugins/hover.py
|
nemethf/python-lsp-server
|
34be02a6ce37bab7fb9ba1845006c0af16fb7efc
|
[
"MIT"
] | null | null | null |
# Copyright 2017-2020 Palantir Technologies, Inc.
# Copyright 2021- Python Language Server Contributors.
import logging
from pylsp import hookimpl, _utils
log = logging.getLogger(__name__)
@hookimpl
def pylsp_hover(document, position):
code_position = _utils.position_to_jedi_linecolumn(document, position)
definitions = document.jedi_script().infer(**code_position)
word = document.word_at_position(position)
# Find first exact matching definition
definition = next((x for x in definitions if x.name == word), None)
# Ensure a definition is used if only one is available
# even if the word doesn't match. An example of this case is 'np'
# where 'numpy' doesn't match with 'np'. Same for NumPy ufuncs
if len(definitions) == 1:
definition = definitions[0]
if not definition:
return {'contents': ''}
# raw docstring returns only doc, without signature
doc = _utils.format_docstring(definition.docstring(raw=True))
# Find first exact matching signature
signature = next((x.to_string() for x in definition.get_signatures()
if x.name == word), '')
contents = []
if signature:
contents.append({
'language': 'python',
'value': signature,
})
if doc:
contents.append(doc)
if not contents:
return {'contents': ''}
return {'contents': contents}
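# Example of a typical non-empty result (illustrative values only):
# {'contents': [{'language': 'python', 'value': 'some_function(arg)'},
#               '<docstring formatted by _utils.format_docstring>']}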
| 28.36
| 74
| 0.662906
| 0
| 0
| 0
| 0
| 1,223
| 0.862482
| 0
| 0
| 471
| 0.332158
|
e3e789b09b4bc5d5bd9a4f91dddf897a4ef4d03a
| 4,753
|
py
|
Python
|
LocStat/pipelines/output.py
|
nhtoshiaki/LocStat
|
0196d627d1f16a778cbc8f1996d217d8fee72afb
|
[
"MIT"
] | null | null | null |
LocStat/pipelines/output.py
|
nhtoshiaki/LocStat
|
0196d627d1f16a778cbc8f1996d217d8fee72afb
|
[
"MIT"
] | null | null | null |
LocStat/pipelines/output.py
|
nhtoshiaki/LocStat
|
0196d627d1f16a778cbc8f1996d217d8fee72afb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import items
class TxtFileWriter:
"""
Write the repository representation in the file.
"""
def __init__(self, file_path):
self.file_path = file_path
def __enter__(self):
self.file = open(self.file_path, 'w', encoding='utf-8')
return self
def __exit__(self, *args):
if self.file and not self.file.closed:
self.file.close()
@property
def file_path(self):
return self._file_path
@file_path.setter
def file_path(self, file_path):
self._file_path = file_path
@property
def file(self):
return self._file
@file.setter
def file(self, file):
self._file = file
@property
def closed(self):
if self.file:
return self.file.closed
else:
return True
def write(self, root_dir_item):
if self.file and not self.file.closed:
self.file.write(f'Repositorio: '
f'{root_dir_item["repository_relurl"]}\n')
self.file.write(f'Total de linhas: '
f'{root_dir_item["amount_lines"]}\n')
self.file.write(f'Total de bytes: '
f'{root_dir_item["amount_bytes"]}\n')
self.file.write('\n')
self.write_extension_statistic(root_dir_item)
self.file.write('\n')
self.write_tree_structure(root_dir_item)
def write_extension_statistic(self, root_dir_item):
"""
Writes the table with the number of lines and bytes for each file
extension.
"""
if self.file and not self.file.closed:
self.file.write(f'{"Extensao":<10} | {"Linhas":^15} | '
f'{"Bytes":^15}\n')
self.file.write(f'{"":=<11}|{"":=^17}|{"":=^16}\n')
if 'index' in root_dir_item:
for ext, info in root_dir_item['index'].items():
if len(ext) == 0:
ext = '<outros>'
amount_lines, amount_bytes = 0, 0
perc_lines, perc_bytes = 0, 0
if 'amount_lines' in info:
amount_lines = info['amount_lines']
if 'amount_bytes' in info:
amount_bytes = info['amount_bytes']
if 'amount_lines' in root_dir_item and \
root_dir_item['amount_lines'] != 0:
perc_lines = int(100 * amount_lines
/ root_dir_item['amount_lines'])
if 'amount_bytes' in root_dir_item and \
root_dir_item['amount_bytes'] != 0:
perc_bytes = int(100 * amount_bytes
/ root_dir_item['amount_bytes'])
self.file.write(f'{ext:<10} | {amount_lines:>7} '
f'({perc_lines:>3} %) | '
f'{amount_bytes:>6} '
f'({perc_bytes:>3} %)\n')
def write_tree_structure(self, root_dir_item):
"""
Writes the repository file structure.
"""
def _tree_structure(file_item, depth):
"""
Recursive function to create the file structure.
"""
structure = ''
for i in range(depth - 1):
structure += '| '
structure += '|-- '
if 'name' in file_item:
if isinstance(file_item, items.DirectoryItem):
structure += f'[{file_item["name"]}]\n'
if 'children' in file_item \
and type(file_item['children']) is list:
for child in file_item['children']:
structure += \
_tree_structure(child, depth + 1)
elif isinstance(file_item, items.TextFileItem):
structure += f'{file_item["name"]}'
if 'amount_lines' in file_item:
structure += f' ({file_item["amount_lines"]} linhas)'
structure += '\n'
return structure
if self.file and not self.file.closed:
structure = ''
if 'repository_name' in root_dir_item:
structure += f'[{root_dir_item["repository_name"]}]\n'
if 'children' in root_dir_item and type(root_dir_item['children'])\
is list:
for child in root_dir_item['children']:
structure += _tree_structure(child, 1)
self.file.write(structure)
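# --- Usage sketch (illustrative only) ---
# TxtFileWriter is a context manager, so a pipeline would typically drive it
# as below; `root_dir_item` stands in for a DirectoryItem built elsewhere.
#
# def write_report(root_dir_item, path='report.txt'):
#     with TxtFileWriter(path) as writer:
#         writer.write(root_dir_item)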
| 37.722222
| 79
| 0.487482
| 4,712
| 0.991374
| 0
| 0
| 400
| 0.084157
| 0
| 0
| 1,128
| 0.237324
|
e3e858c279c7da79f073153068c7d9c2b91c90b3
| 736
|
py
|
Python
|
greensinversion/regularization.py
|
isuthermography/greensinversion
|
92f272a3649bb2f6b132f8cd239edd68dd2a6a62
|
[
"Unlicense"
] | 1
|
2020-07-25T23:23:04.000Z
|
2020-07-25T23:23:04.000Z
|
greensinversion/regularization.py
|
isuthermography/greensinversion
|
92f272a3649bb2f6b132f8cd239edd68dd2a6a62
|
[
"Unlicense"
] | 1
|
2018-10-04T01:43:25.000Z
|
2018-11-28T17:59:12.000Z
|
greensinversion/regularization.py
|
isuthermography/greensinversion
|
92f272a3649bb2f6b132f8cd239edd68dd2a6a62
|
[
"Unlicense"
] | 1
|
2020-07-25T23:23:06.000Z
|
2020-07-25T23:23:06.000Z
|
import numpy as np
def apply_tikhonov_regularization(u,s,v,usetikparam,vector):
#alpha = usetikparam*np.sqrt(u.shape[0]/v.shape[1]) # Tikhonov parameter interpreted as scaled by sqrt(matrix rows/matrix cols) so that it is directly interpretable as NETD/NESI (noise equivalent temperature difference over noise equivalent source intensity, with NETD measured in deg. K and NESI measured in J/m^2
# NOTE: u and v no longer orthogonal as they have already been pre-multiplied by scaling factors
# tikhonov scaling temporarily disabled
alpha=usetikparam
d = s/(s**2+alpha**2)
#inverse = np.dot(v.T*(d.reshape(1,d.shape[0])),u.T)
#return inverse
return np.dot(v.T,np.dot(u.T,vector)*d)
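if __name__ == "__main__":
    # Minimal self-check (illustrative, not part of the library): build a small
    # ill-conditioned least-squares problem, take its SVD and apply the
    # Tikhonov-damped pseudoinverse above. Here `v` is passed as the `vh`
    # factor returned by numpy, which matches the v.T usage in the function.
    rng = np.random.default_rng(0)
    A = rng.standard_normal((20, 5)) @ np.diag([1.0, 0.5, 0.1, 1e-3, 1e-6])
    x_true = rng.standard_normal(5)
    b = A @ x_true + 1e-4 * rng.standard_normal(20)
    u, s, vh = np.linalg.svd(A, full_matrices=False)
    x_reg = apply_tikhonov_regularization(u, s, vh, usetikparam=1e-3, vector=b)
    print("regularized solution:", x_reg)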
| 36.8
| 319
| 0.716033
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 517
| 0.702446
|
e3e870bbf5df4a845585a4326902f3311e5fcf1d
| 1,563
|
py
|
Python
|
examples/telebot.py
|
b3ntuz1/words
|
5d14439e18d9462a02a836afc3497a188bfc3224
|
[
"MIT"
] | null | null | null |
examples/telebot.py
|
b3ntuz1/words
|
5d14439e18d9462a02a836afc3497a188bfc3224
|
[
"MIT"
] | null | null | null |
examples/telebot.py
|
b3ntuz1/words
|
5d14439e18d9462a02a836afc3497a188bfc3224
|
[
"MIT"
] | null | null | null |
import flask
import telebot
import words
from os import environ
from dotenv import load_dotenv
load_dotenv()
app = flask.Flask(__name__)
bot = telebot.TeleBot(environ.get("TG_TOKEN"), threaded=False)
WEBHOOK_URL_PATH = "/%s/" % (environ.get("TG_TOKEN"))
# # Remove webhook, it fails sometimes the set if there is a previous webhook
# bot.remove_webhook()
# time.sleep(1)
# # Set webhook
# bot.set_webhook(url=environ.get("WEBHOOK_URL") + WEBHOOK_URL_PATH)
@bot.message_handler(commands=['ping'])
def ping(message):
return bot.reply_to(message, "pong")
@bot.message_handler(commands=['start_game'])
def start_game(message):
if "group" in message.chat.type:
admins = bot.get_chat_administrators(message.chat.id)
w = words.Words()
for a in admins:
if message.from_user.id == a.user.id:
return bot.reply_to(message, w.start_game())
return bot.reply_to(message, "Only admins can do that!")
@bot.message_handler(commands=['ranks'])
def ranks(message):
w = words.Words()
return bot.reply_to(message, "`" + w.rankings() + "`", parse_mode="Markdown")
@bot.message_handler(commands=['ans'])
def answer(message):
if message.chat.id == message.from_user.id:
return bot.reply_to(message, "Sorry, its command work only on public chats.")
w = words.Words()
ans = message.text.split(' ')
if len(ans) == 2:
return bot.reply_to(message, w.check(message.from_user.first_name, ans[1]), parse_mode="Markdown")
return bot.reply_to(message, "Wrong command. You should use /ans <pkm_name>")
| 31.26
| 106
| 0.68778
| 0
| 0
| 0
| 0
| 1,115
| 0.713372
| 0
| 0
| 415
| 0.265515
|
e3e8a02a4a0c93dadb97549166e65600c319f251
| 547
|
py
|
Python
|
lvsfunc/__init__.py
|
DeadNews/lvsfunc
|
15bc8b99595c5066c15f4aba9fb9989e1068a9ee
|
[
"MIT"
] | null | null | null |
lvsfunc/__init__.py
|
DeadNews/lvsfunc
|
15bc8b99595c5066c15f4aba9fb9989e1068a9ee
|
[
"MIT"
] | null | null | null |
lvsfunc/__init__.py
|
DeadNews/lvsfunc
|
15bc8b99595c5066c15f4aba9fb9989e1068a9ee
|
[
"MIT"
] | null | null | null |
"""
lvsfunc, a collection of VapourSynth functions and wrappers written and/or modified by LightArrowsEXE.
If you spot any issues, please do not hesitate to send in a Pull Request
or reach out to me on Discord (LightArrowsEXE#0476)!
"""
# flake8: noqa
from . import aa, comparison, deinterlace, denoise, misc, scale
# Aliases:
comp = comparison.compare
diff = comparison.tvbd_diff
ef = misc.edgefixer
qden = denoise.quick_denoise
rfs = misc.replace_ranges
scomp = comparison.stack_compare
sraa = aa.upscaled_sraa
src = misc.source
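# Illustrative use of the aliases above (argument lists are indicative only
# and may differ between lvsfunc versions):
#   import lvsfunc as lvf
#   clip = lvf.src(r"path/to/video.mkv")   # misc.source
#   lvf.comp(clip_a, clip_b)               # comparison.compare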
| 26.047619
| 106
| 0.758684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 273
| 0.499086
|
e3e90ea49def6ec58ac5f2b5f001c13fe85417ac
| 529
|
py
|
Python
|
Roman_to_Integer.py
|
sujitmandal/leetCode
|
b52bfd68cd93243765a94a190807f9b79ec4b4af
|
[
"MIT"
] | null | null | null |
Roman_to_Integer.py
|
sujitmandal/leetCode
|
b52bfd68cd93243765a94a190807f9b79ec4b4af
|
[
"MIT"
] | null | null | null |
Roman_to_Integer.py
|
sujitmandal/leetCode
|
b52bfd68cd93243765a94a190807f9b79ec4b4af
|
[
"MIT"
] | null | null | null |
roman_dict = {
"I" : 1,
"V" : 5,
"X" : 10,
"L" : 50,
"C" : 100,
"D" : 500,
"M" : 1000
}
class Solution:
def romanToInt(self, s: str) -> int:
previous = 0
current = 0
result = 0
for x in s[::-1]:
current = roman_dict[x]
if (previous > current):
result -= current
else:
result += current
previous = current
        return result
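# Worked example of the right-to-left scan above, for "XXVII":
#   each of I(1), I(1), V(5), X(10), X(10) is >= the value to its right,
#   so all are added: 1 + 1 + 5 + 10 + 10 = 27.
# For "IV", the I(1) is smaller than the V(5) already seen (to its right),
# so it is subtracted: 5 - 1 = 4.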
obj = Solution()
obj.romanToInt("III")
obj.romanToInt("XXVII")
obj.romanToInt("IV")
| 18.241379
| 40
| 0.478261
| 355
| 0.671078
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 0.069943
|
e3ea89a73be617f94a289a935e3c1a5396be4890
| 4,214
|
py
|
Python
|
utils/warmup.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 67
|
2021-12-02T05:53:44.000Z
|
2022-03-31T07:21:26.000Z
|
utils/warmup.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 13
|
2021-12-05T14:23:46.000Z
|
2022-03-25T21:07:20.000Z
|
utils/warmup.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 16
|
2022-01-11T11:48:24.000Z
|
2022-03-27T19:20:58.000Z
|
"""
MIT License
Copyright (c) 2019 Ildoo Kim
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
class GradualWarmupScheduler(_LRScheduler):
""" Gradually warm-up(increasing) learning rate in optimizer.
Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
Args:
optimizer (Optimizer): Wrapped optimizer.
multiplier: target learning rate = base lr * multiplier if multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with the base_lr.
total_epoch: target learning rate is reached at total_epoch, gradually
after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
"""
def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
self.multiplier = multiplier
if self.multiplier < 1.:
raise ValueError('multiplier should be greater thant or equal to 1.')
self.total_epoch = total_epoch
self.after_scheduler = after_scheduler
self.finished = False
super(GradualWarmupScheduler, self).__init__(optimizer)
def get_lr(self):
if self.last_epoch > self.total_epoch:
if self.after_scheduler:
if not self.finished:
self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
self.finished = True
return self.after_scheduler.get_last_lr()
return [base_lr * self.multiplier for base_lr in self.base_lrs]
if self.multiplier == 1.0:
return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
else:
return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
def step_ReduceLROnPlateau(self, metrics, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch if epoch != 0 else 1 # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning
if self.last_epoch <= self.total_epoch:
warmup_lr = [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
param_group['lr'] = lr
else:
if epoch is None:
self.after_scheduler.step(metrics, None)
else:
self.after_scheduler.step(metrics, epoch - self.total_epoch)
def step(self, epoch=None, metrics=None):
if type(self.after_scheduler) != ReduceLROnPlateau:
if self.finished and self.after_scheduler:
if epoch is None:
self.after_scheduler.step(None)
else:
self.after_scheduler.step(epoch - self.total_epoch)
self._last_lr = self.after_scheduler.get_last_lr()
else:
return super(GradualWarmupScheduler, self).step(epoch)
else:
self.step_ReduceLROnPlateau(metrics, epoch)
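# --- Usage sketch (illustrative only, not from the original file) ---
# Ramps the learning rate up over the first 5 epochs, then hands control to a
# cosine schedule; the model and training step are placeholders.
#
# import torch
# from torch.optim.lr_scheduler import CosineAnnealingLR
#
# model = torch.nn.Linear(10, 2)
# optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# cosine = CosineAnnealingLR(optimizer, T_max=95)
# scheduler = GradualWarmupScheduler(optimizer, multiplier=1.0,
#                                    total_epoch=5, after_scheduler=cosine)
# for epoch in range(100):
#     ...  # train one epoch here (placeholder)
#     scheduler.step()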
| 48.436782
| 152
| 0.685809
| 3,032
| 0.719506
| 0
| 0
| 0
| 0
| 0
| 0
| 1,743
| 0.413621
|
e3eab184db32babbdcd60c4ea7969530ce380571
| 2,154
|
py
|
Python
|
src/ee/deployers.py
|
marcelotrevisani/ee
|
933d6a80402b30943ca3df4a1a120047f7163a4b
|
[
"MIT"
] | 5
|
2021-12-09T21:54:35.000Z
|
2021-12-14T11:25:57.000Z
|
src/ee/deployers.py
|
marcelotrevisani/ee
|
933d6a80402b30943ca3df4a1a120047f7163a4b
|
[
"MIT"
] | 6
|
2021-12-09T21:04:19.000Z
|
2022-02-11T11:19:44.000Z
|
src/ee/deployers.py
|
marcelotrevisani/ee
|
933d6a80402b30943ca3df4a1a120047f7163a4b
|
[
"MIT"
] | 2
|
2021-02-12T20:20:26.000Z
|
2021-12-14T11:24:24.000Z
|
import abc
import logging
from typing import List
from ee.models import EnvironmentDefinition
logger = logging.getLogger(__name__)
class DeploymentBackend(abc.ABC):
def run(self, env_def: EnvironmentDefinition, command: List[str]):
"""
This is the main public method.
This is a template method which relies on the DeploymentBackend
subclasses to provide the methods:
.env_exists(env_id)
.create_env(env_def)
.execute(env_id, command_args)
Args:
env_def: the full EnvironmentDefinition object
command: the list of command line arguments to be executed
inside the environment
"""
if not self.env_exists(env_def.id):
logger.info(
f"Environment not found: {env_def.id}"
" - Please wait while EE creates the env ..."
)
if self.create_env(env_def):
logger.info(f"Environment created successfully: {env_def.id}")
else:
logger.error(f"Failed to create environment: {env_def.id}")
if command:
self.execute(env_def.id, command)
@abc.abstractmethod
def env_exists(self, env_id: str) -> bool:
"""
Checks whether an environment already exists or not, given
its environment id.
Args:
env_id: hash/identifier for the environment
Returns:
True if the environment exists, False otherwise
"""
@abc.abstractmethod
def create_env(self, env_def: EnvironmentDefinition):
"""
Create an environment using the environment def
Args:
env_def: Full environment definition
Returns:
None
"""
@abc.abstractmethod
def execute(self, env_id: str, command: List[str]):
"""
Executes the given command inside the given environment.
Args:
env_id: hash/identifier for the environment
command: list of command line arguments (including the main command)
Returns:
None
"""
| 27.615385
| 80
| 0.597029
| 2,018
| 0.936862
| 0
| 0
| 944
| 0.438254
| 0
| 0
| 1,339
| 0.621634
|
e3eb6d0f0d638a2beae2a17150b8764d8ef995b7
| 2,946
|
py
|
Python
|
vb_simulation_pkgs/example_pkgs/pkg_moveit_examples/scripts/node_eg2_predefined_pose.py
|
ROBODITYA/Eyantra-2021-Vargi-Bots
|
f1c6a82c46e6e84486a4832b3fbcd02625849447
|
[
"MIT"
] | 1
|
2021-07-13T07:05:29.000Z
|
2021-07-13T07:05:29.000Z
|
vb_simulation_pkgs/example_pkgs/pkg_moveit_examples/scripts/node_eg2_predefined_pose.py
|
TejasPhutane/Eyantra-2021-Vargi-Bots
|
ab84a1304101850be8c0f69cfe6de70d53c33189
|
[
"MIT"
] | 1
|
2021-06-05T07:58:03.000Z
|
2021-06-05T07:58:03.000Z
|
vb_simulation_pkgs/example_pkgs/pkg_moveit_examples/scripts/node_eg2_predefined_pose.py
|
ROBODITYA/Eyantra-2021-Vargi-Bots
|
f1c6a82c46e6e84486a4832b3fbcd02625849447
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import rospy
import sys
import copy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import actionlib
class Ur5Moveit:
# Constructor
def __init__(self, arg_robot_name):
rospy.init_node('node_eg2_predefined_pose', anonymous=True)
self._robot_ns = '/' + arg_robot_name
self._planning_group = "manipulator"
self._commander = moveit_commander.roscpp_initialize(sys.argv)
self._robot = moveit_commander.RobotCommander(robot_description=self._robot_ns + "/robot_description", ns=self._robot_ns)
self._scene = moveit_commander.PlanningSceneInterface(ns=self._robot_ns)
self._group = moveit_commander.MoveGroupCommander(self._planning_group, robot_description=self._robot_ns + "/robot_description", ns=self._robot_ns)
self._display_trajectory_publisher = rospy.Publisher(self._robot_ns + '/move_group/display_planned_path', moveit_msgs.msg.DisplayTrajectory, queue_size=1)
self._exectute_trajectory_client = actionlib.SimpleActionClient(self._robot_ns + '/execute_trajectory', moveit_msgs.msg.ExecuteTrajectoryAction)
self._exectute_trajectory_client.wait_for_server()
self._planning_frame = self._group.get_planning_frame()
self._eef_link = self._group.get_end_effector_link()
self._group_names = self._robot.get_group_names()
self._box_name = ''
# Current State of the Robot is needed to add box to planning scene
self._curr_state = self._robot.get_current_state()
rospy.loginfo(
'\033[94m' + "Planning Group: {}".format(self._planning_frame) + '\033[0m')
rospy.loginfo(
'\033[94m' + "End Effector Link: {}".format(self._eef_link) + '\033[0m')
rospy.loginfo(
'\033[94m' + "Group Names: {}".format(self._group_names) + '\033[0m')
rospy.loginfo('\033[94m' + " >>> Ur5Moveit init done." + '\033[0m')
def go_to_predefined_pose(self, arg_pose_name):
rospy.loginfo('\033[94m' + "Going to Pose: {}".format(arg_pose_name) + '\033[0m')
self._group.set_named_target(arg_pose_name)
plan = self._group.plan()
goal = moveit_msgs.msg.ExecuteTrajectoryGoal()
goal.trajectory = plan
self._exectute_trajectory_client.send_goal(goal)
self._exectute_trajectory_client.wait_for_result()
rospy.loginfo('\033[94m' + "Now at Pose: {}".format(arg_pose_name) + '\033[0m')
# Destructor
def __del__(self):
moveit_commander.roscpp_shutdown()
rospy.loginfo(
'\033[94m' + "Object of class Ur5Moveit Deleted." + '\033[0m')
def main():
ur5 = Ur5Moveit(sys.argv[1])
while not rospy.is_shutdown():
ur5.go_to_predefined_pose("straightUp")
rospy.sleep(2)
ur5.go_to_predefined_pose("allZero")
rospy.sleep(2)
del ur5
if __name__ == '__main__':
main()
| 37.291139
| 163
| 0.681942
| 2,519
| 0.855058
| 0
| 0
| 0
| 0
| 0
| 0
| 576
| 0.195519
|
e3ecff00be006576e1644fd5e646a6c21330ba43
| 5,047
|
py
|
Python
|
plugins/pelican_gist/plugin.py
|
kura/kura.io
|
7f9ba2140b93bba86d1367e41706ad72f9e672bf
|
[
"MIT"
] | 13
|
2015-02-19T22:14:07.000Z
|
2021-02-07T14:16:34.000Z
|
plugins/pelican_gist/plugin.py
|
kura/kura.gg
|
42c8e0a7a6d9480297df004452b073883ff9693e
|
[
"MIT"
] | 2
|
2015-07-28T10:02:57.000Z
|
2017-07-28T18:08:59.000Z
|
plugins/pelican_gist/plugin.py
|
kura/kura.io
|
7f9ba2140b93bba86d1367e41706ad72f9e672bf
|
[
"MIT"
] | 7
|
2015-08-26T16:52:00.000Z
|
2019-10-11T05:32:37.000Z
|
# -*- coding: utf-8 -*-
"""
Gist embedding plugin for Pelican
=================================
This plugin allows you to embed `Gists`_ into your posts.
.. _Gists: https://gist.github.com/
"""
from __future__ import unicode_literals
import hashlib
import logging
import os
import re
import codecs
import pygments
logger = logging.getLogger(__name__)
gist_regex = re.compile(
r'(<p>\[gist:id\=([0-9a-fA-F]+)(,file\=([^\],]+))?(,filetype\=([a-zA-Z]+))?\]</p>)')
gist_template = """<div class="gist">
<script src='{{script_url}}' crossorigin='anonymous'></script>
<noscript>
{{code}}
</noscript>
</div>"""
def gist_url(gist_id, filename=None):
url = "https://gist.githubusercontent.com/raw/{}".format(gist_id)
if filename is not None:
url += "/{}".format(filename)
return url
def script_url(gist_id, filename=None):
url = "https://gist.github.com/{}.js".format(gist_id)
if filename is not None:
url += "?file={}".format(filename)
return url
def cache_filename(base, gist_id, filename=None):
h = hashlib.md5()
h.update(str(gist_id).encode())
if filename is not None:
h.update(filename.encode())
return os.path.join(base, '{}.cache'.format(h.hexdigest()))
def get_cache(base, gist_id, filename=None):
cache_file = cache_filename(base, gist_id, filename)
if not os.path.exists(cache_file):
return None
with codecs.open(cache_file, 'rb', 'utf-8') as f:
return f.read()
def set_cache(base, gist_id, body, filename=None):
with codecs.open(cache_filename(base, gist_id, filename), 'wb', 'utf-8') as f:
f.write(body)
def fetch_gist(gist_id, filename=None):
"""Fetch a gist and return the contents as a string."""
import requests
url = gist_url(gist_id, filename)
response = requests.get(url)
if response.status_code != 200:
raise Exception('Got a bad status looking up gist.')
body = response.text
if not body:
raise Exception('Unable to get the gist contents.')
return body
def setup_gist(pelican):
"""Setup the default settings."""
pelican.settings.setdefault('GIST_CACHE_ENABLED', True)
pelican.settings.setdefault('GIST_CACHE_LOCATION',
'/tmp/gist-cache')
pelican.settings.setdefault('GIST_PYGMENTS_STYLE', 'default')
pelican.settings.setdefault('GIST_PYGMENTS_LINENUM', False)
# Make sure the gist cache directory exists
cache_base = pelican.settings.get('GIST_CACHE_LOCATION')
if not os.path.exists(cache_base):
os.makedirs(cache_base)
def render_code(code, filetype, pygments_style):
"""Renders a piece of code into HTML. Highlights syntax if filetype is specfied"""
if filetype:
lexer = pygments.lexers.get_lexer_by_name(filetype)
formatter = pygments.formatters.HtmlFormatter(style=pygments_style)
return pygments.highlight(code, lexer, formatter)
else:
return "<pre><code>{}</code></pre>".format(code)
def replace_gist_tags(generator):
"""Replace gist tags in the article content."""
from jinja2 import Template
template = Template(gist_template)
should_cache = generator.context.get('GIST_CACHE_ENABLED')
cache_location = generator.context.get('GIST_CACHE_LOCATION')
pygments_style = generator.context.get('GIST_PYGMENTS_STYLE')
body = None
for article in generator.articles:
for match in gist_regex.findall(article._content):
gist_id = match[1]
filename = None
filetype = None
if match[3]:
filename = match[3]
if match[5]:
filetype = match[5]
logger.info('[gist]: Found gist id {} with filename {} and filetype {}'.format(
gist_id,
filename,
filetype,
))
if should_cache:
body = get_cache(cache_location, gist_id, filename)
# Fetch the gist
if not body:
logger.info('[gist]: Gist did not exist in cache, fetching...')
body = fetch_gist(gist_id, filename)
if should_cache:
logger.info('[gist]: Saving gist to cache...')
set_cache(cache_location, gist_id, body, filename)
else:
logger.info('[gist]: Found gist in cache.')
# Create a context to render with
context = generator.context.copy()
context.update({
'script_url': script_url(gist_id, filename),
'code': render_code(body, filetype, pygments_style)
})
# Render the template
replacement = template.render(context)
article._content = article._content.replace(match[0], replacement)
def register():
"""Plugin registration."""
from pelican import signals
signals.initialized.connect(setup_gist)
signals.article_generator_finalized.connect(replace_gist_tags)
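For reference, the tag syntax that gist_regex matches inside rendered article HTML looks like the snippet below. This is an illustrative check only; the gist id, filename and filetype values are made up.

sample = "<p>[gist:id=4f3afda96d7c86f0bb4d,file=example.py,filetype=python]</p>"
m = gist_regex.search(sample)
if m is not None:
    print(m.group(2))  # gist id  -> '4f3afda96d7c86f0bb4d'
    print(m.group(4))  # filename -> 'example.py'
    print(m.group(6))  # filetype -> 'python'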
| 30.96319
| 91
| 0.624529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,382
| 0.273826
|
e3ed166cf5e760668330d7ff8e4a946c7c875bce
| 1,188
|
py
|
Python
|
ass1/rsc/ts.py
|
suryaavala/network
|
291b59dce961448b2a9b92b6a0754ec994a6fb91
|
[
"MIT"
] | null | null | null |
ass1/rsc/ts.py
|
suryaavala/network
|
291b59dce961448b2a9b92b6a0754ec994a6fb91
|
[
"MIT"
] | null | null | null |
ass1/rsc/ts.py
|
suryaavala/network
|
291b59dce961448b2a9b92b6a0754ec994a6fb91
|
[
"MIT"
] | null | null | null |
import time
from socket import *
import sys
host = sys.argv[1]
#port = sys.argv[2]
#message = sys.argv[2]
sock = socket(AF_INET, SOCK_DGRAM)
sock.settimeout(1)
sock.bind((str(host),0))
print(sock.getsockname(),sock.getsockname()[1])
port = 5967
for message in 'abcdefghijklmnopqrstuvwxyz':
sock.sendto(message.encode('ascii'), (str(host), int(port)))
print ("sent message: {} to address {}".format(message, (host,port)))
print('*******sleeping*********')
#time.sleep(10)
print ('*********woke up**********')
for message in 'abcdefghijklmnopqrstuvwxyz':
sock.sendto(message.encode('ascii'), (str(host), int(port)))
print ("sent message: {} to address {}".format(message, (host,port)))
#message_list = ["souce#","dest#","seq_nb","ack nb","ACK","SYN","FIN","RST",str("surya avinash avala data sfkjgd tjgt df".encode('ascii'))]
pay_load = "surya avinash avala data sfkjgd tjgt df"
header = ["souce#","dest#","seq_nb","ack nb","ACK","SYN","FIN","RST"]
message = ("+".join(header)+"+"+pay_load)
print("final message: {}".format(message))
sock.sendto(message.encode("ascii"), (str(host), int(port)))
print ("sent message: {} to address {}".format(message, (host,port)))
| 32.108108
| 139
| 0.653199
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 539
| 0.453704
|
e3edc74364411dacd749db536296fed60fe22954
| 26,799
|
py
|
Python
|
django/docs/ref/contrib/auth.txt.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
django/docs/ref/contrib/auth.txt.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
django/docs/ref/contrib/auth.txt.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
[redacted content: the file body in this row consists only of placeholder characters; nothing recoverable]
| 38.12091
| 96
| 0.79044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e3efc019f189a8a6987dd98946e89c72d64fa190
| 2,218
|
py
|
Python
|
BIA 660 - Web Analytics/Assignments/Assignment 2/webcounter.py
|
ParasGarg/Stevens-Computer-Science-Courses-Materials
|
13015e6e83471d89ae29474857fe83a81994420f
|
[
"MIT"
] | 25
|
2017-03-23T04:51:18.000Z
|
2022-03-03T21:51:11.000Z
|
BIA 660 - Web Analytics/Assignments/Assignment 2/webcounter.py
|
vaishnavimecit/Stevens-Computer-Science-Courses-Materials
|
13015e6e83471d89ae29474857fe83a81994420f
|
[
"MIT"
] | null | null | null |
BIA 660 - Web Analytics/Assignments/Assignment 2/webcounter.py
|
vaishnavimecit/Stevens-Computer-Science-Courses-Materials
|
13015e6e83471d89ae29474857fe83a81994420f
|
[
"MIT"
] | 19
|
2018-05-10T05:17:05.000Z
|
2022-03-12T05:18:58.000Z
|
"""
A script that reads a file from the web and
returns all the words whose frequency lies between the frequencies of the two given words.
"""
import re
from nltk.corpus import stopwords
import requests
from operator import itemgetter
def run(url, word1, word2):
freq = {} # keep the freq of each word in the file
freq[word1] = 0
freq[word2] = 0
stopLex = set(stopwords.words('english')) # build a set of English stopwords
success = False # becomes True when we get the file
for i in range(5): # try 5 times
try:
#use the browser to access the url
response = requests.get(url,headers = { 'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36', })
success = True # success
break # we got the file, break the loop
except: # requests.get() threw an exception, the attempt to get the response failed
print ('failed attempt',i)
# all five attempts failed, return None
if not success:
return None
readText = response.text # read in the text from the file
sentences = readText.split('.') # split the text into sentences
for sentence in sentences: # for each sentence
sentence = sentence.lower().strip() # lower case and strip
sentence=re.sub('[^a-z]', ' ', sentence) # replace all non-letter characters with a space
words = sentence.split(' ') # split to get the words in the sentence
for word in words: # for each word in the sentence
if word == '' or word in stopLex:
continue # ignore empty words and stopwords
else:
freq[word] = freq.get(word, 0) + 1 # update the frequency of the word
wordList = set() # set to store all the unique words
for word in freq: # traversing through all keys in the dictionary
if freq[word1] < freq[word] and freq[word2] > freq[word]:
wordList.add(word) # adding word to the set
return wordList # return the set
if __name__=='__main__':
word1 = "park"
word2 = "amazon"
print(run('http://tedlappas.com/wp-content/uploads/2016/09/textfile.txt', word1, word2))
| 35.774194
| 175
| 0.626691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,092
| 0.492335
|
e3f2542b1e8fcfc1c962b23f153fdbfa31f29be1
| 4,487
|
py
|
Python
|
dev/archery/archery/integration/util.py
|
palmerlao/arrow
|
4e680c46ad5aa76ba1dc85574c4e96a51450364f
|
[
"Apache-2.0"
] | null | null | null |
dev/archery/archery/integration/util.py
|
palmerlao/arrow
|
4e680c46ad5aa76ba1dc85574c4e96a51450364f
|
[
"Apache-2.0"
] | 8
|
2020-04-10T19:03:51.000Z
|
2021-01-21T01:06:28.000Z
|
dev/archery/archery/integration/util.py
|
signavio/arrow
|
866e6a82e2794b151235c19b8c5cbf1fcaf780ef
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import io
import os
import socket
import string
import subprocess
import sys
import threading
import uuid
import numpy as np
def guid():
return uuid.uuid4().hex
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
# SKIP categories
SKIP_ARROW = 'arrow'
SKIP_FLIGHT = 'flight'
ARROW_ROOT_DEFAULT = os.environ.get(
'ARROW_ROOT',
os.path.abspath(__file__).rsplit("/", 5)[0]
)
class _Printer:
"""
A print()-providing object that can override the stream output on
a per-thread basis.
"""
def __init__(self):
self._tls = threading.local()
def _get_stdout(self):
try:
return self._tls.stdout
except AttributeError:
self._tls.stdout = sys.stdout
self._tls.corked = False
return self._tls.stdout
def print(self, *args, **kwargs):
"""
A variant of print() that writes to a thread-local stream.
"""
print(*args, file=self._get_stdout(), **kwargs)
@property
def stdout(self):
"""
A thread-local stdout wrapper that may be temporarily buffered
using `cork()`.
"""
return self._get_stdout()
@contextlib.contextmanager
def cork(self):
"""
Temporarily buffer this thread's stream and write out its contents
at the end of the context manager. Useful to avoid interleaved
output when multiple threads output progress information.
"""
outer_stdout = self._get_stdout()
assert not self._tls.corked, "reentrant call"
inner_stdout = self._tls.stdout = io.StringIO()
self._tls.corked = True
try:
yield
finally:
self._tls.stdout = outer_stdout
self._tls.corked = False
outer_stdout.write(inner_stdout.getvalue())
outer_stdout.flush()
printer = _Printer()
log = printer.print
def rands(nchars):
"""
Generate one random string of nchars characters.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def tobytes(o):
if isinstance(o, str):
return o.encode('utf8')
return o
def frombytes(o):
if isinstance(o, bytes):
return o.decode('utf8')
return o
def run_cmd(cmd):
if isinstance(cmd, str):
cmd = cmd.split(' ')
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# this avoids hiding the stdout / stderr of failed processes
sio = io.StringIO()
print('Command failed:', " ".join(cmd), file=sio)
print('With output:', file=sio)
print('--------------', file=sio)
print(frombytes(e.output), file=sio)
print('--------------', file=sio)
raise RuntimeError(sio.getvalue())
return frombytes(output)
# Adapted from CPython
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
"""
with socket.socket(family, socktype) as tempsock:
tempsock.bind(('', 0))
port = tempsock.getsockname()[1]
del tempsock
return port
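A usage sketch for the printer above (not part of the module): cork() buffers one thread's output and flushes it in a single write, so progress lines from concurrent workers do not interleave. It relies on the printer, log and threading names defined or imported above; the _worker name is hypothetical.

def _worker(n):
    with printer.cork():
        log("worker", n, "starting")
        log("worker", n, "done")

workers = [threading.Thread(target=_worker, args=(i,)) for i in range(3)]
for t in workers:
    t.start()
for t in workers:
    t.join()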
| 28.398734
| 77
| 0.654558
| 1,478
| 0.329396
| 654
| 0.145754
| 869
| 0.193671
| 0
| 0
| 2,099
| 0.467796
|
e3f2b53a7343d04d14b8c9e8a2dd45c0ae9f242e
| 4,715
|
py
|
Python
|
python/src/cmdline/write_struct.py
|
hgmelectronics/xcpsetup
|
646d22537f58e59c3fe324da08c4dbe0d5881efa
|
[
"BSD-2-Clause"
] | null | null | null |
python/src/cmdline/write_struct.py
|
hgmelectronics/xcpsetup
|
646d22537f58e59c3fe324da08c4dbe0d5881efa
|
[
"BSD-2-Clause"
] | null | null | null |
python/src/cmdline/write_struct.py
|
hgmelectronics/xcpsetup
|
646d22537f58e59c3fe324da08c4dbe0d5881efa
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python3
import argparse
import ctypes
import json
import sys
if not '..' in sys.path:
sys.path.append('..')
from comm import BoardTypes
from comm import CANInterface
from comm import XCPConnection
from util import plugins
from util import ctypesdict
from util import config
import argProc
plugins.loadPlugins()
config.loadSysConfigs()
parser = argparse.ArgumentParser(description="writes data to a board using a preparsed C struct to define layout in memory")
parser.add_argument('-c', nargs='*', help='Extra configuration files to load', dest='configFiles', default=[])
parser.add_argument('-d', help="CAN device URI", dest="deviceURI", default=None)
parser.add_argument('-T', help="Target device type (ibem,cda,cs2) for automatic XCP ID selection", dest="targetType", default=None)
parser.add_argument('-i', help="Target ID or range of IDs (e.g. 2, 1-3, recovery) for automatic XCP ID selection", dest="targetID", default=None)
parser.add_argument('-l', help="Location of config structure in form <segment>:<baseaddr>", default="0:0", dest="structLocation")
parser.add_argument('-s', help="Pickled structure definition", dest="structSpec")
parser.add_argument('-D', help="Dump all XCP traffic, for debugging purposes", dest="dumpTraffic", action="store_true", default=False)
parser.add_argument('-r', help='Maximum times to retry read-modify-write operation', dest='maxAttempts', type=int, default=10)
parser.add_argument('inputFile', help="Input file name (if range of IDs specified must contain a {} to be replaced with the ID)", default=None)
args = parser.parse_args()
config.loadConfigs(args.configFiles)
BoardTypes.SetupBoardTypes()
try:
boardType = BoardTypes.types[args.targetType]
except KeyError:
print('Could not find board type ' + str(args.targetType))
sys.exit(1)
try:
ConfigType = argProc.GetStructType(args.structSpec)
structSegment,structBaseaddr = argProc.GetStructLocation(args.structLocation)
except argProc.ArgError as exc:
print(str(exc))
sys.exit(1)
def OpenInFile(name, idx):
if name == None:
return sys.stdin
else:
return open(name.format(idx), 'r')
with CANInterface.MakeInterface(args.deviceURI) as interface:
targetSlaves = boardType.SlaveListFromIdxArg(args.targetID)
if len(targetSlaves) == 0:
slaves = boardType.GetSlaves(interface)
for i in range(0, len(slaves)):
print(str(i) + ': ' + slaves[i][0].description() + ', ID ' + str(slaves[i][1]))
index = int(input('Slave: '))
if index >= len(slaves):
sys.exit(1)
targetSlaves = [slaves[index]]
for targetSlave in targetSlaves:
if targetSlave[1] != None:
print('Connecting to target addr ' + targetSlave[0].description() + ', ID ' + str(targetSlave[1]))
else:
print('Connecting to target addr ' + targetSlave[0].description())
for attempt in range(1, args.maxAttempts + 1):
try:
inFile = OpenInFile(args.inputFile, targetSlave[1])
inDict = json.loads(inFile.read())
inFile.close()
conn = boardType.Connect(interface, targetSlave, args.dumpTraffic)
# Read the existing data from the board - in case the dict we have loaded does not cover the entire struct
conn.set_cal_page(structSegment, 0)
dataBuffer = conn.upload(XCPConnection.Pointer(structBaseaddr, 0), ctypes.sizeof(ConfigType))
dataStruct = ConfigType.from_buffer_copy(dataBuffer)
# Set the data in the struct from the existing one
writeDataStruct = dataStruct
# Merge in data from the loaded dictionary
ctypesdict.setfromdict(writeDataStruct, inDict)
writeDataBuffer=bytes(memoryview(writeDataStruct))
# Write the new buffer to the board
conn.download(XCPConnection.Pointer(structBaseaddr, 0), writeDataBuffer)
conn.nvwrite()
try:
conn.close()
except XCPConnection.Error:
pass # swallow any errors when closing connection due to bad target implementations - we really don't care
print('Write OK')
writeOK = True
break
except XCPConnection.Error as err:
print('Write failure (' + str(err) + '), attempt #' + str(attempt))
writeOK = False
if not writeOK:
sys.exit(1)
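An illustrative invocation of this script, with placeholder device URI, structure spec and file names (none of these concrete values come from the source; the flags themselves are the ones the argument parser above defines):

# python3 write_struct.py -d <can-device-uri> -T ibem -i 1-3 \
#     -s config_struct.pkl -l 0:0 settings_{}.json
# The {} in the input file name is replaced with each target ID, as the inputFile help text describes.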
| 42.477477
| 145
| 0.634783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,232
| 0.261294
|
e3f35c53bc5f2d93179fd278d659372e135f798d
| 2,383
|
py
|
Python
|
doc/python_api/examples/bpy.types.Depsgraph.1.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 365
|
2015-02-10T15:10:55.000Z
|
2022-03-03T15:50:51.000Z
|
doc/python_api/examples/bpy.types.Depsgraph.1.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 45
|
2015-01-09T15:34:20.000Z
|
2021-10-05T14:44:23.000Z
|
doc/python_api/examples/bpy.types.Depsgraph.1.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 172
|
2015-01-25T15:16:53.000Z
|
2022-01-31T08:25:36.000Z
|
"""
Dependency graph: Evaluated ID example
++++++++++++++++++++++++++++++++++++++
This example demonstrates access to the evaluated ID (such as object, material, etc.) state from
an original ID.
This is needed every time one needs to access state with animation, constraints, and modifiers
taken into account.
"""
import bpy
class OBJECT_OT_evaluated_example(bpy.types.Operator):
"""Access evaluated object state and do something with it"""
bl_label = "DEG Access Evaluated Object"
bl_idname = "object.evaluated_example"
def execute(self, context):
# This is an original object. Its data does not have any modifiers applied.
obj = context.object
if obj is None or obj.type != 'MESH':
self.report({'INFO'}, "No active mesh object to get info from")
return {'CANCELLED'}
# Evaluated object exists within a specific dependency graph.
# We will request evaluated object from the dependency graph which corresponds to the
# current scene and view layer.
#
# NOTE: This call ensures the dependency graph is fully evaluated. This might be expensive
# if changes were made to the scene, but is needed to ensure no dangling or incorrect
# pointers are exposed.
depsgraph = context.evaluated_depsgraph_get()
# Actually request evaluated object.
#
# This object has animation and drivers applied on it, together with constraints and
# modifiers.
#
# For mesh objects the object.data will be a mesh with all modifiers applied.
# This means that in access to vertices or faces after modifier stack happens via fields of
# object_eval.object.
#
# For other types of objects the object_eval.data does not have modifiers applied on it,
# but has animation applied.
#
# NOTE: All ID types have `evaluated_get()`, including materials, node trees, worlds.
object_eval = obj.evaluated_get(depsgraph)
mesh_eval = object_eval.data
self.report({'INFO'}, f"Number of evaluated vertices: {len(mesh_eval.vertices)}")
return {'FINISHED'}
def register():
bpy.utils.register_class(OBJECT_OT_evaluated_example)
def unregister():
bpy.utils.unregister_class(OBJECT_OT_evaluated_example)
if __name__ == "__main__":
register()
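The same evaluated-state access also works outside an operator; a minimal sketch, assuming a mesh object is active in the current context:

import bpy

depsgraph = bpy.context.evaluated_depsgraph_get()
obj = bpy.context.object
if obj is not None and obj.type == 'MESH':
    mesh_eval = obj.evaluated_get(depsgraph).data  # modifiers and animation applied
    print("evaluated vertex count:", len(mesh_eval.vertices))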
| 39.065574
| 99
| 0.675619
| 1,854
| 0.778011
| 0
| 0
| 0
| 0
| 0
| 0
| 1,557
| 0.653378
|
e3f5aaf3ddf858989f83bcba1743ef73978162e1
| 2,411
|
py
|
Python
|
upgrade-insecure-requests/support/generate.py
|
Thezone1975/wpt
|
9e201113cf36aefe07fe9c14caa47705d541e141
|
[
"BSD-3-Clause"
] | 1
|
2019-09-10T22:45:24.000Z
|
2019-09-10T22:45:24.000Z
|
upgrade-insecure-requests/support/generate.py
|
Thezone1975/wpt
|
9e201113cf36aefe07fe9c14caa47705d541e141
|
[
"BSD-3-Clause"
] | 3
|
2017-10-06T15:45:34.000Z
|
2018-09-11T12:49:57.000Z
|
upgrade-insecure-requests/support/generate.py
|
Thezone1975/wpt
|
9e201113cf36aefe07fe9c14caa47705d541e141
|
[
"BSD-3-Clause"
] | null | null | null |
# Usage: execute
# $ python support/generate.py
# at wpt/upgrade-insecure-requests/.
#
# Note: Some tests (link-upgrade.sub.https.html and
# websocket-upgrade.https.html) are not covered by this generator script.
template = '''<!DOCTYPE html>
<html>
<head>
<!-- Generated by wpt/upgrade-insecure-requests/support/generate.py -->%(additionalMeta)s
<title>Upgrade Insecure Requests: %(name)s.</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="./support/testharness-helper.sub.js"></script>
<script src="/common/security-features/resources/common.sub.js"></script>
<meta http-equiv="Content-Security-Policy" content="upgrade-insecure-requests">
</head>
<body>
<script>
const tests = %(generatorName)s(ResourceType.%(resourceType)s, %(sameOriginOnly)s);
tests.forEach(test => testMap['%(name)s'](test));
</script>
</body>
</html>
'''
def getLong(file):
testsThatNeedMoreTime = ["worker-subresource-fetch-redirect-upgrade.https.html"]
if any(file in item for item in testsThatNeedMoreTime):
return '\n<meta name="timeout" content="long">'
return ""
# resourceType is |ResourceType| in testharness-helper.sub.js.
for name, resourceType in [
('image', 'IMAGE'), ('iframe', 'FRAME'),
('animation-worklet', 'WORKLET'), ('audio-worklet', 'WORKLET'),
('layout-worklet', 'WORKLET'), ('paint-worklet', 'WORKLET'),
('worker', 'WORKER'),
('module-worker', 'WORKER'),
('worker-subresource-xhr', 'FETCH'),
('worker-subresource-fetch', 'FETCH'),
('shared-worker', 'SHARED_WORKER')]:
# TODO(https://crbug.com/989399): Add tests for subresource requests on shared
# workers, and main/subresource requests on service workers.
sameOriginOnly = 'false'
if resourceType == 'WORKER' or resourceType == 'SHARED_WORKER':
sameOriginOnly = 'true'
types = [('', 'generateTests'), ('-redirect', 'generateRedirectTests')]
if name == 'module-worker' or resourceType == 'WORKLET':
types.append(('-import', 'generateModuleImportTests'))
for typeName, generatorName in types:
filename = '%s%s-upgrade.https.html' % (name, typeName)
with open(filename, 'w') as html_file:
html_file.write(template % {
'name': name,
'additionalMeta': getLong(filename),
'resourceType': resourceType,
'generatorName': generatorName,
'sameOriginOnly': sameOriginOnly})
| 37.092308
| 89
| 0.690585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,691
| 0.701369
|
e3f5cd033fa43c92ae4a7eb4ce55f52eab4be962
| 424
|
py
|
Python
|
model_constructor/mxresnet.py
|
ayasyrev/model_constructor
|
3759a02dd9f7aa1ca3e6a4a5aefe72380886207e
|
[
"Apache-2.0"
] | 3
|
2020-08-02T09:18:27.000Z
|
2021-12-22T07:43:37.000Z
|
model_constructor/mxresnet.py
|
ayasyrev/model_constructor
|
3759a02dd9f7aa1ca3e6a4a5aefe72380886207e
|
[
"Apache-2.0"
] | 16
|
2020-11-09T11:35:13.000Z
|
2021-12-23T13:04:54.000Z
|
model_constructor/mxresnet.py
|
ayasyrev/model_constructor
|
3759a02dd9f7aa1ca3e6a4a5aefe72380886207e
|
[
"Apache-2.0"
] | 2
|
2020-04-08T20:56:48.000Z
|
2021-01-20T13:37:52.000Z
|
from functools import partial
from .activations import Mish
from .net import Net
__all__ = ['mxresnet_parameters', 'mxresnet34', 'mxresnet50']
mxresnet_parameters = {'stem_sizes': [3, 32, 64, 64], 'act_fn': Mish()}
mxresnet34 = partial(Net, name='MXResnet32', expansion=1, layers=[3, 4, 6, 3], **mxresnet_parameters)
mxresnet50 = partial(Net, name='MXResnet50', expansion=4, layers=[3, 4, 6, 3], **mxresnet_parameters)
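Since both names are functools.partial wrappers around Net, building a model is a plain call and any extra keyword arguments are forwarded to Net. The lines below are a sketch that assumes Net's remaining parameters have defaults; the c_out keyword is hypothetical and not verified against Net's signature.

model34 = mxresnet34()            # Mish activation and the [3, 32, 64, 64] stem are baked in
# model50 = mxresnet50(c_out=10)  # hypothetical pass-through keyword argument to Net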
| 32.615385
| 101
| 0.71934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.209906
|
e3f60625a8143b4d147e2f952742a97ef41ee31f
| 1,242
|
py
|
Python
|
1.undersampling.py
|
Moons08/TalkingData-Fraud-Detection
|
c88fb8b5358f6057603b7725ed2767fab47c51c6
|
[
"MIT"
] | 1
|
2019-01-18T06:20:54.000Z
|
2019-01-18T06:20:54.000Z
|
1.undersampling.py
|
Moons08/LightGBM-tutorial-Fraud_Detection
|
c88fb8b5358f6057603b7725ed2767fab47c51c6
|
[
"MIT"
] | null | null | null |
1.undersampling.py
|
Moons08/LightGBM-tutorial-Fraud_Detection
|
c88fb8b5358f6057603b7725ed2767fab47c51c6
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
from imblearn.under_sampling import RandomUnderSampler
from contextlib import contextmanager
import psutil
import time
@contextmanager
def timer_memory(name):
t0 = time.time()
yield
print(
f'Memory: {(psutil.Process(os.getpid()).memory_info().rss/2**30):.02f}GB')
print(f'{name} done in {time.time()-t0:.0f}s')
print('=====================================================')
def under_sampling():
base = pd.read_csv('./data/edited.csv', chunksize=2000000)
for idx, df in enumerate(base):
y = df['is_attributed']
X = df.drop('is_attributed', axis=1)
X0, y0 = RandomUnderSampler(random_state=34).fit_sample(X, y)
X = pd.DataFrame(data=X0, columns=X.columns)
y = pd.Series(y0, name='is_attributed')
del X0, y0
df = X.join(y)
if not os.path.isfile('./data/undersampled.csv'):
df.to_csv('./data/undersampled.csv',
header=df.columns, index=False)
else:
df.to_csv('./data/undersampled.csv',
mode='a', header=False, index=False)
print(idx, "th under sampling done!")
with timer_memory('undersampling'):
under_sampling()
| 26.425532
| 82
| 0.587762
| 0
| 0
| 266
| 0.214171
| 282
| 0.227053
| 0
| 0
| 349
| 0.280998
|
e3f69e5b14024599fb273e979ccbc45a1c411ded
| 8,652
|
py
|
Python
|
spydrnet/plugins/namespace_manager/tests/test_edif_namespace.py
|
ganeshgore/spydrnet
|
22672b8fc7d63461a71077bd20f29df6d38e96f4
|
[
"BSD-3-Clause"
] | 34
|
2020-03-12T15:40:49.000Z
|
2022-02-28T07:13:47.000Z
|
spydrnet/plugins/namespace_manager/tests/test_edif_namespace.py
|
ganeshgore/spydrnet
|
22672b8fc7d63461a71077bd20f29df6d38e96f4
|
[
"BSD-3-Clause"
] | 104
|
2020-01-06T20:32:19.000Z
|
2022-01-02T00:20:14.000Z
|
spydrnet/plugins/namespace_manager/tests/test_edif_namespace.py
|
ganeshgore/spydrnet
|
22672b8fc7d63461a71077bd20f29df6d38e96f4
|
[
"BSD-3-Clause"
] | 10
|
2020-09-02T20:24:00.000Z
|
2022-02-24T16:10:07.000Z
|
import unittest
import spydrnet as sdn
class TestEdifNamespace(unittest.TestCase):
original_default = None
@classmethod
def setUpClass(cls) -> None:
cls.original_default = sdn.namespace_manager.default
sdn.namespace_manager.default = "EDIF"
@classmethod
def tearDownClass(cls) -> None:
sdn.namespace_manager.default = cls.original_default
def gen_netlist(self):
netlist = sdn.Netlist()
return netlist
def gen_library(self):
netlist = self.gen_netlist()
lib = netlist.create_library()
return lib
def gen_definition(self):
lib = self.gen_library()
defin = lib.create_definition()
return defin
def test_basic_setup(self):
netlist = self.gen_netlist()
lib1 = netlist.create_library()
lib2 = netlist.create_library()
lib1['EDIF.identifier'] = "my_lib1"
lib2['EDIF.identifier'] = "my_lib2"
def1 = lib1.create_definition()
def1['EDIF.identifier'] = "d1"
def2 = lib2.create_definition()
def2['EDIF.identifier'] = "d1"
def3 = lib1.create_definition()
def3['EDIF.identifier'] = "my_lib1"
c1 = def1.create_cable()
p1 = def1.create_port()
i1 = def1.create_child()
c2 = def1.create_cable()
p2 = def1.create_port()
i2 = def1.create_child()
c1['EDIF.identifier'] = "&1"
i1['EDIF.identifier'] = "&1"
p1['EDIF.identifier'] = "&1"
c2['EDIF.identifier'] = "&2"
i2['EDIF.identifier'] = "&2"
p2['EDIF.identifier'] = "&2"
def test_dont_track_orphaned(self):
netlist = self.gen_netlist()
lib1 = sdn.Library()
lib2 = sdn.Library()
lib1['EDIF.identifier'] = "my_lib1"
lib2['EDIF.identifier'] = "my_lib1"
@unittest.expectedFailure
def test_duplicate_library_name(self):
netlist = self.gen_netlist()
lib1 = netlist.create_library()
lib2 = netlist.create_library()
lib1['EDIF.identifier'] = "my_lib"
lib2['EDIF.identifier'] = "my_lib"
@unittest.expectedFailure
def test_duplicate_definition_name(self):
lib1 = self.gen_library()
def1 = lib1.create_definition()
def2 = lib1.create_definition()
def1['EDIF.identifier'] = "my_lib"
def2['EDIF.identifier'] = "my_lib"
def test_duplicate_definition_elements(self):
def1 = self.gen_definition()
port = def1.create_port()
instance = def1.create_child()
cable = def1.create_cable()
port['EDIF.identifier'] = "my_lib"
instance['EDIF.identifier'] = "my_lib"
cable['EDIF.identifier'] = "my_lib"
@unittest.expectedFailure
def test_duplicate_definition_ports(self):
def1 = self.gen_definition()
port = def1.create_port()
port2 = def1.create_port()
port['EDIF.identifier'] = "my_lib"
port2['EDIF.identifier'] = "my_lib"
@unittest.expectedFailure
def test_duplicate_definition_cables(self):
def1 = self.gen_definition()
cable = def1.create_cable()
cable2 = def1.create_cable()
cable['EDIF.identifier'] = "my_lib"
cable2['EDIF.identifier'] = "my_lib"
@unittest.expectedFailure
def test_duplicate_definition_children(self):
def1 = self.gen_definition()
instance = def1.create_child()
instance2 = def1.create_child()
instance['EDIF.identifier'] = "my_lib"
instance2['EDIF.identifier'] = "my_lib"
def test_rename(self):
netlist = self.gen_netlist()
lib1 = netlist.create_library()
lib1['EDIF.identifier'] = "my_lib1"
lib1['EDIF.identifier'] = "my_lib2"
lib1['EDIF.identifier'] = "my_lib1"
lib2 = netlist.create_library()
lib2['EDIF.identifier'] = "my_lib2"
def1 = lib1.create_definition()
def1['EDIF.identifier'] = "my_lib1"
def1['EDIF.identifier'] = "my_lib2"
def1['EDIF.identifier'] = "my_lib1"
def2 = lib1.create_definition()
def2['EDIF.identifier'] = "my_lib2"
c = def1.create_cable()
c['EDIF.identifier'] = "&1"
c['EDIF.identifier'] = "&2"
c['EDIF.identifier'] = "&1"
p = def1.create_port()
p['EDIF.identifier'] = "&1"
p['EDIF.identifier'] = "&2"
p['EDIF.identifier'] = "&1"
i = def1.create_child()
i['EDIF.identifier'] = "&1"
i['EDIF.identifier'] = "&2"
i['EDIF.identifier'] = "&1"
def test_remove(self):
netlist = self.gen_netlist()
lib1 = netlist.create_library()
lib1['EDIF.identifier'] = "my_lib1"
netlist.remove_library(lib1)
lib2 = netlist.create_library()
lib2['EDIF.identifier'] = "my_lib1"
def1 = lib2.create_definition()
def1['EDIF.identifier'] = "my_lib1"
lib2.remove_definition(def1)
def2 = lib2.create_definition()
def2['EDIF.identifier'] = "my_lib1"
c1 = def2.create_cable()
c2 = def2.create_cable()
p1 = def2.create_port()
p2 = def2.create_port()
i1 = def2.create_child()
i2 = def2.create_child()
c1['EDIF.identifier'] = "&1"
def2.remove_cable(c1)
c2['EDIF.identifier'] = "&1"
p1['EDIF.identifier'] = "&1"
def2.remove_port(p1)
p2['EDIF.identifier'] = "&1"
i1['EDIF.identifier'] = "&1"
def2.remove_child(i1)
i2['EDIF.identifier'] = "&1"
def test_orphaned_add(self):
netlist = self.gen_netlist()
lib1 = sdn.Library()
lib1["EDIF.identifier"] = '&1'
netlist.add_library(lib1)
@unittest.expectedFailure
def test_orphaned_add_collision(self):
netlist = self.gen_netlist()
lib1 = sdn.Library()
lib1["EDIF.identifier"] = '&1'
netlist.add_library(lib1)
lib2 = sdn.Library()
lib2["EDIF.identifier"] = '&1'
netlist.add_library(lib2)
def test_remove_twice_library(self):
netlist = self.gen_netlist()
lib1 = netlist.create_library()
lib1['EDIF.identifier'] = "my_lib1"
netlist.remove_library(lib1)
self.assertRaises(Exception, netlist.remove_library, lib1)
def test_remove_twice_definition(self):
lib = self.gen_library()
d1 = lib.create_definition()
d1['EDIF.identifier'] = "&1"
lib.remove_definition(d1)
self.assertRaises(Exception, lib.remove_definition, d1)
def test_remove_untracked(self):
netlist = self.gen_netlist()
lib1 = netlist.create_library()
def1 = lib1.create_definition()
c1 = def1.create_cable()
p1 = def1.create_port()
i1 = def1.create_child()
def1.remove_cable(c1)
def1.remove_child(i1)
def1.remove_port(p1)
lib1.remove_definition(def1)
netlist.remove_library(lib1)
def test_remove_tracked(self):
netlist = self.gen_netlist()
lib1 = netlist.create_library()
lib1["EDIF.identifier"] = "test"
def1 = lib1.create_definition()
def1["EDIF.identifier"] = "test"
c1 = def1.create_cable()
c1["EDIF.identifier"] = "test"
p1 = def1.create_port()
p1["EDIF.identifier"] = "test"
i1 = def1.create_child()
i1["EDIF.identifier"] = "test"
def1.remove_cable(c1)
def1.remove_child(i1)
def1.remove_port(p1)
lib1.remove_definition(def1)
netlist.remove_library(lib1)
def test_pop_name(self):
netlist = self.gen_netlist()
lib1 = netlist.create_library()
lib1['EDIF.identifier'] = "my_lib1"
lib1.pop('EDIF.identifier')
lib2 = netlist.create_library()
lib2['EDIF.identifier'] = "my_lib1"
def1 = lib2.create_definition()
def1['EDIF.identifier'] = "my_lib1"
def1.pop('EDIF.identifier')
def2 = lib2.create_definition()
def2['EDIF.identifier'] = "my_lib1"
c1 = def2.create_cable()
c2 = def2.create_cable()
p1 = def2.create_port()
p2 = def2.create_port()
i1 = def2.create_child()
i2 = def2.create_child()
c1['EDIF.identifier'] = "&1"
c1.pop('EDIF.identifier')
c2['EDIF.identifier'] = "&1"
p1['EDIF.identifier'] = "&1"
p1.pop('EDIF.identifier')
p2['EDIF.identifier'] = "&1"
i1['EDIF.identifier'] = "&1"
i1.pop('EDIF.identifier')
i2['EDIF.identifier'] = "&1"
# TODO: rename an object
# TODO: orphan an object and see what happens
| 33.66537
| 66
| 0.592811
| 8,610
| 0.995146
| 0
| 0
| 1,936
| 0.223763
| 0
| 0
| 1,865
| 0.215557
|
e3f744a34f5cd637c13b66b21c7bdf2144d67708
| 3,344
|
py
|
Python
|
tf_agents/benchmark/distribution_strategy_utils.py
|
FlorisHoogenboom/agents
|
2cd5a61e1838b52012271f1fb8617c29a55279a9
|
[
"Apache-2.0"
] | 16
|
2020-09-23T06:21:49.000Z
|
2022-03-28T05:45:04.000Z
|
tf_agents/benchmark/distribution_strategy_utils.py
|
FlorisHoogenboom/agents
|
2cd5a61e1838b52012271f1fb8617c29a55279a9
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/benchmark/distribution_strategy_utils.py
|
FlorisHoogenboom/agents
|
2cd5a61e1838b52012271f1fb8617c29a55279a9
|
[
"Apache-2.0"
] | 6
|
2020-10-09T06:33:23.000Z
|
2022-02-03T16:16:36.000Z
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Helper functions for running models in a distributed setting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
def get_distribution_strategy(distribution_strategy="default",
num_gpus=0,
num_packs=-1):
"""Return a DistributionStrategy for running the model.
Args:
distribution_strategy: a string specifying which distribution strategy to
use. Accepted values are 'off', 'default', 'one_device', and 'mirrored'
case insensitive. 'off' means not to use Distribution Strategy; 'default'
      means to choose from `MirroredStrategy` or `OneDeviceStrategy` according to
the number of GPUs.
num_gpus: Number of GPUs to run this model.
num_packs: Optional. Sets the `num_packs` in `tf.distribute.NcclAllReduce`.
Returns:
    tf.distribute.DistributionStrategy object.
Raises:
ValueError: if `distribution_strategy` is 'off' or 'one_device' and
`num_gpus` is larger than 1; or `num_gpus` is negative.
"""
if num_gpus < 0:
raise ValueError("`num_gpus` can not be negative.")
distribution_strategy = distribution_strategy.lower()
if distribution_strategy == "off":
if num_gpus > 1:
raise ValueError("When {} GPUs are specified, distribution_strategy "
"cannot be set to 'off'.".format(num_gpus))
return None
if (distribution_strategy == "one_device" or
(distribution_strategy == "default" and num_gpus <= 1)):
if num_gpus == 0:
return tf.distribute.OneDeviceStrategy("device:CPU:0")
else:
if num_gpus > 1:
raise ValueError("`OneDeviceStrategy` can not be used for more than "
"one device.")
return tf.distribute.OneDeviceStrategy("device:GPU:0")
if distribution_strategy in ("mirrored", "default"):
if num_gpus == 0:
assert distribution_strategy == "mirrored"
devices = ["device:CPU:0"]
else:
devices = ["device:GPU:%d" % i for i in range(num_gpus)]
cross_device_ops = None
if num_packs > -1:
cross_device_ops = tf.distribute.NcclAllReduce(num_packs=num_packs)
return tf.distribute.MirroredStrategy(devices=devices,
cross_device_ops=cross_device_ops)
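# Illustrative usage sketch (assumes a hypothetical build_model() factory of your own):
#
#   strategy = get_distribution_strategy("mirrored", num_gpus=2)
#   with strategy_scope_context(strategy):
#       model = build_model()
#
# Passing "off" yields None, which strategy_scope_context() below wraps in a no-op
# DummyContextManager, so callers do not need to special-case it.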
def strategy_scope_context(strategy):
if strategy:
strategy_scope = strategy.scope()
else:
strategy_scope = DummyContextManager()
return strategy_scope
class DummyContextManager(object):
def __enter__(self):
pass
def __exit__(self, *args):
pass
| 34.474227
| 80
| 0.696172
| 106
| 0.031699
| 0
| 0
| 0
| 0
| 0
| 0
| 1,775
| 0.530801
|
e3f85ec084254dfe08068ef5fd90d188baae09d8
| 72
|
py
|
Python
|
barbarism.py
|
Matimed/Barbarism
|
4892092f24f314bc6cfacc1c780436dc59fc90ac
|
[
"MIT"
] | 2
|
2021-09-09T14:03:40.000Z
|
2021-11-03T03:35:55.000Z
|
barbarism.py
|
Matimed/Barbarism
|
4892092f24f314bc6cfacc1c780436dc59fc90ac
|
[
"MIT"
] | null | null | null |
barbarism.py
|
Matimed/Barbarism
|
4892092f24f314bc6cfacc1c780436dc59fc90ac
|
[
"MIT"
] | null | null | null |
import pygame as pg
pg.init()
from src.main import Main
main = Main()
| 10.285714
| 25
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e3f8f7b3257c5bd12d8d3490e725fe8a7a51ecb9
| 388
|
py
|
Python
|
frappe/patches/v7_0/desktop_icons_hidden_by_admin_as_blocked.py
|
anandpdoshi/frappe
|
b3546f1ebcac3480eccf5d19371ca534e7ce79bb
|
[
"MIT"
] | null | null | null |
frappe/patches/v7_0/desktop_icons_hidden_by_admin_as_blocked.py
|
anandpdoshi/frappe
|
b3546f1ebcac3480eccf5d19371ca534e7ce79bb
|
[
"MIT"
] | null | null | null |
frappe/patches/v7_0/desktop_icons_hidden_by_admin_as_blocked.py
|
anandpdoshi/frappe
|
b3546f1ebcac3480eccf5d19371ca534e7ce79bb
|
[
"MIT"
] | 5
|
2016-06-20T08:48:11.000Z
|
2018-12-12T09:42:31.000Z
|
import frappe
def execute():
# all icons hidden in standard are "blocked"
# this is for the use case where the admin wants to remove icon for everyone
# in 7.0, icons may be hidden by default, but still can be shown to the user
# e.g. Accounts, Stock etc, so we need a new property for blocked
frappe.db.sql('update `tabDesktop Icon` set blocked = 1 where standard=1 and hidden=1')
| 43.111111
| 88
| 0.737113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 333
| 0.858247
|
e3f9d1e7fbd73db26f8548fce222535435572494
| 3,985
|
py
|
Python
|
gen_mirror_json.py
|
Ashwin4RC/api
|
e6fc38b5ef8510ab4a11cb492fe49b9ed2cbcc58
|
[
"Apache-2.0"
] | null | null | null |
gen_mirror_json.py
|
Ashwin4RC/api
|
e6fc38b5ef8510ab4a11cb492fe49b9ed2cbcc58
|
[
"Apache-2.0"
] | null | null | null |
gen_mirror_json.py
|
Ashwin4RC/api
|
e6fc38b5ef8510ab4a11cb492fe49b9ed2cbcc58
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# pylint: disable=missing-docstring,invalid-name,broad-except,cell-var-from-loop
import hashlib
import json
import os
import sys
from utils import get_date_from_zip, get_metadata_from_zip
ALLOWED_BUILDTYPES = ["Alpha", "Beta", "Official", "Gapps"]
ALLOWED_VERSIONS = ["9.0", "10"]
FILE_BASE: str = os.getenv("FILE_BASE", "/mnt/builds")
DEBUG = False
builds: dict = {}
zips: dict = {}
for file in [
os.path.join(dp, file) for dp, dn, fn in os.walk(FILE_BASE) for file in fn
]:
try:
if file.split(".")[-1] != "zip":
continue
zip_name = file.replace(FILE_BASE, "")
if zip_name.split(".")[0].split("-")[-1] == "img":
continue
version, buildtype, device, builddate = get_metadata_from_zip(zip_name)
if buildtype not in ALLOWED_BUILDTYPES:
if DEBUG:
print(
f"{zip_name} has a buildtype of {buildtype}, which is not allowed!",
file=sys.stderr,
)
continue
if version not in ALLOWED_VERSIONS:
if DEBUG:
print(
f"{zip_name} has a version of {version}, which is not allowed!",
file=sys.stderr,
)
continue
        if device in zips:
            if buildtype in zips[device]:
                # keep only the newest zip for each (device, buildtype) pair
                if builddate > get_date_from_zip(zips[device][buildtype]):
                    zips[device][buildtype] = zip_name
            else:
                zips[device][buildtype] = zip_name
        else:
            zips[device] = {}
            zips[device][buildtype] = zip_name
    except Exception:
        continue
for key, value in zips.items():
for device in value:
file = zips[key][device]
try:
filename = file.split("/")[-1]
if file[0] == "/":
file = file[1:]
file = os.path.join(FILE_BASE, file)
img_file = os.path.isfile(file.replace('.zip', '-img.zip'))
boot_img = os.path.isfile(file.replace('.zip', '-boot.img'))
sha256_file = file.replace(".zip", ".sha256")
version, buildtype, device, builddate = get_metadata_from_zip(file)
if os.path.isfile(sha256_file):
if DEBUG:
print(
f"SHA256 for {filename} already exists, skipping!",
file=sys.stderr,
)
else:
print(f"Hashing SHA256 for {filename}!", file=sys.stderr)
sha256 = hashlib.sha256()
with open(file, "rb") as f:
for buf in iter(lambda: f.read(128 * 1024), b""):
sha256.update(buf)
f = open(sha256_file, "w")
f.write(sha256.hexdigest())
f.close()
f = open(sha256_file, "r")
zip_sha256 = f.read()
f.close()
builds.setdefault(device, []).append(
{
"sha256": zip_sha256,
"size": os.path.getsize(file),
"date": "{}-{}-{}".format(
builddate[0:4], builddate[4:6], builddate[6:8]
),
"filename": filename,
"filepath": file.replace(filename, "").replace(FILE_BASE, ""),
"version": version,
"type": buildtype.lower(),
"fastboot_images": img_file,
"boot_image": boot_img,
}
)
except IndexError:
continue
# pylint: disable=consider-iterating-dictionary
for device in builds.keys():
builds[device] = sorted(builds[device], key=lambda x: x["date"])
print(json.dumps(builds, sort_keys=True, indent=4))
| 36.227273
| 88
| 0.496863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 617
| 0.154831
|
e3f9dc11cb81a8cb80e6cd940f8a035848122990
| 431
|
py
|
Python
|
button/take_screen.py
|
PitPietro/gpiozero-pyqt5
|
0384d34348841d193c025a1909d909d1bf772a7d
|
[
"MIT"
] | null | null | null |
button/take_screen.py
|
PitPietro/gpiozero-pyqt5
|
0384d34348841d193c025a1909d909d1bf772a7d
|
[
"MIT"
] | null | null | null |
button/take_screen.py
|
PitPietro/gpiozero-pyqt5
|
0384d34348841d193c025a1909d909d1bf772a7d
|
[
"MIT"
] | null | null | null |
import os
# from signal import pause
from gpiozero import Button
from datetime import datetime
def take_screen():
screen_btn = Button(2)
while True:
if screen_btn.is_pressed:
timestamp = datetime.now()
cmd = "scrot -u d 5 $n {}.png".format('screen_' + str(timestamp))
os.system(cmd)
#screen_btn.when_pressed=os.system(cmd)
#pause()
take_screen()
| 21.55
| 77
| 0.605568
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 106
| 0.24594
|
e3fb07a9be04e9aa4d5249fcb856df6a2aede22a
| 1,435
|
py
|
Python
|
year2020/day21.py
|
3schwartz/AdventOfCode
|
32f259c4e20c3c4834718411f1053b6a11f71c86
|
[
"MIT"
] | null | null | null |
year2020/day21.py
|
3schwartz/AdventOfCode
|
32f259c4e20c3c4834718411f1053b6a11f71c86
|
[
"MIT"
] | null | null | null |
year2020/day21.py
|
3schwartz/AdventOfCode
|
32f259c4e20c3c4834718411f1053b6a11f71c86
|
[
"MIT"
] | null | null | null |
import common
lines = common.get_lines('day21_data.txt')
food_dict = {}
all_ingredients = []
for line in lines:
ingredients, allergens = line.split(' (contains ')
allergens = allergens[:-1].split(', ')
ingredients = ingredients.split(' ')
all_ingredients.extend(ingredients)
for allergen in allergens:
if food_dict.get(allergen) is None:
food_dict[allergen] = set(ingredients)
else:
food_dict[allergen] = food_dict[allergen].intersection(set(ingredients))
safe_ingredients = set(all_ingredients) \
.difference(set(ingredient
for value in food_dict.values()
for ingredient in value))
print(f"Part 1: {sum(ingredient in safe_ingredients for ingredient in all_ingredients)}")
while any(len(ingredients) > 1 for ingredients in food_dict.values()):
for allergen, ingredients in food_dict.items():
if len(ingredients) == 1:
for inner_allergen, inner_ingredients in food_dict.items():
if allergen == inner_allergen:
continue
ingredient = next(iter(ingredients))
if ingredient in inner_ingredients:
food_dict[inner_allergen].remove(ingredient)
names = list(food_dict.keys())
names.sort()
print(f"Part 2: {','.join(next(iter(food_dict[allergen])) for allergen in names)}")
| 32.613636
| 90
| 0.627875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 194
| 0.135192
|
e3fb126e341fe57625eff17359d622708faa18e2
| 4,279
|
py
|
Python
|
src/model_evaluation.py
|
Littleote/Analisis_de_contrasenyes
|
3837153e82b9da0c6f8ed1c372103944f3acaca6
|
[
"MIT"
] | null | null | null |
src/model_evaluation.py
|
Littleote/Analisis_de_contrasenyes
|
3837153e82b9da0c6f8ed1c372103944f3acaca6
|
[
"MIT"
] | null | null | null |
src/model_evaluation.py
|
Littleote/Analisis_de_contrasenyes
|
3837153e82b9da0c6f8ed1c372103944f3acaca6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: david
"""
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import PrecisionRecallDisplay, RocCurveDisplay
class ModelEvaluation:
def evaluate(pipe, dades, objectiu, name, **evaluacio):
x = dades
y = objectiu
w = np.zeros(len(y))
pred = np.zeros(len(y))
classes = np.sort(np.unique(y))
for c in classes:
w[y==c] = 1 / sum(y==c)
kFolds = evaluacio.get('kFold', 5)
use_weights = evaluacio.get('class_weighted', True)
kf = KFold(n_splits=kFolds)
for ind_train, ind_test in kf.split(y):
x_t, y_t, w_t = x[ind_train], y[ind_train], w[ind_train]
x_cv = x[ind_test]
if use_weights:
pipe.fit(x_t, y_t, model__sample_weight=w_t)
else:
pipe.fit(x_t, y_t)
pred[ind_test] = pipe.predict(x_cv)
        # pred = pipe.predict(dades)  # would overwrite the out-of-fold predictions gathered above
plots = evaluacio.get('plot', [])
if not type(plots) == list:
plots = [plots]
for plot in plots:
if plot == 'confusion':
cm = confusion_matrix(y, pred)
plt.subplots(figsize=(10, 6))
sns.heatmap(cm, annot = True, fmt = 'g')
plt.xlabel("Predit")
plt.ylabel("Real")
plt.title(f"Matriu de Confusió pel model {name}")
plt.show()
elif plot == 'percentage':
cm = confusion_matrix(y, pred, sample_weight=w)
plt.subplots(figsize=(10, 6))
sns.heatmap(cm, annot = True, fmt = 'g')
plt.xlabel("Predit")
plt.ylabel("Real")
plt.title(f"Matriu dels percentatges pel model {name}")
plt.show()
elif plot == 'AUC':
plt.figure(figsize=(15,10))
ax = plt.gca()
for c in classes:
yi = np.copy(y)
yi[yi!=c] = -1
yi[yi==c] = 1
predi = np.copy(pred)
predi[predi!=c] = -1
predi[predi==c] = 1
PrecisionRecallDisplay.from_predictions(yi, predi, sample_weight=w,\
ax=ax, name=f'Precision-recall curve of class {c}')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend(loc="lower left")
plt.title('Precision-Recall Curve')
plt.show()
elif plot == 'ROC':
plt.figure(figsize=(15,10))
ax = plt.gca()
for c in classes:
yi = np.copy(y)
yi[yi!=c] = -1
yi[yi==c] = 1
predi = np.copy(pred)
predi[predi!=c] = -1
predi[predi==c] = 1
RocCurveDisplay.from_predictions(yi, predi, sample_weight=w,\
ax=ax, name=f'ROC curve of class {c}')
plt.xlabel('False Positive')
plt.ylabel('True Positive')
plt.legend(loc="lower right")
plt.title('ROC Curve')
plt.show()
else:
print(f'Plot for {plot} not implemented.')
scores = evaluacio.get('score', [])
        if not type(scores) == list:
scores = [scores]
for score in scores:
if score == 'all':
print(classification_report(y, pred))
elif score == 'accuracy':
print(f'Accuracy = {sum(y==pred) / len(y)} : {sum(y==pred)}/{len(y)}')
print(f'Macro accuracy = {sum([sum(c==pred[y==c]) / sum(y==c) for c in classes]) / len(classes)}')
elif score == 'class accuracy':
for c in classes:
ind = y==c
print(f'Accuracy of class {c} = {sum(c==pred[ind]) / sum(ind)} : {sum(c==pred[ind])}/{sum(ind)}')
else:
print(f'Score for {score} not implemented.')
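# Illustrative usage sketch (assumption: `pipe` is an sklearn Pipeline whose final step
# is registered under the name "model", which the `model__sample_weight` fit parameter
# above relies on):
#
#   ModelEvaluation.evaluate(pipe, X, y, "baseline",
#                            kFold=5, class_weighted=True,
#                            plot=['confusion', 'ROC'], score=['accuracy'])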
| 40.752381
| 117
| 0.475111
| 3,980
| 0.929907
| 0
| 0
| 0
| 0
| 0
| 0
| 751
| 0.175467
|
e3fb3094156efbfadeca185946c48f3c4d800789
| 1,632
|
py
|
Python
|
setup.py
|
zhuzhenping/hf_at_py
|
edbbefc7dd1d476ed7fd62ad9635888cfc5fcb44
|
[
"Apache-2.0"
] | 130
|
2017-03-10T02:01:38.000Z
|
2021-01-10T03:55:30.000Z
|
setup.py
|
zhuzhenping/hf_at_py
|
edbbefc7dd1d476ed7fd62ad9635888cfc5fcb44
|
[
"Apache-2.0"
] | 3
|
2018-11-30T00:07:50.000Z
|
2020-12-01T13:01:13.000Z
|
setup.py
|
zhuzhenping/hf_at_py
|
edbbefc7dd1d476ed7fd62ad9635888cfc5fcb44
|
[
"Apache-2.0"
] | 69
|
2017-04-01T13:57:21.000Z
|
2020-10-07T11:29:45.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/20 8:15
# @Author : HaiFeng
# @Email : 24918700@qq.com
from setuptools import setup
import os
this_directory = os.path.abspath(os.path.dirname(__file__))
# Read the contents of a text file
def read_file(filename):
with open(os.path.join(this_directory, filename), encoding='utf-8') as f:
desc = f.read()
return desc
# Collect the dependency list from requirements.txt
def read_requirements(filename):
return [line.strip() for line in read_file(filename).splitlines()
if not line.startswith('#')]
long_description = read_file('readme.md')
long_description_content_type = 'text/markdown'  # the package documentation is written in Markdown
# talib is deliberately not listed; os.system('pipreqs . --encoding=utf8 --force')  # regenerates requirements.txt
setup(
    name='hfpy',  # package name
    python_requires='>=3.6.0',  # required Python version
    version='0.2.2',  # package version
    description="Hai Feng Future Trading Platform with SE",  # short description shown on PyPI
    long_description=long_description,  # contents of the README
    long_description_content_type=long_description_content_type,  # long description format is Markdown
    author="HaiFeng",  # author information
    author_email='haifengat@vip.qq.com',
    url='https://github.com/haifengat/hf_at_py',
    # package list; find_packages() could be used instead
    # packages=find_packages(),
    packages=['hfpy'],
    install_requires=read_requirements('requirements.txt'),  # dependencies to install
include_package_data=True,
license="MIT License",
platforms="any",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
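# Usage note: with this file in place the package can be installed with `pip install .`
# from the repository root, or packaged with `python setup.py sdist`.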
| 29.142857
| 84
| 0.674632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 902
| 0.502787
|
e3fbc1eef6b6ab6c9a1ff3c81478fc53b610ea00
| 6,478
|
py
|
Python
|
web_console_v2/api/fedlearner_webconsole/scheduler/transaction.py
|
chen1i/fedlearner
|
981514dadbd0aa49ae87d185dd247d310e35605c
|
[
"Apache-2.0"
] | 5
|
2020-04-14T06:37:45.000Z
|
2021-04-26T15:58:01.000Z
|
web_console_v2/api/fedlearner_webconsole/scheduler/transaction.py
|
chen1i/fedlearner
|
981514dadbd0aa49ae87d185dd247d310e35605c
|
[
"Apache-2.0"
] | 1
|
2020-04-27T03:01:27.000Z
|
2020-04-27T03:01:27.000Z
|
web_console_v2/api/fedlearner_webconsole/scheduler/transaction.py
|
chen1i/fedlearner
|
981514dadbd0aa49ae87d185dd247d310e35605c
|
[
"Apache-2.0"
] | 13
|
2020-02-20T05:56:52.000Z
|
2020-06-08T07:11:25.000Z
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from fedlearner_webconsole.db import db
from fedlearner_webconsole.rpc.client import RpcClient
from fedlearner_webconsole.workflow.models import (
Workflow, WorkflowState, TransactionState, VALID_TRANSITIONS
)
from fedlearner_webconsole.proto import common_pb2
class TransactionManager(object):
def __init__(self, workflow_id):
self._workflow_id = workflow_id
self._workflow = Workflow.query.get(workflow_id)
assert self._workflow is not None
self._project = self._workflow.project
assert self._project is not None
@property
def workflow(self):
return self._workflow
@property
def project(self):
return self._project
def process(self):
# reload workflow and resolve -ing states
self._workflow.update_state(
self._workflow.state, self._workflow.target_state,
self._workflow.transaction_state)
self._reload()
if not self._recover_from_abort():
return self._workflow
if self._workflow.target_state == WorkflowState.INVALID:
return self._workflow
if self._workflow.state == WorkflowState.INVALID:
raise RuntimeError(
"Cannot process invalid workflow %s"%self._workflow.name)
assert (self._workflow.state, self._workflow.target_state) \
in VALID_TRANSITIONS
if self._workflow.transaction_state == TransactionState.READY:
# prepare self as coordinator
self._workflow.update_state(
self._workflow.state,
self._workflow.target_state,
TransactionState.COORDINATOR_PREPARE)
self._reload()
if self._workflow.transaction_state == \
TransactionState.COORDINATOR_COMMITTABLE:
# prepare self succeeded. Tell participants to prepare
states = self._broadcast_state(
self._workflow.state, self._workflow.target_state,
TransactionState.PARTICIPANT_PREPARE)
committable = True
for state in states:
if state != TransactionState.PARTICIPANT_COMMITTABLE:
committable = False
if state == TransactionState.ABORTED:
# abort as coordinator if some participants aborted
self._workflow.update_state(
None, None, TransactionState.COORDINATOR_ABORTING)
self._reload()
break
# commit as coordinator if participants all committable
if committable:
self._workflow.update_state(
None, None, TransactionState.COORDINATOR_COMMITTING)
self._reload()
if self._workflow.transaction_state == \
TransactionState.COORDINATOR_COMMITTING:
# committing as coordinator. tell participants to commit
if self._broadcast_state_and_check(
self._workflow.state, self._workflow.target_state,
TransactionState.PARTICIPANT_COMMITTING,
TransactionState.READY):
# all participants committed. finish.
self._workflow.commit()
self._reload()
self._recover_from_abort()
return self._workflow
def _reload(self):
db.session.commit()
db.session.refresh(self._workflow)
def _broadcast_state(
self, state, target_state, transaction_state):
project_config = self._project.get_config()
states = []
for party in project_config.participants:
client = RpcClient(project_config, party)
forked_from_uuid = Workflow.query.filter_by(
id=self._workflow.forked_from
).first().uuid if self._workflow.forked_from else None
resp = client.update_workflow_state(
self._workflow.name, state, target_state, transaction_state,
self._workflow.uuid,
forked_from_uuid)
if resp.status.code == common_pb2.STATUS_SUCCESS:
if resp.state == WorkflowState.INVALID:
self._workflow.invalidate()
self._reload()
raise RuntimeError("Peer workflow invalidated. Abort.")
states.append(TransactionState(resp.transaction_state))
else:
states.append(None)
return states
def _broadcast_state_and_check(self,
state, target_state, transaction_state, target_transaction_state):
states = self._broadcast_state(state, target_state, transaction_state)
for i in states:
if i != target_transaction_state:
return False
return True
def _recover_from_abort(self):
if self._workflow.transaction_state == \
TransactionState.COORDINATOR_ABORTING:
if not self._broadcast_state_and_check(
self._workflow.state, WorkflowState.INVALID,
TransactionState.PARTICIPANT_ABORTING,
TransactionState.ABORTED):
return False
self._workflow.update_state(
None, WorkflowState.INVALID, TransactionState.ABORTED)
self._reload()
if self._workflow.transaction_state != TransactionState.ABORTED:
return True
assert self._workflow.target_state == WorkflowState.INVALID
if not self._broadcast_state_and_check(
self._workflow.state, WorkflowState.INVALID,
TransactionState.READY, TransactionState.READY):
return False
self._workflow.update_state(None, None, TransactionState.READY)
self._reload()
return True
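# Illustrative usage sketch (assumption: `workflow_id` is the primary key of an existing
# Workflow row; the scheduler is expected to drive one manager per workflow):
#
#   tm = TransactionManager(workflow_id)
#   workflow = tm.process()  # advance the prepare/commit state machine by one step
#
# Each process() call broadcasts the local transaction state to all participants and
# either commits, keeps waiting for committable peers, or aborts and recovers.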
| 39.742331
| 78
| 0.636153
| 5,585
| 0.862149
| 0
| 0
| 124
| 0.019142
| 0
| 0
| 1,004
| 0.154986
|
e3fd5d581d1b57f36ef591f8271741509e6dd229
| 4,636
|
py
|
Python
|
src/openeo_grass_gis_driver/models/schema_base.py
|
marcjansen/openeo-grassgis-driver
|
57b309819fdc456fba02cd1ab8fe6731ddfbb66a
|
[
"Apache-2.0"
] | 7
|
2018-03-16T17:26:14.000Z
|
2022-03-09T08:19:10.000Z
|
src/openeo_grass_gis_driver/models/schema_base.py
|
marcjansen/openeo-grassgis-driver
|
57b309819fdc456fba02cd1ab8fe6731ddfbb66a
|
[
"Apache-2.0"
] | 70
|
2018-03-09T11:28:12.000Z
|
2022-02-17T09:06:17.000Z
|
src/openeo_grass_gis_driver/models/schema_base.py
|
marcjansen/openeo-grassgis-driver
|
57b309819fdc456fba02cd1ab8fe6731ddfbb66a
|
[
"Apache-2.0"
] | 13
|
2018-03-12T09:58:24.000Z
|
2022-02-23T10:40:11.000Z
|
# -*- coding: utf-8 -*-
import json
from typing import List, Optional
from flask import make_response
__author__ = "Sören Gebbert"
__copyright__ = "Copyright 2018, Sören Gebbert, mundialis"
__maintainer__ = "Sören Gebbert"
__email__ = "soerengebbert@googlemail.com"
def as_dict_without_nones(o):
d = o.__dict__
r = dict()
for key in d:
if d[key] is None:
continue
# allow nullable but required keys
value = d[key]
if value == "json:null":
value = None
elif value == "json:true":
value = True
elif value == "json:false":
value = False
# ___ is a placeholder for : as in eo:bands
r[key.replace("___", ":")] = value
return r
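# Worked example of the conventions above (hypothetical attribute values):
#   o.eo___bands = ["B01"]   ->  emitted as {"eo:bands": ["B01"]}
#   o.nodata = "json:null"   ->  emitted as {"nodata": None}, i.e. JSON null
#   o.title = None           ->  key omitted from the result entirely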
class JsonableObject:
"""This class is the base class for all openEO responses that serialises
the response classes into JSON"""
def to_json(self):
return json.dumps(
self,
default=lambda o: as_dict_without_nones(o),
sort_keys=False,
indent=2)
def as_response(self, http_status):
response = make_response(self.to_json(), http_status)
response.headers['Content-Type'] = 'application/json'
return response
class Link(JsonableObject):
"""A link to another resource on the web. Bases on RFC5899 and SHOULD
follow registered link relation types whenever feasible.
rel:
string
href:
required
string <url>
The value MUST be a dereferenceable URL.
type:
string
The value MUST be a string that hints at the format used to represent
data at the provided URI, preferably a media (MIME) type.
title:
string
Used as a human-readable label for a link.
"""
def __init__(self, href: str, title: Optional[str] = None,
rel: Optional[str] = None, type_: Optional[str] = None):
self.href = href
self.title = title
self.rel = rel
self.type = type_
class EoLink(JsonableObject):
"""link related to this collection.
rel:
string
href:
required
string <url>
The value MUST be a dereferenceable URL.
type:
string
The value MUST be a string that hints at the format used to represent data at
the provided URI, preferably a media (MIME) type.
title:
string
Used as a human-readable label for a link.
"""
def __init__(self, href: str, title: Optional[str] = None,
rel: Optional[str] = None, type_: Optional[str] = None):
self.href = href
self.title = title
self.rel = rel
self.type = type_
class EoLinks(JsonableObject):
"""Additional links related to this collection.
Could reference to other meta data formats
with additional information or a preview image.
links: A list of EoLink's
"""
def __init__(self, links: List[EoLink]):
self.links = links
class UDFLinks(JsonableObject):
"""Related links, e.g. additional external documentation for this runtime.
array of (link)
"""
def __init__(self, links: List[Link]):
self.links = links
class ListLinks(JsonableObject):
"""Additional links related to this list of resources.
Could reference to alternative formats such as a
rendered HTML version. The links could also be used for
pagination using the [rel types]
(https://www.iana.org/assignments/link-relations/link-relations.xhtml)
`first`, `prev`, `next` and `last`. Pagination is
currently OPTIONAL and clients may not support it.
Therefore it MUST be implemented in a way that clients
not supporting pagination get all resources regardless.
links: A list of EoLink's
"""
def __init__(self, links: List[EoLink]):
self.links = links
class File(JsonableObject):
""" Workspace File
path:
string
Path of the file, relative to the user's root directory. MUST NOT
start with a slash and MUST NOT be url-encoded.
example: "folder/file.txt"
size:
integer
File size in bytes.
example: 1024
modified:
string (date-time)
Date and time the file has lastly been modified, formatted as
a RFC 3339 date-time.
example: "2018-01-03T10:55:29Z"
"""
def __init__(
self,
path: str = None,
size: int = None,
modified: str = None):
self.path = path
self.size = size
self.modified = modified
| 24.659574
| 85
| 0.613891
| 3,857
| 0.831429
| 0
| 0
| 0
| 0
| 0
| 0
| 2,629
| 0.566717
|
e3fdd8b8cbc3926690972bd648e3656a84878e8f
| 1,457
|
py
|
Python
|
plugins/maya/inventory/action_update_namespace.py
|
davidlatwe/reveries-config
|
4a282dd64a32a9b87bd1a070759b6425ff785d68
|
[
"MIT"
] | 3
|
2020-04-01T10:51:17.000Z
|
2021-08-05T18:35:23.000Z
|
plugins/maya/inventory/action_update_namespace.py
|
davidlatwe/reveries-config
|
4a282dd64a32a9b87bd1a070759b6425ff785d68
|
[
"MIT"
] | null | null | null |
plugins/maya/inventory/action_update_namespace.py
|
davidlatwe/reveries-config
|
4a282dd64a32a9b87bd1a070759b6425ff785d68
|
[
"MIT"
] | 1
|
2020-07-05T12:06:30.000Z
|
2020-07-05T12:06:30.000Z
|
import avalon.api
class UpdateNamespace(avalon.api.InventoryAction):
"""Update container imprinted namespace
    Sometimes an artist may import loaded subsets from another scene, which
    can prefix an extra namespace on top of those subsets. The namespace
    attribute stored in the container is not updated in that case, so actions
    such as version updating run into errors.
    This action looks up the subset group node's namespace and updates the
    container if the recorded namespace is not consistent with it.
"""
label = "Namespace Dirty"
icon = "wrench"
color = "#F13A3A"
order = -101
@staticmethod
def is_compatible(container):
from reveries.maya import lib
if not ("subsetGroup" in container and container["subsetGroup"]):
return False
if container["loader"] in ["USDSetdressLoader", "USDLayoutLoader"]:
return False
namespace = lib.get_ns(container["subsetGroup"])
return container["namespace"] != namespace
def process(self, containers):
from maya import cmds
from avalon.tools import sceneinventory
from reveries.maya import lib
for container in containers:
namespace = lib.get_ns(container["subsetGroup"])
con_node = container["objectName"]
cmds.setAttr(con_node + ".namespace", namespace, type="string")
container["namespace"] = namespace
sceneinventory.app.window.refresh()
| 29.734694
| 75
| 0.671929
| 1,435
| 0.9849
| 0
| 0
| 396
| 0.271791
| 0
| 0
| 605
| 0.415237
|
e3fff047d0d4657b650e98281fbe2b1e51ff6026
| 3,694
|
py
|
Python
|
src/outpost/django/salt/serializers.py
|
medunigraz/outpost.django.salt
|
bb8d3cefeaa8444ce15979689abdd93ed993304b
|
[
"BSD-2-Clause"
] | null | null | null |
src/outpost/django/salt/serializers.py
|
medunigraz/outpost.django.salt
|
bb8d3cefeaa8444ce15979689abdd93ed993304b
|
[
"BSD-2-Clause"
] | null | null | null |
src/outpost/django/salt/serializers.py
|
medunigraz/outpost.django.salt
|
bb8d3cefeaa8444ce15979689abdd93ed993304b
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import gpg
from rest_framework import serializers
from .conf import settings
from . import models
logger = logging.getLogger(__name__)
class PGPFileField(serializers.Field):
def to_representation(self, value):
with gpg.Context(armor=True) as c:
imp = c.key_import(settings.SALT_PUBLIC_KEY.encode("ascii"))
if not isinstance(imp, gpg.results.ImportResult):
logger.error("Could not import Saltstack public GPG key.")
return
keys = [c.get_key(k.fpr) for k in imp.imports]
crypt, result, _ = c.encrypt(
value.read(), keys, sign=False, always_trust=True
)
return crypt
class PublicKeySerializer(serializers.ModelSerializer):
fingerprint = serializers.CharField(read_only=True)
class Meta:
model = models.PublicKey
fields = ("fingerprint", "key", "openssh")
class GroupSerializer(serializers.ModelSerializer):
gid = serializers.IntegerField(source="pk")
class Meta:
model = models.Group
fields = ("gid", "name")
class SystemUserActiveFilterListSerializer(serializers.ListSerializer):
def to_representation(self, data):
return super().to_representation(data.filter(user__active=True))
class SystemUserSerializer(serializers.ModelSerializer):
uid = serializers.IntegerField(source="user.pk")
username = serializers.CharField(source="user.person.username")
displayname = serializers.CharField(source="user.displayname")
homedir = serializers.SerializerMethodField()
groups = GroupSerializer(many=True)
public_keys = PublicKeySerializer(source="user.publickey_set.all", many=True)
class Meta:
model = models.SystemUser
list_serializer_class = SystemUserActiveFilterListSerializer
fields = (
"uid",
"username",
"displayname",
"homedir",
"shell",
"groups",
"sudo",
"public_keys",
)
def get_homedir(self, o):
return o.system.home_template.format(username=o.user.person.username)
class SystemFileSerializer(serializers.ModelSerializer):
path = serializers.CharField(read_only=True)
owner = serializers.CharField(source="file.user.username", read_only=True)
permissions = serializers.CharField(source="file.permissions", read_only=True)
source = serializers.FileField(source="file.content", use_url=False, read_only=True)
# content = PGPFileField(source='file.content', read_only=True)
sha256 = serializers.CharField(source="file.sha256", read_only=True)
mimetype = serializers.CharField(source="file.mimetype", read_only=True)
class Meta:
model = models.SystemFile
fields = ("path", "owner", "permissions", "source", "sha256", "mimetype")
class SystemSerializer(serializers.ModelSerializer):
users = SystemUserSerializer(source="systemuser_set", many=True)
groups = GroupSerializer(source="group_set", many=True)
files = SystemFileSerializer(source="systemfile_set", many=True)
class Meta:
model = models.System
fields = ("name", "users", "groups", "files")
class HostSerializer(serializers.ModelSerializer):
system = SystemSerializer()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields.update(self.Meta.extensions)
class Meta:
model = models.Host
fields = ("name", "system")
extensions = dict()
class FileSerializer(serializers.ModelSerializer):
class Meta:
model = models.File
fields = ("path", "systems", "permissions")
| 32.403509
| 88
| 0.674066
| 3,514
| 0.951272
| 0
| 0
| 0
| 0
| 0
| 0
| 545
| 0.147537
|
e3fff64e6905d157f27dedffc36bcf8b6222a9c6
| 11,950
|
py
|
Python
|
demosauruswebapp/demosaurus/link_thesaurus.py
|
KBNLresearch/Demosaurus
|
9235e315d9eef9d8d64f94a90ab4fc8220670ef2
|
[
"Apache-2.0"
] | 1
|
2020-06-25T16:39:35.000Z
|
2020-06-25T16:39:35.000Z
|
demosauruswebapp/demosaurus/link_thesaurus.py
|
KBNLresearch/Demosaurus
|
9235e315d9eef9d8d64f94a90ab4fc8220670ef2
|
[
"Apache-2.0"
] | 6
|
2020-03-06T12:31:38.000Z
|
2021-09-20T15:08:17.000Z
|
demosauruswebapp/demosaurus/link_thesaurus.py
|
KBNLresearch/Demosaurus
|
9235e315d9eef9d8d64f94a90ab4fc8220670ef2
|
[
"Apache-2.0"
] | null | null | null |
from flask import (
Blueprint, flash, g, redirect, render_template, get_template_attribute, request, url_for, jsonify
)
#from ....dataprocessing import # dataprocessin .read_rdf import
from demosauruswebapp.demosaurus.db import get_db
import pandas as pd
from nltk.metrics import distance as nl_distance
import re
import numpy as np
from scipy.spatial import distance as spatial_distance
from scipy import stats
import json
import time
import unidecode
import string
bp = Blueprint('link_thesaurus', __name__)
punctuation_remover = str.maketrans(string.punctuation, ' '*len(string.punctuation)) #map punctuation to space
def normalize_name(name):
name = name.split('(')[0] # get only first bit (no life years, comments etc.)
name = unidecode.unidecode(name) # unicode normalization
name = name.lower() # lowercase
name = name.translate(punctuation_remover) # replace dots, apostrophes, etc. with whitespace
name = ' '.join(name.split()) # single space separation
return name
@bp.route('/thesaureer/')
def thesaureer():
author_name = request.args.get('contributor_name', '', type=str)
if not author_name:
author_options = pd.DataFrame() # Without name, cannot select candidates
else:
author_role = request.args.get('contributor_role', '', type=str)
publication_title = request.args.get('publication_title', '', type=str)
publication_genres = json.loads(request.args.get('publication_genres', '', type=str))
publication_year = {'jaar_van_uitgave': [request.args.get('publication_year', '', type=str)]}
author_options = thesaureer_this(author_name, author_role, publication_title, publication_genres, publication_year)
return author_options.to_json(orient='records')
def thesaureer_this(author_name, author_role, publication_title, publication_genres, publication_year):
db = get_db()
searchkey = '@' in author_name
if searchkey:
candidates = "WITH candidates AS (SELECT author_ppn FROM author_fts5 WHERE searchkey MATCH :searchkey)\n"
matcher = normalize_name(author_name.split('@')[-1].strip('"'))
else:
candidates = "WITH candidates AS (SELECT author_ppn FROM author_fts5 WHERE normalized_name MATCH :searchkey)\n"
matcher = normalize_name(author_name)
start = time.time()
author_options = pd.read_sql_query(candidates + """SELECT author_NTA.* FROM candidates
JOIN publication_contributors_train_NBD t2 ON t2.author_ppn = candidates.author_ppn -- only authors that we have training data for
JOIN author_NTA ON candidates.author_ppn = author_NTA.author_ppn
GROUP BY author_NTA.author_ppn;
""", params={'searchkey':'\"'+matcher+'\"'}, con = db)
print('Obtain candidates - time elapsed:', time.time()-start)
# Add scores to the candidates
if len(author_options)>0:
start = time.time()
author_options=pd.concat((author_options, author_options.apply(
lambda row: score_names(row, author_name), axis=1)), axis=1)
print('Score names - time elapsed:', time.time() - start)
author_options=pd.concat((author_options, author_options.apply(
lambda row: score_class_based(row['author_ppn'], publication_genres, 'genre'), axis=1)), axis=1)
#author_options = pd.concat((author_options, author_options.apply(
# lambda row: score_class_based(row['author_ppn'], publication_year, 'jvu'), axis=1)), axis=1)
author_options=pd.concat((author_options, author_options.apply(
lambda row: score_year(row['author_ppn'], publication_year), axis=1)), axis=1)
author_options = pd.concat((author_options, author_options.apply(
lambda row: score_style(None, None), axis=1)), axis=1)
author_options=pd.concat((author_options, author_options.apply(
lambda row: score_role(None,author_role), axis=1)), axis=1)
# Determine overall score for candidate: linear combination of scores, weighted by confidence
features = ['name','genre', 'jvu']
scores = [feature+'_score' for feature in features]
weights = [feature+'_confidence' for feature in features]
author_options['score']= author_options.apply(lambda row: np.average(row.loc[scores], weights=row.loc[weights]), axis=1)
# Sort candidates by score
author_options.sort_values(by='score', ascending=False, inplace=True)
return author_options
def normalized_levenshtein(s1,s2):
# normalized Levenshtein distance: normalize by the max of the lengths
l = float(max(len(s1), len(s2))) # normalize by length, high score wins
return (l - nl_distance.edit_distance(s1, s2)) / l
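# Worked example (hypothetical names): edit_distance("jansen", "janssen") is 1 and the
# longer string has length 7, so the score is (7 - 1) / 7 ~= 0.857; identical strings
# score 1.0 and completely different strings of equal length score 0.0.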
def score_names(authorshipItem, author_name):
# family name should be rather similar: check levenshtein distance and normalize by length
if '@' in author_name:
nameparts = author_name.split('@')
else:
nameparts = author_name.split()
family_name = nameparts[-1]
given_name = ' '.join(nameparts[:-1])
familyNameScore = normalized_levenshtein(authorshipItem['foaf_familyname'],family_name)
confidence = 1
firstNameScore = 1
try: # convert given name(s) to list
# an for author name, cn for candidate name
        an, cn = [list(filter(None, re.split(r'\.|\s+', name))) for name in [authorshipItem['foaf_givenname'], given_name]]
firstNameScore *= 1 if len(an)==len(cn) else .8 # if number of given names differs, lower score
except: # no reliable first name(s)
an, cn = [[],[]]
firstNameScore=.5
confidence *= 0.5
for i in range(min(len(an),len(cn))):
if len(an[i])==1 or len(cn[i])==1: # Just initials: compare first letter only
firstNameScore *= 1 if an[i][0] == cn[i][0] else .5
confidence *= 0.8 # Gives less reliable score: confidence penalty
else:
firstNameScore *= normalized_levenshtein(an[i],cn[i])
return pd.Series([.5*familyNameScore+.5*firstNameScore, confidence], index = ['name_score', 'name_confidence'])
def obtain_similarity_data(author_ppn, features):
# obtain accumulated data for author
# from author views (see repo/data-processing/author_views.py)
#try:
query = ''
for i, feature_i in enumerate(features):
if i > 0: query += ' UNION '
query += 'SELECT '
for j, feature_j in enumerate(features):
if i == j:
query += 'term_identifier AS ' + feature_j + ','
else:
query += 'NULL AS ' + feature_j + ','
query += 'nPublications as knownPublications '
query += 'FROM ' + 'author_' + feature_i + '_NBD '
query += 'WHERE author_ppn = :author_ppn'
data = pd.read_sql_query(query, params={'author_ppn':author_ppn}, con = get_db())
#except e:
# print('PROBLEM', e)
#TODO: proper exception handling (return exception to caller!)
return data
def score_class_based(author_ppn, publication_classes, name):
"""
Determine score (0-1) and confidence (0-1) for an author given the publication and their known publications
Based on information in fields corresponding to items in publication_classes (e.g. genres, subjects, ...)
author_ppn: the pica identifier of the candidate author (string)
publication_classes: the information of the publication to be compared to
a dictionary of lists:
keys are class names that correspond to database information (e.g. "CBK_genre")
values are a list of identifiers that correspond to publication (e.g. ["330", "135", "322", "334"])
name: a string that indicates how to interpret the score (e.g. "genre")
"""
if sum([len(v) for k,v in publication_classes.items()]) == 0:
# Nothing to base score on. Return zero or something else?
score = 0
confidence = 0
else:
# Obtain a list of known publication counts from the database
known_info = obtain_similarity_data(author_ppn, publication_classes.keys())
if len(known_info) == 0:
# no information available to make a sane comparison
score = 0
confidence = 0
else:
# Add a column with the new publication to compare with
for c,l in publication_classes.items():
for v in l:
if type(v)== dict:
try: known_info.loc[known_info[c]==v['identifier'],'newPublication']=1
except: print('Cannot add publication info to dataframe for comparison')
else:
try: known_info.loc[known_info[c]==v,'newPublication']=1
except: print('Cannot add publication info to dataframe for comparison')
# score = 1- cosine distance between array of known publications and new publication
# intuition:
# if there are no overlapping genres, distance = 1 so score is 0
# if there is little overlap, the score is close to 0
# if the new publication is very similar to known publications, the score is close to 1
known_info = known_info.fillna(0)
try:
score = 1 - spatial_distance.cosine(known_info.knownPublications, known_info.newPublication)
assert not np.isnan(score)
known = known_info.knownPublications.sum()
confidence= known/(known+20) # need approx. 20 datapoints to make a somewhat reliable estimate (50% sure)
# Temporary fix to get some estimate on reliability
except:
#print('class based score is undefined for', author_ppn, publication_classes)
score = 0
confidence = 0
return pd.Series([score, confidence], index = [name+'_score', name+'_confidence'])
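# Worked example (hypothetical counts): an author with 8 known thrillers and 2 known
# biographies has the known-publication vector [8, 2]; a new publication tagged only as
# a thriller gives [1, 0], so score = 1 - cosine([8, 2], [1, 0]) = 8 / sqrt(68) ~= 0.97,
# with confidence 10 / (10 + 20) ~= 0.33 because only 10 publications are known.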
def score_style(author_record, author_context):
#score=max(min(np.random.normal(0.5,0.1),1),0)
#confidence=max(min(np.random.normal(0.4, 0.1),0.9),0.1)
score = 0
confidence = 0
return pd.Series([score, confidence], index = ['style_score', 'style_confidence'])
def score_role(author_record, author_context):
if not author_context or not author_record :
score = 0
confidence = 0
else:
score = 0
confidence = 0
# score=max(min(np.random.normal(0.7, 0.1),1),0)
# confidence=max(min(np.random.normal(0.4, 0.1),0.9),0.1)
return pd.Series([score, confidence], index = ['role_score', 'role_confidence'])
def score_year(author_ppn, publication_year):
try:
year = int (publication_year['jaar_van_uitgave'][0])
known_info = obtain_similarity_data(author_ppn, publication_year)
except:
known_info = pd.DataFrame([])
if len(known_info) == 0:
# no information available to make a sane comparison
score = 0
confidence = 0
else:
# fit a normal distribution to the data points
mu, sigma = stats.norm.fit(np.repeat(known_info.jaar_van_uitgave, known_info.knownPublications))
sigma = max(sigma, 5) # sigma should be at least 5: publications are still likely (70%) 5 years from any known publication
top = stats.norm.pdf(mu, mu, sigma) # determine top
score = stats.norm.pdf(year, mu, sigma)/top # normalize by top: we want a score of 1 for the mean
# estimate confidence:
known = known_info.knownPublications.sum()
confidence= known/(known+20) # need approx. 20 datapoints to make a somewhat reliable estimate (50% sure)
return pd.Series([score, confidence], index=['jvu_score', 'jvu_confidence'])
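# Worked example (hypothetical years): known publications from 2001, 2002 and 2003 give
# mu = 2002 and a fitted sigma well below the floor, so sigma is clipped to 5; a candidate
# publication from 2002 then scores 1.0, one from 2007 scores exp(-0.5) ~= 0.61, and the
# confidence is 3 / (3 + 20) ~= 0.13 because only three publications are known.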
| 49.585062
| 139
| 0.647699
| 0
| 0
| 0
| 0
| 752
| 0.062929
| 0
| 0
| 4,701
| 0.393389
|
5401d3f8943311c53015fddf7d9a9c7b00d0c8d8
| 6,784
|
py
|
Python
|
solver.py
|
IvoryCandy/char-rnn
|
a21f3b198770c6c9bef0171bf31b2a1710066da8
|
[
"Apache-2.0"
] | null | null | null |
solver.py
|
IvoryCandy/char-rnn
|
a21f3b198770c6c9bef0171bf31b2a1710066da8
|
[
"Apache-2.0"
] | null | null | null |
solver.py
|
IvoryCandy/char-rnn
|
a21f3b198770c6c9bef0171bf31b2a1710066da8
|
[
"Apache-2.0"
] | null | null | null |
import math
import numpy as np
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import CharRNN
from data import TextDataset, TextConverter
class Trainer(object):
def __init__(self, args):
self.args = args
self.device = torch.device('cuda' if self.args.cuda else 'cpu')
self.convert = None
self.model = None
self.optimizer = None
self.criterion = self.get_loss
self.meter = AverageValueMeter()
self.train_loader = None
self.get_data()
self.get_model()
self.get_optimizer()
def get_data(self):
self.convert = TextConverter(self.args.txt, max_vocab=self.args.max_vocab)
dataset = TextDataset(self.args.txt, self.args.len, self.convert.text_to_arr)
self.train_loader = DataLoader(dataset, self.args.batch_size, shuffle=True, num_workers=self.args.num_workers)
def get_model(self):
self.model = CharRNN(self.convert.vocab_size, self.args.embed_dim, self.args.hidden_size, self.args.num_layers,
self.args.dropout, self.args.cuda).to(self.device)
if self.args.cuda:
cudnn.benchmark = True
def get_optimizer(self):
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
self.optimizer = ScheduledOptim(optimizer)
@staticmethod
def get_loss(score, label):
return nn.CrossEntropyLoss()(score, label.view(-1))
def save_checkpoint(self, epoch):
if (epoch + 1) % self.args.save_interval == 0:
model_out_path = self.args.save_file + "epoch_{}_model.pth".format(epoch + 1)
torch.save(self.model, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
def save(self):
model_out_path = self.args.save_file + "final_model.pth"
torch.save(self.model, model_out_path)
print("Final model saved to {}".format(model_out_path))
@staticmethod
def pick_top_n(predictions, top_n=5):
top_predict_prob, top_predict_label = torch.topk(predictions, top_n, 1)
top_predict_prob /= torch.sum(top_predict_prob)
top_predict_prob = top_predict_prob.squeeze(0).cpu().numpy()
top_predict_label = top_predict_label.squeeze(0).cpu().numpy()
c = np.random.choice(top_predict_label, size=1, p=top_predict_prob)
return c
def train(self):
self.meter.reset()
self.model.train()
for x, y in tqdm(self.train_loader):
y = y.long()
x, y = x.to(self.device), y.to(self.device)
# Forward.
score, _ = self.model(x)
loss = self.criterion(score, y)
# Backward.
self.optimizer.zero_grad()
loss.backward()
# Clip gradient.
nn.utils.clip_grad_norm_(self.model.parameters(), 5)
self.optimizer.step()
self.meter.add(loss.item())
print('perplexity: {}'.format(np.exp(self.meter.value()[0])))
def test(self):
self.model.eval()
begin = np.array([i for i in self.args.begin])
begin = np.random.choice(begin, size=1)
text_len = self.args.predict_len
samples = [self.convert.word_to_int(c) for c in begin]
input_txt = torch.LongTensor(samples)[None]
input_txt = input_txt.to(self.device)
_, init_state = self.model(input_txt)
result = samples
model_input = input_txt[:, -1][:, None]
with torch.no_grad():
for i in range(text_len):
out, init_state = self.model(model_input, init_state)
prediction = self.pick_top_n(out.data)
model_input = torch.LongTensor(prediction)[None].to(self.device)
result.append(prediction[0])
print(self.convert.arr_to_text(result))
def predict(self):
self.model.eval()
samples = [self.convert.word_to_int(c) for c in self.args.begin]
input_txt = torch.LongTensor(samples)[None].to(self.device)
_, init_state = self.model(input_txt)
result = samples
model_input = input_txt[:, -1][:, None]
with torch.no_grad():
for i in range(self.args.predict_len):
out, init_state = self.model(model_input, init_state)
prediction = self.pick_top_n(out.data)
model_input = torch.LongTensor(prediction)[None].to(self.device)
result.append(prediction[0])
print(self.convert.arr_to_text(result))
def run(self):
for e in range(self.args.max_epoch):
print('===> EPOCH: {}/{}'.format(e + 1, self.args.max_epoch))
self.train()
self.test()
self.save_checkpoint(e)
self.save()
class AverageValueMeter(object):
"""
the meter tracker mainly focuses on mean and std
"""
def __init__(self):
super(AverageValueMeter, self).__init__()
self.n = None
self.sum = None
self.var = None
self.val = None
self.mean = None
self.std = None
self.reset()
def add(self, value, n=1):
self.val = value
self.sum += value
self.var += value * value
self.n += n
if self.n == 0:
self.mean, self.std = np.nan, np.nan
elif self.n == 1:
self.mean, self.std = self.sum, np.inf
else:
self.mean = self.sum / self.n
self.std = math.sqrt(
(self.var - self.n * self.mean * self.mean) / (self.n - 1.0))
def value(self):
return self.mean, self.std
def reset(self):
self.n = 0
self.sum = 0.0
self.var = 0.0
self.val = 0.0
self.mean = np.nan
self.std = np.nan
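# Minimal usage sketch with illustrative numbers (not taken from an actual training run):
#   meter = AverageValueMeter()
#   for loss in (0.9, 0.7, 0.8):
#       meter.add(loss)
#   meter.value()  # -> approximately (0.8, 0.1): running mean and sample std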
class ScheduledOptim(object):
"""A wrapper class for learning rate scheduling
"""
def __init__(self, optimizer):
self.optimizer = optimizer
self.lr = self.optimizer.param_groups[0]['lr']
self.current_steps = 0
def step(self):
"Step by the inner optimizer"
self.current_steps += 1
self.optimizer.step()
def zero_grad(self):
"Zero out the gradients by the inner optimizer"
self.optimizer.zero_grad()
def lr_multi(self, multi):
for param_group in self.optimizer.param_groups:
param_group['lr'] *= multi
self.lr = self.optimizer.param_groups[0]['lr']
def set_learning_rate(self, lr):
self.lr = lr
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
@property
def learning_rate(self):
return self.lr
| 31.849765
| 119
| 0.599204
| 6,544
| 0.964623
| 0
| 0
| 590
| 0.086969
| 0
| 0
| 380
| 0.056014
|
540226b4bbeda54cd1c6e6f8ca8daa02d21b75b8
| 17,360
|
py
|
Python
|
mayday_control/scripts/motion_control.py
|
LasseBoerresen/Mayday
|
3e40d9f3eb2727f78cfa915e19fb5706b6a53514
|
[
"MIT"
] | 2
|
2020-08-20T15:44:44.000Z
|
2021-09-27T07:21:59.000Z
|
mayday_control/scripts/motion_control.py
|
LasseBoerresen/Mayday
|
3e40d9f3eb2727f78cfa915e19fb5706b6a53514
|
[
"MIT"
] | 9
|
2018-03-02T15:21:22.000Z
|
2020-11-07T12:23:09.000Z
|
mayday_control/scripts/motion_control.py
|
LasseBoerresen/Mayday
|
3e40d9f3eb2727f78cfa915e19fb5706b6a53514
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import time
import random
import math
import unittest
import numpy as np
import pandas as pd
import std_msgs
from std_msgs.msg import String
# from control_msgs.msg import JointControllerState
# from gazebo_msgs.msg import LinkStates
# import matplotlib.pyplot as plt
import dynamixel_adapter
######## OBS: must load PyCharm in a terminal after sourcing the ROS setup and catkin setup #######
# Load the urdf_parser_py manifest, you use your own package
# name on the condition but in this case, you need to depend on
# urdf_parser_py.
# import roslib;
# import roslib.load_manifest('urdfdom_py')
import rospy  # rospy is required below for rospy.init_node() and the log_level argument
import sys
from urdf_parser_py.urdf import URDF
# tensorflow not installed for 2.7
# import tensorflow as tf
# from tensorflow.contrib import learn
from collections import OrderedDict
import pprint
import logging
# OBS using rospy for logging instead
#logging.basicConfig(format='%{asctime}s %{levelname}-8s %{message}s', level='DEBUG')
#logger = logging.getLogger(__name__)
# OBS Use rospy.logdebug or rospy.loginfo etc instead
# FORMAT = '%(asctime)s %(levelname)-8s: %(message)s'
# logging.basicConfig(format=FORMAT, level=logging.DEBUG)
# logger = logging.getLogger(__name__)
# logger.debug('testmsg')
pp = pprint.PrettyPrinter()
# Should mayday be modelled as an object? Probably. It could be Initiated by the xacro file.
TAU = math.pi * 2.0
class neural_network:
"""
    This nn should learn by reinforcement learning. In theory it should be recurrent, but let's shelve that for now. It
    should just basically take the different motor states as input and output the 18 goal positions. How does a
reinforcement nn train in practice?
"""
# class GameRunner:
# def __init__(self, sess, model, env, memory, max_eps, min_eps,
# decay, render=True):
# self._sess = sess
# self._env = env
# self._model = model
# self._memory = memory
# self._render = render
# self._max_eps = max_eps
# self._min_eps = min_eps
# self._decay = decay
# self._eps = self._max_eps
# self._steps = 0
# self._reward_store = []
# self._max_x_store = []
#
# def run(self):
# state = self._env.reset()
# tot_reward = 0
# max_x = -100
# while True:
# if self._render:
# self._env.render()
#
# action = self._choose_action(state)
# next_state, reward, done, info = self._env.step(action)
# if next_state[0] >= 0.1:
# reward += 10
# elif next_state[0] >= 0.25:
# reward += 20
# elif next_state[0] >= 0.5:
# reward += 100
#
# if next_state[0] > max_x:
# max_x = next_state[0]
# # is the game complete? If so, set the next state to
# # None for storage sake
# if done:
# next_state = None
#
# self._memory.add_sample((state, action, reward, next_state))
# self._replay()
#
# # exponentially decay the eps value
# self._steps += 1
# self._eps = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * math.exp(-LAMBDA * self._steps)
#
# # move the agent to the next state and accumulate the reward
# state = next_state
# tot_reward += reward
#
# # if the game is done, break the loop
# if done:
# self._reward_store.append(tot_reward)
# self._max_x_store.append(max_x)
# break
#
# print("Step {}, Total reward: {}, Eps: {}".format(self._steps, tot_reward, self._eps))
#
# def _choose_action(self, state):
# """
#
# :param state:
# :return:
# """
#
# if random.random() < self._eps:
# return random.randint(0, self._model.num_actions - 1)
# else:
# return np.argmax(self._model.predict_one(state, self._sess))
#
# def _replay(self):
# """
#
# :return:
# """
#
# batch = self._memory.sample(self._model.batch_size)
# states = np.array([val[0] for val in batch])
# next_states = np.array([(np.zeros(self._model.num_states)
# if val[3] is None else val[3]) for val in batch])
# # predict Q(s,a) given the batch of states
# q_s_a = self._model.predict_batch(states, self._sess)
# # predict Q(s',a') - so that we can do gamma * max(Q(s'a')) below
# q_s_a_d = self._model.predict_batch(next_states, self._sess)
# # setup training arrays
# x = np.zeros((len(batch), self._model.num_states))
# y = np.zeros((len(batch), self._model.num_actions))
# for i, b in enumerate(batch):
# state, action, reward, next_state = b[0], b[1], b[2], b[3]
# # get the current q values for all actions in state
# current_q = q_s_a[i]
# # update the q value for action
# if next_state is None:
# # in this case, the game completed after action, so there is no max Q(s',a')
# # prediction possible
# current_q[action] = reward
# else:
# current_q[action] = reward + GAMMA * np.amax(q_s_a_d[i])
# x[i] = state
# y[i] = current_q
# self._model.train_batch(self._sess, x, y)
#
#
# if __name__ == "__main__":
# env_name = 'MountainCar-v0'
# env = gym.make(env_name)
#
# num_states = env.env.observation_space.shape[0]
# num_actions = env.env.action_space.n
#
# model = Model(num_states, num_actions, BATCH_SIZE)
# mem = Memory(50000)
#
# with tf.Session() as sess:
# sess.run(model.var_init)
# gr = GameRunner(sess, model, env, mem, MAX_EPSILON, MIN_EPSILON,
# LAMBDA)
# num_episodes = 300
# cnt = 0
# while cnt < num_episodes:
# if cnt % 10 == 0:
# print('Episode {} of {}'.format(cnt+1, num_episodes))
# gr.run()
# cnt += 1
# plt.plot(gr.reward_store)
# plt.show()
# plt.close("all")
# plt.plot(gr.max_x_store)
# plt.show()
class Robot:
"""
    RNN to control each motor position at each time step. The goal is to reach a certain body and leg configuration,
    decided by the behavioural layer.
"""
def __init__(self):
"""
"""
# Declare this node to ros
rospy.init_node('mayday', anonymous=False, log_level=rospy.DEBUG)
# get xacro model of robot
self.description = URDF.from_parameter_server()
        # Initialize the state object from the description; the index in the OrderedDict corresponds to dxl_id - 1.
self.state = OrderedDict()
for joint in self.description.joints:
if joint.joint_type == 'revolute':
self.state[joint.name] = {}
if len(self.state) >= 3:
break
# self.nn =
self.dxl_controller = dynamixel_adapter.DynamixelAdapter(None)
self.dxl_controller.arm()
# get out of bed
self.initialize_robot_position()
sys.exit(0)
# OBS Not dealing with ros for now.
# # Subscribe and publish to joint topics
# self.joint_publishers = []
# self.joint_subscribers = []
# self.link_subscribers = []
#
# self.init_joint_subpubs()
#
# # Link states are calculated from joint states. TODO Add later for training feedback
# # self.init_links()
self.rate = rospy.Rate(10) # 10hz
# # Wait for first joint state update
# while self.robot_state['joints'] == {} and not rospy.is_shutdown():
# rospy.logdebug('waiting for joint states')
# self.rate.sleep()
# This is where the magic happens
while not rospy.is_shutdown():
self.read_joint_states()
self.find_new_joint_goals()
self.write_joint_goals()
self.rate.sleep()
def read_joint_states(self):
"""
        Updates robot state by looping over all defined joints and reading values from the dynamixels.
:return:
"""
# TODO Handle that pos_goal is overwritten
for id, joint_key in enumerate(self.state.keys()):
# dxl ids start at 1, because 0 is broadcast
self.state[joint_key] = self.dxl_controller.read_state(id + 1)
def format_state_for_nn(self):
x = pd.DataFrame()
y = pd.DataFrame()
# Input current joint states
for joint_key in self.state.keys():
x[joint_key + '_pos'] = self.state[joint_key]['pos']
x[joint_key + '_vel'] = self.state[joint_key]['vel']
x[joint_key + '_torq'] = self.state[joint_key]['torq']
x[joint_key + '_temp'] = self.state[joint_key]['temp']
# Input IMU measurements. And other sensors available.
        # Acceleration xyz, measures orientation around the x and y axes, given gravity.
# Gyro xyz
# Compass xyz, measures orientation around z axis
# Input feet touch sensors
# Input belly and back touch sensors.
# Input goal thorax pose and velocity
# for i, name in enumerate(self.robot_state['links'].name)
        # Ignore all links but base_link; only the base is used for now.
name = 'thorax' # 'mayday::base_link'
# TODO input actual goal position, from some behaviour function. Could just be sinusoid.
x['goal_' + name + '_pose_pos_x'] = 0.0 # self.robot_state['links'].pose[1].position.x
x['goal_' + name + '_pose_pos_y'] = 0.0 # self.robot_state['links'].pose[1].position.y
x['goal_' + name + '_pose_pos_z'] = 0.0 # self.robot_state['links'].pose[1].position.z
x['goal_' + name + '_pose_ori_r'] = 0.0 # self.robot_state['links'].pose[1].orientation.x
x['goal_' + name + '_pose_ori_p'] = 0.0 # self.robot_state['links'].pose[1].orientation.y
x['goal_' + name + '_pose_ori_y'] = 0.0 # self.robot_state['links'].pose[1].orientation.z
# x['goal_' + name + '_twist_position_x'] = 0.0 # self.robot_state['links'].pose[1].position.x
# x['goal_' + name + '_twist_position_y'] = 0.0 # self.robot_state['links'].pose[1].position.y
# x['goal_' + name + '_twist_orientation_z'] = 0.0 # self.robot_state['links'].pose[1].orientation.x
# Goal defining maximum movement speeds, in SI units.
x['goal_' + name + '_pose_pos_movement_speed'] = 0.01 # 1 cm per second
x['goal_' + name + '_pose_ori_movement_speed'] = 0.01 * TAU # 1/100 of a rev per second.
x['goal_joint_movement_speed'] = 0.02 * TAU # 1/100 of a rev per second.
# input goal stance width
# x['goal_' + name + '_stance_radius'] = 0.0
return x
def format_nn_output_for_state(self, y):
"""
:param pd.DataFrame y:
:return:
"""
for joint_key in self.state.keys():
self.state[joint_key]['pos_goal'] = y[joint_key]
pass
def find_new_joint_goals(self):
"""
:param joint_states:
:return:
"""
# for
# joint_goals =
        x = self.format_state_for_nn()
        # x = self.nn.preprocess(joint_states)
        # y = self.nn.predict(x)
        # NOTE: self.nn is not implemented yet; as a placeholder (not in the original),
        # keep each joint's current goal, falling back to its current position.
        y = {joint_key: self.state[joint_key].get('pos_goal', self.state[joint_key].get('pos'))
             for joint_key in self.state.keys()}
        self.format_nn_output_for_state(y)
def write_joint_goals(self):
"""
:param goals:
:return:
"""
#
for i, joint_key in enumerate(self.state.keys()):
self.dxl_controller.write_goal_position(i + 1, self.state[joint_key]['pos_goal'])
# for i, (pub, goal) in enumerate(zip(self.joint_publishers, goals)):
# pub.publish(goal)
def check_joints_at_rest(self):
"""
Check that all joints are below TORQUE_LIMIT_REST
        # TODO check that all are not moving either. Maybe this is superfluous.
# TODO ask for manual robot reposition, then retry.
:return:
"""
for joint_key in self.state.keys():
# joint torque is signed, we are only interested in absolute torque
if math.fabs(self.state[joint_key]['torq']) > dynamixel_adapter.TORQ_LIMIT_REST:
raise Exception(
'joint torque not at rest, joint: {joint}, torque: abs({torq}%) is not < {torq_rest}'
.format(
joint=joint_key, torq=self.state[joint_key]['torq'],
torq_rest=dynamixel_adapter.TORQ_LIMIT_REST))
def initialize_robot_position(self):
"""
        Make sure the robot is in a safe position when it starts up. Collect the legs while lying on its belly, then
        slowly move the femurs to stand up to the neutral position. The neutral position should hold without any
        torque, simply because of friction in the joints.
Check that none of the legs are under torque load before and after procedure.
:return:
"""
# check that none of the motors have torque
self.read_joint_states()
self.check_joints_at_rest()
rospy.loginfo('all joints are torqueless at start of init')
# Set movement speed to rather slow
# OBS robot always starts slow for dxl init.
# Collect legs close to body, lying on its belly. Toes should not move when getting up.
for joint_key in self.state.keys():
if 'coxa_dynamixel' in joint_key:
self.state[joint_key]['pos_goal'] = TAU/2
elif 'femur_dynamixel' in joint_key:
self.state[joint_key]['pos_goal'] = TAU/2 + TAU * 2.5 / 8
elif 'tibia_dynamixel' in joint_key:
self.state[joint_key]['pos_goal'] = TAU/2 + TAU * 1.75 / 8
# Move to sitting position and take a breath
self.write_joint_goals()
time.sleep(2)
        # Simply move the femur to neutral, getting up on its legs, and the tibia to keep the toe from moving.
for joint_key in self.state.keys():
if 'coxa_dynamixel' in joint_key:
self.state[joint_key]['pos_goal'] = TAU/2
elif 'femur_dynamixel' in joint_key:
self.state[joint_key]['pos_goal'] = TAU/2 + TAU/4 - TAU/16
elif 'tibia_dynamixel' in joint_key:
self.state[joint_key]['pos_goal'] = TAU/2 + TAU/4
        # move to upright position
self.write_joint_goals()
# Check that joints are at rest in the awakened pose
self.read_joint_states()
self.check_joints_at_rest()
rospy.loginfo('all joints are torqueless at end of init')
# TODO Set joint velocity limits to a faster speed
def linear_position_controller(self, start_pos, end_pos, goal_vel, step=2 * np.pi / 2 ** 8):
"""
Generate timestamps and positions for linear movement between two angles
:param float start_pos:
:param float end_pos:
:param float goal_vel:
:param float step:
:return:
"""
# def joint_subscriber_callback(self, data, args):
# """save data from triggering joint topic"""
#
# self.robot_state['joints'][args['joint']] = data
#
# def init_joint_subpubs(self):
# """
#
# :return:
# """
#
# for i, transmission in enumerate(self.robot_description.transmissions):
# topic = '/mayday/' + transmission.joints[0].name + '_position_controller/command'
# self.joint_publishers.append(rospy.Publisher(topic, std_msgs.msg.Float64, queue_size=10))
#
# topic = '/mayday/' + transmission.joints[0].name + '_position_controller/state'
# self.joint_subscribers.append(rospy.Subscriber(
# name=topic, data_class=JointControllerState, callback=self.joint_subscriber_callback,
# callback_args={'joint': transmission.joints[0].name}))
#
# def link_subscriber_callback(self, data):
# """
#
# :param data:
# :return:
# """
#
# self.robot_state['links'] = data
#
# def model_subscriber_callback(self, data):
# """
#
# :param data:
# :return:f
# """
#
# self.robot_state['model'] = data
#
# def init_links(self):
# """
#
# :return:
# """
#
# topic = '/gazebo/link_states'
# self.link_subscribers.append(rospy.Subscriber(
# name=topic, data_class=LinkStates, callback=self.link_subscriber_callback))
def main():
"""
This script should initiate all the legs in a safe position, then move them to the initial standing resting
position and await commands. Commands should come from remote control.
Robot state should mirror gazebo, no matter whether it comes from the real robot. States are taken from a
subscription, and commands are published.
:return:
"""
try:
# Run robot, including initialization of legs and idle for commands.
robot = Robot()
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
main()
| 33.643411
| 120
| 0.589286
| 10,733
| 0.61826
| 0
| 0
| 0
| 0
| 0
| 0
| 12,343
| 0.711002
|
5402f5bc7398b19a2c1f22b4890e4f4b84f51e3e
| 10,278
|
py
|
Python
|
venv/lib/python3.9/site-packages/trio/socket.py
|
almmello/frozen
|
c9928491f694b56a0023926bc763c703ba1fd75a
|
[
"BSD-2-Clause"
] | 3
|
2022-02-26T17:16:34.000Z
|
2022-03-04T15:04:00.000Z
|
venv/lib/python3.9/site-packages/trio/socket.py
|
almmello/frozen
|
c9928491f694b56a0023926bc763c703ba1fd75a
|
[
"BSD-2-Clause"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
venv/lib/python3.9/site-packages/trio/socket.py
|
almmello/frozen
|
c9928491f694b56a0023926bc763c703ba1fd75a
|
[
"BSD-2-Clause"
] | 1
|
2022-03-28T09:19:34.000Z
|
2022-03-28T09:19:34.000Z
|
# This is a public namespace, so we don't want to expose any non-underscored
# attributes that aren't actually part of our public API. But it's very
# annoying to carefully always use underscored names for module-level
# temporaries, imports, etc. when implementing the module. So we put the
# implementation in an underscored module, and then re-export the public parts
# here.
# We still have some underscore names, though only a few.
from . import _socket
import sys
import typing as _t
# The socket module exports a bunch of platform-specific constants. We want to
# re-export them. Since the exact set of constants varies depending on Python
# version, platform, the libc installed on the system where Python was built,
# etc., we figure out which constants to re-export dynamically at runtime (see
# below). But that confuses static analysis tools like jedi and mypy. So this
# import statement statically lists every constant that *could* be
# exported. It always fails at runtime, since no single Python build exports
# all these constants, but it lets static analysis tools understand what's
# going on. There's a test in test_exports.py to make sure that the list is
# kept up to date.
try:
# fmt: off
from socket import ( # type: ignore
CMSG_LEN, CMSG_SPACE, CAPI, AF_UNSPEC, AF_INET, AF_UNIX, AF_IPX,
AF_APPLETALK, AF_INET6, AF_ROUTE, AF_LINK, AF_SNA, PF_SYSTEM,
AF_SYSTEM, SOCK_STREAM, SOCK_DGRAM, SOCK_RAW, SOCK_SEQPACKET, SOCK_RDM,
SO_DEBUG, SO_ACCEPTCONN, SO_REUSEADDR, SO_KEEPALIVE, SO_DONTROUTE,
SO_BROADCAST, SO_USELOOPBACK, SO_LINGER, SO_OOBINLINE, SO_REUSEPORT,
SO_SNDBUF, SO_RCVBUF, SO_SNDLOWAT, SO_RCVLOWAT, SO_SNDTIMEO,
SO_RCVTIMEO, SO_ERROR, SO_TYPE, LOCAL_PEERCRED, SOMAXCONN, SCM_RIGHTS,
SCM_CREDS, MSG_OOB, MSG_PEEK, MSG_DONTROUTE, MSG_DONTWAIT, MSG_EOR,
MSG_TRUNC, MSG_CTRUNC, MSG_WAITALL, MSG_EOF, SOL_SOCKET, SOL_IP,
SOL_TCP, SOL_UDP, IPPROTO_IP, IPPROTO_HOPOPTS, IPPROTO_ICMP,
IPPROTO_IGMP, IPPROTO_GGP, IPPROTO_IPV4, IPPROTO_IPIP, IPPROTO_TCP,
IPPROTO_EGP, IPPROTO_PUP, IPPROTO_UDP, IPPROTO_IDP, IPPROTO_HELLO,
IPPROTO_ND, IPPROTO_TP, IPPROTO_ROUTING, IPPROTO_FRAGMENT,
IPPROTO_RSVP, IPPROTO_GRE, IPPROTO_ESP, IPPROTO_AH, IPPROTO_ICMPV6,
IPPROTO_NONE, IPPROTO_DSTOPTS, IPPROTO_XTP, IPPROTO_EON, IPPROTO_PIM,
IPPROTO_IPCOMP, IPPROTO_SCTP, IPPROTO_RAW, IPPROTO_MAX,
SYSPROTO_CONTROL, IPPORT_RESERVED, IPPORT_USERRESERVED, INADDR_ANY,
INADDR_BROADCAST, INADDR_LOOPBACK, INADDR_UNSPEC_GROUP,
INADDR_ALLHOSTS_GROUP, INADDR_MAX_LOCAL_GROUP, INADDR_NONE, IP_OPTIONS,
IP_HDRINCL, IP_TOS, IP_TTL, IP_RECVOPTS, IP_RECVRETOPTS,
IP_RECVDSTADDR, IP_RETOPTS, IP_MULTICAST_IF, IP_MULTICAST_TTL,
IP_MULTICAST_LOOP, IP_ADD_MEMBERSHIP, IP_DROP_MEMBERSHIP,
IP_DEFAULT_MULTICAST_TTL, IP_DEFAULT_MULTICAST_LOOP,
IP_MAX_MEMBERSHIPS, IPV6_JOIN_GROUP, IPV6_LEAVE_GROUP,
IPV6_MULTICAST_HOPS, IPV6_MULTICAST_IF, IPV6_MULTICAST_LOOP,
IPV6_UNICAST_HOPS, IPV6_V6ONLY, IPV6_CHECKSUM, IPV6_RECVTCLASS,
IPV6_RTHDR_TYPE_0, IPV6_TCLASS, TCP_NODELAY, TCP_MAXSEG, TCP_KEEPINTVL,
TCP_KEEPCNT, TCP_FASTOPEN, TCP_NOTSENT_LOWAT, EAI_ADDRFAMILY,
EAI_AGAIN, EAI_BADFLAGS, EAI_FAIL, EAI_FAMILY, EAI_MEMORY, EAI_NODATA,
EAI_NONAME, EAI_OVERFLOW, EAI_SERVICE, EAI_SOCKTYPE, EAI_SYSTEM,
EAI_BADHINTS, EAI_PROTOCOL, EAI_MAX, AI_PASSIVE, AI_CANONNAME,
AI_NUMERICHOST, AI_NUMERICSERV, AI_MASK, AI_ALL, AI_V4MAPPED_CFG,
AI_ADDRCONFIG, AI_V4MAPPED, AI_DEFAULT, NI_MAXHOST, NI_MAXSERV,
NI_NOFQDN, NI_NUMERICHOST, NI_NAMEREQD, NI_NUMERICSERV, NI_DGRAM,
SHUT_RD, SHUT_WR, SHUT_RDWR, EBADF, EAGAIN, EWOULDBLOCK, AF_ASH,
AF_ATMPVC, AF_ATMSVC, AF_AX25, AF_BLUETOOTH, AF_BRIDGE, AF_ECONET,
AF_IRDA, AF_KEY, AF_LLC, AF_NETBEUI, AF_NETLINK, AF_NETROM, AF_PACKET,
AF_PPPOX, AF_ROSE, AF_SECURITY, AF_WANPIPE, AF_X25, BDADDR_ANY,
BDADDR_LOCAL, FD_SETSIZE, IPV6_DSTOPTS, IPV6_HOPLIMIT, IPV6_HOPOPTS,
IPV6_NEXTHOP, IPV6_PKTINFO, IPV6_RECVDSTOPTS, IPV6_RECVHOPLIMIT,
IPV6_RECVHOPOPTS, IPV6_RECVPKTINFO, IPV6_RECVRTHDR, IPV6_RTHDR,
IPV6_RTHDRDSTOPTS, MSG_ERRQUEUE, NETLINK_DNRTMSG, NETLINK_FIREWALL,
NETLINK_IP6_FW, NETLINK_NFLOG, NETLINK_ROUTE, NETLINK_USERSOCK,
NETLINK_XFRM, PACKET_BROADCAST, PACKET_FASTROUTE, PACKET_HOST,
PACKET_LOOPBACK, PACKET_MULTICAST, PACKET_OTHERHOST, PACKET_OUTGOING,
POLLERR, POLLHUP, POLLIN, POLLMSG, POLLNVAL, POLLOUT, POLLPRI,
POLLRDBAND, POLLRDNORM, POLLWRNORM, SIOCGIFINDEX, SIOCGIFNAME,
SOCK_CLOEXEC, TCP_CORK, TCP_DEFER_ACCEPT, TCP_INFO, TCP_KEEPIDLE,
TCP_LINGER2, TCP_QUICKACK, TCP_SYNCNT, TCP_WINDOW_CLAMP, AF_ALG,
AF_CAN, AF_RDS, AF_TIPC, AF_VSOCK, ALG_OP_DECRYPT, ALG_OP_ENCRYPT,
ALG_OP_SIGN, ALG_OP_VERIFY, ALG_SET_AEAD_ASSOCLEN,
ALG_SET_AEAD_AUTHSIZE, ALG_SET_IV, ALG_SET_KEY, ALG_SET_OP,
ALG_SET_PUBKEY, CAN_BCM, CAN_BCM_RX_CHANGED, CAN_BCM_RX_DELETE,
CAN_BCM_RX_READ, CAN_BCM_RX_SETUP, CAN_BCM_RX_STATUS,
CAN_BCM_RX_TIMEOUT, CAN_BCM_TX_DELETE, CAN_BCM_TX_EXPIRED,
CAN_BCM_TX_READ, CAN_BCM_TX_SEND, CAN_BCM_TX_SETUP, CAN_BCM_TX_STATUS,
CAN_EFF_FLAG, CAN_EFF_MASK, CAN_ERR_FLAG, CAN_ERR_MASK, CAN_ISOTP,
CAN_RAW, CAN_RAW_ERR_FILTER, CAN_RAW_FD_FRAMES, CAN_RAW_FILTER,
CAN_RAW_LOOPBACK, CAN_RAW_RECV_OWN_MSGS, CAN_RTR_FLAG, CAN_SFF_MASK,
IOCTL_VM_SOCKETS_GET_LOCAL_CID, IPV6_DONTFRAG, IPV6_PATHMTU,
IPV6_RECVPATHMTU, IP_TRANSPARENT, MSG_CMSG_CLOEXEC, MSG_CONFIRM,
MSG_FASTOPEN, MSG_MORE, MSG_NOSIGNAL, NETLINK_CRYPTO, PF_CAN,
PF_PACKET, PF_RDS, SCM_CREDENTIALS, SOCK_NONBLOCK, SOL_ALG,
SOL_CAN_BASE, SOL_CAN_RAW, SOL_TIPC, SO_BINDTODEVICE, SO_DOMAIN,
SO_MARK, SO_PASSCRED, SO_PASSSEC, SO_PEERCRED, SO_PEERSEC, SO_PRIORITY,
SO_PROTOCOL, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
SO_VM_SOCKETS_BUFFER_MIN_SIZE, SO_VM_SOCKETS_BUFFER_SIZE,
TCP_CONGESTION, TCP_USER_TIMEOUT, TIPC_ADDR_ID, TIPC_ADDR_NAME,
TIPC_ADDR_NAMESEQ, TIPC_CFG_SRV, TIPC_CLUSTER_SCOPE, TIPC_CONN_TIMEOUT,
TIPC_CRITICAL_IMPORTANCE, TIPC_DEST_DROPPABLE, TIPC_HIGH_IMPORTANCE,
TIPC_IMPORTANCE, TIPC_LOW_IMPORTANCE, TIPC_MEDIUM_IMPORTANCE,
TIPC_NODE_SCOPE, TIPC_PUBLISHED, TIPC_SRC_DROPPABLE,
TIPC_SUBSCR_TIMEOUT, TIPC_SUB_CANCEL, TIPC_SUB_PORTS, TIPC_SUB_SERVICE,
TIPC_TOP_SRV, TIPC_WAIT_FOREVER, TIPC_WITHDRAWN, TIPC_ZONE_SCOPE,
VMADDR_CID_ANY, VMADDR_CID_HOST, VMADDR_PORT_ANY,
VM_SOCKETS_INVALID_VERSION, MSG_BCAST, MSG_MCAST, RCVALL_MAX,
RCVALL_OFF, RCVALL_ON, RCVALL_SOCKETLEVELONLY, SIO_KEEPALIVE_VALS,
SIO_LOOPBACK_FAST_PATH, SIO_RCVALL, SO_EXCLUSIVEADDRUSE, HCI_FILTER,
BTPROTO_SCO, BTPROTO_HCI, HCI_TIME_STAMP, SOL_RDS, BTPROTO_L2CAP,
BTPROTO_RFCOMM, HCI_DATA_DIR, SOL_HCI, CAN_BCM_RX_ANNOUNCE_RESUME,
CAN_BCM_RX_CHECK_DLC, CAN_BCM_RX_FILTER_ID, CAN_BCM_RX_NO_AUTOTIMER,
CAN_BCM_RX_RTR_FRAME, CAN_BCM_SETTIMER, CAN_BCM_STARTTIMER,
CAN_BCM_TX_ANNOUNCE, CAN_BCM_TX_COUNTEVT, CAN_BCM_TX_CP_CAN_ID,
CAN_BCM_TX_RESET_MULTI_IDX, IPPROTO_CBT, IPPROTO_ICLFXBM, IPPROTO_IGP,
IPPROTO_L2TP, IPPROTO_PGM, IPPROTO_RDP, IPPROTO_ST, AF_QIPCRTR,
CAN_BCM_CAN_FD_FRAME, IPPROTO_MOBILE, IPV6_USE_MIN_MTU,
MSG_NOTIFICATION, SO_SETFIB, CAN_J1939, CAN_RAW_JOIN_FILTERS,
IPPROTO_UDPLITE, J1939_EE_INFO_NONE, J1939_EE_INFO_TX_ABORT,
J1939_FILTER_MAX, J1939_IDLE_ADDR, J1939_MAX_UNICAST_ADDR,
J1939_NLA_BYTES_ACKED, J1939_NLA_PAD, J1939_NO_ADDR, J1939_NO_NAME,
J1939_NO_PGN, J1939_PGN_ADDRESS_CLAIMED, J1939_PGN_ADDRESS_COMMANDED,
J1939_PGN_MAX, J1939_PGN_PDU1_MAX, J1939_PGN_REQUEST,
SCM_J1939_DEST_ADDR, SCM_J1939_DEST_NAME, SCM_J1939_ERRQUEUE,
SCM_J1939_PRIO, SO_J1939_ERRQUEUE, SO_J1939_FILTER, SO_J1939_PROMISC,
SO_J1939_SEND_PRIO, UDPLITE_RECV_CSCOV, UDPLITE_SEND_CSCOV
)
# fmt: on
except ImportError:
pass
# Dynamically re-export whatever constants this particular Python happens to
# have:
import socket as _stdlib_socket
_bad_symbols: _t.Set[str] = set()
if sys.platform == "win32":
# See https://github.com/python-trio/trio/issues/39
# Do not import for windows platform
# (you can still get it from stdlib socket, of course, if you want it)
_bad_symbols.add("SO_REUSEADDR")
globals().update(
{
_name: getattr(_stdlib_socket, _name)
for _name in _stdlib_socket.__all__ # type: ignore
if _name.isupper() and _name not in _bad_symbols
}
)
# import the overwrites
from ._socket import (
fromfd,
from_stdlib_socket,
getprotobyname,
socketpair,
getnameinfo,
socket,
getaddrinfo,
set_custom_hostname_resolver,
set_custom_socket_factory,
SocketType,
)
# not always available, so expose only if present
if sys.platform == "win32" or not _t.TYPE_CHECKING:
try:
from ._socket import fromshare
except ImportError:
pass
# expose these functions to trio.socket
from socket import (
gaierror,
herror,
gethostname,
ntohs,
htonl,
htons,
inet_aton,
inet_ntoa,
inet_pton,
inet_ntop,
)
# not always available, so expose only if present
if sys.platform != "win32" or not _t.TYPE_CHECKING:
try:
from socket import sethostname, if_nameindex, if_nametoindex, if_indextoname
except ImportError:
pass
# get names used by Trio that we define on our own
from ._socket import IPPROTO_IPV6
# Not defined in all python versions and platforms but sometimes needed
if not _t.TYPE_CHECKING:
try:
TCP_NOTSENT_LOWAT
except NameError:
# Hopefully will show up in 3.7:
# https://github.com/python/cpython/pull/477
if sys.platform == "darwin":
TCP_NOTSENT_LOWAT = 0x201
elif sys.platform == "linux":
TCP_NOTSENT_LOWAT = 25
if _t.TYPE_CHECKING:
IP_BIND_ADDRESS_NO_PORT: int
else:
try:
IP_BIND_ADDRESS_NO_PORT
except NameError:
if sys.platform == "linux":
IP_BIND_ADDRESS_NO_PORT = 24
del sys
| 48.481132
| 84
| 0.748784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,814
| 0.176493
|
54036005b75aaa482dfeae48fd25d054393283e1
| 2,258
|
py
|
Python
|
tests/test_encoders.py
|
alxlampe/d3rlpy
|
af7e6bd018a51f95138d121f59c50dc36ec87e3a
|
[
"MIT"
] | null | null | null |
tests/test_encoders.py
|
alxlampe/d3rlpy
|
af7e6bd018a51f95138d121f59c50dc36ec87e3a
|
[
"MIT"
] | null | null | null |
tests/test_encoders.py
|
alxlampe/d3rlpy
|
af7e6bd018a51f95138d121f59c50dc36ec87e3a
|
[
"MIT"
] | null | null | null |
import pytest
from d3rlpy.models.torch.encoders import PixelEncoder
from d3rlpy.models.torch.encoders import PixelEncoderWithAction
from d3rlpy.models.torch.encoders import VectorEncoder
from d3rlpy.models.torch.encoders import VectorEncoderWithAction
from d3rlpy.encoders import create_encoder_factory
from d3rlpy.encoders import PixelEncoderFactory
from d3rlpy.encoders import VectorEncoderFactory
@pytest.mark.parametrize('observation_shape', [(4, 84, 84)])
@pytest.mark.parametrize('action_size', [None, 2])
@pytest.mark.parametrize('discrete_action', [False, True])
def test_pixel_encoder_factory(observation_shape, action_size,
discrete_action):
factory = PixelEncoderFactory()
encoder = factory.create(observation_shape, action_size, discrete_action)
if action_size is None:
assert isinstance(encoder, PixelEncoder)
else:
assert isinstance(encoder, PixelEncoderWithAction)
assert encoder.discrete_action == discrete_action
assert factory.get_type() == 'pixel'
params = factory.get_params()
new_factory = PixelEncoderFactory(**params)
assert new_factory.get_params() == params
@pytest.mark.parametrize('observation_shape', [(100, )])
@pytest.mark.parametrize('action_size', [None, 2])
@pytest.mark.parametrize('discrete_action', [False, True])
def test_vector_encoder_factory(observation_shape, action_size,
discrete_action):
factory = VectorEncoderFactory()
encoder = factory.create(observation_shape, action_size, discrete_action)
if action_size is None:
assert isinstance(encoder, VectorEncoder)
else:
assert isinstance(encoder, VectorEncoderWithAction)
assert encoder.discrete_action == discrete_action
assert factory.get_type() == 'vector'
params = factory.get_params()
new_factory = VectorEncoderFactory(**params)
assert new_factory.get_params() == params
@pytest.mark.parametrize('name', ['pixel', 'vector'])
def test_create_encoder_factory(name):
factory = create_encoder_factory(name)
if name == 'pixel':
assert isinstance(factory, PixelEncoderFactory)
elif name == 'vector':
assert isinstance(factory, VectorEncoderFactory)
| 35.84127
| 77
| 0.740478
| 0
| 0
| 0
| 0
| 1,848
| 0.818423
| 0
| 0
| 149
| 0.065988
|
5406dab8bc4f61a6b8581ae628a67e2632c2d5cd
| 2,217
|
py
|
Python
|
cam/03_face_recognition.py
|
kimtaehoho/osscap2020
|
7980ab742a1a90fb4405eeabe941504a0b859d20
|
[
"Apache-2.0"
] | null | null | null |
cam/03_face_recognition.py
|
kimtaehoho/osscap2020
|
7980ab742a1a90fb4405eeabe941504a0b859d20
|
[
"Apache-2.0"
] | 10
|
2020-10-12T04:45:01.000Z
|
2020-11-29T12:40:55.000Z
|
cam/03_face_recognition.py
|
kimtaehoho/osscap2020
|
7980ab742a1a90fb4405eeabe941504a0b859d20
|
[
"Apache-2.0"
] | 1
|
2020-10-12T12:28:42.000Z
|
2020-10-12T12:28:42.000Z
|
# -*- coding: utf-8 -*-
#import game
from glob import glob
file1 = glob("01_face_dataset.py")
file2 = glob("02_face_training.py")
import facedataset
import facetrain
import cv2
import numpy as np
import os
from PIL import Image
#facedataset.first()
#facetrain.second()
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascades/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX
# initiate id counter
id = 0
# names related to ids: example ==> loze: id=1, etc
# Add user names like this, one entry for each user.
names = ['None', 'kkh', 'kth', 'ldh']
# Initialize and start realtime video capture
cam = cv2.VideoCapture(0)
cam.set(3, 640) # set video width
cam.set(4, 480) # set video height
# Define min window size to be recognized as a face
minW = 0.1*cam.get(3)
minH = 0.1*cam.get(4)
while True:
    ret, img = cam.read()
#img = cv2.flip(img, -1) # Flip vertically
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor = 1.2,
minNeighbors = 5,
minSize = (int(minW), int(minH)),
)
for(x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
        # Check if confidence is less than 100 ==> "0" is perfect match
if (confidence < 100):
id = names[id]
confidence = " {0}%".format(round(100 - confidence))
#game.start()
else:
facedataset.first()
facetrain.second()
#exec(open(file1.read())
#exec(open(file2.read())
#game.start()
confidence = " {0}%".format(round(100 - confidence))
cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2)
cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1)
cv2.imshow('camera',img)
    k = cv2.waitKey(10) & 0xff # Press 'ESC' to exit the video
if k == 27:
break
# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
| 28.063291
| 81
| 0.620207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 787
| 0.347768
|
540828fed7b9b1cf90bafa38feea72b4a282cfd0
| 1,047
|
py
|
Python
|
deprecated/dpr/code/encoder.py
|
eunaoeh/mrc-level2-nlp-01
|
caa893ca7d689200b3528377901d59fa9ca452ac
|
[
"MIT"
] | 1
|
2021-11-25T04:30:51.000Z
|
2021-11-25T04:30:51.000Z
|
deprecated/dpr/code/encoder.py
|
eunaoeh/mrc-level2-nlp-01
|
caa893ca7d689200b3528377901d59fa9ca452ac
|
[
"MIT"
] | null | null | null |
deprecated/dpr/code/encoder.py
|
eunaoeh/mrc-level2-nlp-01
|
caa893ca7d689200b3528377901d59fa9ca452ac
|
[
"MIT"
] | 5
|
2021-11-21T22:53:40.000Z
|
2022-02-23T09:22:25.000Z
|
from transformers import (
RobertaModel,
RobertaPreTrainedModel,
BertModel,
BertPreTrainedModel,
)
class BertEncoder(BertPreTrainedModel):
def __init__(self, config):
super(BertEncoder, self).__init__(config)
self.bert = BertModel(config)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None):
outputs = self.bert(
input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids
)
pooled_output = outputs[1]
return pooled_output
class RobertaEncoder(RobertaPreTrainedModel):
def __init__(self, config):
super(RobertaEncoder, self).__init__(config)
self.roberta = RobertaModel(config)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None):
outputs = self.roberta(
input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids
)
pooled_output = outputs[1]
return pooled_output
| 25.536585
| 83
| 0.685769
| 926
| 0.884432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5408382e17eaa39a39eec48a1a272c02bf244807
| 3,395
|
py
|
Python
|
tutorial/calculator/calculator.002.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | 1
|
2022-02-28T16:33:25.000Z
|
2022-02-28T16:33:25.000Z
|
tutorial/calculator/calculator.002.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | null | null | null |
tutorial/calculator/calculator.002.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2022 Eugenio Parodi <ceccopierangiolieugenio AT googlemail DOT com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import TermTk as ttk
# Create a root object (it is a widget that represent the terminal)
root = ttk.TTk()
# Create a window and attach it to the root (parent=root)
calculatorWin = ttk.TTkWindow(
parent=root, pos=(1, 1), size=(30, 17), title="My first Calculator"
)
# Create a grid layout and set it as default for the window
winLayout = ttk.TTkGridLayout()
calculatorWin.setLayout(winLayout)
# Define the Label and attach it to the grid layout at
# Position (Row/Col) (0,0) and (Row/Col)Span (1,4)
# I force the Max Height to 1 in order to avoid this widget to resize vertically
resLabel = ttk.TTkLabel(text="Results", maxHeight=1)
winLayout.addWidget(resLabel, 0, 0, 1, 4)
# Define the Numeric Buttons and attach them to the grid layout
btn1 = ttk.TTkButton(border=True, text="1")
btn2 = ttk.TTkButton(border=True, text="2")
btn3 = ttk.TTkButton(border=True, text="3")
btn4 = ttk.TTkButton(border=True, text="4")
btn5 = ttk.TTkButton(border=True, text="5")
btn6 = ttk.TTkButton(border=True, text="6")
btn7 = ttk.TTkButton(border=True, text="7")
btn8 = ttk.TTkButton(border=True, text="8")
btn9 = ttk.TTkButton(border=True, text="9")
winLayout.addWidget(btn1, 1, 0) # Colspan/Rowspan are defaulted to 1 if not specified
winLayout.addWidget(btn2, 1, 1)
winLayout.addWidget(btn3, 1, 2)
winLayout.addWidget(btn4, 2, 0)
winLayout.addWidget(btn5, 2, 1)
winLayout.addWidget(btn6, 2, 2)
winLayout.addWidget(btn7, 3, 0)
winLayout.addWidget(btn8, 3, 1)
winLayout.addWidget(btn9, 3, 2)
# Adding the "0" button on the bottom which alignment is
# Position (Row/Col) (4,0) (Row/Col)span (1,2)
# Just to show off I am using another way to attach it to the grid layout
winLayout.addWidget(btn0 := ttk.TTkButton(border=True, text="0"), 4, 0, 1, 2)
# Define the 2 algebraic buttons
winLayout.addWidget(btnAdd := ttk.TTkButton(border=True, text="+"), 1, 3)
winLayout.addWidget(btnSub := ttk.TTkButton(border=True, text="-"), 2, 3)
# The Enter "=" button (2 rows wide)
winLayout.addWidget(btnRes := ttk.TTkButton(border=True, text="="), 3, 3, 2, 1)
# Last but not least an extrabutton just for fun
winLayout.addWidget(mysteryButton := ttk.TTkButton(border=True, text="?"), 4, 2)
# Start the Main loop
root.mainloop()
| 40.903614
| 86
| 0.742268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,024
| 0.596171
|
5408f0d69dd4b712a3e36a300e74e57a1812c78d
| 4,433
|
py
|
Python
|
dags/clix_static_visuals_dag.py
|
CLIxIndia-Dev/clix_dashboard_backend_AF
|
4dc2f48fdd1ea312977f8237cec9b9fd71cc20b4
|
[
"Apache-2.0"
] | null | null | null |
dags/clix_static_visuals_dag.py
|
CLIxIndia-Dev/clix_dashboard_backend_AF
|
4dc2f48fdd1ea312977f8237cec9b9fd71cc20b4
|
[
"Apache-2.0"
] | null | null | null |
dags/clix_static_visuals_dag.py
|
CLIxIndia-Dev/clix_dashboard_backend_AF
|
4dc2f48fdd1ea312977f8237cec9b9fd71cc20b4
|
[
"Apache-2.0"
] | 1
|
2020-03-17T06:40:25.000Z
|
2020-03-17T06:40:25.000Z
|
# This DAG runs python scripts to generate static visualisation data
# from Syncthing at the end of every month
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from datetime import date, timedelta, datetime
import scripts.sync_school_data as sync_school_data
import scripts.process_raw_school_data as process_raw_school_data
import config.clix_config as clix_config
tools_modules_server_logs_datapath = clix_config.local_dst_state_data_logs
# --------------------------------------------------------------------------------
# set default arguments
# --------------------------------------------------------------------------------
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(1),
#'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
'provide_context': True,
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
dag = DAG(
'clix_static_visuals_dag', default_args=default_args,
schedule_interval= '@monthly')
# --------------------------------------------------------------------------------
# Each state is synced independently. We have four states, and the Syncthing data folders
# corresponding to those states are synced through sync_school_data
# --------------------------------------------------------------------------------
#sshHook = SSHHook(conn_id=<YOUR CONNECTION ID FROM THE UI>)
#dummy_operator = DummyOperator(task_id='dummy_task', retries=3, dag=dag)
list_of_state_vis = []
for each_state in clix_config.static_visuals_states:
src = clix_config.remote_src_static_vis + each_state
dst = clix_config.local_dst_static_vis + each_state
list_of_tasks_chunks = []
#sync_state_data = SSHExecuteOperator( task_id="task1",
#bash_command= rsync -avzhe ssh {0}@{1}:{2} {3}".format(user, ip, src, dst),
#ssh_hook=sshHook,
#dag=dag)
sync_state_data = PythonOperator(
task_id='sync_state_data_' + each_state,
python_callable=sync_school_data.rsync_data_ssh,
op_kwargs={'state': each_state, 'src': src, 'dst': dst, 'static_flag': True},
dag=dag, retries=0)
    # For parallel processing of the files in the list of updated schools,
    # we use three parallel tasks, each taking a portion of the list
    # of files. This is done instead of generating tasks dynamically.
    # The number of school chunks is set by clix_config.num_school_chunks
# refer: https://stackoverflow.com/questions/55672724/airflow-creating-dynamic-tasks-from-xcom
for each in list(range(clix_config.num_school_chunks)):
if each_state == 'ts':
each_state_new = 'tg'
elif each_state == 'cg':
each_state_new = 'ct'
else:
each_state_new = each_state
process_state_raw_data = PythonOperator(
task_id='process_raw_state_data_' + str(each) + '_' + each_state_new,
python_callable=process_raw_school_data.process_school_data,
op_kwargs={'state': each_state_new, 'chunk': each},
dag=dag)
list_of_tasks_chunks.append(process_state_raw_data)
sync_state_data.set_downstream(process_state_raw_data)
combine_state_chunks = PythonOperator(
task_id='combine_chunks_' + each_state_new,
python_callable=process_raw_school_data.combine_chunks,
op_kwargs={'state': each_state_new},
dag=dag)
list_of_tasks_chunks >> combine_state_chunks
get_state_static_vis_data = PythonOperator(
task_id = 'get_static_vis_' + each_state_new,
python_callable = process_raw_school_data.get_state_static_vis_data,
op_kwargs = {'state': each_state_new, 'all_states_flag': False},
dag=dag)
list_of_state_vis.append(get_state_static_vis_data)
combine_state_chunks >> get_state_static_vis_data
get_static_vis_data_all = PythonOperator(
task_id = 'get_static_vis_data_allstates',
python_callable = process_raw_school_data.get_state_static_vis_data,
op_kwargs = {'state': None, 'all_states_flag': True},
dag=dag)
list_of_state_vis >> get_static_vis_data_all
| 39.230088
| 98
| 0.676291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,775
| 0.400406
|
540b37aa828992718d326e40cc3e8c5c7baaf141
| 67
|
py
|
Python
|
nadl/__init__.py
|
siAyush/nadl
|
8aa698231e1d198bf823a58c84f139f6f93bc7df
|
[
"MIT"
] | 7
|
2021-05-18T11:16:49.000Z
|
2021-05-30T20:25:12.000Z
|
nadl/__init__.py
|
siAyush/nadl
|
8aa698231e1d198bf823a58c84f139f6f93bc7df
|
[
"MIT"
] | null | null | null |
nadl/__init__.py
|
siAyush/nadl
|
8aa698231e1d198bf823a58c84f139f6f93bc7df
|
[
"MIT"
] | 1
|
2022-03-02T19:52:25.000Z
|
2022-03-02T19:52:25.000Z
|
from nadl.tensor import Tensor
from nadl.parameter import Parameter
| 33.5
| 36
| 0.865672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|