max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
annotate.py | jmathies/util-process-top-crashes | 0 | 12758751 | <reponame>jmathies/util-process-top-crashes
#!/usr/bin/env python3
import json
import os
import pprint
import getopt
import sys
import re
import html
import pprint
###########################################################
# Usage
###########################################################
# -s (signature) : signature to annotate
# -a (annotation) : simple text based note. bugzilla references
# will automatically get linked.
# -c : clear any existing data in the database
# -f (bug#) : add a fixed by bug # annotation
# -v (version) : fixed by annotation firefox version ex: '89.0'
#
# examples:
# python annotate.py -s "draw_quad_spans<T>" -a "filed Bug 1712938"
# Shared pretty-printer used for debug dumps of the annotation database.
pp = pprint.PrettyPrinter(indent=2)
def saveAnnotations(ann, filename):
    """Serialize the annotation dict *ann* to '<filename>.json'."""
    target = "%s.json" % filename
    with open(target, "w") as database:
        json.dump(ann, database)
def loadAnnotations(filename):
    """Load '<filename>.json' and return it as a dict.

    Returns an empty dict (after printing a diagnostic) when the file is
    missing or does not contain valid JSON.
    """
    source = "%s.json" % filename
    try:
        with open(source) as database:
            return json.load(database)
    except FileNotFoundError:
        print("Could not find %s file." % source)
    except json.decoder.JSONDecodeError:
        print("Json error parsing %s" % source)
    return dict()
def escape(text):
    """HTML-escape *text* (thin wrapper around html.escape)."""
    return html.escape(text)
def escapeBugLinks(text):
    """Replace 'bug NNNN' references with Bugzilla links.

    e.g. 'filed bug 1712938' ->
         "filed <a href='https://bugzilla.mozilla.org/show_bug.cgi?id=1712938'>1712938</a>"
    Matching is case-insensitive ('Bug', 'BUG', ...).
    """
    # Use \b instead of a literal trailing space so a reference at the
    # end of the string (or followed by punctuation) is also linked, and
    # [0-9]+ so 'bug ' with no number never matches.  The original
    # pattern 'bug ([0-9]*) ' required a trailing space and allowed an
    # empty bug number.
    pattern = r"bug ([0-9]+)\b"
    replacement = "<a href='https://bugzilla.mozilla.org/show_bug.cgi?id=\\1'>\\1</a>"
    return re.sub(pattern, replacement, text, flags=re.IGNORECASE)
# ---------------------------------------------------------------------
# Command-line driver: parse options, update the annotation database,
# and write it back to disk.
# ---------------------------------------------------------------------
fixedBy = False
newDatabase = False
dbFilename = "annotations"
annotation = None
signature = None            # -s ; required below
bugId = None                # set together with -f
appliesToFxVersion = None   # set with -v
parameters = dict()         # was referenced without being defined: -p crashed

options, remainder = getopt.getopt(sys.argv[1:], 's:a:f:p:cv:')
for o, a in options:
    if o == '-a':
        annotation = a
    elif o == '-c':
        newDatabase = True
        print("Using a new database.")
    elif o == '-s':
        signature = a
    elif o == '-f':
        bugId = int(a)
        fixedBy = True
    elif o == '-v':
        appliesToFxVersion = a
    elif o == '-p':
        # generic key=value parameter, e.g. -p channel=release
        param = a.split('=')
        parameters[param[0]] = param[1]

annDb = dict()
if not newDatabase:
    annDb = loadAnnotations(dbFilename)

# A signature is required; the original crashed with AttributeError when
# -s was omitted.
if signature is None:
    print("missing signature (-s).")
    exit()
signature = signature.strip("'\n \t")
print('signature: [%s]' % signature)

if annotation is None:
    annotation = ''

if fixedBy:
    if appliesToFxVersion is None or bugId is None:
        print("missing parameters for fixed by entry.")
        exit()
    print("Fixed by version '%s' in bug %d, annotation: '%s'" % (appliesToFxVersion, bugId, annotation))
elif annotation:
    print("annotation: '%s'" % annotation)
else:
    # Nothing to record.
    exit()

annotation = annotation.strip("'\n \t")

# Fetch (or create) the record for this signature.
record = dict()
if signature in annDb:
    record = annDb[signature]
else:
    record['annotations'] = list()  # string list
    record['fixedby'] = list()      # [n] = { 'version': '87.b0', 'bug': 1234567 }

if fixedBy:
    entry = {'bug': bugId, 'version': appliesToFxVersion, 'annotation': annotation}
    record['fixedby'].append(entry)
elif len(annotation) > 0:
    record['annotations'].append(annotation)

annDb[signature] = record
saveAnnotations(annDb, dbFilename)
def foo(numbers, path, index, cur_val, target):
    """Depth-first search for the subset of *numbers* (encoded as a 0/1
    take/skip string appended to *path*) whose sum is as large as
    possible without exceeding *target*.

    Returns (best_sum, path); (-1, "") when every completion overshoots.
    """
    # Every number has been decided: this branch is complete.
    if index == len(numbers):
        return cur_val, path
    # Prune: already over the target, no completion can recover.
    if cur_val > target:
        return -1, ""

    # Branch 1: take numbers[index].
    taken = foo(numbers, path + "1", index + 1, cur_val + numbers[index], target)
    if taken[0] == target:      # exact hit -- stop searching
        return taken

    # Branch 2: skip numbers[index].
    skipped = foo(numbers, path + "0", index + 1, cur_val, target)
    if skipped[0] == target:
        return skipped

    # Pick the better of the two partial solutions; a branch is invalid
    # when it failed (-1) or overshot the target.
    taken_bad = taken[0] == -1 or taken[0] > target
    skipped_bad = skipped[0] == -1 or skipped[0] > target
    if taken_bad:
        return (-1, "") if skipped_bad else skipped
    if skipped_bad or taken[0] > skipped[0]:
        return taken
    return skipped
# Read lines of the form "<target> <N> <n1> <n2> ...", pick the subset of
# the numbers with the largest sum not exceeding <target>, and print the
# chosen numbers followed by "sum:<value>".  Stops on EOF.
# (Dataset-table residue glued onto the final `break` line -- which made
# this block a syntax error -- has been removed; logic is unchanged.)
while True:
    try:
        line = list(map(int, input().split()))
        N = line[1]
        Target = line[0]
        numbers = line[2:]
        if sum(numbers) <= Target:
            # Everything fits: take all numbers.
            print((" ".join(map(str, numbers))) + " sum:{}".format(sum(numbers)))
        else:
            sol, path = foo(numbers, "", 0, 0, Target)
            print(" ".join([str(p) for p, v in zip(numbers, path) if v == "1"]) + " sum:{}".format(sol))
    except EOFError:
        break
pu/util.py | gf0842wf/pu | 0 | 12758753 | <reponame>gf0842wf/pu<filename>pu/util.py
# -*- coding: utf-8 -*-
# collections.Mapping was removed in Python 3.10; the abc location has
# existed since Python 3.3.
try:
    from collections.abc import Mapping
except ImportError:  # pragma: no cover - very old Python fallback
    from collections import Mapping
def shorten(s, width=80):
    """Truncate *s* to at most *width* characters, replacing the tail
    with a marker of the form '..(N)..' where N is the number of
    characters removed (the marker itself counts toward the width).

    >>> shorten('a very very very very long sentence', 20)
    'a very very ..(23)..'
    """
    if not isinstance(s, str):
        s = str(s)
    length = len(s)
    if length < width:
        return s
    # Characters to cut: the overshoot plus 6 for the '..(..)..' frame.
    cut_length = length - width + 6
    x = len(str(cut_length))
    cut_length += x
    # Length adjustment: adding the digit count may itself gain a digit
    # (e.g. 98 -> 100), widening the marker by one more character.
    if x != len(str(cut_length)):
        cut_length += 1
    end_pos = length - cut_length
    return s[:end_pos] + '..(%d)..' % cut_length
def deep_encode(ob, encoding='utf_8', errors='strict'):
    """Recursively encode every str found inside *ob* to bytes.

    Tuples, lists and Mappings are rebuilt with their contents encoded
    (Mappings keep their original class); anything else is returned
    unchanged.
    """
    if isinstance(ob, bytes):
        return ob
    if isinstance(ob, str):
        return ob.encode(encoding, errors)
    if isinstance(ob, tuple):
        return tuple(deep_encode(item, encoding, errors) for item in ob)
    if isinstance(ob, list):
        return [deep_encode(item, encoding, errors) for item in ob]
    if isinstance(ob, Mapping):
        rebuilt = ob.__class__()
        for key, value in ob.items():
            rebuilt[deep_encode(key, encoding, errors)] = deep_encode(
                value, encoding, errors)
        return rebuilt
    return ob
def deep_decode(ob, encoding='utf_8', errors='strict'):
    """Recursively decode every bytes value found inside *ob* to str.

    Tuples, lists and Mappings are rebuilt with their contents decoded
    (Mappings keep their original class); anything else is returned
    unchanged.
    """
    if isinstance(ob, bytes):
        return ob.decode(encoding, errors)
    if isinstance(ob, str):
        return ob
    if isinstance(ob, tuple):
        return tuple(deep_decode(item, encoding, errors) for item in ob)
    if isinstance(ob, list):
        return [deep_decode(item, encoding, errors) for item in ob]
    if isinstance(ob, Mapping):
        rebuilt = ob.__class__()
        for key, value in ob.items():
            rebuilt[deep_decode(key, encoding, errors)] = deep_decode(
                value, encoding, errors)
        return rebuilt
    return ob
def group(seq, chunk_size):
    """Yield successive *chunk_size*-sized slices of *seq*.

    Source: http://stackoverflow.com/a/312464/1158494

    :param seq: sequence to be split.
    :param chunk_size: chunk size.
    """
    for offset in range(0, len(seq), chunk_size):
        yield seq[offset:offset + chunk_size]
def xfind_all(s, sep, start=None, end=None):
    """Yield every index at which *sep* occurs in s[start:end],
    scanning left to right without overlapping matches.

    >>> list(xfind_all('abcdabcdabcd', 'bc'))
    [1, 5, 9]
    >>>
    """
    cursor = start
    while True:
        hit = s.find(sep, cursor, end)
        if hit == -1:
            return
        yield hit
        # Resume after the whole separator so matches never overlap.
        cursor = hit + len(sep)
def xsplit(s, sep, maxsplit=None):
    """Generator version of str.split.

    Yields the pieces of *s* split on *sep*; once *maxsplit* splits have
    been made, the remaining (unsplit) text is yielded as the final
    piece, matching str.split semantics.

    >>> list(xsplit('abcdabcdabcd', 'bc'))
    ['a', 'da', 'da', 'd']
    >>>
    """
    start = 0
    cnt = 0
    while True:
        pos = s.find(sep, start)
        if pos == -1:
            # No more separators: the tail is the last piece.
            yield s[start:]
            cnt += 1
            break
        else:
            old_start = start
            start = pos + len(sep)
            yield s[old_start:pos]
            cnt += 1
            if cnt == maxsplit:
                break
    if pos != -1:
        # maxsplit reached: yield the remainder AFTER the last separator
        # consumed.  The original yielded s[pos:], wrongly re-including
        # the separator in the final piece.
        yield s[start:]
if __name__ == '__main__':
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()
| 2.859375 | 3 |
manage.py | mruoff/songbase-python | 0 | 12758754 | ##file needed to manage and run code without the debug/ how you run a flask script
from flask_script import Manager
from songbase import app
from songbase import app, db, Artist, Song
manager = Manager(app)
# reset the database and create two artists
@manager.command
def deploy():
    """Drop and recreate all tables, then seed two artists and one song.

    NOTE(review): `song1` is never passed to `db.session.add`; it is only
    attached via `artist=coldplay`, so whether it is persisted depends on
    SQLAlchemy cascade settings -- confirm, or add it explicitly.
    NOTE(review): '<NAME>' looks like an anonymization placeholder for
    the real band name ('Maroon 5' per the about text).
    """
    db.drop_all()
    db.create_all()
    coldplay = Artist(name='Coldplay', about='Coldplay is a British rock band.')
    maroon5 = Artist(name='<NAME>', about='Maroon 5 is an American pop rock band.')
    song1 = Song(name='yellow', year=2004, lyrics='blah blah', artist=coldplay)
    db.session.add(coldplay)
    db.session.add(maroon5)
    db.session.commit()

if __name__=='__main__':
    # Run the flask_script manager (e.g. `python manage.py deploy`).
    manager.run()
| 2.609375 | 3 |
unitTest/GenerateTest.py | Sky10086/newcoder | 2 | 12758755 | <gh_stars>1-10
#!/usr/bin/python
# Generate a unit-test source file from the 'model' template by
# replacing the placeholder class name with SkyNewCoder<arg>Test.
import os
import sys

if len(sys.argv) < 2:
    # The original printed usage but kept going, crashing on sys.argv[1].
    print("Usage: ./GenerateTest.py XXXXX")
    sys.exit(1)

model = 'model'
name = 'SkyNewCoder' + sys.argv[1] + 'Test'
default = 'XXXXXXXXXXXXXX'

# Read the template (with-blocks close the handles the original leaked).
with open(model, 'r') as f:
    content = f.read()

content = content.replace(default, name)

with open(name + ".cpp", 'w') as out:
    out.write(content)
| 2.3125 | 2 |
nsi/data/data.py | NextStepInnovation/nsi-tools | 0 | 12758756 | import random
from pathlib import Path
from pkg_resources import resource_filename as _resource_filename
from ..toolz import (
pipe, curry, compose, memoize, concatv, groupby, take,
filter, map, strip_comments, sort_by, vmap, get, noop,
)
# Resolve resource names relative to this package.
resource_filename = curry(_resource_filename)(__name__)

# name -> Path of a packaged data file.
# NOTE(review): assumes the project `compose` applies right-to-left like
# toolz (Path(p) -> str -> resource_filename -> Path) -- confirm.
path = compose(
    Path,
    resource_filename,
    str,
    lambda p: Path(p),
)
@memoize
def user_agents():
    # One User-Agent string per line of the packaged user-agents.txt;
    # memoized so the file is read at most once per process.
    return path('user-agents.txt').read_text().splitlines()
def random_user_agent():
    """Return one User-Agent string chosen uniformly at random from the
    packaged list."""
    return random.choice(user_agents())
@memoize
def nmap_services(path='nmap-services'):
    """Parse an nmap-services file into a tuple of dicts sorted by
    ascending open-port frequency.

    Each dict: {'name', 'port' (str), 'proto', 'perc' (float frequency)}
    -- derived from tab-separated lines "name\tport/proto\tfrequency".
    """
    return pipe(
        Path(path).read_text().splitlines(),
        strip_comments,
        filter(None),
        map(lambda l: l.split('\t')[:3]),    # name, port/proto, freq
        map(lambda t: tuple(
            concatv(t[:1], t[1].split('/'), map(float, t[-1:]))
        )),                                  # -> (name, port, proto, freq)
        sort_by(lambda t: t[-1]),            # least common first
        vmap(lambda name, port, proto, perc: {
            'name': name, 'port': port, 'proto': proto, 'perc': perc,
        }),
        tuple,
    )
@curry
def top_ports(n, *, proto='tcp', services_generator=nmap_services,
              just_ports=True):
    '''For a given protocol ('tcp' or 'udp') and a services generator
    (default nmap services file), return the top n ports.

    With just_ports=False the full service dicts are returned instead of
    just the port strings.
    '''
    return pipe(
        services_generator(),
        groupby(lambda d: d['proto']),        # {'tcp': [...], 'udp': [...]}
        lambda d: d[proto],
        sort_by(get('perc'), reverse=True),   # most common first
        map(get('port')) if just_ports else noop,
        take(n),
        tuple,
    )
| 2.515625 | 3 |
benchmarks/django_simple/app.py | p7g/dd-trace-py | 308 | 12758757 | <gh_stars>100-1000
import os
import django
from django.db import connection
from django.template import Context
from django.template import Template
from django.urls import path
# Minimal in-module Django settings for this benchmark app.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = False
ROOT_URLCONF = __name__  # the URL conf is this same module (urlpatterns below)
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": ":memory:",  # throwaway in-memory DB
    }
}
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [
            BASE_DIR,
        ],
    }
]
# NOTE(review): SECRET_KEY is a one-element tuple, not a str -- confirm intended.
SECRET_KEY = ("SECRET",)
# NOTE(review): the same no-op middleware is listed twice -- presumably
# deliberate to measure middleware overhead; confirm.
MIDDLEWARE = ["app.empty_middleware", "app.empty_middleware"]
ALLOWED_HOSTS = ["*"]

# Everything UPPERCASE defined so far becomes the settings dict.
SETTINGS = dict((key, val) for key, val in locals().items() if key.isupper())
def empty_middleware(get_response):
    """Pass-through middleware factory: forwards every request to
    *get_response* and returns its response untouched."""
    def middleware(request):
        return get_response(request)
    return middleware
def index(request):
    """Benchmark view: renders (and discards) a large table template,
    runs ten recursive SQLite queries, then renders and returns a small
    HTML page.

    NOTE(review): the local `index = Template(...)` below shadows this
    view function's own name inside the body -- harmless but confusing.
    """
    # render a large table template
    template = Template(
        (
            "<table>\n"
            "{% for row in table %}\n"
            "<tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>\n"
            "{% endfor %}\n"
            "</table>"
        )
    )
    table = [range(10) for _ in range(100)]
    context = Context({"table": table})
    template.render(context)  # rendered output intentionally discarded

    # query db for random data (100 rows of random() per query)
    for _ in range(10):
        with connection.cursor() as cursor:
            cursor.execute(
                """with recursive
                cnt( id, x) as (
                values(1 , random()) union all
                select id+1,random() from cnt where id<100)
                select * from cnt"""
            )
            cursor.fetchall()

    index = Template(
        """
<html lang="en">
  <head>
    <meta charset="utf-8">
    <title>Django Simple</title>
  </head>
  <body>
    <p>Hello {{name|default:"friend"}}!</p>
  </body>
</html>
"""
    )
    return django.http.HttpResponse(index.render(Context({})))
# Route the site root to the index view.
urlpatterns = [path("", index)]
if __name__ == "__main__":
    # Allow running this module like manage.py (e.g. `python app.py runserver`).
    from django.core import management
    management.execute_from_command_line()
| 2.265625 | 2 |
src/snippets/05_regular-expressions.py | yvesbeutler/tyrannosaurus | 0 | 12758758 | <filename>src/snippets/05_regular-expressions.py<gh_stars>0
import re

# Regex demo snippets: re.match anchors at the start of the string,
# re.search matches anywhere, and ^...$ pins a full-string match.
# NOTE(review): the '<EMAIL>' literals below are anonymization
# placeholders injected by a dataset scrubber -- several of these
# asserts will fail at runtime until real sample addresses are restored.

# match - looks for a regular expression at the beginning of the string
email_pattern = r"[a-zA-Z\.]+@[a-zA-Z]{3,}\.[a-zA-Z]{2,3}"
assert re.match(email_pattern, '<EMAIL>'), 'is a valid email'
assert not re.match(email_pattern, '<EMAIL>'), 'is not a valid email'

# match - might be a problem for these kind of strings
assert re.match(email_pattern, '<EMAIL>#_yolo'), 'pattern still matches with the wrong email'

# solution is to define start- and endpoints in your regex
email_pattern = r"^[a-zA-Z\.]+@[a-zA-Z]{3,}\.[a-zA-Z]{2,3}$"
assert not re.match(email_pattern, '<EMAIL>#_yolo'), 'pattern does not match'

postalcode_pattern = r"[1-9][0-9]{3}"
assert re.match(postalcode_pattern, "3013"), 'is a valid postalcode'
assert not re.match(postalcode_pattern, "256"), 'is not a valid postalcode'

# search - looks for a regular expression anywhere in the string
word_pattern = r"cat"
assert re.search(word_pattern, 'cat'), 'pattern will be found'
assert re.search(word_pattern, 'catdog'), 'pattern will be found'
assert re.search(word_pattern, 'housecat'), 'pattern will be found'
assert not re.search(word_pattern, 'hotdog'), 'pattern will not be found'
| 3.328125 | 3 |
Ar_Script/Meetu_Ui_Test/test_case/performance_data_test.py | archerckk/PyTest | 0 | 12758759 | import pytest
import allure
from Ar_Script.Meetu_Ui_Test.Pages.base_page import *
import json
from appium import webdriver
import time
import os
import openpyxl
from Ar_Script.Meetu_Ui_Test.common.get_info import get_meminfo_data,saveData,get_cpu_data,get_activity_name
from Ar_Script.Meetu_Ui_Test.common.app_command import *
import logging
import random
import subprocess
# def test_app_cpu_home_stay_cost():
# package_info=get_activity_name()
# os.popen('adb shell am start {}/{}'.format(package_info[1],package_info[2]))
# time.sleep(5)
# os.popen('adb shell input keyevent 4 ')
# result=[('测试时间','cpu百分比')]
# result.extend(get_cpu_data(package_info[1]))
# data_save=Data_Save(result,'cpu_test','test_result.xlsx',(0,1),(0,'C1'))
# data_save.save_data()
#
class Test_Performance:
    """Appium-driven performance suite: memory use across repeated app
    relaunches and cold/hot start-up timing.

    NOTE(review): '<EMAIL>' below is an anonymization placeholder for
    the real test account.
    NOTE(review): `setup_class` names its classmethod argument `self`;
    it actually receives the class -- works, but unconventional.
    """

    @classmethod
    def setup_class(self):
        # Wipe the app's stored data once before the whole class runs.
        os.popen('adb shell pm clear {}'.format('com.social.nene'))
        print('执行清理数据')

    def setup(self):
        # Build a fresh Appium session from the JSON capability file and
        # construct the page objects / start-time controllers used below.
        os.chdir(os.curdir)
        with open('..\config\phone.json')as f:
            desired_caps = json.load(f)['sanxingC8_meetu']
        self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
        self.package_info=('',desired_caps['appPackage'],desired_caps['appActivity'])
        self.account_page = Account_login_page(self.driver)
        self.start_page = StarPage(self.driver)
        self.home_page=Home_page(self.driver)
        self.hot_start_control=Control(self.package_info,count=16,driver=self.driver)
        self.cold_start_control=Control(self.package_info,count=16,driver=self.driver,mode=1)
        self.loger=Loger()
        # self.windows_size = self.driver.get_window_size()
        # self.height = self.windows_size['height']
        # self.width = self.windows_size['width']
        self.driver.implicitly_wait(30)

    def teardown(self):
        # End the Appium session after every test.
        self.driver.quit()

    @allure.story('重复启动app内存测试')
    @pytest.mark.parametrize('package,activity',[('com.social.nene',"com.funny.lovu.splash.LaunchActivity")])
    def test_meminfo(self,package,activity):
        account='<EMAIL>'
        psw='123456'
        meminfo_list=[]
        # log in to the app
        self.start_page.click_account_login()
        self.account_page.account_input(account)
        self.account_page.psw_input(psw)
        self.account_page.click_login()
        # dismiss the permission-allow dialog
        self.home_page.permission_allow()
        # close the onboarding/guide animation
        self.home_page.close_guide()
        # Background (back key) and relaunch 15 times, sampling memory
        # once the home page has finished loading each round.
        for i in range(15):
            self.driver.keyevent(4)
            self.driver.start_activity(package,activity)
            if self.home_page.loading_finish_judge():
                result=get_meminfo_data(package)
                meminfo_list.append(result)
        logging.debug(meminfo_list)
        saveData(meminfo_list,file_attr='liki_v{}'.format("test"))

    @allure.story('启动时间测试')
    @pytest.mark.parametrize('package_info',[('','com.social.nene',"com.funny.lovu.splash.LaunchActivity")])
    def test_app_start_time(self,package_info):
        # Measure hot then cold start-up times and persist each result.
        self.hot_start_control.run()
        self.hot_start_control.saveData('热启动_v34')
        self.cold_start_control.run()
        self.cold_start_control.saveData('冷启动_v34')

    # @allure.story('cpu 空闲状态消耗信息记录')
    # def test_app_cpu_home_stay_cost(self):
    #     print(get_cpu_data_t(self.package_info[1]))
    #     self.driver.keyevent(4)
    #     result=[('测试时间','cpu百分比')]
    #     result.extend(get_cpu_data_t(self.package_info[1]))
    #     data_save=Data_Save(result,'cpu_test','test_result.xlsx',(0,1),(0,'C1'))
    #     data_save.save_data()
if __name__ == '__main__':
    # Run this module's tests directly under pytest.
    pytest.main(['-s','performance_data_test.py'])
# print(get_cpu_data('com.real'))
# test_app_cpu_home_stay_cost() | 1.929688 | 2 |
scripts/secondary_analysis/figures/chartannotator.py | JosephLalli/ORCHARDS | 0 | 12758760 | <reponame>JosephLalli/ORCHARDS<filename>scripts/secondary_analysis/figures/chartannotator.py<gh_stars>0
#Taken from https://github.com/webermarcolivier/statannot/blob/master/statannot/statannot.py
from matplotlib.text import Text
import matplotlib.pyplot as plt
from matplotlib import transforms, lines
import matplotlib.transforms as mtransforms
from matplotlib.patches import Rectangle
from matplotlib.collections import PathCollection
from matplotlib.font_manager import FontProperties
import numpy as np
import pandas as pd
from itertools import product
import seaborn as sns
from seaborn.utils import remove_na
from scipy import stats
from statsmodels.stats.multitest import multipletests
# Sentinel meaning "argument not supplied", so callers may still pass None.
DEFAULT = object()
def raise_expected_got(expected, for_, got, error_type=ValueError):
    """Raise a standardized error message.

    Raise an `error_type` error with the message

        Expected `expected` for `for_`; got `got` instead.

    Or, if `for_` is `None`,

        Expected `expected`; got `got` instead.
    """
    if for_ is None:
        message = 'Expected {}; got {} instead.'.format(expected, got)
    else:
        message = 'Expected {} for {}; got {} instead.'.format(
            expected, for_, got)
    raise error_type(message)
class StatResult:
    """Outcome of a statistical test plus ready-to-display strings.

    Attributes: test_str (long test name), test_short_name, stat_str
    (label for the statistic), stat (its value), pval, and
    pval_formatted (display string kept in sync by _format_pval).
    """

    def __init__(self, test_str, test_short_name, stat_str, stat, pval):
        self.test_str = test_str
        self.test_short_name = test_short_name
        self.stat_str = stat_str
        self.stat = stat
        self.pval = pval
        self.pval_formatted = None
        self._format_pval()

    def _format_pval(self):
        # Scientific notation for small p-values; general format otherwise.
        if self.pval < 0.001:
            self.pval_formatted = '{:.2e}'.format(self.pval)
            if 'strap' in self.test_short_name:
                # bootstrap tests display one fewer significant digit
                self.pval_formatted = '{:.1e}'.format(self.pval)
        else:
            self.pval_formatted = '{:.2g}'.format(self.pval)
        # A bootstrap p-value of exactly 0 only means no resample crossed
        # zero; report "< 1/n" instead (for bootstrap results, `stat`
        # holds the resample count n -- see stat_test).
        if 'strap' in self.test_short_name and self.pval == 0:
            if self.stat < 0.001:
                self.pval_formatted = '< {:.2e}'.format(1/self.stat)
            else:
                self.pval_formatted = '< {:.1g}'.format(1/self.stat)

    def set_pval(self, pval):
        """Replace the p-value and refresh its display string."""
        self.pval = pval
        self._format_pval()

    @property
    def formatted_output(self):
        # One-line summary; includes the statistic only when one exists.
        if self.stat_str is None and self.stat is None:
            stat_summary = '{}, P_val:{}'.format(self.test_str, self.pval_formatted)
        else:
            stat_summary = '{}, P_val={} {}={:.2e}'.format(
                self.test_str, self.pval_formatted, self.stat_str, self.stat
            )
        return stat_summary

    def __str__(self):
        return self.formatted_output
def assert_is_in(x, valid_values, error_type=ValueError, label=None):
    """Raise `error_type` unless *x* is one of *valid_values*."""
    if x in valid_values:
        return
    raise_expected_got('one of {}'.format(valid_values), label, x)
def stat_test(
    box_data1,
    box_data2,
    test,
    mult_comp_correction=None,
    num_comparisons=1,
    **stats_params
):
    """Get formatted result of two sample statistical test.

    Arguments
    ---------
    box_data1, box_data2
        The two samples to compare.
    test: str
        Statistical test to run. Must be one of:
        - `Levene`
        - `Mann-Whitney`
        - `Mann-Whitney-gt`
        - `Mann-Whitney-ls`
        - `t-test_ind`
        - `t-test_welch`
        - `t-test_paired`
        - 'bootstrap'
        - 'paired_bootstrap'
        - `Wilcoxon`
        - `Kruskal`
    mult_comp_correction: str or None, default None
        Method to use for multiple comparisons correction.
    num_comparisons: int, default 1
        Number of comparisons to use for multiple comparisons correction.
    stats_params
        Additional keyword arguments to pass to scipy stats functions.

    Returns
    -------
    StatResult object with formatted result of test.

    NOTE(review): `mult_comp_correction` and `num_comparisons` are
    validated but never applied here; the result is returned
    uncorrected.  An unknown `test` name returns an empty StatResult
    with pval = NaN rather than raising.
    """
    # Check arguments.
    assert_is_in(
        mult_comp_correction,
        ['bonferroni', 'sidak','holm-sidak','holm','simes-hochberg','hommel','fdr_bh','fdr_by','fdr_tsbh','fdr_tsbky',None],
        label='argument `mult_comp_correction`',
    )
    # Switch to run scipy.stats hypothesis test.
    if test == 'Levene':
        stat, pval = stats.levene(box_data1, box_data2, **stats_params)
        result = StatResult(
            'Levene test of variance', 'levene', 'stat', stat, pval
        )
    elif test == 'Mann-Whitney':
        u_stat, pval = stats.mannwhitneyu(
            box_data1, box_data2, alternative='two-sided', **stats_params
        )
        result = StatResult(
            'Mann-Whitney-Wilcoxon test two-sided',
            'M-W',
            'U_stat',
            u_stat,
            pval,
        )
    elif test == 'Mann-Whitney-gt':
        u_stat, pval = stats.mannwhitneyu(
            box_data1, box_data2, alternative='greater', **stats_params
        )
        result = StatResult(
            'Mann-Whitney-Wilcoxon test greater',
            'Mann-Whitney',
            'U_stat',
            u_stat,
            pval,
        )
    elif test == 'Mann-Whitney-ls':
        u_stat, pval = stats.mannwhitneyu(
            box_data1, box_data2, alternative='less', **stats_params
        )
        result = StatResult(
            'Mann-Whitney-Wilcoxon test smaller',
            'Mann-Whitney',
            'U_stat',
            u_stat,
            pval,
        )
    elif test == 't-test_ind':
        stat, pval = stats.ttest_ind(a=box_data1, b=box_data2, **stats_params)
        result = StatResult(
            't-test independent samples', 't-test_ind', 'stat', stat, pval
        )
    elif test == 't-test_welch':
        stat, pval = stats.ttest_ind(
            a=box_data1, b=box_data2, equal_var=False, **stats_params
        )
        result = StatResult(
            'Welch\'s t-test independent samples',
            't-test_welch',
            'stat',
            stat,
            pval,
        )
    elif test == 't-test_paired':
        stat, pval = stats.ttest_rel(a=box_data1, b=box_data2, **stats_params)
        result = StatResult(
            't-test paired samples', 't-test_rel', 'stat', stat, pval
        )
    elif test == 'Wilcoxon':
        # Old-style "cond and a or b" conditional: 'pratt' for n <= 20.
        zero_method_default = len(box_data1) <= 20 and "pratt" or "wilcox"
        zero_method = stats_params.get('zero_method', zero_method_default)
        print("Using zero_method ", zero_method)
        stat, pval = stats.wilcoxon(
            box_data1, box_data2, zero_method=zero_method, **stats_params
        )
        result = StatResult(
            'Wilcoxon test (paired samples)', 'Wilcoxon', 'stat', stat, pval
        )
    elif test == 'Kruskal':
        stat, pval = stats.kruskal(box_data1, box_data2, **stats_params)
        test_short_name = 'Kruskal'  # NOTE(review): assigned but unused
        result = StatResult(
            'Kruskal-Wallis paired samples', 'Kruskal', 'stat', stat, pval
        )
    elif test == 'bootstrap':
        n = 100000
        print('bootstrapping...')
        pval = bootstrap_diff(box_data1, box_data2, n)
        test_short_name = 'Bootstrap'
        # The resample count n is stored in the 'stat' slot; StatResult
        # uses it to report the p-value resolution limit when pval == 0.
        result = StatResult(
            'Non-parametric bootstrapped two-sided comparison', test_short_name, 'stat', n, pval
        )
    elif test == 'paired_bootstrap':
        n = 100000
        print('bootstrapping...')
        pval = paired_bootstrap(box_data1, box_data2, n)
        test_short_name = 'paired_bootstrap'
        result = StatResult(
            'Non-parametric paired bootstrap two-sided comparison', test_short_name, 'stat', n, pval
        )
    else:
        # Unknown test name: empty result with NaN p-value.
        result = StatResult(None, '', None, None, np.nan)
    # Optionally, run multiple comparisons correction.
    # NOTE(review): no correction is actually applied before returning.
    return result
def bootstrap_diff(dataset_a, dataset_b, n, stat='mean'):
    '''Two-sided bootstrap test of whether the mean of dataset_a differs
    from the mean of dataset_b.

    Draws n bootstrap resamples of each dataset, compares the resampled
    means element-wise, and returns twice the smaller crossing fraction.
    (`stat` is accepted for interface compatibility but unused.)
    '''
    def _resampled_means(data):
        size = len(data)
        return np.array([
            np.mean(np.random.choice(data, size=size, replace=True))
            for _ in range(n)
        ])

    means_a = _resampled_means(dataset_a)
    means_b = _resampled_means(dataset_b)
    # two-sided
    return min(np.mean(means_a > means_b), np.mean(means_a < means_b)) * 2
def paired_bootstrap(dataset_a, dataset_b, n):
    '''Two-sided paired bootstrap test.

    Resamples the element-wise differences (a - b) with replacement n
    times and returns twice the smaller fraction of resampled sums
    falling on either side of zero.

    Raises ValueError when the two datasets differ in length.
    '''
    if len(dataset_a) != len(dataset_b):
        # The original printed this message and then raised a bare
        # ValueError; attach the message to the exception instead.
        raise ValueError(
            'Paired bootstrap tests can only be done between two variables of equal size.'
        )
    # np.asarray accepts pandas Series, numpy arrays and plain sequences
    # alike (the original required a pandas `.values` attribute).
    diffs = np.asarray(dataset_a) - np.asarray(dataset_b)
    samples = np.array([
        np.sum(np.random.choice(diffs, size=len(diffs), replace=True))
        for _ in range(n)
    ])
    return min(np.mean(samples > 0), np.mean(0 > samples)) * 2  # two-sided
def corrected_fdr_pvals(pvals, rejects, alpha=0.05):
    """Recompute FDR-adjusted p-values from raw p-values and a boolean
    reject mask (as produced by statsmodels multipletests).

    Sorts the p-values, finds the largest rejected rank, rescales every
    p-value by the ECDF factor at that rank, and returns the result in
    the original input order -- or an all-ones array when nothing was
    rejected.

    NOTE(review): `alpha` is accepted but unused, and the bare
    `print(...)` below looks like leftover debugging output.
    NOTE(review): this rescaling is non-standard -- confirm against the
    Benjamini-Hochberg step-up procedure before relying on the values.
    """
    sortind = np.argsort(pvals)
    pvals_sorted = np.take(pvals, sortind)
    # ECDF factor i/m for each of the m sorted p-values.
    ecdfactor = 1/len(pvals)*np.arange(1, len(pvals)+1)
    print(len(np.nonzero(rejects[sortind])))
    if len(np.nonzero(rejects[sortind])[0]) != 0:
        rejectmax = max(np.nonzero(rejects[sortind])[0])
        pvals_corrected_raw = pvals_sorted / ecdfactor[rejectmax]
        # Undo the sort so the output lines up with the input order.
        return pvals_corrected_raw[np.argsort(sortind)]
    else:
        return np.ones(len(pvals))
def multiple_comparisons(p_values, method):
    """Apply a statsmodels multiple-comparisons correction.

    Arguments
    ---------
    p_values: scalar or 1-d list-like of p-values.
    method: correction method name accepted by
        statsmodels.stats.multitest.multipletests
        (e.g. 'bonferroni', 'holm', 'fdr_bh', ...).

    Returns
    -------
    (reject, corrected_p_values); for 'fdr*' methods the corrected
    p-values are recomputed by `corrected_fdr_pvals`.
    """
    # Input checks.
    if np.ndim(p_values) > 1:
        raise_expected_got(
            'Scalar or list-like', 'argument `p_values`', p_values
        )
    # Coerce to a 1-d array so scalar input works too.  The original
    # built this array but then passed the raw argument on, so a scalar
    # never reached multipletests in usable form.
    p_values_array = np.atleast_1d(p_values)

    reject, corrected_p_values, _, _ = multipletests(p_values_array, method=method)
    if 'fdr' in method:
        corrected_p_values = corrected_fdr_pvals(p_values_array, reject)
    return reject, corrected_p_values
def bonferroni(p_values, num_comparisons='auto'):
    """Apply Bonferroni correction for multiple comparisons.

    The Bonferroni correction is defined as

        p_corrected = min(num_comparisons * p, 1.0).

    Arguments
    ---------
    p_values: scalar or list-like
        One or more p_values to correct.
    num_comparisons: int or `auto`
        Number of comparisons. Use `auto` to infer the number of comparisons
        from the length of the `p_values` list.

    Returns
    -------
    Scalar or numpy array of corrected p-values.
    """
    # The module never imports `warnings`, so the warn branch below
    # raised NameError; import it locally.
    import warnings

    # Input checks.
    if np.ndim(p_values) > 1:
        raise_expected_got(
            'Scalar or list-like', 'argument `p_values`', p_values
        )
    if num_comparisons != 'auto':
        try:
            # Raise a TypeError if num_comparisons is not numeric, and an
            # AssertionError if it isn't int-like.
            assert np.ceil(num_comparisons) == num_comparisons
        except (AssertionError, TypeError):
            raise_expected_got(
                'Int or `auto`', 'argument `num_comparisons`', num_comparisons
            )

    # Copy into a float array: np.atleast_1d returns a *view* for ndarray
    # input, and the original's in-place `*=` clobbered the caller's
    # p-values.
    p_values_array = np.array(np.atleast_1d(p_values), dtype=float)

    if num_comparisons == 'auto':
        # Infer number of comparisons from the input length.
        num_comparisons = len(p_values_array)
    elif len(p_values_array) > 1 and num_comparisons != len(p_values_array):
        # Warn if multiple p_values were passed and num_comparisons is
        # set manually to a different value.
        warnings.warn(
            'Manually-specified `num_comparisons={}` differs from number of '
            'p_values to correct ({}).'.format(
                num_comparisons, len(p_values_array)
            )
        )

    # Apply correction by multiplying p_values and thresholding at p=1.0.
    p_values_array = np.minimum(p_values_array * num_comparisons, 1.0)

    if len(p_values_array) == 1:
        # Return a scalar if input was a scalar.
        return p_values_array[0]
    else:
        return p_values_array
def add_stat_annotation(ax, plot='boxplot',
data=None, x=None, y=None, hue=None, units=None, order=None,
hue_order=None, box_pairs=None, width=0.8, yerr=None,
perform_stat_test=True,
pvalues=None, test_short_name=None,
test='Mann-Whitney', text_format='standard', pvalue_format_string=DEFAULT,
text_annot_custom=None,
loc='inside', show_test_name=True,
pvalue_thresholds=DEFAULT, stats_params=dict(),
mult_comp_correction=None,
use_fixed_offset=False, line_offset_to_box=None,
use_fixed_offset_from_top=False,
line_offset=0.1, line_height=0.03, text_offset=1,
color='0.2', linewidth=1.5,
fontsize=15, verbose=1):
"""
Optionally computes statistical test between pairs of data series, and add statistical annotation on top
of the boxes/bars. The same exact arguments `data`, `x`, `y`, `hue`, `order`, `width`,
`hue_order` (and `units`) as in the seaborn boxplot/barplot function must be passed to this function.
This function works in one of the two following modes:
a) `perform_stat_test` is True: statistical test as given by argument `test` is performed.
b) `perform_stat_test` is False: no statistical test is performed, list of custom p-values `pvalues` are
used for each pair of boxes. The `test_short_name` argument is then used as the name of the
custom statistical test.
:param plot: type of the plot, one of 'boxplot' or 'barplot'.
:param line_height: in axes fraction coordinates
:param text_offset: in points
:param box_pairs: can be of either form: For non-grouped boxplot: `[(cat1, cat2), (cat3, cat4)]`. For boxplot grouped by hue: `[((cat1, hue1), (cat2, hue2)), ((cat3, hue3), (cat4, hue4))]`
:param pvalue_format_string: defaults to `"{.3e}"`
:param pvalue_thresholds: list of lists, or tuples. Default is: For "star" text_format: `[[1e-4, "****"], [1e-3, "***"], [1e-2, "**"], [0.05, "*"], [1, "ns"]]`. For "simple" text_format : `[[1e-5, "1e-5"], [1e-4, "1e-4"], [1e-3, "0.001"], [1e-2, "0.01"]]`
:param pvalues: list or array of p-values for each box pair comparison.
:param mult_comp_correction: Method for multiple comparisons correction. `bonferroni` or None.
"""
if type(fontsize) != str:
text_offset = fontsize*.275
def find_x_position_box(box_plotter, boxName):
"""
boxName can be either a name "cat" or a tuple ("cat", "hue")
"""
if box_plotter.plot_hues is None:
cat = boxName
hue_offset = 0
else:
cat = boxName[0]
hue = boxName[1]
hue_offset = box_plotter.hue_offsets[
box_plotter.hue_names.index(hue)]
group_pos = box_plotter.group_names.index(cat)
box_pos = group_pos + hue_offset
return box_pos
def get_xpos_location(pos, xranges):
'''given a value and a dictionary of range:id, return middle of range'''
for xrange, label in xranges.items():
if (pos >= xrange[0]) & (pos <= xrange[1]):
return xrange[2]
def generate_ymaxes(box_plotter, boxNames, data_to_ax):
'''given box plotter and box name, return highest y point drawn in that box'''
xpositions = {np.round(find_x_position_box(box_plotter, boxName),1):boxName for boxName in boxNames}
ymaxes = {name:0 for name in boxNames}
for child in ax.get_children():
if (type(child) == PathCollection) and (len(child.properties()['offsets'])!=0):
ymax = child.properties()['offsets'][:,1].max()
xpos = float(np.round(np.nanmean(child.properties()['offsets'][:,0]),1))
try:
xname = xpositions[xpos]
except:
print (xpositions)
print (child.properties()['offsets'])
print (xpos)
raise
ypos = data_to_ax.transform((0,ymax))[1]
if ypos > ymaxes[xname]:
ymaxes[xname] = ypos
elif (type(child) == lines.Line2D) or (type(child) == Rectangle):
xunits = (max(list(xpositions.keys()))+1)/len(xpositions)
xranges = {(pos-xunits/2, pos+xunits/2, pos):boxName for pos, boxName in xpositions.items()}
box = ax.transData.inverted().transform(child.get_window_extent(fig.canvas.get_renderer()))
if (box[:,0].max()-box[:,0].min())>1.1*xunits:
continue
raw_xpos = np.round(box[:,0].mean(),1)
xpos = get_xpos_location(raw_xpos, xranges)
if xpos not in xpositions:
continue
xname = xpositions[xpos]
ypos = box[:,1].max()
ypos = data_to_ax.transform((0,ypos))[1]
if ypos > ymaxes[xname]:
ymaxes[xname] = ypos
return ymaxes
    def get_box_data(box_plotter, boxName):
        """
        boxName can be either a name "cat" or a tuple ("cat", "hue")
        Here we really have to duplicate seaborn code, because there is not
        direct access to the box_data in the BoxPlotter class.
        """
        #if boxName isn't a string, then boxName[0] raises an IndexError. This fixes that.
        try:
            # and/or idiom: use boxName itself when there is no hue grouping,
            # otherwise the first element of the (cat, hue) tuple.
            # NOTE(review): this misbehaves if boxName is falsy (e.g. "", 0)
            # with plot_hues None -- presumably never the case here; confirm.
            cat = box_plotter.plot_hues is None and boxName or boxName[0]
        except IndexError:
            cat = box_plotter.plot_hues is None and boxName
        index = box_plotter.group_names.index(cat)
        group_data = box_plotter.plot_data[index]
        if box_plotter.plot_hues is None:
            # Draw a single box or a set of boxes
            # with a single level of grouping
            box_data = remove_na(group_data)
        else:
            # With hue grouping, select only the rows matching this hue level.
            hue_level = boxName[1]
            hue_mask = box_plotter.plot_hues[index] == hue_level
            box_data = remove_na(group_data[hue_mask])
        return box_data
# Set default values if necessary
if pvalue_format_string is DEFAULT:
pvalue_format_string = '{:.3e}'
simple_format_string = '{:.2f}'
else:
simple_format_string = pvalue_format_string
if pvalue_thresholds is DEFAULT:
if text_format == "star":
pvalue_thresholds = [[1e-4, "****"], [1e-3, "***"],
[1e-2, "**"], [0.05, "*"], [1, "ns"]]
else:
pvalue_thresholds = [[1e-5, "1e-5"], [1e-4, "1e-4"],
[1e-3, "0.001"], [1e-2, "0.01"]]
fig = plt.gcf()
# Validate arguments
if perform_stat_test:
if test is None:
raise ValueError("If `perform_stat_test` is True, `test` must be specified.")
if pvalues is not None or test_short_name is not None:
raise ValueError("If `perform_stat_test` is True, custom `pvalues` "
"or `test_short_name` must be `None`.")
valid_list = ['t-test_ind', 't-test_welch', 't-test_paired',
'Mann-Whitney', 'Mann-Whitney-gt', 'Mann-Whitney-ls',
'Levene', 'Wilcoxon', 'Kruskal', 'bootstrap', 'paired_bootstrap']
if test not in valid_list:
raise ValueError("test value should be one of the following: {}."
.format(', '.join(valid_list)))
else:
if pvalues is None:
raise ValueError("If `perform_stat_test` is False, custom `pvalues` must be specified.")
if test is not None:
raise ValueError("If `perform_stat_test` is False, `test` must be None.")
if len(pvalues) != len(box_pairs):
raise ValueError("`pvalues` should be of the same length as `box_pairs`.")
if text_annot_custom is not None and len(text_annot_custom) != len(box_pairs):
raise ValueError("`text_annot_custom` should be of same length as `box_pairs`.")
assert_is_in(
loc, ['inside', 'outside'], label='argument `loc`'
)
assert_is_in(
text_format,
['standard','full', 'simple', 'star'],
label='argument `text_format`'
)
assert_is_in(
mult_comp_correction,
['bonferroni', 'sidak','holm-sidak','holm','simes-hochberg','hommel','fdr_bh','fdr_by','fdr_tsbh','fdr_tsbky','fdr_gbs',None],
label='argument `mult_comp_correction`'
)
if verbose >= 1 and text_format == 'star':
print("p-value annotation legend:")
pvalue_thresholds = pd.DataFrame(pvalue_thresholds).sort_values(by=0, ascending=False).values
for i in range(0, len(pvalue_thresholds)):
if i < len(pvalue_thresholds)-1:
print('{}: {:.2e} < p <= {:.2e}'.format(pvalue_thresholds[i][1],
pvalue_thresholds[i+1][0],
pvalue_thresholds[i][0]))
else:
print('{}: p <= {:.2e}'.format(pvalue_thresholds[i][1], pvalue_thresholds[i][0]))
print()
orig_ylim = ax.get_ylim()
yrange = orig_ylim[1] - orig_ylim[0]
trans = ax.get_xaxis_transform()
data_to_ax = ax.transData+ax.get_xaxis_transform().inverted() #Will work in data coordinates on x axis, and axis coordinates on y axis
ax_to_data = data_to_ax.inverted()
pix_to_ax = ax.transAxes.inverted()
ylim = (0,1)
yrange = 1
if line_offset is None:
if loc == 'inside':
line_offset = 0.05
if line_offset_to_box is None:
line_offset_to_box = 0.1
# 'outside', see valid_list
else:
line_offset = 0.05
if line_offset_to_box is None:
line_offset_to_box = line_offset
else:
if loc == 'inside':
if line_offset_to_box is None:
line_offset_to_box = 0.1
elif loc == 'outside':
line_offset_to_box = line_offset
y_offset = line_offset*yrange
y_offset_to_box = line_offset_to_box*yrange
if plot == 'boxplot':
# Create the same plotter object as seaborn's boxplot
box_plotter = sns.categorical._BoxPlotter(
x, y, hue, data, order, hue_order, orient=None, width=width, color=None,
palette=None, saturation=.75, dodge=True, fliersize=5, linewidth=None)
elif plot == 'barplot':
# Create the same plotter object as seaborn's barplot
if yerr:
data[y] += np.array(yerr)
box_plotter = sns.categorical._BarPlotter(
x, y, hue, data, order, hue_order, seed=None,
estimator=np.mean, ci=95, n_boot=1000, units=None,
orient=None, color=None, palette=None, saturation=.75,
errcolor=".26", errwidth=None, capsize=None, dodge=True, yerr=[10,4,2,8,1])
# Build the list of box data structures with the x and ymax positions
group_names = box_plotter.group_names
hue_names = box_plotter.hue_names
if box_plotter.plot_hues is None:
box_names = group_names
labels = box_names
else:
box_names = [(group_name, hue_name) for group_name in group_names for hue_name in hue_names]
labels = ['{}_{}'.format(group_name, hue_name) for (group_name, hue_name) in box_names]
ymaxes = generate_ymaxes(box_plotter, box_names, data_to_ax)
box_structs = [{'box':box_names[i],
'label':labels[i],
'x':find_x_position_box(box_plotter, box_names[i]),
'box_data':get_box_data(box_plotter, box_names[i]),
'ymax':ymaxes[box_names[i]]}
for i in range(len(box_names))]
# Sort the box data structures by position along the x axis
box_structs = sorted(box_structs, key=lambda x: x['x'])
# Add the index position in the list of boxes along the x axis
box_structs = [dict(box_struct, xi=i) for i, box_struct in enumerate(box_structs)]
# Same data structure list with access key by box name
box_structs_dic = {box_struct['box']:box_struct for box_struct in box_structs}
# Build the list of box data structure pairs
box_struct_pairs = []
test_result_list = []
for i_box_pair, (box1, box2) in enumerate(box_pairs):
valid = box1 in box_names and box2 in box_names
if not valid:
raise ValueError("box_pairs contains an invalid box pair.")
pass
# i_box_pair will keep track of the original order of the box pairs.
box_struct1 = dict(box_structs_dic[box1], i_box_pair=i_box_pair)
box_struct2 = dict(box_structs_dic[box2], i_box_pair=i_box_pair)
if box_struct1['x'] <= box_struct2['x']:
pair = (box_struct1, box_struct2)
else:
pair = (box_struct2, box_struct1)
if perform_stat_test:
result = stat_test(
box_struct1['box_data'],
box_struct2['box_data'],
test,
**stats_params
)
else:
test_short_name = test_short_name if test_short_name is not None else ''
result = StatResult(
'Custom statistical test',
test_short_name,
None,
None,
pvalues[i_box_pair]
)
result.box1 = box1
result.box2 = box2
test_result_list.append(result)
box_struct_pairs.append(pair)
if verbose >= 1:
greater_or_less_than = 'equal to'
print(f"{box_struct1['label']}: {np.median(box_struct1['box_data'])}, {box_struct2['label']}: {np.median(box_struct2['box_data'])}")
if np.median(box_struct1['box_data']) > np.median(box_struct2['box_data']):
greater_or_less_than = 'greater than'
elif np.median(box_struct1['box_data']) < np.median(box_struct2['box_data']):
greater_or_less_than = 'less than'
print("{} is {} {}: {}".format(box_struct1['label'], greater_or_less_than, box_struct2['label'], result.formatted_output))
if box_struct1['box_data'].mean() > box_struct2['box_data'].mean():
greater_or_less_than = 'greater than'
elif box_struct1['box_data'].mean() < box_struct2['box_data'].mean():
greater_or_less_than = 'less than'
print("(means: {} is {} {}: {} vs {})".format(box_struct1['label'], greater_or_less_than, box_struct2['label'], box_struct1['box_data'].mean(), box_struct2['box_data'].mean()))
if mult_comp_correction:
pvals = [result.pval for result in test_result_list]
reject_null, corrected_pvals = multiple_comparisons(pvals, method= mult_comp_correction)
print (pvals)
print (corrected_pvals)
print (reject_null)
for result, reject_null, pval in zip(test_result_list, reject_null, corrected_pvals):
result.set_pval(pval)
if not reject_null:
result.test_str = 'n.s.'
result.set_pval(1)
else:
result.test_str = result.test_str + f'{result.test_str} with {mult_comp_correction} correction'
box_struct_pairs = [pair+(result,) for pair, result in zip(box_struct_pairs, test_result_list)]
# Draw first the annotations with the shortest between-boxes distance, in order to reduce
# overlapping between annotations.
box_struct_pairs = sorted(box_struct_pairs, key=lambda x: abs(x[1]['x'] - x[0]['x']))
# Build array that contains the x and y_max position of the highest annotation or box data at
# a given x position, and also keeps track of the number of stacked annotations.
# This array will be updated when a new annotation is drawn.
y_stack_arr = np.array([[box_struct['x'] for box_struct in box_structs],
[box_struct['ymax'] for box_struct in box_structs],
[0 for i in range(len(box_structs))]])
highestDataDrawn = y_stack_arr[1,:].max()
if loc == 'outside':
y_stack_arr[1, :] = ylim[1]
ann_list = []
ymaxs = []
y_stack = []
items_to_draw = []
for box_struct1, box_struct2, result in box_struct_pairs:
box1 = box_struct1['box']
box2 = box_struct2['box']
label1 = box_struct1['label']
label2 = box_struct2['label']
box_data1 = box_struct1['box_data']
box_data2 = box_struct2['box_data']
x1 = box_struct1['x']
x2 = box_struct2['x']
xi1 = box_struct1['xi']
xi2 = box_struct2['xi']
ymax1 = box_struct1['ymax']
ymax2 = box_struct2['ymax']
i_box_pair = box_struct1['i_box_pair']
# Find y maximum for all the y_stacks *in between* the box1 and the box2
i_ymax_in_range_x1_x2 = xi1 + np.nanargmax(y_stack_arr[1, np.where((x1 <= y_stack_arr[0, :]) &
(y_stack_arr[0, :] <= x2))])
ymax_in_range_x1_x2 = y_stack_arr[1, i_ymax_in_range_x1_x2]
if use_fixed_offset_from_top:
#if allowing overlap, simply find the highest y_stack
ymax_in_range_x1_x2 = highestDataDrawn
# if perform_stat_test:
# result = stat_test(
# box_data1,
# box_data2,
# test,
# mult_comp_correction,
# len(box_struct_pairs),
# **stats_params
# )
# else:
# test_short_name = test_short_name if test_short_name is not None else ''
# result = StatResult(
# 'Custom statistical test',
# test_short_name,
# None,
# None,
# pvalues[i_box_pair]
# )
# result.box1 = box1
# result.box2 = box2
# test_result_list.append(result)
# if verbose >= 1:
# print("{} v.s. {}: {}".format(label1, label2, result.formatted_output))
if text_annot_custom is not None:
text = text_annot_custom[i_box_pair]
else:
if text_format == 'standard':
if result.pval > 0.05:
text = 'n.s.'
else:
if '<' in result.pval_formatted:
text = f'p {result.pval_formatted}'
else:
text = f'p = {result.pval_formatted}'
if text_format == 'full':
text = "{} p = {}".format(result.test_short_name, result.formatted_output)
elif text_format is None:
text = None
elif text_format is 'star':
text = pval_annotation_text(result.pval, pvalue_thresholds)
elif text_format is 'simple':
test_short_name = show_test_name and test_short_name or ""
text = simple_text(result.pval, simple_format_string, pvalue_thresholds, test_short_name)
yref = ymax_in_range_x1_x2
yref2 = yref
# Choose the best offset depending on wether there is an annotation below
# at the x position in the range [x1, x2] where the stack is the highest
if (y_stack_arr[2, i_ymax_in_range_x1_x2] == 0) or use_fixed_offset_from_top:
# there is only a box below
offset = y_offset_to_box
else:
# there is an annotation below
offset = y_offset
y = yref2 + offset
h = line_height*yrange
ax_line_x, ax_line_y = [x1, x1, x2, x2], [y, y + h, y + h, y]
points = [ax_to_data.transform((x,y)) for x,y in zip(ax_line_x, ax_line_y)]
line_x, line_y = [x for x,y in points], [y for x,y in points]
if loc == 'inside':
ax.plot(line_x, line_y, lw=linewidth, c=color)
elif loc == 'outside':
line = lines.Line2D(line_x, line_y, lw=linewidth, c=color)
line.set_clip_on(False)
ax.add_line(line)
if text is not None:
ann = ax.annotate(
text, xy=(np.mean([x1, x2]), line_y[2]),
xytext=(0, text_offset),
textcoords='offset points',
xycoords='data', ha='center', va='bottom',
fontsize=fontsize, clip_on=False, annotation_clip=False)
ann_list.append(ann)
plt.draw()
ax.set_ylim(orig_ylim)
y_top_annot = None
got_mpl_error = False
if not use_fixed_offset:
try:
bbox = ann.get_window_extent()
bbox_ax = bbox.transformed(pix_to_ax)
y_top_annot = bbox_ax.ymax
except RuntimeError:
got_mpl_error = True
if use_fixed_offset or got_mpl_error:
if verbose >= 1:
print("Warning: cannot get the text bounding box. Falling back to a fixed"
" y offset. Layout may be not optimal.")
# We will apply a fixed offset in points,
# based on the font size of the annotation.
fontsize_points = FontProperties(size='medium').get_size_in_points()
offset_trans = mtransforms.offset_copy(
ax.transData, fig=fig, x=0,
y=1.0*fontsize_points + text_offset, units='points')
y_top_display = offset_trans.transform((0, y + h))
y_top_annot = ax.transData.inverted().transform(y_top_display)[1]
else:
y_top_annot = y + h
y_stack.append(y_top_annot) # remark: y_stack is not really necessary if we have the stack_array
ymaxs.append(max(y_stack))
# Fill the highest y position of the annotation into the y_stack array
# for all positions in the range x1 to x2
y_stack_arr[1, (x1 <= y_stack_arr[0, :]) & (y_stack_arr[0, :] <= x2)] = y_top_annot
y_stack_arr[2, xi1:xi2 + 1] = y_stack_arr[2, xi1:xi2 + 1] + 1
y_stack_max = max(ymaxs)
#reset transformation
data_to_ax = ax.transData+ax.get_xaxis_transform().inverted() #Will work in data coordinates on x axis, and axis coordinates on y axis
ax_to_data = data_to_ax.inverted()
if loc == 'inside':
ax.set_ylim(ax_to_data.transform([(0,ylim[0]),(0,max(1.05*y_stack_max, ylim[1]))])[:,1])
elif loc == 'outside':
ax.set_ylim(ax_to_data.transform([(0,ylim[0]),(0,y_stack_max, ylim[1])])[:,1])
return ax, test_result_list | 2.546875 | 3 |
test.py | Gollor/database1 | 2 | 12758761 | import unittest
import asyncio
import motor.motor_asyncio
import city_generator
# Async MongoDB client against a local server; tests use the `local` database.
client = motor.motor_asyncio.AsyncIOMotorClient('localhost', 27017)
db = client.local
# Shared event loop used to drive the async helpers from synchronous tests.
loop = asyncio.get_event_loop()
async def get_all_cities(cap=500) -> list:
    # Fetch up to `cap` documents from the Cities collection.
    return await db.Cities.find({}).to_list(cap)
async def get_city_by_index(index) -> dict:
    # find_one returns a single document (a dict, or None when no city has
    # this index) -- return annotation corrected from `list`.
    return await db.Cities.find_one({'index': {'$eq': index}})
class TestCity(unittest.TestCase):
    """Integration tests for city_generator against a live local MongoDB.

    Each test clears the Cities collection before and after running, so
    they can execute in any order.
    """
    def test_collection_cleaning(self):
        # remove_cities must succeed even on an (already) empty collection.
        loop.run_until_complete(city_generator.remove_cities())
    def test_insert_city(self):
        loop.run_until_complete(city_generator.remove_cities())
        loop.run_until_complete(city_generator.insert_city(1))
        # Exactly one document after a single insert.
        self.assertEqual(len(loop.run_until_complete(get_all_cities(200))), 1)
        loop.run_until_complete(city_generator.remove_cities())
    def test_get_city(self):
        loop.run_until_complete(city_generator.remove_cities())
        loop.run_until_complete(city_generator.insert_city(5))
        # A generated city document carries name, index and roads fields.
        self.assertIn('name', loop.run_until_complete(get_city_by_index(5)))
        self.assertIn('index', loop.run_until_complete(get_city_by_index(5)))
        self.assertIn('roads', loop.run_until_complete(get_city_by_index(5)))
        loop.run_until_complete(city_generator.remove_cities())
    def test_get_replace_city(self):
        loop.run_until_complete(city_generator.remove_cities())
        loop.run_until_complete(city_generator.insert_city(5))
        # Round-trip: fetch, mutate, then replace the document in place.
        city = loop.run_until_complete(city_generator.get_city(5))
        city['name'] = 'replaced_city'
        loop.run_until_complete(city_generator.replace_city(5, city))
        loop.run_until_complete(city_generator.remove_cities())
    def test_state_generation(self):
        loop.run_until_complete(city_generator.remove_cities())
        # generate_state(n, m): presumably n cities with up to m roads --
        # confirm against city_generator; here only the city count is checked.
        loop.run_until_complete(city_generator.generate_state(20, 60))
        self.assertEqual(len(loop.run_until_complete(get_all_cities(200))), 20)
        loop.run_until_complete(city_generator.remove_cities())
loop.run_until_complete(city_generator.remove_cities())
# Run the test suite when executed directly.
# (Fixed: dataset artifact " | 2.671875 | 3" was fused onto the last line.)
if __name__ == '__main__':
    unittest.main()
src/analog_input/mcp3008.py | meshell/radioM40 | 0 | 12758762 | <gh_stars>0
import spi
from spi import SPIDevice
class MCP3008(object):
    """Minimal driver for the MCP3008 8-channel, 10-bit SPI ADC.

    Fixed: a dataset artifact (" | 2.75 | 3") was fused onto the final line.
    """

    def __init__(self, bus=spi.Bus.SPI0, device=spi.Device.CE0, ref_voltage=3.3):
        self._spi = SPIDevice(bus, device)
        # 3-byte command frame; byte 0 carries the start bit (0x01).
        self._out = bytearray(3)
        self._out[0] = 0x01
        self._ref_voltage = ref_voltage

    @property
    def reference_voltage(self):
        """Returns the reference voltage. (read-only)"""
        return self._ref_voltage

    def read_channel(self, channel):
        """Return the raw conversion result (0-1023) for `channel` (0-7)."""
        # (8 + channel) << 4: single-ended mode flag plus channel select,
        # left-aligned in byte 1 per the MCP3008 command format.
        self._out[1] = (8 + channel) << 4
        adc = self._spi.write_read(self._out)
        # The 10-bit result spans the low 2 bits of byte 1 and all of byte 2.
        return ((adc[1] & 0x03) << 8) | adc[2]

    def open(self):
        """Open the underlying SPI device."""
        self._spi.open()

    def close(self):
        """Close the underlying SPI device."""
        self._spi.close()
music_sync/general_sync_utils.py | justinplasmeier/PyMusicSync | 2 | 12758763 | <filename>music_sync/general_sync_utils.py<gh_stars>1-10
# general_sync_utils
# similar to music_sync_utils but more general
class NameEqualityMixin():
    """Mixin giving equality and hashing based solely on ``self.name``.

    Instances compare equal to plain strings as well as to any other
    object carrying a matching ``name`` attribute.
    """

    def __eq__(self, other):
        # Compare against the string directly, or against the peer's name.
        target = other if isinstance(other, str) else other.name
        return self.name == target

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.name)
class Folder(NameEqualityMixin):
    """A named container of files and sub-folders."""

    def __init__(self, name):
        self.name = name
        self.contents = []
        # name -> object lookup mirroring `contents`.
        self.contents_map = {}

    def __str__(self):
        children = [str(child) for child in self.contents]
        return f"{self.name}: {children}"
class File(NameEqualityMixin):
    """A named leaf file; `size` is used when comparing directory trees."""
    def __init__(self, name, size):
        self.name = name
        self.size = size
    def __str__(self):
        return self.name
class SyncAssertions:
    """Test helper asserting that two Folder trees hold identical contents."""
    def assertFolderEquality(self, actual, expected):
        """Raise AssertionError unless `actual` and `expected` contain the
        same items, recursing into sub-folders (checked in both directions
        so extra items on either side are caught)."""
        # Everything in `actual` must appear in `expected`.
        for a_i in actual.contents:
            if a_i not in expected.contents:
                raise AssertionError("Item {0} not in folder {1}".format(a_i, expected))
            if isinstance(a_i, Folder):
                # Trailing-comma unpack asserts exactly one name match.
                b_i, = [i for i in expected.contents if i.name == a_i.name]
                print("Checking subfolders: ", a_i, b_i)
                self.assertFolderEquality(a_i, b_i)
        # And everything in `expected` must appear in `actual`.
        for b_i in expected.contents:
            if b_i not in actual.contents:
                raise AssertionError("Item {0} not in folder {1}".format(b_i, actual))
            if isinstance(b_i, Folder):
                a_i, = [i for i in actual.contents if i.name == b_i.name]
                print("Checking subfolders: ", a_i, b_i)
                self.assertFolderEquality(a_i, b_i)
        return
| 3.046875 | 3 |
src/include/report_state.py | joeldentici/python_stepper | 1 | 12758764 | <filename>src/include/report_state.py
import re
def indent(what):
    """Return *what* with every line prefixed by a four-space indent."""
    return "\n".join('    ' + line for line in what.split("\n"))
def dict_state_to_string(state, active_transform):
    """Render one state dict to text, dispatching on state["type"].

    Mutually recursive with state_to_string; `active_transform` is applied
    only to "active_component" nodes.
    """
    if (state["type"] == "statement_group"):
        # Join child statements line-by-line, skipping empty/None entries.
        return "\n".join(state_to_string(x, active_transform) for x in state["statements"] if x)
    #return "\n".join(indent(x, 1) for x in stmts)
    elif (state["type"] == "function_activation"):
        # Function activations are wrapped in {| ... |} and indented.
        return "{|\n" + indent(state_to_string(state["value"], active_transform)) + "\n|}"
    elif (state["type"] == "block"):
        return indent(state_to_string(state["value"], active_transform))
    elif (state["type"] == "statement"):
        return state_to_string(state["value"], active_transform)
    elif (state["type"] == "active_component"):
        # The currently-active span gets the caller-supplied highlight.
        val = state_to_string(state["value"], active_transform)
        return active_transform(val)
    raise NotImplementedError("Type " + state["type"])
def list_state_to_string(state, active_transform):
    # Concatenate the rendering of each element; elements may themselves be
    # dicts, lists or plain strings (all handled by state_to_string).
    return "".join(state_to_string(x, active_transform) for x in state)
def state_to_string(state, active_transform = lambda x: x):
    """Recursively render a report state (dict, list or leaf string) to text.

    `active_transform` defaults to identity and is forwarded to the
    dict/list renderers.
    """
    if (isinstance(state, dict)):
        return dict_state_to_string(state, active_transform)
    elif (isinstance(state, list)):
        return list_state_to_string(state, active_transform)
    else:
        # Leaves are returned unchanged (expected to be strings).
        return state
def rename_statements(scope, stmts):
    """Apply rename_statement to every statement, preserving order."""
    return [rename_statement(scope, x) for x in stmts]
def rename_statement(scope, stmt):
    """Resolve every '<@ name @>' placeholder in *stmt* through
    scope.resolve_name, then collapse double blank lines."""
    resolved = re.sub(r'<@ (.*?) @>',
                      lambda match: scope.resolve_name(match.group(1)),
                      stmt)
    return resolved.replace('\n\n', '\n')
| 2.765625 | 3 |
coin_dectector/coins/circles.py | jorisroovers/opencv-playground | 0 | 12758765 | <filename>coin_dectector/coins/circles.py
import cv2
import cv2.cv
import numpy as np
import sys
# http://www.pyimagesearch.com/2014/07/21/detecting-circles-images-using-opencv-hough-circles/
# Python 2 script: detect coin-like circles in the image given on the command
# line and report per-circle average colors (whole disc, center, border).
print "Reading image..."
img = cv2.imread(sys.argv[1], 0)  # grayscale copy for detection
height, width = img.shape
img2 = cv2.imread(sys.argv[1], 1)  # color copy for sampling
output = np.zeros((height, width, 3), np.uint8)
print "Blurring image..."
# Median blur reduces noise before the Hough transform.
blurred = cv2.medianBlur(img, 5)
print "Detecting circles..."
circles = cv2.HoughCircles(blurred, cv2.cv.CV_HOUGH_GRADIENT, 1.3, 50,
                           minRadius=20, maxRadius=100)
circles = np.uint16(np.around(circles))
cnt = 1
# Each detected circle i is (center_x, center_y, radius).
for i in circles[0, :]:
    # Squared distance of every pixel from the circle center.
    x, y = np.meshgrid(np.arange(width), np.arange(height))
    d2 = (x - i[0]) ** 2 + (y - i[1]) ** 2
    mask = d2 < i[2] ** 2  # pixels inside the coin
    print "circle", cnt
    print "radius coin", i[2]
    # Sample points: center plus four points at 90% of the radius.
    pointC = (i[0], i[1])
    pointR1 = (i[0], i[1] + int(0.90 * i[2]))
    pointR2 = (i[0], i[1] - int(0.90 * i[2]))
    pointR3 = (i[0] + int(0.90 * i[2]), i[1])
    pointR4 = (i[0] - int(0.90 * i[2]), i[1])
    radiusColor = 2  # sampling disc radius around each sample point
    maskC = d2 < radiusColor ** 2
    dR1 = (x - pointR1[0]) ** 2 + (y - pointR1[1]) ** 2
    dR2 = (x - pointR2[0]) ** 2 + (y - pointR2[1]) ** 2
    dR3 = (x - pointR3[0]) ** 2 + (y - pointR3[1]) ** 2
    dR4 = (x - pointR4[0]) ** 2 + (y - pointR4[1]) ** 2
    maskR1 = dR1 < radiusColor ** 2
    maskR2 = dR2 < radiusColor ** 2
    maskR3 = dR3 < radiusColor ** 2
    maskR4 = dR4 < radiusColor ** 2
    # Accumulators: whole disc (avg*), center disc (avgC*), border (avgR*).
    # NOTE(review): channels are named r/g/b but cv2.imread returns BGR, so
    # "avgr" is presumably the blue channel -- confirm before relying on it.
    avgr = 0
    avgg = 0
    avgb = 0
    avgcnt = 0
    avgCr = 0
    avgCg = 0
    avgCb = 0
    avgCcnt = 0
    avgRr = 0
    avgRg = 0
    avgRb = 0
    avgRcnt = 0
    # NOTE(review): O(width*height) pure-Python pass per circle; the masks
    # are numpy arrays, so this could be vectorized (e.g. img2[mask].mean()).
    for a in range(0, width):
        for b in range(0, height):
            if mask[b, a]:
                avgr += img2[b, a][0];
                avgg += img2[b, a][1];
                avgb += img2[b, a][2];
                output[b, a] = img2[b, a]
                avgcnt += 1
            if maskC[b, a]:
                avgCr += img2[b, a][0];
                avgCg += img2[b, a][1];
                avgCb += img2[b, a][2];
                avgCcnt += 1
            if maskR1[b, a]:
                avgRr += img2[b, a][0];
                avgRg += img2[b, a][1];
                avgRb += img2[b, a][2];
                avgRcnt += 1
            if maskR2[b, a]:
                avgRr += img2[b, a][0];
                avgRg += img2[b, a][1];
                avgRb += img2[b, a][2];
                avgRcnt += 1
            if maskR3[b, a]:
                avgRr += img2[b, a][0];
                avgRg += img2[b, a][1];
                avgRb += img2[b, a][2];
                avgRcnt += 1
            if maskR4[b, a]:
                avgRr += img2[b, a][0];
                avgRg += img2[b, a][1];
                avgRb += img2[b, a][2];
                avgRcnt += 1
    # Draw markers: green center, red border sample points.
    cv2.circle(output, pointC, 2, (0, 255, 0), 3)
    print "avg color of circle [", int(avgr / avgcnt), ",", int(avgg / avgcnt), ",", int(avgb / avgcnt), "]"
    print "avg color border of circle [", int(avgRr / avgRcnt), ",", int(avgRg / avgRcnt), ",", int(
        avgRb / avgRcnt), "]"
    print "avg color center of circle [", int(avgCr / avgCcnt), ",", int(avgCg / avgCcnt), ",", int(
        avgCb / avgCcnt), "]"
    print "*" * 50
    cv2.circle(output, pointR1, 2, (0, 0, 255), 3)
    cv2.circle(output, pointR2, 2, (0, 0, 255), 3)
    cv2.circle(output, pointR3, 2, (0, 0, 255), 3)
    cv2.circle(output, pointR4, 2, (0, 0, 255), 3)
    cv2.putText(output, str(cnt), pointC, cv2.FONT_HERSHEY_SIMPLEX, 2, 255)
    cnt += 1
# Display the annotated result until a key is pressed.
cv2.imshow('detected circles', output)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 3.453125 | 3 |
003/inout.py | sauhor/miniprocon | 0 | 12758766 | # inout.py
#coding: utf-8
import sys
from solve import solve
# Python 2 driver: solve the problem file named on the command line,
# defaulting to 'prb.txt', and print the result.
argvs=sys.argv
argc=len(argvs)
if argc >= 2:
    res=solve(argvs[1])
else:
    res=solve('prb.txt')
print res
| 3 | 3 |
warn_transformer/transformers/nj.py | chriszs/warn-transformer | 3 | 12758767 | import typing
from datetime import datetime
from ..schema import BaseTransformer
class Transformer(BaseTransformer):
    """Transform New Jersey raw data for consolidation."""

    postal_code = "NJ"

    # Mapping from consolidated field names to the NJ source columns.
    fields = dict(
        company="Company",
        location="City",
        effective_date="Effective Date",
        jobs="Workforce Affected",
    )

    # Date formats observed in the source data, tried in order.
    # Fixed: the month-first formats previously used %M (minutes) instead of
    # %m (month), so e.g. "4/22/2022" silently parsed with month=January.
    date_format = ["%Y-%m-%d %H:%M:%S", "%m/%d/%Y", "%m/%d/%y"]

    # Known-bad raw job counts mapped to their corrected value
    # (None when the source value carries no usable number).
    jobs_corrections = {
        "TBA": None,
        "To be Determined": None,
        "-": None,
        "Unknown": None,
        23695: None,
        # The United airlines number is legimate, though nationwide
        # https://abcnews.go.com/Politics/united-airlines-furlough-16000-employees/story?id=72771897
        16000: 16000,
    }

    # Known-bad raw dates mapped to their corrected value.
    date_corrections = {
        "TBA": None,
        "Temp layoff": None,
        "-": None,
        "3030-08-23 00:00:00": datetime(2020, 8, 23),
        "04/22/2022, 09/30/2022, 12/21/22": datetime(2022, 4, 22),
    }

    def transform_jobs(self, value: str) -> typing.Optional[int]:
        """Transform a raw jobs number into an integer.

        Args:
            value (str): A raw jobs number provided by the source

        Returns: An integer number ready for consolidation. Or, if the value is invalid, a None.
        """
        # Cut the asterisk they sometimes use
        value = value.replace("*", "")
        # Do the normal stuff
        return super().transform_jobs(value)
| 3.421875 | 3 |
source/pic2card/tests/base_test_class.py | sivasakthiv/AdaptiveCards | 0 | 12758768 | <filename>source/pic2card/tests/base_test_class.py
import os
import unittest
from tests.utils import (
img_to_base64,
headers,
payload_empty_dict_data,
payload_data_some_string,
generate_base64,
)
from app.api import app
class BaseAPITest(unittest.TestCase):
    """Base test class """
    def setUp(self):
        """Define test variables and initialize app."""
        # Valid payload: the test image (path from the environment),
        # base64-encoded.
        self.data = img_to_base64(os.environ["test_img_path"])
        # Generated base64 blob; name suggests > 3 MB for size-limit tests
        # -- presumably, confirm against utils.generate_base64.
        self.gt_3mb_data = generate_base64()
        # Malformed payloads: an empty dict and a plain string.
        self.empty_data = payload_empty_dict_data
        self.wrong_data = payload_data_some_string
        self.headers = headers
        # Flask test mode plus the client subclasses use to issue requests.
        app.testing = True
        self.client = app.test_client()
| 2.90625 | 3 |
tridesclous/labelcodes.py | remi-pr/tridesclous | 36 | 12758769 |
# Special (negative) label codes for spikes not assigned to a real cluster.
LABEL_TRASH = -1
LABEL_NOISE = -2
LABEL_ALIEN = -9
LABEL_UNCLASSIFIED = -10
LABEL_NO_WAVEFORM = -11
# Human-readable display name for each special label code above.
to_name = { -1: 'Trash',
                    -2 : 'Noise',
                    -9: 'Alien',
                    -10: 'Unclassified',
                    -11: 'No waveforms',
                    }
| 1.226563 | 1 |
discriminator/vanilla/horizons_test.py | dangeng/infiniteGANorama | 0 | 12758770 | <reponame>dangeng/infiniteGANorama<filename>discriminator/vanilla/horizons_test.py
import time
import networks
import pdb
from data.frankenstein_dataset import FrankensteinDataset
from data.horizon_dataset import HorizonDataset
import matplotlib.pyplot as plt
from scipy.misc import imsave
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch
import numpy as np
# Evaluate a trained patch discriminator on random image pairs from the
# Horizon dataset, accumulating sigmoid scores over repeated passes.
batch_size = 1
device = torch.device("cuda")
# Alternative model configurations kept for reference:
#model = networks.define_D(6, 64, 'n_layers', n_layers_D=3, use_sigmoid=False, out_channels=256, glob=True)
model = networks.define_D(6, 64, 'n_layers', n_layers_D=3, use_sigmoid=False)
#model = networks.Siamese()
#model = networks.GlobalLocal()
#model = networks.SiameseResnet()
#chkpt = torch.load('checkpoints/localGlobalBlur2/65.pth')
chkpt = torch.load('checkpoints/patch/179.pth')
#chkpt = torch.load('checkpoints/SiameseResnet/17.pth')
model.load_state_dict(chkpt['state_dict'])
model.to(device)
patch_loss = networks.GANLoss()
total_steps = 0
#dataset = FrankensteinDataset()
dataset = HorizonDataset()
# Alternative datasets kept for reference:
#dataset.initialize('../datasets/street_view/sides/', allrandom=True)
#dataset.initialize('../../../data/semanticLandscapes512/train_img', allrandom=True, return_idx=True)
#dataset.initialize('../../../data/MITCVCL/imgs', allrandom=True, return_idx=True)
dataset.initialize('../../../data/MITCVCL/coast', allrandom=True, return_idx=True)
#dataset.initialize('../../../data/MITCVCL/mountain', allrandom=True, return_idx=True)
train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
ordered_preds = []
# Outer loop: 100 independent evaluation passes.
# NOTE(review): the inner `for i in range(100)` reuses and shadows this
# loop variable -- harmless here since the outer index is only printed,
# but confirm it is intentional.
for i in range(100):
    print(i)
    # Reset image
    dataset.initialize('../../../data/MITCVCL/coast', allrandom=True, return_idx=True)
    indices = []
    preds = []
    offsets = []
    # 1000 ~ 25 sec
    # 100000 ~ 2500 sec
    model.eval()
    for i in range(100):
    #for i, (data, target) in enumerate(train_loader):
        data, target, idx_l, idx_r = dataset[i] # Samples random pair
        data = data.unsqueeze(0)
        data, target = data.to(device), target.to(device)
        total_steps += batch_size
        pred = model(data)
        #pred = F.sigmoid(pred).mean(dim=(2,3))
        #loss = patch_loss(pred.cpu(), target)
        #loss = F.binary_cross_entropy(pred, target)
        indices.append((idx_l, idx_r))
        # Mean over the patch-map output gives one score per pair.
        preds.append(pred.mean().item())
        offsets.append(i)
    #ims = np.array(ims)
    preds = np.array(preds)
    # Sigmoid squashes raw logits into (0, 1).
    preds = 1 / (1 + np.exp(-preds))
    ordered_preds.append(preds)
ordered_preds = np.array(ordered_preds)
np.save('samples/ordered_preds.npy', ordered_preds)
'''
indices = [indices[i] for i in np.argsort(preds)]
offsets = [offsets[i] for i in np.argsort(preds)]
preds = preds[np.argsort(preds)]
def convertImage(im):
# undo right image flip when cat-ing
im = np.concatenate((im[:3,:,:], im[3:,:,::-1]), 2)
return im
def savePaths(indices):
paths = ''
for idx_l, idx_r in indices:
paths += dataset.get_path_name(idx_l).split('/')[-1]
paths += ', '
paths += dataset.get_path_name(idx_r).split('/')[-1]
paths += '\n'
f = open("samples/indices.txt", "w")
f.write(paths)
f.close()
np.savetxt('samples/preds.txt', preds)
savePaths(indices)
for i in range(1,101):
data, target = dataset.get_deterministic(offsets[-i], indices[-i][0], indices[-i][1])
im = data.numpy()
imsave('samples/best/{}.jpg'.format(i), convertImage(im).transpose(1,2,0))
for i in range(100):
data, target = dataset.get_deterministic(offsets[i], indices[i][0], indices[i][1])
im = data.numpy()
imsave('samples/worst/{}.jpg'.format(i), convertImage(im).transpose(1,2,0))
'''
| 2.03125 | 2 |
src/top/test.py | kehuaWangfff/FashionAI_KeyPoint_Detection_Challenge_Keras | 0 | 12758771 | <reponame>kehuaWangfff/FashionAI_KeyPoint_Detection_Challenge_Keras<filename>src/top/test.py<gh_stars>0
import sys
sys.path.insert(0, "../data_gen/")
sys.path.insert(0, "../eval/")
sys.path.insert(0, "../unet/")
import argparse
import os
from fashion_net import FashionNet
from dataset import getKpNum, getKpKeys
import pandas as pd
from evaluation import Evaluation
import pickle
import numpy as np
def get_best_single_model(valfile):
    '''
    :param valfile: the log file with validation score for each snapshot
    :return: model file and score
    '''
    # Sort key: the score field of a (modelname, score) tuple.
    def get_key(item):
        return item[1]
    with open(valfile) as xval:
        lines = xval.readlines()
    xlist = list()
    for linenum, xline in enumerate(lines):
        # Only lines naming a .hdf5 snapshot with a score are of interest.
        # NOTE(review): 'Socre' matches the (misspelled) token the training
        # log writes -- confirm against the val.log writer before "fixing".
        if 'hdf5' in xline and 'Socre' in xline:
            # Comma-separated log line: "<model path>,<score field>".
            modelname = xline.strip().split(',')[0]
            overallscore = xline.strip().split(',')[1]
            xlist.append((modelname, overallscore))
    # NOTE(review): scores are compared as *strings*, so ordering is
    # lexicographic rather than numeric (e.g. '10.0' < '9.0'); element [0]
    # assumes lower score == better. Confirm, and consider a float(...) key
    # once the exact score-field format is known.
    bestmodel = sorted(xlist, key=get_key)[0]
    return bestmodel
def fill_dataframe(kplst, keys, dfrow, image_category):
    """Write the category plus one 'x_y_1' keypoint string per key into
    *dfrow* (keys and keypoints are matched positionally)."""
    dfrow['image_category'] = image_category
    assert (len(keys) == len(kplst)), str(len(kplst)) + ' must be the same as ' + str(len(keys))
    for column, keypoint in zip(keys, kplst):
        # Coordinates are truncated to ints; trailing 1 marks "visible".
        dfrow[column] = "{}_{}_{}".format(int(keypoint.x), int(keypoint.y), 1)
def get_kp_from_dict(mdict, image_category, image_id):
    """Return the keypoints stored for *image_id*, using the per-category
    sub-dict when present and falling back to the 'all' sub-dict."""
    category_key = image_category if image_category in mdict else 'all'
    return mdict[category_key][image_id]
def submission(pklpath):
    """Assemble submission.csv from the per-category prediction pickles
    stored in `pklpath`, using the training csv only for its column order."""
    # Training csv supplies the expected submission column layout.
    xdf = pd.read_csv("../../data/train/Annotations/train.csv")
    trainKeys = xdf.keys()
    testdf = pd.read_csv("../../data/test/test.csv")
    print(len(testdf), " samples in test.csv")
    # category name -> {image_id: keypoint list}, loaded from "<category>.pkl".
    mdict = dict()
    for xfile in os.listdir(pklpath):
        if xfile.endswith('.pkl'):
            category = xfile.strip().split('.')[0]
            # NOTE(review): file opened in text mode and never closed --
            # consider `with open(..., 'rb')` (binary for pickle on py3).
            pkl = open(os.path.join(pklpath, xfile))
            mdict[category] = pickle.load(pkl)
    print(testdf.keys())
    print(mdict.keys())
    # Start from all keypoints marked missing ('-1_-1_-1').
    submissionDf = pd.DataFrame(columns=trainKeys, index=np.arange(testdf.shape[0]))
    submissionDf = submissionDf.fillna(value='-1_-1_-1')
    submissionDf['image_id'] = testdf['image_id']
    submissionDf['image_category'] = testdf['image_category']
    # Fill each row with the predicted keypoints for its image.
    for _index, _row in submissionDf.iterrows():
        image_id = _row['image_id']
        image_category = _row['image_category']
        kplst = get_kp_from_dict(mdict, image_category, image_id)
        # getKpKeys('all')[1:] skips the leading image_id column.
        fill_dataframe(kplst, getKpKeys('all')[1:], _row, image_category)
    print(len(submissionDf), "save to ", os.path.join(pklpath, 'submission.csv'))
    submissionDf.to_csv( os.path.join(pklpath, 'submission.csv'), index=False )
def load_image_names(annfile, category):
    """Load the annotation csv and keep only the rows whose
    image_category equals *category*."""
    frame = pd.read_csv(annfile)
    return frame[frame['image_category'] == category]
def main_test(savepath, modelpath, augmentFlag):
    """Run keypoint prediction over the whole test set with the best
    snapshot found in `modelpath`, writing one pickle per category into
    `savepath`.
    """
    valfile = os.path.join(modelpath, 'val.log')
    # (modelfile, score) tuple; only the model path is used below.
    bestmodels = get_best_single_model(valfile)
    print (bestmodels, augmentFlag)
    xEval = Evaluation('all', bestmodels[0])
    # load images and run prediction
    testfile = os.path.join("../../data/test/", 'test.csv')
    for category in ['skirt', 'blouse', 'trousers', 'outwear', 'dress']:
        # image_id -> detected keypoints for this category.
        xdict = dict()
        xdf = load_image_names(testfile, category)
        print (len(xdf), " images to process ", category)
        count = 0
        for _index, _row in xdf.iterrows():
            count += 1
            # Progress heartbeat every 1000 images.
            if count%1000 == 0:
                print (count, "images have been processed")
            _image_id = _row['image_id']
            imageName = os.path.join("../../data/test", _image_id)
            # Rotation-augmented prediction is slower but more robust.
            if augmentFlag:
                dtkp = xEval.predict_kp_with_rotate(imageName, _row['image_category'])
            else:
                dtkp = xEval.predict_kp(imageName, _row['image_category'], multiOutput=True)
            xdict[_image_id] = dtkp
        savefile = os.path.join(savepath, category+'.pkl')
        with open(savefile, 'wb') as xfile:
            pickle.dump(xdict, xfile)
        print("prediction save to ", savefile)
# CLI entry point: predict keypoints then build the submission file.
# (Fixed: dataset artifact " | 2.078125 | 2" was fused onto the last line.)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpuID", default=0, type=int, help='gpu id')
    parser.add_argument("--modelpath", help="path of trained model")
    parser.add_argument("--outpath", help="path to save predicted keypoints")
    # NOTE(review): argparse `type=bool` treats ANY non-empty string
    # (including "False") as True; `action='store_true'` would be the usual
    # fix, but it changes the CLI ("--augment" vs "--augment True"), so it
    # is only flagged here.
    parser.add_argument("--augment", default=False, type=bool, help="augment or not")
    args = parser.parse_args()
    print (args)
    # Pin computation to the requested GPU before any CUDA initialization.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpuID)
    main_test(args.outpath, args.modelpath, args.augment)
    submission(args.outpath)
myclang/ops.py | FindDefinition/myclang | 0 | 12758772 | from .betterenums import NodeKind
from . import constants
def nkind_is_decl(nkind: NodeKind):
    """Return True if *nkind* falls in any of the declaration cursor-kind ranges."""
    value = nkind.value
    decl_ranges = (
        (constants.CXCURSOR_FIRST_DECL, constants.CXCURSOR_LAST_DECL),
        (constants.CXCURSOR_FIRST_EXTRA_DECL, constants.CXCURSOR_LAST_EXTRA_DECL),
        (constants.CXCURSOREX_FIRST_DECL, constants.CXCURSOREX_LAST_DECL),
    )
    return any(lo <= value <= hi for lo, hi in decl_ranges)
| 2.375 | 2 |
examples/analyze_no_mef.py | castillohair/FlowCal | 0 | 12758773 | #!/usr/bin/python
"""
FlowCal Python API example, without using calibration beads data.
This script is divided in two parts. Part one processes data from five cell
samples, and generates plots of each one.
Part two exemplifies how to use the processed cell sample data with
FlowCal's plotting and statistics modules, in order to produce interesting
plots.
For details about the experiment, samples, and instrument used, please
consult readme.txt.
"""
import os
import os.path
import numpy as np
import matplotlib.pyplot as plt
import FlowCal
###
# Definition of constants
###

# Names of the FCS files containing data from cell samples
samples_filenames = ['FCFiles/Data001.fcs',
                     'FCFiles/Data002.fcs',
                     'FCFiles/Data003.fcs',
                     'FCFiles/Data004.fcs',
                     'FCFiles/Data005.fcs',
                     ]

# IPTG concentration of each cell sample, in micromolar.
# Order matches ``samples_filenames`` one-to-one.
iptg = np.array([0, 81, 161, 318, 1000])

# Plots will be generated after gating and transforming cell samples. These
# will be stored in the following folder.
samples_plot_dir = 'plot_samples'
if __name__ == "__main__":

    # Check that plot directory exists, create if it does not.
    if not os.path.exists(samples_plot_dir):
        os.makedirs(samples_plot_dir)

    ###
    # Part 1: Processing cell sample data
    ###
    print("\nProcessing cell samples...")

    # We will use the list ``samples`` to store processed, transformed flow
    # cytometry data of each sample.
    samples = []

    # Iterate over cell sample filenames
    for sample_id, sample_filename in enumerate(samples_filenames):

        # Load flow cytometry data from the corresponding FCS file.
        # ``FlowCal.io.FCSData(filename)`` returns an object that represents
        # flow cytometry data loaded from file ``filename``.
        print("\nLoading file \"{}\"...".format(sample_filename))
        sample = FlowCal.io.FCSData(sample_filename)

        # Data loaded from an FCS file is in "Channel Units", the raw numbers
        # reported from the instrument's detectors. The FCS file also contains
        # information to convert these into Relative Fluorescence Intensity
        # (RFI) values, commonly referred to as arbitrary fluorescence units
        # (a.u.). The function ``FlowCal.transform.to_rfi()`` performs this
        # conversion.
        print("Performing data transformation...")
        sample = FlowCal.transform.to_rfi(sample)

        # Gating

        # Gating is the process of removing measurements of irrelevant
        # particles, while retaining only the population of interest.
        print("Performing gating...")

        # ``FlowCal.gate.start_end()`` removes the first and last few events.
        # Transients in fluidics can make these events slightly different from
        # the rest. This may not be necessary in all instruments.
        sample_gated = FlowCal.gate.start_end(sample,
                                              num_start=250,
                                              num_end=100)

        # ``FlowCal.gate.high_low()`` removes events outside a range specified
        # by a ``low`` and a ``high`` value. If these are not specified (as
        # shown below), the function removes events outside the channel's range
        # of detection.
        # Detectors in a flow cytometer have a finite range of detection. If the
        # fluorescence of a particle is higher than the upper limit of this
        # range, the instrument will incorrectly record it with a value equal to
        # this limit. The same happens for fluorescence values lower than the
        # lower limit of detection. These saturated events should be removed,
        # otherwise statistics may be calculated incorrectly.
        # Note that this might not be necessary with newer instruments that
        # record data as floating-point numbers (and in fact it might eliminate
        # negative events). To see the data type stored in your FCS files, run
        # the following instruction: ``print sample_gated.data_type``.
        # We will remove saturated events in the forward/side scatter channels,
        # and in the fluorescence channel FL1.
        sample_gated = FlowCal.gate.high_low(sample_gated,
                                             channels=['FSC','SSC','FL1'])

        # ``FlowCal.gate.density2d()`` preserves only the densest population as
        # seen in a 2D density diagram of two channels. This helps remove
        # particle aggregations and other sparse populations that are not of
        # interest (i.e. debris).
        # We use the forward and side scatter channels, and preserve 50% of the
        # events. Finally, setting ``full_output=True`` instructs the function
        # to return two additional outputs. The last one (``gate_contour``) is
        # a curve surrounding the gated region, which we will use for plotting
        # later.
        sample_gated, __, gate_contour = FlowCal.gate.density2d(
            data=sample_gated,
            channels=['FSC','SSC'],
            gate_fraction=0.5,
            full_output=True)

        # Plot forward/side scatter 2D density plot and 1D fluorescence
        # histograms
        print("Plotting density plot and histogram...")

        # Parameters for the forward/side scatter density plot
        density_params = {}
        # We use the "scatter" mode, in which individual particles will be
        # plotted individually as in a scatter plot, but with a color
        # proportional to the particle density around.
        density_params['mode'] = 'scatter'

        # Parameters for the fluorescence histograms
        hist_params = {}
        hist_params['xlabel'] = 'FL1 Fluorescence (a.u.)'

        # Plot filename
        # The figure can be saved in any format supported by matplotlib (svg,
        # jpg, etc.) by just changing the extension.
        plot_filename = '{}/density_hist_{}.png'.format(
            samples_plot_dir,
            'S{:03}'.format(sample_id + 1))

        # Plot and save
        # The function ``FlowCal.plot.density_and_hist()`` plots a combined
        # figure with a 2D density plot at the top, and an arbitrary number of
        # 1D histograms below. In this case, we will plot the forward/side
        # scatter channels in the density plot, and a histogram of the
        # fluorescence channel FL1 below.
        # Note that we are providing data both before (``sample``) and after
        # (``sample_gated``) gating. The 1D histogram will display the ungated
        # dataset with transparency, and the gated dataset in front with a
        # solid color. In addition, we are providing ``gate_contour`` from the
        # density gating step, which will be displayed in the density diagram.
        # This will result in a convenient representation of the data both
        # before and after gating.
        FlowCal.plot.density_and_hist(
            sample,
            sample_gated,
            density_channels=['FSC','SSC'],
            hist_channels=['FL1'],
            gate_contour=gate_contour,
            density_params=density_params,
            hist_params=hist_params,
            savefig=plot_filename)

        # Save cell sample object
        samples.append(sample_gated)
###
# Part 3: Examples on how to use processed cell sample data
###
# Histogram of all samples
# Here, we plot the fluorescence histograms of all five samples in the same
# figure, using ``FlowCal.plot.hist1d``. Note how this function can be used
# in the context of accessory matplotlib functions to modify the axes
# limits and labels and add a legend, among others.
plt.figure(figsize=(6,3.5))
FlowCal.plot.hist1d(samples,
channel='FL1',
histtype='step',
bins=128)
plt.ylim([0, 2000])
plt.xlabel('FL1 Fluorescence (a.u.)')
plt.legend(['{} $\mu M$ IPTG'.format(i) for i in iptg],
loc='upper left',
fontsize='small')
plt.tight_layout()
plt.savefig('histograms.png', dpi=200)
plt.close()
# Here we illustrate how to obtain statistics from the fluorescence of each
# sample, and how to use them in a plot.
# The stats module contains functions to calculate different statistics
# such as mean, median, and standard deviation. Here, we calculate the
# geometric mean from channel FL1 of each sample, and plot them against the
# corresponding IPTG concentrations.
samples_fluorescence = [FlowCal.stats.gmean(s, channels='FL1')
for s in samples]
plt.figure(figsize=(5.5, 3.5))
plt.plot(iptg,
samples_fluorescence,
marker='o',
color=(0, 0.4, 0.7))
plt.xlabel('IPTG Concentration ($\mu M$)')
plt.ylabel('FL1 Fluorescence (a.u.)')
plt.tight_layout()
plt.savefig('dose_response.png', dpi=200)
plt.close()
print("\nDone.")
| 3.109375 | 3 |
scripts/py_scripts/cluster_patient_systems_merger.py | Elenadisa/PhenCo | 3 | 12758774 | <reponame>Elenadisa/PhenCo
#! /usr/bin/env python
import functions as fn
#############################################################################################################################################################################
#                                                                           OPTPARSE                                                                                        #
#############################################################################################################################################################################
# NOTE(review): optparse is deprecated in favor of argparse; kept as-is to
# preserve the existing command-line interface.
import optparse
parser=optparse.OptionParser()

# Clusters and their HPO terms (output of clustering.R).
parser.add_option("-c", "--clusters_file", dest="clusters_hpo_file",
                  help="File with clusters and HPO clustering.R output", metavar="FILE")
parser.add_option("-A", "--key_cluster_id", dest="cluster_hpo_id",
                  help="column which have clusters identificators", type='int')
parser.add_option("-a", "--cluster_value", dest="cluster_hpo_value",
                  help="column with HPO terms", type='int')

# Patients and their HPO terms.
parser.add_option("-p", "--patients_hpo_file", dest="patients_hpo_file",
                  help="File with patients and their hpos", metavar="FILE")
parser.add_option("-B", "--key_patients_hpo_id", dest="patients_hpo_id",
                  help="column which have patients identificators", type='int')
parser.add_option("-b", "--patiens_hpo_value", dest="patients_hpo_value",
                  help="column with HPO", type='int')

# Clusters and their coincident systems.
parser.add_option("-s", "--cluster_system_file", dest="cluster_system_file",
                  help="File with clusters and their coincidence system", metavar="FILE")
parser.add_option("-D", "--key_cluster_systems_id", dest="cluster_system_id",
                  help="column which have cluster id", type='int')
parser.add_option("-d", "--cluster_system_value", dest="cluster_system_value",
                  help="column with systems", type='int')

# Systems and their genes.
parser.add_option("-S", "--system_gene_file", dest="system_gene_file",
                  help="File with clusters and their coincidence system", metavar="FILE")
parser.add_option("-E", "--key_system_gene_id", dest="system_gene_id",
                  help="column which have system id", type='int')
parser.add_option("-e", "--system_gene_value", dest="system_gene_value",
                  help="column with genes", type='int')

# Patients and their genes.
parser.add_option("-P", "--patient_gene_file", dest="patient_gene_file",
                  help="File with patients and their genes", metavar="FILE")
parser.add_option("-F", "--key_patient_gene_id", dest="patient_gene_id",
                  help="column which have patient id", type='int')
parser.add_option("-f", "--patient_gene_value", dest="patient_gene_value",
                  help="column with genes in patients", type='int')

# Gene id -> gene symbol translation table.
parser.add_option("-g", "--gene_symbol_file", dest="gene_symbol_file",
                  help="File with genes their symbols", metavar="FILE")
parser.add_option("-I", "--key_gene_symbol_id", dest="gene_symbol_id",
                  help="column which have genes id", type='int')
parser.add_option("-i", "--gene_symbol_value", dest="gene_symbol_value",
                  help="column with symbols", type='int')

# Minimum overlaps required to report a (patient, cluster, system) match.
parser.add_option("-t", "--shared_hpo_threshold", dest="shared_hpo_threshold",
                  help="shared_hpo_threshold", type='int')
parser.add_option("-n", "--gene_number_threshold", dest="gene_number_threshold",
                  help="gene_number_threshold", type='int')

(options, arg) = parser.parse_args()
#############################################################################################################################################################################
#                                                                              MAIN                                                                                         #
#############################################################################################################################################################################

# Load all lookup tables.  ``fn.build_dictionary`` maps the key column to a
# list of values from the value column.
clusters_hpo_dictionary = fn.build_dictionary(options.clusters_hpo_file, options.cluster_hpo_id, options.cluster_hpo_value)  #return dictionary
patients_hpo_dictionary = fn.build_dictionary(options.patients_hpo_file, options.patients_hpo_id, options.patients_hpo_value)  #return dictionary
clusters_systems_dictionary = fn.build_dictionary(options.cluster_system_file, options.cluster_system_id, options.cluster_system_value)  #return dictionary
systems_genes_dictionary = fn.build_dictionary(options.system_gene_file, options.system_gene_id, options.system_gene_value)  #return dictionary
patients_genes_dictionary = fn.build_dictionary(options.patient_gene_file, options.patient_gene_id, options.patient_gene_value)  #return dictionary
genes_symbol_dictionary = fn.build_dictionary(options.gene_symbol_file, options.gene_symbol_id, options.gene_symbol_value)  #return dictionary

# Tab-separated output header (one row per patient/cluster/system match).
print("Patient" + "\t" + "Cluster" + "\t" + "Shared_hpos" + "\t" + "Patient_profile" + "\t" + "Systems" + "\t" + "Genes")

# NOTE(review): indentation reconstructed -- the cluster loop is taken to be
# inside the ``if patient in patients_genes_dictionary`` branch, since
# ``patient_genes_symbol`` would otherwise be undefined (or stale) for
# patients without genes.  Confirm against the original file.
#For each patient and hpos in patient_hpo dictionary
for patient, hpos in patients_hpo_dictionary.items():
    patient_profile = set(hpos)  #get a set of the patient profile
    if patient in patients_genes_dictionary:  #look for the genes associated to the patient
        patient_genes = patients_genes_dictionary[patient]
        patient_genes_symbol = []
        for gene in patient_genes:
            if gene in genes_symbol_dictionary:
                patient_genes_symbol.append("".join(genes_symbol_dictionary[gene]))  #Translate each gene entrez id to gene symbol and save them in a list
        patient_genes_symbol = set(patient_genes_symbol)  #Get a set of patient's genes

        #For each cluster and systems in cluster_systems dictionary
        for cluster, systems in clusters_systems_dictionary.items():
            cluster_profile = set(clusters_hpo_dictionary[cluster])  #get a set of hpos in the cluster
            shared_hpos = patient_profile.intersection(cluster_profile)  # look for common hpos between the patient profile and the clusters hpos

            if len(shared_hpos) >= options.shared_hpo_threshold:  #If the number of common hpos is greater than a threshold
                for element in systems:  #look for the genes related to the system
                    if element in systems_genes_dictionary:
                        system_genes = set(systems_genes_dictionary[element])  #get the set of systems genes
                        gene_intersection = patient_genes_symbol.intersection(system_genes)  # look for common genes between patient and systems

                        if len(gene_intersection) >= options.gene_number_threshold:  #If there are a number of common genes greater than a threshold it print:
                            print(patient, cluster, ", ".join(shared_hpos), ", ".join(patient_profile), element, ", ".join(gene_intersection), sep="\t")
data/transcoder_evaluation_gfg/python/CHECK_WHETHER_TRIANGLE_VALID_NOT_SIDES_GIVEN.py | mxl1n/CodeGen | 241 | 12758775 | <reponame>mxl1n/CodeGen<gh_stars>100-1000
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(a, b, c):
    """Return True when sides a, b, c form a valid (non-degenerate) triangle.

    Uses the triangle inequality: the two shorter sides must sum to strictly
    more than the longest side.
    """
    shortest, middle, longest = sorted((a, b, c))
    return shortest + middle > longest
#TOFILL
if __name__ == '__main__':
    # Each tuple is one (a, b, c) triple of candidate triangle side lengths.
    param = [
        (29,19,52,),
        (83,34,49,),
        (48,14,65,),
        (59,12,94,),
        (56,39,22,),
        (68,85,9,),
        (63,36,41,),
        (95,34,37,),
        (2,90,27,),
        (11,16,1,)
    ]
    n_success = 0
    # ``f_filled`` is injected by the evaluation tooling at the #TOFILL marker
    # above; each case is scored against the reference implementation f_gold.
    for i, parameters_set in enumerate(param):
        if f_filled(*parameters_set) == f_gold(*parameters_set):
            n_success+=1
    print("#Results: %i, %i" % (n_success, len(param)))
Collatz_Conjecture.py | lewiswatson55/Python_Collatz_Conjecture | 3 | 12758776 | <filename>Collatz_Conjecture.py
#Collatz Conjecture Program
#Created by <NAME>
from time import perf_counter_ns
it_count = 0  # global iteration counter updated by Calculate()
n = 0  # NOTE(review): appears unused -- Calculate() receives n as a parameter
def Select_Number():
    """Prompt for a positive integer and hand it to Calculate().

    Rejects non-numeric input (previously crashed with ValueError) and values
    below 1: the 3n+1 iteration in Calculate() never reaches 1 for n <= 0, so
    those inputs would loop forever.
    """
    print ("")
    try:
        sn = int(input("Select the number you wish to calculate? "))
    except ValueError:
        sn = 0  # treat non-numeric input as invalid
    if sn < 1:
        print ("This number is invalid.")
        print ()
        print ()
        Select_Number()
    else:
        Calculate(sn)
def Calculate(n):
    """Run the Collatz (3n+1) iteration on *n*, report the step count and
    elapsed time, then re-prompt for another number.

    The module-level ``it_count`` is still updated so existing readers of
    that global see the final iteration count; the counting itself uses a
    local variable.
    """
    global it_count
    if n < 1:
        # Guard: the iteration below never reaches 1 for n <= 0
        # (0 cycles on the even branch, negatives never become 1).
        print ("This number is invalid.")
        print ()
        Select_Number()
        return
    steps = 0
    start = perf_counter_ns()
    while n != 1:
        if (n % 2):
            n = (n*3+1)
            #print (n) #Prints All Numbers (Slows Program Speed)
        else:
            n = (n//2)
            #print (n) #Prints All Numbers (Slows Program Speed)
        steps += 1
    end = perf_counter_ns()
    it_count = steps
    print ("The number has reached " + str(n) + " with only " + str(steps) + " iterations! (Time taken: " + format(end-start) + " nanoseconds.)")
    print()
    Select_Number()
if __name__ == "__main__":
    # Start the interactive prompt only when run as a script (previously the
    # prompt also ran on import; also drops the stray trailing semicolon).
    Select_Number()
bluesky_queueserver/manager/tests/test_plan_queue_ops.py | ksunden/bluesky-queueserver | 0 | 12758777 | <filename>bluesky_queueserver/manager/tests/test_plan_queue_ops.py
import asyncio
import pytest
import json
import copy
from bluesky_queueserver.manager.plan_queue_ops import PlanQueueOperations
errmsg_wrong_plan_type = "Parameter 'item' should be a dictionary"
@pytest.fixture
def pq():
    """Yield a started ``PlanQueueOperations`` with a clean Redis pool.

    Pool entries are cleared both before and after each test so tests never
    observe each other's queue data.
    """
    pq = PlanQueueOperations()
    asyncio.run(pq.start())
    # Clear any pool entries
    asyncio.run(pq.delete_pool_entries())
    yield pq
    # Don't leave any test entries in the pool
    asyncio.run(pq.delete_pool_entries())
def test_running_plan_info(pq):
    """
    Basic test for the following methods:
        `PlanQueueOperations.is_item_running()`
        `PlanQueueOperations.get_running_item_info()`
        `PlanQueueOperations.delete_pool_entries()`
    """

    async def testing():
        # Initially nothing is running.
        assert await pq.get_running_item_info() == {}
        assert await pq.is_item_running() is False

        some_plan = {"some_key": "some_value"}
        await pq._set_running_item_info(some_plan)
        assert await pq.get_running_item_info() == some_plan
        assert await pq.is_item_running() is True

        await pq._clear_running_item_info()
        assert await pq.get_running_item_info() == {}
        assert await pq.is_item_running() is False

        # ``delete_pool_entries()`` must also wipe the running-item record.
        await pq._set_running_item_info(some_plan)
        await pq.delete_pool_entries()
        assert await pq.get_running_item_info() == {}
        assert await pq.is_item_running() is False

    asyncio.run(testing())


# fmt: off
@pytest.mark.parametrize("plan_running, plans, result_running, result_plans", [
    ({"testing": 1}, [{"testing": 2}, {"item_uid": "ab", "name": "nm"}, {"testing": 2}],
     {}, [{"item_uid": "ab", "name": "nm"}]),
    ({"testing": 1}, [{"testing": 2}, {"item_uid": "ab", "name": "nm"}, {"testing": 3}],
     {}, [{"item_uid": "ab", "name": "nm"}]),
    ({"item_uid": "a"}, [{"item_uid": "a1"}, {"item_uid": "a2"}, {"item_uid": "a3"}],
     {"item_uid": "a"}, [{"item_uid": "a1"}, {"item_uid": "a2"}, {"item_uid": "a3"}]),
])
# fmt: on
def test_queue_clean(pq, plan_running, plans, result_running, result_plans):
    """
    Test for ``_queue_clean()`` method.  Parametrized cases show that entries
    without 'item_uid' are purged while valid entries are preserved.
    """

    async def testing():
        # Populate the running item and the queue directly in Redis.
        await pq._set_running_item_info(plan_running)
        for plan in plans:
            await pq._r_pool.rpush(pq._name_plan_queue, json.dumps(plan))

        assert await pq.get_running_item_info() == plan_running
        plan_queue, _ = await pq.get_queue()
        assert plan_queue == plans

        await pq._queue_clean()

        assert await pq.get_running_item_info() == result_running
        plan_queue, _ = await pq.get_queue()
        assert plan_queue == result_plans

    asyncio.run(testing())
# fmt: off
@pytest.mark.parametrize("plan, result",
                         [({"a": 10}, True),
                          ([10, 20], False),
                          (50, False),
                          ("abc", False)])
# fmt: on
def test_verify_item_type(pq, plan, result):
    # Only dict items are accepted; any other type raises TypeError.
    if result:
        pq._verify_item_type(plan)
    else:
        with pytest.raises(TypeError, match=errmsg_wrong_plan_type):
            pq._verify_item_type(plan)


# fmt: off
@pytest.mark.parametrize(
    "plan, f_kwargs, result, errmsg",
    [({"a": 10}, {}, False, "Item does not have UID"),
     ([10, 20], {}, False, errmsg_wrong_plan_type),
     ({"item_uid": "one"}, {}, True, ""),
     ({"item_uid": "two"}, {}, False, "Item with UID .+ is already in the queue"),
     ({"item_uid": "three"}, {}, False, "Item with UID .+ is already in the queue"),
     ({"item_uid": "two"}, {"ignore_uids": None}, False, "Item with UID .+ is already in the queue"),
     ({"item_uid": "two"}, {"ignore_uids": ["two"]}, True, ""),
     ({"item_uid": "two"}, {"ignore_uids": ["two", "three"]}, True, ""),
     ({"item_uid": "two"}, {"ignore_uids": ["one", "three"]}, False, "Item with UID .+ is already in the queue"),
     ])
# fmt: on
def test_verify_item(pq, plan, f_kwargs, result, errmsg):
    """
    Tests for method ``_verify_item()``.
    """
    # Set two existing plans and then set one of them as running
    existing_plans = [{"item_uid": "two"}, {"item_uid": "three"}]

    async def set_plans():
        # Add plan to queue
        for plan in existing_plans:
            await pq.add_item_to_queue(plan)
        # Set one plan as currently running
        await pq.set_next_item_as_running()

        # Verify that setup is correct
        assert await pq.is_item_running() is True
        assert await pq.get_queue_size() == 1

    asyncio.run(set_plans())

    if result:
        pq._verify_item(plan, **f_kwargs)
    else:
        with pytest.raises(Exception, match=errmsg):
            pq._verify_item(plan, **f_kwargs)
def test_new_item_uid(pq):
    """
    Smoke test for the method ``new_item_uid()``.
    """
    assert isinstance(pq.new_item_uid(), str)


# fmt: off
@pytest.mark.parametrize("plan", [
    {"name": "a"},
    {"item_uid": "some_uid", "name": "a"},
])
# fmt: on
def test_set_new_item_uuid(pq, plan):
    """
    Basic test for the method ``set_new_item_uuid()``.
    Covers both a plan without a UID and a plan with an existing UID.
    """
    uid = plan.get("item_uid", None)

    # The function is supposed to create or replace UID
    new_plan = pq.set_new_item_uuid(plan)

    assert "item_uid" in new_plan
    assert isinstance(new_plan["item_uid"], str)
    assert new_plan["item_uid"] != uid
def test_get_index_by_uid(pq):
    """
    Test for ``_get_index_by_uid()``: lookup of a queue position by item UID,
    and IndexError for an unknown UID.
    """
    plans = [
        {"item_uid": "a", "name": "name_a"},
        {"item_uid": "b", "name": "name_b"},
        {"item_uid": "c", "name": "name_c"},
    ]

    async def testing():
        for plan in plans:
            await pq.add_item_to_queue(plan)

        assert await pq._get_index_by_uid("b") == 1

        with pytest.raises(IndexError, match="No plan with UID 'nonexistent'"):
            assert await pq._get_index_by_uid("nonexistent")

    # BUG FIX: ``testing()`` was defined but never executed, so this test
    # previously passed without checking anything.  Run the coroutine like
    # every other test in this module does.
    asyncio.run(testing())
def test_uid_dict_1(pq):
    """
    Basic test for functions associated with `_uid_dict`:
    add/update/remove/clear and membership/lookup helpers.
    """
    plan_a = {"item_uid": "a", "name": "name_a"}
    plan_b = {"item_uid": "b", "name": "name_b"}
    plan_c = {"item_uid": "c", "name": "name_c"}

    plan_b_updated = {"item_uid": "b", "name": "name_b_updated"}

    pq._uid_dict_add(plan_a)
    pq._uid_dict_add(plan_b)

    assert pq._is_uid_in_dict(plan_a["item_uid"]) is True
    assert pq._is_uid_in_dict(plan_b["item_uid"]) is True
    assert pq._is_uid_in_dict(plan_c["item_uid"]) is False

    # Update replaces the stored item for an existing UID.
    assert pq._uid_dict_get_item(plan_b["item_uid"]) == plan_b
    pq._uid_dict_update(plan_b_updated)
    assert pq._uid_dict_get_item(plan_b["item_uid"]) == plan_b_updated

    pq._uid_dict_remove(plan_a["item_uid"])
    assert pq._is_uid_in_dict(plan_a["item_uid"]) is False
    assert pq._is_uid_in_dict(plan_b["item_uid"]) is True

    pq._uid_dict_clear()
    assert pq._is_uid_in_dict(plan_a["item_uid"]) is False
    assert pq._is_uid_in_dict(plan_b["item_uid"]) is False


def test_uid_dict_2(pq):
    """
    Test if functions changing `pq._uid_dict` are also updating `pq.plan_queue_uid`.
    Every mutation (add/update/remove/clear) must produce a new queue UID.
    """
    plan_a = {"item_uid": "a", "name": "name_a"}
    plan_a_updated = {"item_uid": "a", "name": "name_a_updated"}

    pq_uid = pq.plan_queue_uid
    pq._uid_dict_add(plan_a)
    assert pq.plan_queue_uid != pq_uid

    pq_uid = pq.plan_queue_uid
    pq._uid_dict_update(plan_a_updated)
    assert pq.plan_queue_uid != pq_uid

    pq_uid = pq.plan_queue_uid
    pq._uid_dict_remove(plan_a_updated["item_uid"])
    assert pq.plan_queue_uid != pq_uid

    pq_uid = pq.plan_queue_uid
    pq._uid_dict_clear()
    assert pq.plan_queue_uid != pq_uid


def test_uid_dict_3_initialize(pq):
    """
    Basic test for functions associated with ``_uid_dict_initialize()``:
    the dictionary is rebuilt from the queue contents.
    """

    async def testing():
        await pq.add_item_to_queue({"name": "a"})
        await pq.add_item_to_queue({"name": "b"})
        await pq.add_item_to_queue({"name": "c"})
        plans, _ = await pq.get_queue()

        # Expected mapping: item_uid -> item.
        uid_dict = {_["item_uid"]: _ for _ in plans}
        pq_uid = pq.plan_queue_uid
        await pq._uid_dict_initialize()
        assert pq._uid_dict == uid_dict
        assert pq.plan_queue_uid != pq_uid

    asyncio.run(testing())


def test_uid_dict_4_failing(pq):
    """
    Failing cases for functions associated with `_uid_dict`.  Failed
    operations must not change ``plan_queue_uid`` or the dictionary size.
    """
    plan_a = {"item_uid": "a", "name": "name_a"}
    plan_b = {"item_uid": "b", "name": "name_b"}
    plan_c = {"item_uid": "c", "name": "name_c"}

    pq._uid_dict_add(plan_a)
    pq._uid_dict_add(plan_b)

    # Add plan with UID that already exists
    pq_uid = pq.plan_queue_uid
    with pytest.raises(RuntimeError, match=f"'{plan_a['item_uid']}', which is already in the queue"):
        pq._uid_dict_add(plan_a)
    assert pq.plan_queue_uid == pq_uid

    assert len(pq._uid_dict) == 2

    # Remove plan with UID that does not exist
    with pytest.raises(RuntimeError, match=f"'{plan_c['item_uid']}', which is not in the queue"):
        pq._uid_dict_remove(plan_c["item_uid"])
    assert pq.plan_queue_uid == pq_uid

    assert len(pq._uid_dict) == 2

    # Update plan with UID that does not exist
    with pytest.raises(RuntimeError, match=f"'{plan_c['item_uid']}', which is not in the queue"):
        pq._uid_dict_update(plan_c)
    assert pq.plan_queue_uid == pq_uid

    assert len(pq._uid_dict) == 2
def test_remove_item(pq):
    """
    Basic test for functions associated with ``_remove_plan()``, including
    behavior with duplicate queue entries and the ``single`` flag.
    """

    async def testing():
        plan_list = [{"name": "a"}, {"name": "b"}, {"name": "c"}]
        for plan in plan_list:
            await pq.add_item_to_queue(plan)

        plans, _ = await pq.get_queue()
        plan_to_remove = [_ for _ in plans if _["name"] == "b"][0]

        # Remove one plan
        await pq._remove_item(plan_to_remove)
        plans, _ = await pq.get_queue()
        assert len(plans) == 2

        # Add a copy of a plan (queue is not supposed to have copies in real life)
        plan_to_add = plans[0]
        await pq._r_pool.lpush(pq._name_plan_queue, json.dumps(plan_to_add))
        # Now remove both plans
        await pq._remove_item(plan_to_add, single=False)  # Allow deleting multiple or no plans
        assert await pq.get_queue_size() == 1

        # Delete the plan again (the plan is not in the queue, but it shouldn't raise an exception)
        await pq._remove_item(plan_to_add, single=False)  # Allow deleting multiple or no plans
        assert await pq.get_queue_size() == 1
        # With single=True (default) removing a missing plan raises.
        with pytest.raises(RuntimeError, match="One item is expected"):
            await pq._remove_item(plan_to_add)
        assert await pq.get_queue_size() == 1

        # Now add 'plan_to_add' twice (create two copies)
        await pq._r_pool.lpush(pq._name_plan_queue, json.dumps(plan_to_add))
        await pq._r_pool.lpush(pq._name_plan_queue, json.dumps(plan_to_add))
        assert await pq.get_queue_size() == 3
        # Attempt to delete two copies
        with pytest.raises(RuntimeError, match="One item is expected"):
            await pq._remove_item(plan_to_add)
        # Exception is raised, but both copies are deleted
        assert await pq.get_queue_size() == 1

    asyncio.run(testing())


def test_get_queue_full_1(pq):
    """
    Basic test for the functions ``PlanQueueOperations.get_queue()`` and
    ``PlanQueueOperations.get_queue_full()``: both views must agree on the
    queue contents, the running item and the queue UID.
    """

    async def testing():
        plans = [
            {"item_uid": "one", "name": "a"},
            {"item_uid": "two", "name": "b"},
            {"item_uid": "three", "name": "c"},
        ]
        for p in plans:
            await pq.add_item_to_queue(p)

        # The first plan becomes the running item and leaves the queue.
        await pq.set_next_item_as_running()

        pq_uid = pq.plan_queue_uid
        queue1, uid1 = await pq.get_queue()
        running_item1 = await pq.get_running_item_info()
        queue2, running_item2, uid2 = await pq.get_queue_full()

        assert queue1 == plans[1:]
        assert queue2 == plans[1:]
        assert running_item1 == plans[0]
        assert running_item2 == plans[0]
        assert uid1 == pq_uid
        assert uid2 == pq_uid

    asyncio.run(testing())
# fmt: off
@pytest.mark.parametrize("params, name", [
    ({"pos": "front"}, "a"),
    ({"pos": "back"}, "c"),
    ({"pos": 0}, "a"),
    ({"pos": 1}, "b"),
    ({"pos": 2}, "c"),
    ({"pos": 3}, None),  # Index out of range
    ({"pos": -1}, "c"),
    ({"pos": -2}, "b"),
    ({"pos": -3}, "a"),
    ({"pos": -4}, None),  # Index out of range
    ({"uid": "one"}, "a"),
    ({"uid": "two"}, "b"),
    ({"uid": "nonexistent"}, None),
])
# fmt: on
def test_get_item_1(pq, params, name):
    """
    Basic test for the function ``PlanQueueOperations.get_item()``:
    lookup by position (front/back/index) and by UID.  ``name is None``
    marks cases expected to raise ``IndexError``.
    """

    async def testing():
        pq_uid = pq.plan_queue_uid
        await pq.add_item_to_queue({"item_uid": "one", "name": "a"})
        await pq.add_item_to_queue({"item_uid": "two", "name": "b"})
        await pq.add_item_to_queue({"item_uid": "three", "name": "c"})
        assert await pq.get_queue_size() == 3
        assert pq.plan_queue_uid != pq_uid

        if name is not None:
            plan = await pq.get_item(**params)
            assert plan["name"] == name
        else:
            # Error message depends on whether lookup was by position or by UID.
            msg = "Index .* is out of range" if "pos" in params else "is not in the queue"
            with pytest.raises(IndexError, match=msg):
                await pq.get_item(**params)

    asyncio.run(testing())


def test_get_item_2_fail(pq):
    """
    Basic test for the function ``PlanQueueOperations.get_item()``.
    Attempt to retrieve a running plan.
    """

    async def testing():
        await pq.add_item_to_queue({"item_uid": "one", "name": "a"})
        await pq.add_item_to_queue({"item_uid": "two", "name": "b"})
        await pq.add_item_to_queue({"item_uid": "three", "name": "c"})
        assert await pq.get_queue_size() == 3

        pq_uid = pq.plan_queue_uid
        await pq.set_next_item_as_running()
        assert await pq.get_queue_size() == 2
        assert pq.plan_queue_uid != pq_uid

        # The running item can not be retrieved from the queue by UID.
        with pytest.raises(IndexError, match="is currently running"):
            await pq.get_item(uid="one")

        # Ambiguous parameters (position and UID is passed)
        with pytest.raises(ValueError, match="Ambiguous parameters"):
            await pq.get_item(pos=5, uid="abc")

    asyncio.run(testing())
def test_add_item_to_queue_1(pq):
    """
    Basic test for the function ``PlanQueueOperations.add_item_to_queue()``:
    insertion by position ('front'/'back' and positive/negative indices).
    """

    async def add_plan(plan, n, **kwargs):
        # Helper: add the plan and verify the reported item and queue size.
        plan_added, qsize = await pq.add_item_to_queue(plan, **kwargs)
        assert plan_added["name"] == plan["name"], f"plan: {plan}"
        assert qsize == n, f"plan: {plan}"

    async def testing():
        await add_plan({"name": "a"}, 1)
        await add_plan({"name": "b"}, 2)
        await add_plan({"name": "c"}, 3, pos="back")
        await add_plan({"name": "d"}, 4, pos="front")
        await add_plan({"name": "e"}, 5, pos=0)  # front
        await add_plan({"name": "f"}, 6, pos=5)  # back (index == queue size)
        await add_plan({"name": "g"}, 7, pos=5)  # previous to last
        await add_plan({"name": "h"}, 8, pos=-1)  # previous to last
        await add_plan({"name": "i"}, 9, pos=3)  # arbitrary index
        await add_plan({"name": "j"}, 10, pos=100)  # back (index some large number)
        await add_plan({"name": "k"}, 11, pos=-10)  # front (precisely negative queue size)
        await add_plan({"name": "l"}, 12, pos=-100)  # front (index some large negative number)

        assert await pq.get_queue_size() == 12

        plans, _ = await pq.get_queue()
        name_sequence = [_["name"] for _ in plans]
        assert name_sequence == ["l", "k", "e", "d", "a", "i", "b", "c", "g", "h", "f", "j"]

        await pq.clear_queue()

    asyncio.run(testing())


def test_add_item_to_queue_2(pq):
    """
    Basic test for the function ``PlanQueueOperations.add_item_to_queue()``:
    insertion relative to an existing item (``before_uid``/``after_uid``).
    """

    async def add_plan(plan, n, **kwargs):
        # Helper: add the plan and verify the reported item and queue size.
        plan_added, qsize = await pq.add_item_to_queue(plan, **kwargs)
        assert plan_added["name"] == plan["name"], f"plan: {plan}"
        assert qsize == n, f"plan: {plan}"

    async def testing():
        await add_plan({"name": "a"}, 1)
        await add_plan({"name": "b"}, 2)
        await add_plan({"name": "c"}, 3, pos="back")

        plan_queue, _ = await pq.get_queue()
        displaced_uid = plan_queue[1]["item_uid"]

        await add_plan({"name": "d"}, 4, before_uid=displaced_uid)
        await add_plan({"name": "e"}, 5, after_uid=displaced_uid)

        # This reduces the number of elements in the queue by one
        await pq.set_next_item_as_running()

        displaced_uid = plan_queue[0]["item_uid"]
        await add_plan({"name": "f"}, 5, after_uid=displaced_uid)

        # Inserting before the running item is not allowed.
        with pytest.raises(IndexError, match="Can not insert a plan in the queue before a currently running plan"):
            await add_plan({"name": "g"}, 5, before_uid=displaced_uid)

        with pytest.raises(IndexError, match="is not in the queue"):
            await add_plan({"name": "h"}, 5, before_uid="nonexistent_uid")

        assert await pq.get_queue_size() == 5

        plans, _ = await pq.get_queue()
        name_sequence = [_["name"] for _ in plans]
        assert name_sequence == ["f", "d", "b", "e", "c"]

        await pq.clear_queue()

    asyncio.run(testing())


def test_add_item_to_queue_3_fail(pq):
    """
    Failing tests for the function ``PlanQueueOperations.add_item_to_queue()``.
    Failed calls must not change ``plan_queue_uid``.
    """

    async def testing():
        pq_uid = pq.plan_queue_uid
        with pytest.raises(ValueError, match="Parameter 'pos' has incorrect value"):
            await pq.add_item_to_queue({"name": "a"}, pos="something")
        assert pq.plan_queue_uid == pq_uid

        with pytest.raises(TypeError, match=errmsg_wrong_plan_type):
            await pq.add_item_to_queue("plan_is_not_string")
        assert pq.plan_queue_uid == pq_uid

        # Duplicate plan UID
        plan = {"item_uid": "abc", "name": "a"}
        await pq.add_item_to_queue(plan)
        pq_uid = pq.plan_queue_uid
        with pytest.raises(RuntimeError, match="Item with UID .+ is already in the queue"):
            await pq.add_item_to_queue(plan)
        assert pq.plan_queue_uid == pq_uid

        # Ambiguous parameters (position and UID is passed)
        with pytest.raises(ValueError, match="Ambiguous parameters"):
            await pq.add_item_to_queue({"name": "abc"}, pos=5, before_uid="abc")
        assert pq.plan_queue_uid == pq_uid

        # Ambiguous parameters ('before_uid' and 'after_uid' is specified)
        with pytest.raises(ValueError, match="Ambiguous parameters"):
            await pq.add_item_to_queue({"name": "abc"}, before_uid="abc", after_uid="abc")
        assert pq.plan_queue_uid == pq_uid

    asyncio.run(testing())
# fmt: off
@pytest.mark.parametrize("replace_uid", [False, True])
# fmt: on
def test_replace_item_1(pq, replace_uid):
    """
    Basic functionality of ``PlanQueueOperations.replace_item()`` function.
    Parametrized over whether the replacement keeps the old UID or carries a
    freshly generated one.
    """

    async def testing():
        plans = [{"name": "a"}, {"name": "b"}, {"name": "c"}]
        plans_added = [None] * len(plans)
        qsizes = [None] * len(plans)
        for n, plan in enumerate(plans):
            plans_added[n], qsizes[n] = await pq.add_item_to_queue(plan)

        assert qsizes == [1, 2, 3]
        assert await pq.get_queue_size() == 3

        # Change name, but keep UID
        plan_names_new = ["d", "e", "f"]
        for n in range(len(plans_added)):
            plan = plans_added[n].copy()
            plan["name"] = plan_names_new[n]
            uid_to_replace = plan["item_uid"]
            if replace_uid:
                plan["item_uid"] = pq.new_item_uid()  # Generate new UID

            pq_uid = pq.plan_queue_uid
            plan_new, qsize = await pq.replace_item(plan, item_uid=uid_to_replace)
            assert plan_new["name"] == plan["name"]
            assert plan_new["item_uid"] == plan["item_uid"]
            # Every replacement must change the queue UID but not the size.
            assert pq.plan_queue_uid != pq_uid

            assert await pq.get_queue_size() == 3
            plans_added[n] = plan_new

            # Make sure that the plan can be correctly extracted by uid
            assert await pq.get_item(uid=plan["item_uid"]) == plan
            assert await pq.get_queue_size() == 3

            # Initialize '_uid_dict' and see if the plan can still be extracted using correct UID.
            await pq._uid_dict_initialize()
            assert await pq.get_item(uid=plan["item_uid"]) == plan
            assert await pq.get_queue_size() == 3

    asyncio.run(testing())
def test_replace_item_2(pq):
    """
    ``PlanQueueOperations.replace_item()`` function: not UID in the plan - random UID is assigned.
    """

    async def testing():
        plans = [{"name": "a"}, {"name": "b"}, {"name": "c"}]
        plans_added = [None] * len(plans)
        qsizes = [None] * len(plans)
        for n, plan in enumerate(plans):
            plans_added[n], qsizes[n] = await pq.add_item_to_queue(plan)
        assert qsizes == [1, 2, 3]
        assert await pq.get_queue_size() == 3
        new_name = "h"
        plan_new = {"name": new_name}  # No UID in the plan. It should still be inserted
        pq_uid = pq.plan_queue_uid
        plan_replaced, qsize = await pq.replace_item(plan_new, item_uid=plans_added[1]["item_uid"])
        assert pq.plan_queue_uid != pq_uid
        # A fresh UID must be generated for the replacement item.
        new_uid = plan_replaced["item_uid"]
        assert new_uid != plans_added[1]["item_uid"]
        assert plan_replaced["name"] == plan_new["name"]
        plan = await pq.get_item(uid=new_uid)
        assert plan["item_uid"] == new_uid
        assert plan["name"] == new_name
        # Initialize '_uid_dict' and see if the plan can still be extracted using correct UID.
        await pq._uid_dict_initialize()
        plan = await pq.get_item(uid=new_uid)
        assert plan["item_uid"] == new_uid
        assert plan["name"] == new_name

    asyncio.run(testing())
def test_replace_item_3_failing(pq):
    """
    ``PlanQueueOperations.replace_item()`` - failing cases
    """

    async def testing():
        plans = [{"name": "a"}, {"name": "b"}, {"name": "c"}]
        plans_added = [None] * len(plans)
        qsizes = [None] * len(plans)
        for n, plan in enumerate(plans):
            plans_added[n], qsizes[n] = await pq.add_item_to_queue(plan)
        assert qsizes == [1, 2, 3]
        assert await pq.get_queue_size() == 3
        # Set the first item as 'running'
        running_plan = await pq.set_next_item_as_running()
        assert running_plan == plans[0]
        # Snapshot the queue and the running item so we can verify at the end
        # that none of the failing calls modified any state.
        queue, _ = await pq.get_queue()
        running_item_info = await pq.get_running_item_info()
        pq_uid = pq.plan_queue_uid
        plan_new = {"name": "h"}  # No UID in the plan. It should still be inserted
        # Case: attempt to replace a plan that is currently running
        with pytest.raises(RuntimeError, match="Failed to replace item: Item with UID .* is currently running"):
            await pq.replace_item(plan_new, item_uid=running_plan["item_uid"])
        assert pq.plan_queue_uid == pq_uid
        # Case: attempt to replace a plan that is not in queue
        with pytest.raises(RuntimeError, match="Failed to replace item: Item with UID .* is not in the queue"):
            await pq.replace_item(plan_new, item_uid="uid-that-does-not-exist")
        assert pq.plan_queue_uid == pq_uid
        # Case: attempt to replace a plan with another plan that already exists in the queue
        plan = plans_added[1]
        with pytest.raises(RuntimeError, match="Item with UID .* is already in the queue"):
            await pq.replace_item(plan, item_uid=plans_added[2]["item_uid"])
        assert pq.plan_queue_uid == pq_uid
        # Case: attempt to replace a plan with currently running plan
        with pytest.raises(RuntimeError, match="Item with UID .* is already in the queue"):
            await pq.replace_item(running_plan, item_uid=plans_added[2]["item_uid"])
        assert pq.plan_queue_uid == pq_uid
        # Make sure that the queue did not change during the test
        plan_queue, _ = await pq.get_queue()
        assert plan_queue == queue
        assert await pq.get_running_item_info() == running_item_info

    asyncio.run(testing())
# fmt: off
# Columns: params - kwargs for 'move_item'; src - index of the moved plan in the
# original queue; order - expected sequence of plan names after the move;
# success - whether the call should succeed; pquid_changed - whether the queue
# UID is expected to change (no-op moves must not change it); msg - expected
# exception message pattern for failing cases.
@pytest.mark.parametrize("params, src, order, success, pquid_changed, msg", [
    ({"pos": 1, "pos_dest": 1}, 1, "abcde", True, False, ""),
    ({"pos": "front", "pos_dest": "front"}, 0, "abcde", True, False, ""),
    ({"pos": "back", "pos_dest": "back"}, 4, "abcde", True, False, ""),
    ({"pos": "front", "pos_dest": "back"}, 0, "bcdea", True, True, ""),
    ({"pos": "back", "pos_dest": "front"}, 4, "eabcd", True, True, ""),
    ({"pos": 1, "pos_dest": 2}, 1, "acbde", True, True, ""),
    ({"pos": 2, "pos_dest": 1}, 2, "acbde", True, True, ""),
    ({"pos": 0, "pos_dest": 4}, 0, "bcdea", True, True, ""),
    ({"pos": 4, "pos_dest": 0}, 4, "eabcd", True, True, ""),
    ({"pos": 3, "pos_dest": "front"}, 3, "dabce", True, True, ""),
    ({"pos": 2, "pos_dest": "back"}, 2, "abdec", True, True, ""),
    ({"uid": "p3", "after_uid": "p3"}, 2, "abcde", True, False, ""),
    ({"uid": "p1", "before_uid": "p2"}, 0, "abcde", True, True, ""),
    ({"uid": "p1", "after_uid": "p2"}, 0, "bacde", True, True, ""),
    ({"uid": "p2", "pos_dest": "front"}, 1, "bacde", True, True, ""),
    ({"uid": "p2", "pos_dest": "back"}, 1, "acdeb", True, True, ""),
    ({"uid": "p1", "pos_dest": "front"}, 0, "abcde", True, False, ""),
    ({"uid": "p5", "pos_dest": "back"}, 4, "abcde", True, False, ""),
    ({"pos": 1, "after_uid": "p4"}, 1, "acdbe", True, True, ""),
    ({"pos": "front", "after_uid": "p4"}, 0, "bcdae", True, True, ""),
    ({"pos": 3, "after_uid": "p1"}, 3, "adbce", True, True, ""),
    ({"pos": "back", "after_uid": "p1"}, 4, "aebcd", True, True, ""),
    ({"pos": 1, "before_uid": "p4"}, 1, "acbde", True, True, ""),
    ({"pos": "front", "before_uid": "p4"}, 0, "bcade", True, True, ""),
    ({"pos": 3, "before_uid": "p1"}, 3, "dabce", True, True, ""),
    ({"pos": "back", "before_uid": "p1"}, 4, "eabcd", True, True, ""),
    ({"pos": "back", "after_uid": "p5"}, 4, "abcde", True, False, ""),
    ({"pos": "front", "before_uid": "p1"}, 0, "abcde", True, False, ""),
    ({"pos": 50, "before_uid": "p1"}, 0, "", False, False, r"Source plan \(position 50\) was not found"),
    ({"uid": "abc", "before_uid": "p1"}, 0, "", False, False, r"Source plan \(UID 'abc'\) was not found"),
    ({"pos": 3, "pos_dest": 50}, 0, "", False, False, r"Destination plan \(position 50\) was not found"),
    ({"uid": "p1", "before_uid": "abc"}, 0, "", False, False, r"Destination plan \(UID 'abc'\) was not found"),
    ({"before_uid": "p1"}, 0, "", False, None, r"Source position or UID is not specified"),
    ({"pos": 3}, 0, "", False, False, r"Destination position or UID is not specified"),
    ({"pos": 1, "uid": "p1", "before_uid": "p4"}, 1, "", False, False, "Ambiguous parameters"),
    ({"pos": 1, "pos_dest": 4, "before_uid": "p4"}, 1, "", False, False, "Ambiguous parameters"),
    ({"pos": 1, "after_uid": "p4", "before_uid": "p4"}, 1, "", False, False, "Ambiguous parameters"),
])
# fmt: on
def test_move_item_1(pq, params, src, order, success, pquid_changed, msg):
    """
    Basic tests for ``move_item()``.
    """

    async def testing():
        plans = [
            {"item_uid": "p1", "name": "a"},
            {"item_uid": "p2", "name": "b"},
            {"item_uid": "p3", "name": "c"},
            {"item_uid": "p4", "name": "d"},
            {"item_uid": "p5", "name": "e"},
        ]
        for plan in plans:
            await pq.add_item_to_queue(plan)
        assert await pq.get_queue_size() == len(plans)
        pq_uid = pq.plan_queue_uid
        if success:
            plan, qsize = await pq.move_item(**params)
            assert qsize == len(plans)
            assert plan["name"] == plans[src]["name"]
            queue, _ = await pq.get_queue()
            names = [_["name"] for _ in queue]
            names = "".join(names)
            assert names == order
            if pquid_changed:
                assert pq.plan_queue_uid != pq_uid
            else:
                assert pq.plan_queue_uid == pq_uid
        else:
            with pytest.raises(Exception, match=msg):
                await pq.move_item(**params)
            assert pq.plan_queue_uid == pq_uid

    asyncio.run(testing())
# fmt: off
# 'pos' - position passed to 'pop_item_from_queue'; 'name' - name of the plan
# expected at that position, or None when the position is out of range.
@pytest.mark.parametrize("pos, name", [
    ("front", "a"),
    ("back", "c"),
    (0, "a"),
    (1, "b"),
    (2, "c"),
    (3, None),  # Index out of range
    (-1, "c"),
    (-2, "b"),
    (-3, "a"),
    (-4, None)  # Index out of range
])
# fmt: on
def test_pop_item_from_queue_1(pq, pos, name):
    """
    Basic test for the function ``PlanQueueOperations.pop_item_from_queue()``
    """

    async def testing():
        await pq.add_item_to_queue({"name": "a"})
        await pq.add_item_to_queue({"name": "b"})
        await pq.add_item_to_queue({"name": "c"})
        assert await pq.get_queue_size() == 3
        pq_uid = pq.plan_queue_uid
        if name is not None:
            plan, qsize = await pq.pop_item_from_queue(pos=pos)
            assert plan["name"] == name
            assert qsize == 2
            assert await pq.get_queue_size() == 2
            # Push the plan back to the queue (proves that UID is removed from '_uid_dict')
            await pq.add_item_to_queue(plan)
            assert await pq.get_queue_size() == 3
            assert pq.plan_queue_uid != pq_uid
        else:
            with pytest.raises(IndexError, match="Index .* is out of range"):
                await pq.pop_item_from_queue(pos=pos)
            assert pq.plan_queue_uid == pq_uid

    asyncio.run(testing())
@pytest.mark.parametrize("pos", ["front", "back", 0, 1, -1])
def test_pop_item_from_queue_2(pq, pos):
    """
    Test for the function ``PlanQueueOperations.pop_item_from_queue()``:
    the case of empty queue.
    """

    async def testing():
        assert await pq.get_queue_size() == 0
        # Popping from an empty queue must fail and must not change the queue UID.
        pq_uid = pq.plan_queue_uid
        with pytest.raises(IndexError, match="Index .* is out of range|Queue is empty"):
            await pq.pop_item_from_queue(pos=pos)
        assert pq.plan_queue_uid == pq_uid

    asyncio.run(testing())
def test_pop_item_from_queue_3(pq):
    """
    Pop plans by UID.
    """

    async def testing():
        await pq.add_item_to_queue({"name": "a"})
        await pq.add_item_to_queue({"name": "b"})
        await pq.add_item_to_queue({"name": "c"})
        assert await pq.get_queue_size() == 3
        plans, _ = await pq.get_queue()
        assert len(plans) == 3
        plan_to_remove = [_ for _ in plans if _["name"] == "b"][0]
        # Remove one plan
        pq_uid = pq.plan_queue_uid
        await pq.pop_item_from_queue(uid=plan_to_remove["item_uid"])
        assert await pq.get_queue_size() == 2
        assert pq.plan_queue_uid != pq_uid
        # Attempt to remove the plan again. This should raise an exception.
        pq_uid = pq.plan_queue_uid
        with pytest.raises(
            IndexError, match=f"Plan with UID '{plan_to_remove['item_uid']}' " f"is not in the queue"
        ):
            await pq.pop_item_from_queue(uid=plan_to_remove["item_uid"])
        assert await pq.get_queue_size() == 2
        assert pq.plan_queue_uid == pq_uid
        # Attempt to remove the plan that is running. This should raise an exception.
        await pq.set_next_item_as_running()
        assert await pq.get_queue_size() == 1
        pq_uid = pq.plan_queue_uid
        with pytest.raises(IndexError, match="Can not remove an item which is currently running"):
            await pq.pop_item_from_queue(uid=plans[0]["item_uid"])
        assert await pq.get_queue_size() == 1
        assert pq.plan_queue_uid == pq_uid

    asyncio.run(testing())
def test_pop_item_from_queue_4_fail(pq):
    """
    Failing tests for the function ``PlanQueueOperations.pop_item_from_queue()``
    """

    async def testing():
        # Invalid 'pos' value must be rejected without modifying the queue.
        pq_uid = pq.plan_queue_uid
        with pytest.raises(ValueError, match="Parameter 'pos' has incorrect value"):
            await pq.pop_item_from_queue(pos="something")
        assert pq.plan_queue_uid == pq_uid
        # Ambiguous parameters (position and UID is passed)
        with pytest.raises(ValueError, match="Ambiguous parameters"):
            await pq.pop_item_from_queue(pos=5, uid="abc")
        assert pq.plan_queue_uid == pq_uid

    asyncio.run(testing())
def test_clear_queue(pq):
    """
    Test for ``PlanQueueOperations.clear_queue`` function
    """

    async def testing():
        await pq.add_item_to_queue({"name": "a"})
        await pq.add_item_to_queue({"name": "b"})
        await pq.add_item_to_queue({"name": "c"})
        # Set one of 3 plans as running (removes it from the queue)
        await pq.set_next_item_as_running()
        assert await pq.get_queue_size() == 2
        assert len(pq._uid_dict) == 3
        pq_uid = pq.plan_queue_uid
        # Clears the queue only (doesn't touch the running plan)
        await pq.clear_queue()
        assert await pq.get_queue_size() == 0
        # Only the UID of the running plan remains in '_uid_dict'.
        assert len(pq._uid_dict) == 1
        assert pq.plan_queue_uid != pq_uid
        # Verify that parameter validation still works after the queue was cleared.
        with pytest.raises(ValueError, match="Parameter 'pos' has incorrect value"):
            await pq.pop_item_from_queue(pos="something")

    asyncio.run(testing())
def test_add_to_history_functions(pq):
    """
    Test for ``PlanQueueOperations._add_to_history()`` method.
    """

    async def testing():
        assert await pq.get_history_size() == 0
        plans = [{"name": "a"}, {"name": "b"}, {"name": "c"}]
        # Each addition to history must change the history UID.
        ph_uid = pq.plan_history_uid
        for plan in plans:
            await pq._add_to_history(plan)
        assert await pq.get_history_size() == 3
        assert pq.plan_history_uid != ph_uid
        # 'get_history' returns the history list and its current UID.
        ph_uid = pq.plan_history_uid
        plan_history, plan_history_uid_1 = await pq.get_history()
        assert pq.plan_history_uid == plan_history_uid_1
        assert pq.plan_history_uid == ph_uid
        assert len(plan_history) == 3
        assert plan_history == plans
        # Clearing the history changes the history UID as well.
        ph_uid = pq.plan_history_uid
        await pq.clear_history()
        assert pq.plan_history_uid != ph_uid
        plan_history, _ = await pq.get_history()
        assert plan_history == []

    asyncio.run(testing())
def test_set_next_item_as_running(pq):
    """
    Test for ``PlanQueueOperations.set_next_item_as_running()`` function
    """

    async def testing():
        # Apply to empty queue
        assert await pq.get_queue_size() == 0
        assert await pq.is_item_running() is False
        assert await pq.set_next_item_as_running() == {}
        assert await pq.get_queue_size() == 0
        assert await pq.is_item_running() is False
        # Apply to a queue with several plans
        await pq.add_item_to_queue({"name": "a"})
        await pq.add_item_to_queue({"name": "b"})
        await pq.add_item_to_queue({"name": "c"})
        # Set one of 3 plans as running (removes it from the queue)
        pq_uid = pq.plan_queue_uid
        assert await pq.set_next_item_as_running() != {}
        assert pq.plan_queue_uid != pq_uid
        # The running plan leaves the queue but keeps its entry in '_uid_dict'.
        assert await pq.get_queue_size() == 2
        assert len(pq._uid_dict) == 3
        # Apply if a plan is already running
        pq_uid = pq.plan_queue_uid
        assert await pq.set_next_item_as_running() == {}
        assert pq.plan_queue_uid == pq_uid
        assert await pq.get_queue_size() == 2
        assert len(pq._uid_dict) == 3

    asyncio.run(testing())
def test_set_processed_item_as_completed(pq):
    """
    Test for ``PlanQueueOperations.set_processed_item_as_completed()`` function.
    The function moves currently running plan to history.
    """
    plans = [{"item_uid": 1, "name": "a"}, {"item_uid": 2, "name": "b"}, {"item_uid": 3, "name": "c"}]
    plans_run_uids = [["abc1"], ["abc2", "abc3"], []]

    def add_status_to_plans(plans, run_uids, exit_status):
        # Build the expected history entries: deep copies of the plans with
        # 'result' metadata ('exit_status' and 'run_uids') attached.
        plans = copy.deepcopy(plans)
        plans_modified = []
        for plan, run_uid in zip(plans, run_uids):
            plan.setdefault("result", {})
            plan["result"]["exit_status"] = exit_status
            plan["result"]["run_uids"] = run_uid
            plans_modified.append(plan)
        return plans_modified

    async def testing():
        for plan in plans:
            await pq.add_item_to_queue(plan)
        # No plan is running
        pq_uid = pq.plan_queue_uid
        ph_uid = pq.plan_history_uid
        plan = await pq.set_processed_item_as_completed(exit_status="completed", run_uids=plans_run_uids[0])
        assert plan == {}
        assert pq.plan_queue_uid == pq_uid
        assert pq.plan_history_uid == ph_uid
        # Execute the first plan
        await pq.set_next_item_as_running()
        pq_uid = pq.plan_queue_uid
        plan = await pq.set_processed_item_as_completed(exit_status="completed", run_uids=plans_run_uids[0])
        assert pq.plan_queue_uid != pq_uid
        assert pq.plan_history_uid != ph_uid
        assert await pq.get_queue_size() == 2
        assert await pq.get_history_size() == 1
        assert plan["name"] == plans[0]["name"]
        assert plan["result"]["exit_status"] == "completed"
        assert plan["result"]["run_uids"] == plans_run_uids[0]
        plan_history, _ = await pq.get_history()
        plan_history_expected = add_status_to_plans(plans[0:1], plans_run_uids[0:1], "completed")
        assert plan_history == plan_history_expected
        # Execute the second plan
        await pq.set_next_item_as_running()
        plan = await pq.set_processed_item_as_completed(exit_status="completed", run_uids=plans_run_uids[1])
        assert await pq.get_queue_size() == 1
        assert await pq.get_history_size() == 2
        assert plan["name"] == plans[1]["name"]
        assert plan["result"]["exit_status"] == "completed"
        assert plan["result"]["run_uids"] == plans_run_uids[1]
        plan_history, _ = await pq.get_history()
        plan_history_expected = add_status_to_plans(plans[0:2], plans_run_uids[0:2], "completed")
        assert plan_history == plan_history_expected

    asyncio.run(testing())
def test_set_processed_item_as_stopped(pq):
    """
    Test for ``PlanQueueOperations.set_processed_item_as_stopped()`` function.
    The function pushes running plan back to the queue and saves it in history as well.
    Typically execution of single-run plans result in no UIDs, but in this test we still assign UIDS
    to test functionality.
    """
    plans = [{"item_uid": 1, "name": "a"}, {"item_uid": 2, "name": "b"}, {"item_uid": 3, "name": "c"}]
    plans_run_uids = [["abc1"], ["abc2", "abc3"], []]

    def add_status_to_plans(plans, run_uids, exit_status):
        # Build the expected history entries: deep copies of the plans with
        # 'result' metadata ('exit_status' and 'run_uids') attached.
        plans = copy.deepcopy(plans)
        plans_modified = []
        for plan, run_uid in zip(plans, run_uids):
            plan.setdefault("result", {})
            plan["result"]["exit_status"] = exit_status
            plan["result"]["run_uids"] = run_uid
            plans_modified.append(plan)
        return plans_modified

    async def testing():
        for plan in plans:
            await pq.add_item_to_queue(plan)
        # No plan is running
        pq_uid = pq.plan_queue_uid
        ph_uid = pq.plan_history_uid
        plan = await pq.set_processed_item_as_stopped(exit_status="stopped", run_uids=plans_run_uids[0])
        assert plan == {}
        assert pq.plan_queue_uid == pq_uid
        assert pq.plan_history_uid == ph_uid
        # Execute the first plan
        await pq.set_next_item_as_running()
        pq_uid = pq.plan_queue_uid
        plan = await pq.set_processed_item_as_stopped(exit_status="stopped", run_uids=plans_run_uids[0])
        assert pq.plan_queue_uid != pq_uid
        assert pq.plan_history_uid != ph_uid
        # Queue size stays at 3: the stopped plan is pushed back to the queue.
        assert await pq.get_queue_size() == 3
        assert await pq.get_history_size() == 1
        assert plan["name"] == plans[0]["name"]
        assert plan["result"]["exit_status"] == "stopped"
        assert plan["result"]["run_uids"] == plans_run_uids[0]
        plan_history, _ = await pq.get_history()
        plan_history_expected = add_status_to_plans([plans[0]], [plans_run_uids[0]], "stopped")
        assert plan_history == plan_history_expected
        # Execute the second plan
        await pq.set_next_item_as_running()
        plan = await pq.set_processed_item_as_stopped(exit_status="stopped", run_uids=plans_run_uids[1])
        assert await pq.get_queue_size() == 3
        assert await pq.get_history_size() == 2
        # The same (first) plan runs again because it was returned to the front
        # of the queue when it was stopped.
        assert plan["name"] == plans[0]["name"]
        assert plan["result"]["exit_status"] == "stopped"
        assert plan["result"]["run_uids"] == plans_run_uids[1]
        plan_history, _ = await pq.get_history()
        plan_history_expected = add_status_to_plans(
            [plans[0].copy(), plans[0].copy()], [plans_run_uids[0], plans_run_uids[1]], "stopped"
        )
        assert plan_history == plan_history_expected

    asyncio.run(testing())
| 2.40625 | 2 |
# Read "proverbs.txt" and print every whitespace-separated word on its own line.
# 'with' guarantees the file is closed even if printing raises (the original
# opened the file manually and would leak the handle on an error).
with open("proverbs.txt", "r") as infile:
    for line in infile:
        line = line.rstrip()
        word_list = line.split()  # split the line on whitespace into words
        for word in word_list:
            print(word)
| 3.296875 | 3 |
server/src/test/integration/general/test_script.py | zstars/weblabdeusto | 0 | 12758779 | <filename>server/src/test/integration/general/test_script.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
import threading
import traceback
import unittest
import time
from test.util.script import ServerCreator
from weblab.core.reservations import WaitingReservation, ConfirmedReservation, WaitingConfirmationReservation
from weblab.data.command import Command
from weblab.data.experiments import ExperimentId
class ScriptTestCase(unittest.TestCase):
    # Integration tests that spin up a WebLab server via ServerCreator and run
    # concurrent simulated users against the 'dummy' experiment.
    # NOTE: written for Python 2 (the helper class uses xrange / print stmt).

    def test_simple(self):
        # Smoke test: a single-core server accepts a login.
        with ServerCreator(u"--cores=1") as sc:
            client = sc.create_client()
            session_id = client.login(u'admin', u'password')
            self.assertNotEquals(session_id, None)

    def test_multiple_cores_20_users_sql(self):
        # 20 concurrent users, SQL-based coordination; the non-concurrent dummy
        # lab must never serve more than one user at a time.
        # NOTE(review): test name says "multiple cores" but --cores=1 is passed -- confirm intent.
        with ServerCreator(u"--cores=1 --db-engine=mysql --db-name=WebLabIntTests1 --db-user=weblab --db-passwd=weblab --coordination-engine=sql --dummy-silent") as sc:
            tester = ExperimentUseTester(sc, 20, 'admin', 'password', 'dummy', 'Dummy experiments')
            failures, max_users = tester.run()
            self.assertEquals(failures, 0)
            self.assertEquals(max_users, 1)

    def test_multiple_cores_20_users_redis(self):
        # Same scenario as above, with Redis-based coordination.
        with ServerCreator(u"--cores=1 --db-engine=mysql --db-name=WebLabIntTests1 --db-user=weblab --db-passwd=weblab --coordination-engine=redis --dummy-silent") as sc:
            tester = ExperimentUseTester(sc, 20, 'admin', 'password', 'dummy', 'Dummy experiments')
            failures, max_users = tester.run()
            self.assertEquals(failures, 0)
            self.assertEquals(max_users, 1)

    def test_multiple_cores_20_users_redis_4_cores(self):
        # Redis coordination across 4 core servers.
        with ServerCreator(u"--cores=4 --db-engine=mysql --db-name=WebLabIntTests1 --db-user=weblab --db-passwd=<PASSWORD> --coordination-engine=redis --dummy-silent") as sc:
            tester = ExperimentUseTester(sc, 20, 'admin', 'password', 'dummy', 'Dummy experiments')
            failures, max_users = tester.run()
            self.assertEquals(failures, 0)
            self.assertEquals(max_users, 1)
class ExperimentUseTester(object):
    # Drives 'concurrent_users' simulated users through a full experiment use
    # (login, reserve, wait, send commands, logout), each in its own daemon
    # thread, while the main thread watches 'users_in' to detect whether more
    # than one user was ever inside the (non-concurrent) lab at once.
    # NOTE: Python 2 code (xrange, print statement, thread.isAlive()).

    def __init__(self, server_creator, concurrent_users, user, password, exp_name, cat_name, max_time = 180, fail_on_concurrency = True, quiet_errors = True):
        self.users_in = 0      # number of users currently inside the experiment
        self.failures = 0      # number of user threads that raised an exception
        self.clients = []
        for n in xrange(concurrent_users):
            client = server_creator.create_client()
            self.clients.append(client)
        self.concurrent_users = concurrent_users
        self.user = user
        self.password = password
        self.exp_name = exp_name
        self.cat_name = cat_name
        self.max_time = max_time
        self.fail_on_concurrency = fail_on_concurrency
        self.quiet_errors = quiet_errors

    def run(self):
        """Launch all user threads and monitor them; return (failures, max_users)."""
        threads = []
        for n in xrange(self.concurrent_users):
            thread = threading.Thread(target = self.run_wrapped, args = (n,))
            thread.setDaemon(True)
            thread.start()
            threads.append(thread)
        initial_time = time.time()
        max_users = 0
        while True:
            time.sleep(0.1)
            # Sample the peak number of simultaneous users (best-effort polling).
            max_users = max(self.users_in, max_users)
            if self.fail_on_concurrency and max_users > 1:
                raise Exception("More than one concurrent users using the same non-concurrent lab!")
            any_alive = False
            for thread in threads:
                if thread.isAlive():
                    any_alive = True
                    break
            if not any_alive:
                break
            if (time.time() - initial_time) > (self.max_time + 10):
                raise Exception("Threads still running after %s seconds!" % (self.max_time + 10))
        return self.failures, max_users

    def run_wrapped(self, user_number):
        # Thread entry point: count any exception as a failure instead of
        # letting it kill the thread silently.
        try:
            self.do_full_experiment_use(user_number)
        except:
            if not self.quiet_errors:
                traceback.print_exc()
            else:
                print "Error not shown since quiet_errors = True"
            self.failures += 1

    def do_full_experiment_use(self, user_number):
        """
        Uses the configured experiment trying to resemble the way a human would do it.
        This method will block for a while.
        :return:
        """
        client = self.clients[user_number]
        sessionid = client.login(self.user, self.password)
        if not sessionid:
            raise Exception("Wrong login")

        # Reserve the flash dummy experiment.
        experiment_id = ExperimentId(self.exp_name, self.cat_name)
        waiting = client.reserve_experiment(sessionid, experiment_id, "{}", "{}", None)
        # print "Reserve response: %r" % waiting
        reservation_id = waiting.reservation_id

        # Poll the reservation until it is confirmed or 'max_time' elapses.
        initial_time = time.time()
        while (time.time() - initial_time) < self.max_time:
            status = client.get_reservation_status(reservation_id)
            if type(status) is WaitingReservation:
                time.sleep(0.1)
            elif type(status) is ConfirmedReservation:
                break
            elif type(status) is WaitingConfirmationReservation:
                time.sleep(0.1)
        if (time.time() - initial_time) >= self.max_time:
            raise Exception("Max time (%s seconds) achieved and still waiting..." % self.max_time)

        self.users_in += 1
        # Send some commands.
        for i in range(20):
            # What's commandstring actually for??
            cmd = Command("foo")
            result = client.send_command(reservation_id, cmd)
            if not result.commandstring.startswith("Received command"):
                raise Exception("Unrecognized command response")
            # print "Command result: %r" % result
            time.sleep(0.1)
        self.users_in -= 1
        result = client.logout(sessionid)
def suite():
    """Return the test suite for this module (unittest integration hook)."""
    return unittest.makeSuite(ScriptTestCase)


if __name__ == '__main__':
    unittest.main()
| 2.09375 | 2 |
# Transcribing DNA into RNA
# rosalind.info/problems/rna
import sys


class rna:
    """Transcribe a DNA string into RNA (every 'T' becomes 'U')."""

    @staticmethod
    def transcribe(dna_seq):
        """Return the RNA transcription of *dna_seq*.

        Uses ``str.replace`` instead of the original per-character loop.
        """
        return dna_seq.replace('T', 'U')

    def main(self, dna_seq):
        """Print the RNA transcription of *dna_seq* (original interface)."""
        print(self.transcribe(dna_seq))


if __name__ == '__main__':
    # Check the argument count before indexing sys.argv: the original read
    # sys.argv[1] first, so a missing argument raised IndexError before the
    # intended error message could be produced.
    if len(sys.argv) < 2 or not sys.argv[1]:
        raise Exception('ERROR: File name should not be empty!')
    filename = sys.argv[1]
    with open(filename, 'r') as seq_file:
        rna().main(seq_file.read())
| 3.25 | 3 |
# HackerRank "30 Days of Code" - Day 26/27: Nested Logic (library fine).
# Fine rules:
#   * returned on time (or in an earlier month/year): no fine
#   * same year and month, returned late: 15 Hackos per extra day
#   * same year, later month: 500 Hackos per extra month
#   * later year: flat 10000 Hackos
def compute_fine(rd, rm, ry, ed, em, ey):
    """Return the fine for a book returned on day/month/year (rd, rm, ry)
    that was due on (ed, em, ey)."""
    if ry < ey:
        return 0
    if ry > ey:
        return 10000
    # Same year from here on.
    if rm < em:
        # Returned in an earlier month: no fine even if the day number is
        # larger (the original code wrongly charged 15*(rd-ed) here).
        return 0
    if rm > em:
        return 500 * (rm - em)
    # Same month: per-day fine only when the return day is past the due day.
    return 15 * (rd - ed) if rd > ed else 0


if __name__ == "__main__":
    rd, rm, ry = map(int, input().split())
    ed, em, ey = map(int, input().split())
    print(compute_fine(rd, rm, ry, ed, em, ey))
| 2.96875 | 3 |
#coding:utf-8
#################################
#Copyright(c) 2014 dtysky
#################################
import G2R


class CgTag(G2R.TagSource):
    # Tag source that expands each CG entry into scene/background tag names.

    def Get(self,Flag,US):
        """Build the tag dictionary for CG resources.

        Starts from the base tags produced by ``G2R.TagSource.Get`` and adds an
        's' sub-dictionary: for every CG in tags['m'], one entry per
        (scene prefix, frame index, background) combination.
        The key and value are the same concatenated name string.

        NOTE(review): assumes US.Args[Flag][cg]['Scene'] holds (prefix, count)
        pairs and ['Background'] a list of suffixes -- confirm against the
        G2R.TagSource schema.
        """
        tags=G2R.TagSource.Get(self,Flag,US)
        tags['s']={}
        for cg in tags['m']:
            tags['s'][cg]={}
            for s in US.Args[Flag][cg]['Scene']:
                for knum in range(s[1]):
                    for bg in US.Args[Flag][cg]['Background']:
                        n=s[0]+str(knum)+bg
                        tags['s'][cg][n]=n
        return tags
bin/pymischief.py | brianredbeard/mischief | 3 | 12758783 | <reponame>brianredbeard/mischief
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from colorama import Fore, Style
import time
import argparse
from mischief.pwned import Pwned
try:
import simplejson as json
except ImportError:
import json
def handleArgs(argv=None):
    """Parse the command-line arguments.

    :param argv: optional list of argument strings. Defaults to ``None`` so
        argparse falls back to ``sys.argv[1:]`` -- existing callers that pass
        no argument are unaffected; tests can pass an explicit list.
    :return: dict with the parsed arguments (key ``'file'``).
    """
    parser = argparse.ArgumentParser(description='Search haveibeenpwned.com')
    parser.add_argument('file', help='json file with account names')
    args = vars(parser.parse_args(argv))
    return args
if __name__ == "__main__":
    # Load the list of account names/emails from the JSON file given on the
    # command line, then query haveibeenpwned.com for breaches and pastes.
    args = handleArgs()
    with open(args['file']) as file:
        data = file.read()
    accounts = json.loads(data)

    p = Pwned()
    # Pass 1: breach report for every account.
    for email in accounts:
        d = p.account(email)
        if d is None:
            print(Fore.GREEN + ">> No leaks for", email, Style.RESET_ALL)
            continue
        print("[{}]========================".format(email))
        for b in d:
            print(Fore.MAGENTA + " {} [{} accounts total]".format(b['Title'], p.clean(b['PwnCount'])))
            print(Fore.CYAN + " Domain: {}".format(b['Domain']))
            print(Fore.CYAN + " {}".format(b['BreachDate']))
            if b['IsRetired']:
                print(Fore.GREEN + " Retired")
            else:
                print(Fore.RED + " Data still on web")
            if b['IsVerified']:
                print(Fore.RED + " Hack verified by website")
            else:
                print(Fore.YELLOW + " Hack not verified by website")
            print(Style.RESET_ALL + " Leaked data:")
            for dd in b['DataClasses']:
                print(" - {}".format(dd))
        # print(Style.RESET_ALL)
        # Throttle requests between accounts (the HIBP API rate-limits clients).
        time.sleep(1)

    # Pass 2: paste report for every account.
    for acc in accounts:
        # print("Paste:",acc,"==============")
        paste = p.paste(acc)
        if paste is None:
            print(Fore.GREEN + ">> No pastes for", acc, Style.RESET_ALL)
            continue
        print("Paste:", acc, "==============")
        for b in paste:
            s = b['Source']
            if s == "AdHocUrl":
                print(" AdHocUrl: {}".format(b['Id']))
            elif s == "Pastebin":
                print(" Pastebin: https://pastebin.com/{}".format(b['Id']))
            elif s == "QuickLeak":
                print(" QuickLeak: http://quickleak.se/{}".format(b['Id']))
            else:
                print(" {}: {}".format(b['Source'], b['Id']))
        time.sleep(1)

    # num = p.password(pwd)
    # print("Password {} used {} times".format(pwd, num))
| 2.5 | 2 |
main.py | drakenclimber/hookster | 0 | 12758784 | #!/usr/bin/env python
#****************************************************************************
# ©
# Copyright 2014-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#****************************************************************************
#****************************************************************************
# Imports
#****************************************************************************
# python standard imports
import argparse
import os
import sys
import traceback
# project-specific imports
cur_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(cur_path, 'actions'))
sys.path.append(os.path.join(cur_path, 'checks'))
sys.path.append(os.path.join(cur_path, 'framework'))
sys.path.append(os.path.join(cur_path, 'scm'))
import abstractScm
from configManager import ConfigManager
from fileObject import FileObject
from hooksterExceptions import *
from logManager import *
#****************************************************************************
# Constants
#****************************************************************************
CONFIG_FILE = os.path.join(cur_path, "hookster.conf")
#****************************************************************************
# Functions
#****************************************************************************
def setup(config_file, scm, old_rev, new_rev, branch):
    """Setup hookster: build and return the ConfigManager for this run."""
    config = ConfigManager(config_file, scm, old_rev, new_rev, branch)
    return config
def teardown():
    """Teardown hookster: close the log opened during this run."""
    close_log()
def run_this_check(config, check_name, filename):
    """Return True if the check named *check_name* should run on *filename*.

    A check runs only when the file's extension, formatted as a ``*.ext``
    glob-style pattern, appears in that check's configured extension
    whitelist.
    """
    extension_list = config.check.get_check_extensions(check_name)
    # Build the "*.ext" pattern from the file's extension so it can be
    # compared directly against the whitelist entries.
    file_extension = "*" + os.path.splitext(filename)[1]
    return file_extension in extension_list
def main(config_file, scm, old_rev, new_rev, branch):
    """
    Hookster entry point

    Runs every enabled check against every file changed in the new revision.
    On success runs the configured "success" actions; on any exception runs
    the "failure" actions and re-raises.

    :param config_file: path to the hookster configuration file
    :param scm: SCM type, e.g. git, svn
    :param old_rev: hash/ID of the previous (old) revision
    :param new_rev: hash/ID of the incoming (new) revision
    :param branch: branch name
    :return: None
    """
    try:
        config = setup(config_file, scm, old_rev, new_rev, branch)

        # loop through each modified file
        for filename in config.scm.get_changed_file_list(config.new_rev):
            file_object = FileObject(filename, config.scm, config.branch_name, config.new_rev, config.old_rev)

            # loop through the enabled checks
            for check in config.check.checks:
                if run_this_check(config, check, file_object.filename):
                    config.check.check_objs[check].check_file(file_object)

        # all checks passed. run the "success" actions.
        for key in config.action.success_action_objs.keys():
            config.action.success_action_objs[key].run(True)
    except Exception as ex:
        # Log the exception type, arguments and backtrace before reacting.
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        log(message, LOG_LEVEL_ERROR)
        backtrace = traceback.format_exc()
        log(backtrace, LOG_LEVEL_ERROR)

        # run the "failed" actions
        for key in config.action.failure_action_objs.keys():
            config.action.failure_action_objs[key].run(False)

        # re-raise the failing exception
        raise
    finally:
        teardown()
if __name__ == "__main__":
    # Command-line entry point: parse hook arguments and invoke main().
    parser = argparse.ArgumentParser(description="Hookster - software configuration management hook scripts")
    parser.add_argument('-s', '--scm', dest='scm', default=abstractScm.SCM_DEFAULT, type=str,
                        help='SCM type, e.g. git, svn, etc.')
    parser.add_argument('-o', '--oldrev', dest='old_rev', default=None, type=str,
                        help='Hash/ID that represents the previous (old) revision')
    parser.add_argument('-n', '--newrev', dest='new_rev', default=None, type=str,
                        help='Hash/ID that represents the incoming (new) revision')
    parser.add_argument('-b', '--branch', dest='branch', default=None, type=str,
                        help='Branch name')
    parser.add_argument('-c', '--config', dest='config_file', default=CONFIG_FILE, type=str,
                        help='Hookster configuration file')
    args = parser.parse_args()
    # main() returns None, so the exit code is 0 unless an exception escapes.
    sys.exit(main(args.config_file, args.scm, args.old_rev, args.new_rev, args.branch))
| 1.75 | 2 |
ConvertCommands.py | Wilhite-r/QuadPod | 0 | 12758785 |
from math import pi
print("Commanding Command Converter... ")
def ConvertCommands(command):
    """Map a high-level command name to an (x, y, theta) motion tuple.

    Known commands are "Forward", "Turn" and "Jump"; any other value
    yields None.
    """
    motions = {
        "Forward": (0.0, 1.0, 0.0),
        "Turn": (0.0, 0.0, pi / 2),
        "Jump": (0.0, 0.0, 0.0),
    }
    return motions.get(command)
setup.py | hsiaoyi0504/yoctol-nlu-py | 0 | 12758786 | <reponame>hsiaoyi0504/yoctol-nlu-py
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
# Absolute directory of this setup.py, used to locate the version module.
here = os.path.abspath(os.path.dirname(__file__))
try:
    long_description = open("README.md").read()
except IOError:
    # Fall back to an empty long description when README.md is absent.
    long_description = ""
# Execute ynlu/__version__.py to read __version__ without importing the package.
about = {}
with open(os.path.join(here, "ynlu", "__version__.py")) as f:
    exec(f.read(), about)
setup(
    name="yoctol-nlu",
    version=about["__version__"],
    description="Yoctol Natural Language Understanding SDK",
    license="MIT",
    author="cph",
    packages=find_packages(),
    install_requires=[
        'gql-fork>=0.2.0',
        'requests>=2.13.0',
        "scipy>=1.0.1",
        "numpy>=1.14.2",
        "matplotlib==2.2.2",
        'pandas;python_version>="3.5"',
        'pandas<0.21;python_version<"3.5"',
        "seaborn>=0.8.1",
        "scikit-learn>=0.19.1",
    ],
    long_description=long_description,
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
    ],
)
| 1.703125 | 2 |
src/data/731.py | NULLCT/LOMC | 0 | 12758787 | N, Q = map(int, input().split())
ab = [list(map(int, input().split())) for _ in range(N - 1)]
cd = [list(map(int, input().split())) for _ in range(Q)]

# Build an adjacency list for the tree (vertices converted to 0-indexing).
G = [[] for _ in range(N)]
for a, b in ab:
    a -= 1
    b -= 1
    G[a].append(b)
    G[b].append(a)

from collections import deque

# BFS from vertex 0 to compute every vertex's depth.
# Bug fix: the root's distance must be initialised to 0 before the BFS.
# Previously the line `dist[0] = 0` was commented out, so vertex 0 kept
# dist -1, re-entered the queue via one of its neighbours and every depth
# came out off by one. (The parity answers below happened to survive, but
# the distances themselves were wrong.)
dist = [-1 for _ in range(N)]
dist[0] = 0
Qu = deque()
Qu.append(0)
while len(Qu) > 0:
    i = Qu.popleft()
    for j in G[i]:
        if dist[j] == -1:
            dist[j] = dist[i] + 1
            Qu.append(j)

# For each query, the two tokens meet on an edge ("Road") iff their depths
# have different parity; otherwise they meet at a vertex ("Town").
for c, d in cd:
    c -= 1
    d -= 1
    diff = dist[c] - dist[d]
    if diff % 2 != 0:
        print('Road')
    else:
        print('Town')
| 2.8125 | 3 |
vento_bellingshausen.py | douglasnehme/bia | 0 | 12758788 | # -*- coding: utf-8 -*-
#
# AUTOR: <NAME>
#
# PLACE: Rio de Janeiro - Brazil
#
# CONTACT: <EMAIL>
#
# CRIATION: ago/2018
#
# LAST MODIFICATION: ago/2018
#
# OBJECTIVE: Processing Artigas' meteorological station data for Bia (INUMET)
import os
import sys
import pandas as pd
from datetime import datetime
sys.path.insert(0, os.path.expanduser('~/Dropbox/airsea'))
import airsea
# Wall-clock timestamp used to report total runtime at the end of the script.
start = datetime.now().replace(microsecond = 0)
##############################################################################
#### CONFIG PARAMETERS AND GLOBAL VARIABLES ##################################
##############################################################################
DATADIR = u'/home/douglasnehme/Desktop/bia/arquivos'
filename1 = u'vento_bellingshausen_direcao.xlsx'
filename2 = u'vento_bellingshausen_velocidade.xlsx'
new_filename = u'vento_u_v_bellingshausen.xlsx'
##############################################################################
# OPENNING AND MANIPULATING DATA #############################################
##############################################################################
# Open files: wdir = wind direction, wspd = wind speed; '-' and '' are NaN.
wdir = pd.read_excel(
    os.path.join(
        DATADIR,
        filename1
    ),
    header=0,
    index_col=0,
    na_values=['', '-']
)
wspd = pd.read_excel(
    os.path.join(
        DATADIR,
        filename2
    ),
    header=0,
    index_col=0,
    na_values=['', '-']
)
# Transform a df with monthly variation on
# columns and years on lines to Series with
# multi-index and all monthly values
wdir = wdir.stack()
wspd = wspd.stack()
# Aggregate month and year info from
# multi-index in one string, transform it
# into datetime and set as Series index
wdir.index = pd.to_datetime((
    wdir.index.get_level_values(0).astype('str') +
    '-' +
    wdir.index.get_level_values(1).astype('str')
))
wspd.index = pd.to_datetime((
    wspd.index.get_level_values(0).astype('str') +
    '-' +
    wspd.index.get_level_values(1).astype('str')
))
# Name Series
wdir.name = 'wdir'
wspd.name = 'wspd'
# Fill gaps with NaN (month-start frequency)
wdir = wdir.resample('MS').asfreq()
wspd = wspd.resample('MS').asfreq()
# Merge two Series in one df (inner join on the datetime index)
df = pd.merge(
    wspd,
    wdir,
    left_index=True,
    right_index=True
)
del wdir, wspd
# Convert polar wind (speed, direction) into cartesian u/v components,
# rounded to 1 decimal place by the airsea helper.
u, v = airsea.pol2cart_wind(
    df.wspd,
    df.wdir,
    rnd=1
)
df_new = pd.DataFrame(
    data={
        'u':u.values,
        'v':v.values
    },
    index=u.index
)
##########################################################
# Transforming index from a daily series from 03/1968 to
# 01/2021 over all rows length to a yearly series over all
# rows length and monthly variations on columns dimension
##########################################################
df_new = df_new.groupby([
    df_new.index.year,
    df_new.index.month
]).mean()
df_new.index.names = ['', '']
df_new = df_new.unstack()
##########################################################
# Save
df_new.to_excel(os.path.join(DATADIR, new_filename))
stop = datetime.now().replace(microsecond=0)
print('Time taken to execute program: {}'.format(stop - start))
| 2.109375 | 2 |
shadow-hunters/card.py | dolphonie/shadow-hunters | 17 | 12758789 | # card.py
# Implements the Card object.
class Card:
    """A card of any type (equipment or single-use action)."""

    def __init__(self, title, desc, color, holder, is_equip, use):
        """Store the card's display data, current owner, and use callback."""
        self.title = title
        self.desc = desc
        self.color = color
        self.holder = holder
        self.is_equipment = is_equip
        self.use = use

    def dump(self):
        """Return a JSON-serializable summary of this card."""
        summary = {}
        summary['title'] = self.title
        summary['desc'] = self.desc
        summary['color'] = self.color.name
        summary['is_equip'] = self.is_equipment
        return summary
| 3.390625 | 3 |
src/applications/api/serializers.py | luisito666/M2Backend | 0 | 12758790 | """
Serializers
"""
from django.utils.translation import ugettext_lazy as _
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from applications.authentication import authenticate
from rest_framework import exceptions, serializers
from .state import User
from .tokens import AccessToken
from .utils import get_string_and_html
from .models import Pages, Token, Site, Image
class PasswordField(serializers.CharField):
    """CharField rendered as a masked password input and never serialized out."""

    def __init__(self, *args, **kwargs):
        style = kwargs.setdefault("style", {})
        style["input_type"] = "password"
        kwargs["write_only"] = True
        super().__init__(*args, **kwargs)
class TokenObtainSerializer(serializers.Serializer):
    """Base serializer that authenticates a username/password pair.

    Subclasses must implement get_token() to produce the token returned
    to the client.  On success, the authenticated user is stored on
    ``self.user``.
    """

    # Name of the model field used as the login identifier (e.g. "login").
    username_field = User.USERNAME_FIELD
    default_error_messages = {
        "no_active_account": _("No active account found with the given credentials")
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fields are added dynamically so the username field name follows
        # the configured user model.
        self.fields[self.username_field] = serializers.CharField()
        self.fields["password"] = PasswordField()

    def validate(self, attrs):
        """Authenticate the credentials; raises AuthenticationFailed otherwise."""
        authenticate_kwargs = {
            self.username_field: attrs[self.username_field],
            "password": attrs["password"],
        }
        try:
            # Pass the request through when available (some backends use it).
            authenticate_kwargs["request"] = self.context["request"]
        except KeyError:
            pass
        self.user = authenticate(**authenticate_kwargs)
        # Prior to Django 1.10, inactive users could be authenticated with the
        # default `ModelBackend`. As of Django 1.10, the `ModelBackend`
        # prevents inactive users from authenticating. App designers can still
        # allow inactive users to authenticate by opting for the new
        # `AllowAllUsersModelBackend`. However, we explicitly prevent inactive
        # users from authenticating to enforce a reasonable policy and provide
        # sensible backwards compatibility with older Django versions.
        if self.user is None:
            raise exceptions.AuthenticationFailed(
                self.error_messages["no_active_account"],
                "no_active_account",
            )
        return {}

    @classmethod
    def get_token(cls, user):
        """Return a token for *user*; must be overridden by subclasses."""
        raise NotImplementedError(
            "Must implement `get_token` method for `TokenObtainSerializer` subclasses"
        )
class TokenObtainPairSerializer(TokenObtainSerializer):
    """Credential serializer that also issues an access token on success."""

    @classmethod
    def get_token(cls, user):
        """Build a fresh AccessToken for *user*."""
        return AccessToken.for_user(user)

    def validate(self, attrs):
        validated = super().validate(attrs)
        validated["refresh"] = str(self.get_token(self.user))
        return validated
class RegisterSerializer(serializers.ModelSerializer):
    """Create a new account and e-mail the user an activation token."""

    def create(self, validated_data):
        """Create the account, persist it and trigger the activation e-mail."""
        user = User.objects.create_account(**validated_data)
        user.save()
        token = Token.to_active(user)
        self.send_activation_email(user, token)
        return user

    def send_activation_email(self, user, token):
        """Render and send the account-activation e-mail (text + HTML parts)."""
        html_content, string_content = get_string_and_html(
            "email/email_confirmation.html", {"user": user, "token": token}
        )
        subject = _("Bienvenido a ") + settings.SERVERNAME
        email = EmailMultiAlternatives(
            subject, string_content, settings.EMAIL_HOST_USER, [user.email]
        )
        email.attach_alternative(html_content, "text/html")
        email.send()

    class Meta:
        model = User
        fields = ("login", "password", "email", "real_name", "social_id")
        # Bug fix: the option key was the invalid placeholder "<PASSWORD>";
        # it must be "write_only" so the password is accepted on input but
        # never serialized back out in responses.
        extra_kwargs = {"password": {"write_only": True}}
# fmt: off
class CurrentUserSerializer(serializers.ModelSerializer):
    """Read-only profile representation of the currently logged-in user."""

    class Meta:
        model = User
        fields = (
            "login",
            "status",
            "real_name",
            "email",
            "coins",
            "create_time"
        )
class RankingPlayerSerializer(serializers.Serializer):
    """Plain serializer for one row of the player ranking (not model-bound)."""

    account_id = serializers.IntegerField()
    name = serializers.CharField()
    level = serializers.IntegerField()
    exp = serializers.IntegerField()
class RankingGuildSerializer(serializers.Serializer):
    """Plain serializer for one row of the guild ranking (not model-bound)."""

    name = serializers.CharField()
    level = serializers.IntegerField()
    exp = serializers.IntegerField()
    ladder_point = serializers.IntegerField()
class ChangePasswordSerializer(serializers.Serializer):
    """Validate a password change: both new-password entries must match."""

    current_password = PasswordField()
    new_password = PasswordField()
    new_password_again = PasswordField()

    def validate(self, data):
        passwords_match = data["new_password"] == data["new_password_again"]
        if not passwords_match:
            raise serializers.ValidationError("password must be equal")
        return data
class ResetPasswordSerializer(serializers.Serializer):
    """Validate a password reset: both new-password entries must match."""

    new_password = PasswordField()
    new_password_again = PasswordField()

    def validate(self, data):
        passwords_match = data["new_password"] == data["new_password_again"]
        if not passwords_match:
            raise serializers.ValidationError("password must be equal")
        return data
class DownloadSerializer(serializers.Serializer):
    """Plain serializer describing a client download mirror entry."""

    id = serializers.UUIDField(read_only=True)
    provider = serializers.CharField(max_length=30)
    weight = serializers.DecimalField(max_digits=5, decimal_places=3)
    link = serializers.CharField(max_length=100)
    create_at = serializers.DateTimeField()
    modified_at = serializers.DateTimeField()
# fmt: off
class PagesSerializer(serializers.ModelSerializer):
    """Serializer for CMS-style pages, looked up by slug instead of pk."""

    class Meta:
        model = Pages
        fields = (
            "slug",
            "title",
            "content",
            "published",
            "create_at",
            "modified_at"
        )
        lookup_field = "slug"
        extra_kwargs = {"url": {"lookup_field": "slug"}}
class RequestPasswordSerializer(serializers.Serializer):
    """Handle a "forgot password" request for a given login.

    NOTE: validate() has a side effect — on success it creates a reset
    token and e-mails it to the user.
    """

    login = serializers.CharField()

    def validate(self, data):
        """Look up the user, issue a reset token and send the reset e-mail."""
        try:
            user = User.objects.get(login=data["login"])
            token = Token.to_reset(user)
            self.send_rest_password_email(user, token)
            return data
        except User.DoesNotExist:
            raise serializers.ValidationError("User not found in database")

    def send_rest_password_email(self, user, token):
        """Render and send the password-reset e-mail (text + HTML parts)."""
        html_content, string_content = get_string_and_html(
            "email/reset_password.html", {"user": user, "token": token}
        )
        subject = _("Olvido de Contraseña - ") + settings.SERVERNAME
        email = EmailMultiAlternatives(
            subject, string_content, settings.EMAIL_HOST_USER, [user.email]
        )
        email.attach_alternative(html_content, "text/html")
        email.send()
class ImageSerializer(serializers.ModelSerializer):
    """Serialize an Image together with the URL of its underlying file."""

    image_url = serializers.SerializerMethodField()

    class Meta:
        model = Image
        fields = ("name", "types", "image_url")

    def get_image_url(self, image):
        """Return the public URL of the stored image file."""
        return image.image.url
class SiteSerializer(serializers.ModelSerializer):
    """Full public representation of a Site, including its images and
    footer-menu pages; looked up by slug instead of pk."""

    images = ImageSerializer(many=True)
    footer_menu = PagesSerializer(many=True)

    class Meta:
        model = Site
        fields = (
            "name",
            "slug",
            "images",
            "initial_level",
            "max_level",
            "rates",
            "facebook_url",
            "facebook_enable",
            "footer_menu",
            "footer_info",
            "footer_menu_enable",
            "footer_info_enable",
            "forum_url",
            "last_online",
        )
        lookup_field = "slug"
        extra_kwargs = {"url": {"lookup_field": "slug"}}
| 2.109375 | 2 |
examples/research_projects/codeparrot/scripts/codeparrot_training.py | mdermentzi/transformers | 2 | 12758791 | <reponame>mdermentzi/transformers<filename>examples/research_projects/codeparrot/scripts/codeparrot_training.py
import logging
from argparse import Namespace
from pathlib import Path
import datasets
import torch
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
import transformers
import wandb
from accelerate import Accelerator
from arguments import TrainingArguments
from huggingface_hub import Repository
from transformers import AdamW, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, get_scheduler, set_seed
class ConstantLengthDataset(IterableDataset):
    """
    Iterable dataset that returns constant length chunks of tokens from a stream of text files.
        Args:
            tokenizer (Tokenizer): The processor used for processing the data.
            dataset (dataset.Dataset): Dataset with text files.
            infinite (bool): If True the iterator is reset after dataset reaches end else stops.
            seq_length (int): Length of token sequences to return.
            num_of_sequences: Number of token sequences to keep in buffer.
            chars_per_token: Number of characters per token used to estimate number of tokens in text buffer.
    """

    def __init__(
        self, tokenizer, dataset, infinite=False, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6
    ):
        self.tokenizer = tokenizer
        # Token inserted between concatenated documents.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Character budget per tokenization batch, estimated from chars/token.
        self.input_characters = seq_length * chars_per_token * num_of_sequences
        self.epoch = 0
        self.infinite = infinite

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            # Accumulate raw text until the character budget is reached.
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    if self.infinite:
                        iterator = iter(self.dataset)
                        self.epoch += 1
                        logger.info(f"Dataset epoch: {self.epoch}")
                    else:
                        more_examples = False
                        break
            # Bug fix: tokenize with the tokenizer stored on the instance
            # instead of relying on the module-level global `tokenizer`.
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Emit fixed-length chunks; a trailing partial chunk is dropped.
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def setup_logging(args):
    """Configure per-process logging plus W&B/TensorBoard on the main process.

    Returns (logger, tb_writer, run_name); tb_writer is None and run_name is
    "" on non-main processes.  Uses the module-level `accelerator` global.
    """
    project_name = args.model_ckpt.split("/")[-1]
    logger = logging.getLogger(__name__)
    log_dir = Path(args.save_dir) / "log/"
    log_dir.mkdir(exist_ok=True)
    # One debug log file per accelerator process.
    filename = f"debug_{accelerator.process_index}.log"
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
        handlers=[logging.FileHandler(log_dir / filename), logging.StreamHandler()],
    )
    if accelerator.is_main_process:  # we only want to setup logging once
        wandb.init(project=project_name, config=args)
        run_name = wandb.run.name
        tb_writer = SummaryWriter()
        tb_writer.add_hparams(vars(args), {"0": 0})
        logger.setLevel(logging.INFO)
        datasets.utils.logging.set_verbosity_info()
        transformers.utils.logging.set_verbosity_info()
    else:
        # Secondary processes stay quiet to avoid duplicated output.
        tb_writer = None
        run_name = ""
        logger.setLevel(logging.ERROR)
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    return logger, tb_writer, run_name
def create_dataloaders(args):
    """Build streaming train/validation dataloaders of fixed-length token chunks.

    The training stream is shuffled with a buffer and repeats forever
    (infinite=True); validation is a single pass.
    NOTE(review): relies on the module-level `tokenizer` global being set
    before this is called.
    """
    ds_kwargs = {"streaming": True}
    train_data = load_dataset(args.dataset_name_train, split="train", **ds_kwargs)
    train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=args.seed)
    valid_data = load_dataset(args.dataset_name_valid, split="train", **ds_kwargs)
    train_dataset = ConstantLengthDataset(tokenizer, train_data, infinite=True, seq_length=args.seq_length)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, infinite=False, seq_length=args.seq_length)
    train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.valid_batch_size)
    return train_dataloader, eval_dataloader
def get_grouped_params(model, args, no_decay=("bias", "LayerNorm.weight")):
    """Split *model*'s parameters into two optimizer groups.

    Parameters whose name contains any marker in *no_decay* get
    weight_decay 0.0; all others get ``args.weight_decay``.

    The default for *no_decay* is a tuple rather than a list to avoid the
    mutable-default-argument pitfall (backward compatible: any iterable of
    substrings works).
    """
    params_with_wd, params_without_wd = [], []
    for name, param in model.named_parameters():
        if any(marker in name for marker in no_decay):
            params_without_wd.append(param)
        else:
            params_with_wd.append(param)
    return [
        {"params": params_with_wd, "weight_decay": args.weight_decay},
        {"params": params_without_wd, "weight_decay": 0.0},
    ]
def log_metrics(step, metrics):
    """Log *metrics* for *step* to the logger and, on the main process only,
    to Weights & Biases and TensorBoard.

    Uses the module-level `logger`, `accelerator`, and `tb_writer` globals.
    """
    logger.info(f"Step {step}: {metrics}")
    if accelerator.is_main_process:
        wandb.log(metrics)
        [tb_writer.add_scalar(k, v, step) for k, v in metrics.items()]
def evaluate(args):
    """Run one evaluation pass and return (mean loss, perplexity).

    Uses the module-level `model`, `eval_dataloader` and `accelerator`
    globals; leaves the model in eval mode.
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        # Repeat per-batch loss so gather() weights every sample equally
        # across processes.
        loss = outputs.loss.repeat(args.valid_batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        # exp() of a very large loss overflows; report infinite perplexity.
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Top-level training script: distributed setup, data, optimization loop,
# periodic checkpointing to the Hugging Face Hub, and a final evaluation.
# Accelerator
accelerator = Accelerator()
acc_state = {str(k): str(v) for k, v in accelerator.state.__dict__.items()}
# Settings
parser = HfArgumentParser(TrainingArguments)
args = parser.parse_args()
args = Namespace(**vars(args), **acc_state)
samples_per_step = accelerator.state.num_processes * args.train_batch_size
set_seed(args.seed)
# Clone model repository (only the main process touches the git checkout)
if accelerator.is_main_process:
    hf_repo = Repository(args.save_dir, clone_from=args.model_ckpt)
# Logging
logger, tb_writer, run_name = setup_logging(args)
logger.info(accelerator.state)
# Checkout new branch on repo
if accelerator.is_main_process:
    hf_repo.git_checkout(run_name, create_branch_ok=True)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.save_dir)
if args.gradient_checkpointing:
    model.gradient_checkpointing_enable()
tokenizer = AutoTokenizer.from_pretrained(args.save_dir)
# Load dataset and dataloader
train_dataloader, eval_dataloader = create_dataloaders(args)
# Prepare the optimizer and learning rate scheduler
optimizer = AdamW(get_grouped_params(model, args), lr=args.learning_rate)
lr_scheduler = get_scheduler(
    name=args.lr_scheduler_type,
    optimizer=optimizer,
    num_warmup_steps=args.num_warmup_steps,
    num_training_steps=args.max_train_steps,
)
def get_lr():
    # Current learning rate of the first parameter group.
    return optimizer.param_groups[0]["lr"]
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
    model, optimizer, train_dataloader, eval_dataloader
)
# Train model: gradients are accumulated over gradient_accumulation_steps
# micro-batches before each optimizer step.
model.train()
completed_steps = 0
for step, batch in enumerate(train_dataloader, start=1):
    loss = model(batch, labels=batch, use_cache=False).loss
    log_metrics(
        step, {"lr": get_lr(), "samples": step * samples_per_step, "steps": completed_steps, "loss/train": loss.item()}
    )
    loss = loss / args.gradient_accumulation_steps
    accelerator.backward(loss)
    if step % args.gradient_accumulation_steps == 0:
        accelerator.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
        completed_steps += 1
    if step % args.save_checkpoint_steps == 0:
        logger.info("Evaluating and saving model checkpoint")
        eval_loss, perplexity = evaluate(args)
        log_metrics(step, {"loss/eval": eval_loss, "perplexity": perplexity})
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(args.save_dir, save_function=accelerator.save)
        if accelerator.is_main_process:
            hf_repo.push_to_hub(commit_message=f"step {step}")
        model.train()
    if completed_steps >= args.max_train_steps:
        break
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
log_metrics(step, {"loss/eval": eval_loss, "perplexity": perplexity})
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.save_dir, save_function=accelerator.save)
if accelerator.is_main_process:
    hf_repo.push_to_hub(commit_message="final model")
| 2.46875 | 2 |
plotCameraJitter.py | topleaf/pictureMatch | 0 | 12758792 | <filename>plotCameraJitter.py
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import os
import time
import argparse
class CameraJitterPlot:
    """Capture frames from a camera (or a still image in playback mode) and
    plot the R/G/B values of one pixel over time to visualise sensor jitter.
    """

    def __init__(self, deviceId=0, frameNum=2,save = False,
                 sampleInteval=1, resW=1920, resH=1080,folderName='plotFrames',x=100,y=200,
                 liveMode = True,fileName='./trainingImages/2/pos-0.png'):
        """Configure the capture.

        In live mode, opens camera *deviceId* at resolution resW x resH;
        otherwise repeatedly samples the still image *fileName*.
        (x, y) is the pixel whose colour is tracked; *sampleInteval* is the
        waitKey delay in ms between samples.
        """
        self.liveMode = liveMode
        self.frameNum = frameNum
        self.sampleInterval = sampleInteval
        self.folderName = os.getcwd()+'/'+folderName.lstrip('/').rstrip('/')+'/'
        self.windowName = 'live image window'
        # Time gap (seconds) between consecutive successful captures.
        self.gaps = []
        # BGR pixel values sampled at (x, y) for each captured frame.
        self.pixelValues = []
        self.x = x
        self.y = y
        self.save = save
        self.actualCount = 0
        self._cameraProporty = []
        self.backGround = None
        if self.liveMode:
            self.camera = cv.VideoCapture(deviceId)
            # Property ids 3/4/5 are width, height and FPS.
            self.camera.set(3,resW)
            self.camera.set(4,resH)
            self.camera.set(5, 5)
            self.camera.set(cv.CAP_PROP_BUFFERSIZE, 1)
            for i in range(19):
                self._cameraProporty.append(self.camera.get(i))
                print(i, ":", self._cameraProporty[i], "\t")
        else:
            self.frame = cv.imread(fileName)
            pass

    def capture(self):
        """Grab up to frameNum frames, recording the (x, y) pixel value and
        the inter-frame time gap; optionally save annotated frames to disk.

        Also shows a thresholded/dilated diff against the first frame.
        Press ESC to stop early.
        """
        count = 0
        try:
            os.mkdir(self.folderName)
        except:
            pass
        previousTimestamp = time.time()
        while count < self.frameNum:
            if self.liveMode:
                succ, liveFrame = self.camera.read()
            else:
                succ = True
                liveFrame = self.frame
            if self.backGround is None:
                # First frame becomes the grayscale reference background.
                self.backGround = cv.cvtColor(liveFrame, cv.COLOR_BGR2GRAY)
                # self.backGround = cv.GaussianBlur(self.backGround,(21,21),0)
                continue
            grayFrame = cv.cvtColor(liveFrame,cv.COLOR_BGR2GRAY)
            # grayFrame = cv.GaussianBlur(grayFrame, (21,21),0)
            diff = cv.absdiff(grayFrame, self.backGround)
            diff = cv.threshold(diff, 25, 255, cv.THRESH_BINARY)[1]
            diff = cv.dilate(diff, cv.getStructuringElement(cv.MORPH_ELLIPSE,(9,4)),iterations=3)
            cv.imshow('diff', diff)
            # Blocks up to 10 s so the diff window can be inspected.
            key = cv.waitKey(10000)
            currentTimestamp = time.time()
            if succ:
                gap = (currentTimestamp-previousTimestamp)
                self.gaps.append(gap)
                fps = 1/gap
                self.pixelValues.append(np.array(liveFrame)[self.y, self.x])
                if self.save:
                    cv.putText(liveFrame,'count={},FPS={:.0f}'.format(count,fps),(10,20),cv.FONT_HERSHEY_COMPLEX,
                               1, (0, 255, 0), 2)
                    cv.imwrite(self.folderName+str(count) + '.png', liveFrame)
                cv.imshow(self.windowName, liveFrame)
                previousTimestamp = currentTimestamp
                count += 1
            key = cv.waitKey(self.sampleInterval)
            key = key & 0xFF
            if key == 27:
                # cv.destroyWindow(self.windowName)
                break
        self.actualCount = count
        print('captured {} frames, sampleInterval is {} ms, saved to {}'.format(count,self.sampleInterval, self.folderName))
        if self.liveMode:
            self.camera.release()

    def show(self):
        """
        plot R,G,B values over count or over gap
        :param x:
        :param y:
        :return:
        """
        x = np.array(range(self.actualCount))
        x1 = np.array(self.gaps)
        y = np.array(self.pixelValues)
        # OpenCV frames are BGR: channel 0 is blue, 1 green, 2 red.
        b = [b[0] for b in y]
        g = [g[1] for g in y]
        r = [r[2] for r in y]
        b = np.array(b)
        g = np.array(g)
        r = np.array(r)
        plt.title('live Capture Mode = {}, pixel position: ({},{})'.format((lambda x:'True' if x else 'False')(self.liveMode), self.x, self.y))
        plt.scatter(x, b, color='blue')
        plt.scatter(x, g, color='green')
        plt.scatter(x, r, color='red')
        # plt.scatter(x, x1, color='cyan')
        plt.xlabel('sample count #')
        plt.ylabel('R/G/B value')
        print('gaps value range is [{},{}] seconds, mean is {},std={}'.format(np.min(x1), np.max(x1),np.mean(x1),np.std(x1)))
        print('blue value range is [{},{}],mean is {},std={}'.format(np.min(b), np.max(b),np.mean(b),np.std(b)))
        print('green value range is [{},{}],mean is {},std={}'.format(np.min(g), np.max(g),np.mean(g),np.std(g)))
        print('red value range is [{},{}],mean is {},std={}'.format(np.min(r), np.max(r),np.mean(r),np.std(r)))
        plt.show()
        cv.destroyAllWindows()
        pass
# CLI entry point: parse capture options, run the capture, then plot.
if __name__ =='__main__':
    parser = argparse.ArgumentParser("plot the (b,g,r) values at position of (x,y) from live camera")
    parser.add_argument('--deviceId',dest="deviceId",help="video camera id:,default=0 ",type=int,default=0)
    parser.add_argument('--x',dest="x",help="x position ,default=332",type=int,default=332)
    parser.add_argument('--y',dest="y",help="y position ,default=364",type=int,default=364)
    parser.add_argument('--frameNum',dest="frameNum",help="capture how many frames ,default=100",type=int,default=100)
    parser.add_argument('--save',dest="save",help="save to disk or not,default=0 ",type=int,default=0)
    parser.add_argument('--liveMode',dest="live",help="live or playback mode,default=1 ",type=int,default=1)
    parser.add_argument('--fileName', dest="fileName",help="playback mode fileName, "
                        "default=./trainingImages/2/pos-0.png ",type=str,default='./trainingImages/2/pos-0.png')
    args = parser.parse_args()
    plotter = CameraJitterPlot(deviceId=args.deviceId,save=args.save, frameNum=args.frameNum,x=args.x, y=args.y,
                               liveMode=args.live,fileName=args.fileName)#x=332, y=364
    plotter.capture()
    plotter.show()
| 2.71875 | 3 |
pykotor/resource/formats/lip/io_xml.py | NickHugi/PyKotor | 1 | 12758793 | <gh_stars>1-10
from __future__ import annotations
import io
from typing import Optional
from xml.etree import ElementTree
from pykotor.resource.formats.lip import LIP, LIPShape
from pykotor.resource.type import SOURCE_TYPES, TARGET_TYPES, ResourceReader, ResourceWriter
class LIPXMLReader(ResourceReader):
    """Parses a LIP resource out of an XML document read from *source*."""

    def __init__(self, source: SOURCE_TYPES, offset: int = 0, size: int = 0):
        super().__init__(source, offset, size)
        # Read the whole slice up front and parse it as XML.
        self._xml_root: ElementTree.Element = ElementTree.parse(io.StringIO(self._reader.read_bytes(self._size).decode())).getroot()
        self._lip: Optional[LIP] = None

    def load(self, auto_close: bool = True) -> LIP:
        """Build and return the LIP; raises TypeError if the root tag is not <lip>."""
        if self._xml_root.tag != "lip":
            raise TypeError("The XML file that was loaded was not a valid LIP.")

        self._lip = LIP()
        self._lip.length = float(self._xml_root.get("duration"))

        # Each child element is one keyframe: a time plus a mouth shape id.
        for subelement in self._xml_root:
            time = float(subelement.get("time"))
            shape = LIPShape(int(subelement.get("shape")))
            self._lip.add(time, shape)

        if auto_close:
            self._reader.close()

        return self._lip
class LIPXMLWriter(ResourceWriter):
    """Serializes a LIP resource into an XML document written to *target*."""

    def __init__(self, lip: LIP, target: TARGET_TYPES):
        super().__init__(target)
        self._lip = lip
        self._xml_root: ElementTree.Element = ElementTree.Element("lip")

    def write(self, auto_close: bool = True) -> None:
        """Emit the <lip> document (one <keyframe> per entry) as bytes."""
        root = self._xml_root
        root.set("duration", str(self._lip.length))
        for keyframe in self._lip:
            attributes = {
                "time": str(keyframe.time),
                "shape": str(keyframe.shape.value),
            }
            ElementTree.SubElement(root, "keyframe", **attributes)
        ElementTree.indent(root)
        self._writer.write_bytes(ElementTree.tostring(root))
        if auto_close:
            self._writer.close()
| 2.421875 | 2 |
django_uploads_app/ftp/tests/test_app.py | paiuolo/django-uploads-app | 0 | 12758794 | import os
from pathlib import Path
from django.urls import reverse
from django.contrib.auth import get_user_model
from filer.models import File as FilerFile
# from rest_framework import status
from ...tests import APITestFactory
from ...models import Upload, Link
from ..utils import parse_user_files
User = get_user_model()
class TestFtp(APITestFactory):
    """Integration tests for the FTP upload workflow: user folder creation
    and Upload/Filer File records for files dropped into the FTP folder."""

    def setUp(self):
        super(TestFtp, self).setUp()
        # Per-user FTP drop directory inside the user's upload folder.
        self.user_folder_ftp_path = os.path.join(self.user_folder_path, 'ftp')

    def test_user_folder_creation_on_access(self):
        """
        Creates user related folder on first access
        """
        self.perform_user_login()
        response = self.client.get('/', format='json')
        self.assertTrue(os.path.exists(self.user_folder_ftp_path), 'user ftp folder not created')

    def test_fpt_uploaded_file_creates_upload_model(self):
        # Dropping a file into the FTP folder and parsing should create
        # exactly one Upload pointing at the relative path.
        self.perform_user_login()
        Path(os.path.join(self.user_folder_ftp_path, 'ftp_file.txt')).touch()
        _parsed_files = parse_user_files(self.user)
        relative_path = os.path.join('users', self.user.sso_id) + os.sep + os.path.join('ftp', 'ftp_file.txt')
        self.assertTrue(Upload.objects.filter(file_path=relative_path).count() == 1, 'cannot link to ftp uploaded file')

    def test_fpt_uploaded_file_creates_upload_model_once(self):
        # Re-touching the same file and re-parsing must not duplicate Uploads.
        self.perform_user_login()
        Path(os.path.join(self.user_folder_ftp_path, 'ftp_file.txt')).touch()
        _parsed_files = parse_user_files(self.user)
        relative_path = os.path.join('users', self.user.sso_id) + os.sep + os.path.join('ftp', 'ftp_file.txt')
        self.assertTrue(Upload.objects.filter(file_path=relative_path).count() == 1, 'cannot link to ftp uploaded file')
        Path(os.path.join(self.user_folder_ftp_path, 'ftp_file.txt')).touch()
        _parsed_files = parse_user_files(self.user)
        self.assertTrue(Upload.objects.filter(file_path=relative_path).count() == 1, 'cannot link to ftp uploaded file')
        self.assertTrue(Upload.objects.count() == 1, 'more than one Upload created')

    def test_fpt_uploaded_file_creates_filer_file(self):
        # Parsing should also register the file with django-filer.
        self.perform_user_login()
        Path(os.path.join(self.user_folder_ftp_path, 'ftp_file.txt')).touch()
        _parsed_files = parse_user_files(self.user)
        relative_path = os.path.join('users', self.user.sso_id) + os.sep + os.path.join('ftp', 'ftp_file.txt')
        self.assertTrue(FilerFile.objects.filter(file=relative_path).count() == 1, 'cannot find filer file for ftp uploaded file')
| 2.359375 | 2 |
tools/time_data.py | zhang123-123/CsdnServer | 0 | 12758795 | <reponame>zhang123-123/CsdnServer<filename>tools/time_data.py
# -*- coding:utf-8 -*-
def time_(time_delta):
    """Format an elapsed time in seconds as a Chinese "time ago" string.

    Buckets: seconds (< 1 minute), minutes (< 1 hour), "X hours Y minutes"
    (< 1 day), days (< 30 days), months (30-day approximation, < 12 months),
    otherwise years.
    """
    minute, hour, day = 60, 3600, 24 * 60 * 60
    month, year = day * 30, day * 30 * 12
    # Bug fix: the original scaled values through floats, e.g.
    # int(((time_delta / 3600) * 3600) % 60), where the float round trip can
    # truncate to the wrong integer. Direct modular/floor arithmetic is
    # exact and otherwise equivalent.
    if time_delta < minute:
        result = '%s秒前' % int(time_delta % 60)
    elif time_delta < hour:
        result = '%s分钟前' % int(time_delta // 60)
    elif time_delta < day:
        result = '%s小时%s分钟前' % (int(time_delta // hour), int((time_delta % hour) // 60))
    elif time_delta < month:
        result = '%s天前' % int(time_delta // day)
    elif time_delta < year:
        result = '%s月前' % int(time_delta // month)
    else:
        result = '%s年前' % int(time_delta // year)
    return result
| 2.875 | 3 |
Meus_dessafios/Exercicios2021/ex007.py | DiegoSilvaHoffmann/Curso-de-Python | 0 | 12758796 | n1 = float(input('Informe a primeira nota do Aluno: '))
n2 = float(input('informe a segunda nota: '))
# Arithmetic mean of the two grades.
m = (n1 + n2) / 2
print('Sua media foi de {}'.format(m))
| 3.828125 | 4 |
fython/test/printruc/printruc_test.py | nicolasessisbreton/fython | 41 | 12758797 | <filename>fython/test/printruc/printruc_test.py<gh_stars>10-100
s = r"""
.a.fy
integer: x=8, var_name_without_dot=10, any_var_name=10
integer: float int vector(10) vector_content(10)
char(10): variable_name='./file', string = 'abcdef'
# basic
print 'x {:x}'
# dotted
print .file 'x1 {:x}'
print url(.file) 'x2 {:x}'
# in variable or unit
open(10, file=variable_name)
print 10 'x3 {:x}' # number
print var_name_without_dot 'x4 {:x}'
print unit(any_var_name) 'x5 {:x}'
close(10)
# path
print './out' 'x6'
print path('./out') 'x7'
print path(variable_name) 'x8'
# suite
print:
'x9'
'y10'
# multiline
# format are fortran format plus some addition
# see fython.format_mini_language.format_mini_language_desc
print '''
multiline:
{f:float}
{i:int}
{v:vector}
{vc:vector_content}
{a:string}
'''
# xip
xip 'printed only in debug mode'
"""
from fython.test import *
writer(s)
w = load('.a', force=1, release=1, verbose=0, run_main=1)
# print(open(w.module.url.fortran_path, 'r').read())
| 2.84375 | 3 |
backend/authorization/views.py | StichtingIAPC/swipe | 0 | 12758798 | <filename>backend/authorization/views.py
import hashlib
from datetime import timedelta
from django.conf import settings
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.utils import timezone
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework import serializers
User = settings.AUTH_USER_MODEL
class JSONResponse(HttpResponse):
    """HttpResponse whose body is *data* rendered as JSON."""

    def __init__(self, data, **kwargs):
        kwargs['content_type'] = 'application/json'
        super().__init__(JSONRenderer().render(data), **kwargs)
class Logout(View):
    """Delete the auth token supplied in the POST body, logging the user out."""

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(Logout, self).dispatch(request, *args, **kwargs)

    def post(self, request):
        token = request.POST.get('token')
        if token is None:
            return JSONResponse({})
        Token.objects.filter(key=token).delete()
        return JSONResponse({})
class Login(ObtainAuthToken):
    """Exchange username/password for a DRF auth token.

    Tokens older than ``AUTH_TOKEN_VALID_TIME_HOURS`` are purged before a
    token is (re)issued, so a successful login always returns a token that
    is still inside its validity window.  Returns 401 on bad credentials.
    """

    def post(self, request, *args, **kwargs):
        try:
            serializer = self.serializer_class(data=request.data)
            serializer.is_valid(raise_exception=True)
        except serializers.ValidationError:
            # Deliberately vague: do not leak which credential was wrong.
            return HttpResponse(status=401, content="Username or password incorrect")
        user = serializer.validated_data['user']
        # Drop this user's expired tokens so get_or_create issues a fresh one.
        Token.objects.filter(user=user).filter(
            created__lt=timezone.now() - timedelta(hours=settings.AUTH_TOKEN_VALID_TIME_HOURS)).delete()
        token, created = Token.objects.get_or_create(user=user)
        # BUG FIX: Gravatar hashes the *trimmed, lower-cased* email; hashing
        # the raw address yields wrong avatar URLs for mixed-case emails.
        m = hashlib.md5()
        m.update(user.email.strip().lower().encode('utf-8'))
        return Response({
            'token': token.key,
            'user': {
                'id': user.id,
                'username': user.username,
                'permissions': user.get_all_permissions(),
                'gravatarUrl': 'https://www.gravatar.com/avatar/' + m.hexdigest(),
                'firstName': user.first_name,
                'lastName': user.last_name,
                'email': user.email,
            },
        })
class Validate(View):
    """Check whether a (token, username) pair is still valid.

    Returns ``{'valid': False}`` for unknown pairs, a 401 with the expiry
    time for expired tokens, and the user's profile payload (same shape as
    ``Login``'s response) when the token is valid.
    """

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # CSRF is skipped: clients authenticate via the token itself.
        return super(Validate, self).dispatch(request, *args, **kwargs)

    def post(self, request):
        # ROBUSTNESS FIX: use .get() so a missing POST parameter yields an
        # "invalid" response instead of a 500 (MultiValueDictKeyError).
        tokens = Token.objects.filter(key=request.POST.get('token')).filter(
            user__username=request.POST.get('username'))
        if tokens.count() == 1:
            token = tokens.first()
            user = token.user
            expiry = (timezone.now() if token.created is None else token.created) + timedelta(
                hours=settings.AUTH_TOKEN_VALID_TIME_HOURS)
            if expiry < timezone.now():
                return JSONResponse({
                    'valid': False,
                    'expiry': expiry.strftime('%Y-%m-%d %H:%M'),
                }, status=401)
            # BUG FIX: Gravatar hashes the *trimmed, lower-cased* email;
            # hashing the raw address yields wrong URLs for mixed-case emails.
            m = hashlib.md5()
            m.update(user.email.strip().lower().encode('utf-8'))
            return JSONResponse({
                'valid': True,
                'expiry': expiry.strftime('%Y-%m-%d %H:%M'),
                'user': {
                    'id': user.id,
                    'username': user.username,
                    'permissions': user.get_all_permissions(),
                    'gravatarUrl': 'https://www.gravatar.com/avatar/' + m.hexdigest(),
                    'firstName': user.first_name,
                    'lastName': user.last_name,
                    'email': user.email,
                }
            })
        return JSONResponse({
            'valid': False,
        })
| 2.015625 | 2 |
examples/monthly_budget_mover_example.py | Ressmann/starthinker | 138 | 12758799 | <filename>examples/monthly_budget_mover_example.py
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.dataset.run import dataset
from starthinker.task.dbm.run import dbm
from starthinker.task.monthly_budget_mover.run import monthly_budget_mover
def recipe_monthly_budget_mover(config, recipe_timezone, recipe_name, auth_write, auth_read, partner_id, budget_categories, filter_ids, excluded_ios, version, is_colab, dataset):
  """Apply the previous month's budget/spend delta to the current month. Aggregate
     up the budget and spend from the previous month of each category declared
     then apply the delta of the spend and budget equally to each Line Item
     under that Category.

     Args:
       recipe_timezone (timezone) - Timezone for report dates.
       recipe_name (string) - Table to write to.
       auth_write (authentication) - Credentials used for writing data.
       auth_read (authentication) - Credentials used for reading data.
       partner_id (integer) - The sdf file types.
       budget_categories (json) - A dictionary to show which IO Ids go under which Category. {"CATEGORY1":[12345,12345,12345], "CATEGORY2":[12345,12345]}
       filter_ids (integer_list) - Comma separated list of filter ids for the request.
       excluded_ios (integer_list) - A comma separated list of Inserion Order Ids that should be exluded from the budget calculations
       version (choice) - The sdf version to be returned.
       is_colab (boolean) - Are you running this in Colab? (This will store the files in Colab instead of Bigquery)
       dataset (string) - Dataset that you would like your output tables to be produced in.
  """

  # BUG FIX: the `dataset` parameter (a string) shadows the module-level
  # `dataset` task function, so calling `dataset(config, ...)` raised
  # "'str' object is not callable".  Re-import the task under an alias.
  from starthinker.task.dataset.run import dataset as dataset_task

  # 1) Ensure the destination BigQuery dataset exists.
  dataset_task(config, {
    'description':'Create a dataset where data will be combined and transfored for upload.',
    'auth':auth_write,
    'dataset':dataset
  })

  # 2) Schedule a DV360 report with last month's spend per insertion order.
  dbm(config, {
    'auth':auth_read,
    'report':{
      'timeout':90,
      'filters':{
        'FILTER_ADVERTISER':{
          'values':filter_ids
        }
      },
      'body':{
        'timezoneCode':recipe_timezone,
        'metadata':{
          'title':recipe_name,
          'dataRange':'PREVIOUS_MONTH',
          'format':'CSV'
        },
        'params':{
          'type':'TYPE_GENERAL',
          'groupBys':[
            'FILTER_ADVERTISER_CURRENCY',
            'FILTER_INSERTION_ORDER'
          ],
          'metrics':[
            'METRIC_REVENUE_ADVERTISER'
          ]
        }
      }
    },
    'delete':False
  })

  # 3) Read the spend report and SDFs, compute the budget deltas per
  #    category, and write the old/new/changed SDF tables (or files).
  monthly_budget_mover(config, {
    'auth':'user',
    'is_colab':is_colab,
    'report_name':recipe_name,
    'budget_categories':budget_categories,
    'excluded_ios':excluded_ios,
    'sdf':{
      'auth':'user',
      'version':version,
      'partner_id':partner_id,
      'file_types':'INSERTION_ORDER',
      'filter_type':'FILTER_TYPE_ADVERTISER_ID',
      'read':{
        'filter_ids':{
          'single_cell':True,
          'values':filter_ids
        }
      },
      'time_partitioned_table':False,
      'create_single_day_table':False,
      'dataset':dataset,
      'table_suffix':''
    },
    'out_old_sdf':{
      'bigquery':{
        'dataset':dataset,
        'table':recipe_name,
        'schema':[
        ],
        'skip_rows':0,
        'disposition':'WRITE_TRUNCATE'
      },
      'file':'/content/old_sdf.csv'
    },
    'out_new_sdf':{
      'bigquery':{
        'dataset':dataset,
        'table':recipe_name,
        'schema':[
        ],
        'skip_rows':0,
        'disposition':'WRITE_TRUNCATE'
      },
      'file':'/content/new_sdf.csv'
    },
    'out_changes':{
      'bigquery':{
        'dataset':dataset,
        'table':recipe_name,
        'schema':[
        ],
        'skip_rows':0,
        'disposition':'WRITE_TRUNCATE'
      },
      'file':'/content/log.csv'
    }
  })
# Command-line entry point: parse credentials plus recipe parameters and run
# the monthly budget mover recipe once.
if __name__ == "__main__":
  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent("""
      Apply the previous month's budget/spend delta to the current month. Aggregate up the budget and spend from the previous month of each category declared then apply the delta of the spend and budget equally to each Line Item under that Category.

        1. No changes made can be made in DV360 from the start to the end of this process
        2. Make sure there is budget information for the current and previous month's IOs in DV360
        3. Make sure the provided spend report has spend data for every IO in the previous month
        4. Spend report must contain 'Revenue (Adv Currency)' and 'Insertion Order ID'
        5. There are no duplicate IO Ids in the categories outlined below
        6. This process must be ran during the month of the budget it is updating
        7. If you receive a 502 error then you must separate your jobs into two, because there is too much information being pulled in the sdf
        8. Manually run this job
        9. Once the job has completed go to the table for the new sdf and export to a csv
        10. Take the new sdf and upload it into DV360
  """))

  parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
  parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
  parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
  parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
  parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
  parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")

  parser.add_argument("-recipe_timezone", help="Timezone for report dates.", default='America/Los_Angeles')
  parser.add_argument("-recipe_name", help="Table to write to.", default=None)
  parser.add_argument("-auth_write", help="Credentials used for writing data.", default='service')
  parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
  parser.add_argument("-partner_id", help="The sdf file types.", default=None)
  # BUG FIX: the inner {"CATEGORY1": ...} example contained unescaped double
  # quotes inside a double-quoted string, which is a SyntaxError; the help
  # text is now wrapped in single quotes.
  parser.add_argument("-budget_categories", help='A dictionary to show which IO Ids go under which Category. {"CATEGORY1":[12345,12345,12345], "CATEGORY2":[12345,12345]}', default='{}')
  parser.add_argument("-filter_ids", help="Comma separated list of filter ids for the request.", default=[])
  parser.add_argument("-excluded_ios", help="A comma separated list of Inserion Order Ids that should be exluded from the budget calculations", default=None)
  parser.add_argument("-version", help="The sdf version to be returned.", default='5')
  parser.add_argument("-is_colab", help="Are you running this in Colab? (This will store the files in Colab instead of Bigquery)", default=True)
  parser.add_argument("-dataset", help="Dataset that you would like your output tables to be produced in.", default='')

  args = parser.parse_args()

  config = Configuration(
    project=args.project,
    user=args.user,
    service=args.service,
    client=args.client,
    key=args.key,
    verbose=args.verbose
  )

  recipe_monthly_budget_mover(config, args.recipe_timezone, args.recipe_name, args.auth_write, args.auth_read, args.partner_id, args.budget_categories, args.filter_ids, args.excluded_ios, args.version, args.is_colab, args.dataset)
src/python/test/cgp/test_cgp.py | konopczynski/nifty | 38 | 12758800 | <reponame>konopczynski/nifty<gh_stars>10-100
import nifty.cgp as ncgp
import nifty.graph.rag as nrag
import unittest
import nifty
import unittest
import nifty.cgp as ncgp
import numpy
numpy.random.seed(42)
class TestCgp2d(unittest.TestCase):
    """Tests for the 2D topological grid (cell complex) built by nifty.cgp.

    The repeated setup/verification code of the original tests is factored
    into helpers, and the former ``except: print(...); sys.exit()`` pattern
    (which swallowed the error and killed the whole test runner) is replaced
    with ``self.fail`` so a failing case reports normally.
    """

    @staticmethod
    def _all_geometries(tGrid):
        """Extract the cell geometry in all four fill/sort1Cells variants."""
        return [tGrid.extractCellsGeometry(fill=fill, sort1Cells=srt)
                for fill in (True, False) for srt in (True, False)]

    def _check_grid(self, seg, expectedCells):
        """Build a TopologicalGrid2D from a 3x3 segmentation and verify
        cell counts, shapes, bounds relations and geometry sizes.

        ``expectedCells`` is the expected [n0, n1, n2] cell count list.
        """
        seg = numpy.array(seg, dtype='uint32')
        tGrid = ncgp.TopologicalGrid2D(seg)
        numberOfCells = tGrid.numberOfCells
        self.assertEqual(numberOfCells, expectedCells)
        self.assertEqual(tGrid.topologicalGridShape, [5, 5])
        self.assertEqual(tGrid.shape, [3, 3])
        # bounds: 0-cells bound 1-cells, 1-cells bound 2-cells; the reverse
        # mappings must have one entry per bounded cell.
        bounds = tGrid.extractCellsBounds()
        self.assertEqual(len(bounds[0]), expectedCells[0])
        self.assertEqual(len(bounds[1]), expectedCells[1])
        self.assertEqual(len(bounds[0].reverseMapping()), expectedCells[1])
        self.assertEqual(len(bounds[1].reverseMapping()), expectedCells[2])
        # every geometry variant must report one entry per cell of each order
        for geo in self._all_geometries(tGrid):
            for order in (0, 1, 2):
                self.assertEqual(len(geo[order]), numberOfCells[order])

    def _check_random_grids(self, shape, low, high, repetitions):
        """Stress-test grid construction with random connected components."""
        for _ in range(repetitions):
            size = shape[0] * shape[1]
            labels = numpy.random.randint(low, high, size=size).reshape(shape)
            gg = nifty.graph.undirectedGridGraph(shape)
            cc = nifty.graph.connectedComponentsFromNodeLabels(gg, labels.ravel())
            cc = numpy.require(cc.reshape(shape) + 1, dtype='uint32')
            tGrid = ncgp.TopologicalGrid2D(cc)
            # one 2-cell per connected component (labels are 1-based)
            self.assertEqual(tGrid.numberOfCells[2], cc.max())
            bounds = tGrid.extractCellsBounds()
            bounds[0].reverseMapping()
            bounds[1].reverseMapping()
            try:
                self._all_geometries(tGrid)
            except Exception:
                self.fail('extractCellsGeometry failed for:\n{!r}'.format(cc))

    def test_corner_case_3x3_grid_a(self):
        # four active 1-cells but still no junction (no 0-cells)
        self._check_grid([[1, 1, 2],
                          [1, 3, 1],
                          [1, 1, 1]], [0, 2, 3])

    def test_corner_case_3x3_grid_b(self):
        # a single-pixel region fully enclosed by one big region
        self._check_grid([[1, 1, 1],
                          [1, 2, 1],
                          [1, 1, 1]], [0, 1, 2])

    def test_corner_case_3x3_grid_c(self):
        # three regions meeting: two junctions, four boundary segments
        self._check_grid([[1, 1, 3],
                          [1, 2, 3],
                          [1, 1, 3]], [2, 4, 3])

    def test_corner_case_3x3_grid_d(self):
        # same counts as case (a), segmentation supplied transposed
        seg = numpy.array([[1, 1, 1],
                           [1, 2, 1],
                           [1, 1, 3]], dtype='uint32').T
        self._check_grid(seg, [0, 2, 3])

    def test_randomized_big(self):
        self._check_random_grids((10, 20), 0, 4, 100)

    def test_randomized_medium(self):
        self._check_random_grids((7, 7), 0, 4, 1000)

    def test_randomized_small(self):
        self._check_random_grids((4, 3), 1, 5, 3000)
# Allow running this test module directly: `python test_cgp.py`.
if __name__ == '__main__':
    unittest.main()
| 2.140625 | 2 |
src/features/overall_features/first_edited_frame.py | Sushentsev/DapStep | 1 | 12758801 | <filename>src/features/overall_features/first_edited_frame.py
from collections import defaultdict
from typing import List, Set, Dict
import numpy as np
from src.data.objects.stack import Stack
from src.features.features_base import OverallFeature
class FirstEditedFrame(OverallFeature):
    """For every candidate user: the position of the first stack frame whose
    file they ever edited, plus three normalised variants of that position.
    """

    def __init__(self):
        # 4 feature values per user, see __call__.
        super().__init__(4, "first_edited_frame")

    def __call__(self, stack: Stack, user_ids: Set[int], **kwargs) -> Dict[int, List[float]]:
        loader = kwargs["annotation_loader"]
        # Sentinel: a user who never edited any frame gets position len(stack).
        earliest = defaultdict(lambda: len(stack))
        # Starts at 1 so the ratio below never divides by zero.
        annotated = 1
        position = 0
        for frame in stack.frames:
            position += 1
            annotation = loader(frame.raw_frame.commit_hash, frame.raw_frame.file_name)
            if not annotation:
                continue
            annotated += 1
            for author in np.unique(annotation.author):
                if position < earliest[author]:
                    earliest[author] = position
        # Best (smallest) edited-frame position among the candidates, 1-shifted.
        best = min(earliest[uid] for uid in user_ids) + 1
        features = {}
        for uid in user_ids:
            pos = earliest[uid]
            features[uid] = [pos, pos / (len(stack) + 1), pos / annotated, pos / best]
        return features
| 2.171875 | 2 |
tests/test_target.py | iksteen/dpf | 133 | 12758802 | import mock
import pytest
import pwny
def test_default_arch_x86():
    """Machine string 'i386' is detected as the x86 architecture."""
    with mock.patch('platform.machine', return_value='i386'):
        assert pwny.Target().arch is pwny.Target.Arch.x86
def test_default_arch_x86_64():
    # NOTE(review): 'x86_64' also maps onto Arch.x86 here -- presumably the
    # bit width is tracked separately by Target.bits; confirm intended.
    with mock.patch('platform.machine', return_value='x86_64'):
        assert pwny.Target().arch is pwny.Target.Arch.x86
def test_default_arch_unknown():
    """Unrecognised machine strings fall back to Arch.unknown."""
    with mock.patch('platform.machine', return_value='unknown'):
        assert pwny.Target().arch is pwny.Target.Arch.unknown
def test_default_arch_32bit():
    """A '32bit' platform report yields a 32-bit default target."""
    with mock.patch('platform.architecture', return_value=('32bit',)):
        assert pwny.Target().bits is pwny.Target.Bits.bits_32
def test_default_arch_64bit():
    """A '64bit' platform report yields a 64-bit default target."""
    with mock.patch('platform.architecture', return_value=('64bit',)):
        assert pwny.Target().bits is pwny.Target.Bits.bits_64
def test_set_arch():
    """An explicitly requested arch overrides what the platform reports."""
    with mock.patch('platform.architecture', return_value=('64bit',)):
        assert pwny.Target(arch=pwny.Target.Arch.x86).arch is pwny.Target.Arch.x86
def test_default_endian():
    """A default-constructed Target is little-endian."""
    target = pwny.Target()
    assert target.endian is pwny.Target.Endian.little
def test_set_endian():
    """An explicitly requested endianness is honoured."""
    assert pwny.Target(
        arch=pwny.Target.Arch.unknown,
        endian=pwny.Target.Endian.big,
    ).endian is pwny.Target.Endian.big
def test_default_bits_x86():
    """x86 defaults to 32 bits when no width is given."""
    assert pwny.Target(arch=pwny.Target.Arch.x86).bits == 32
# Accessing .bits on an unknown arch has no sensible default and is
# expected to raise NotImplementedError.
@pytest.mark.xfail(raises=NotImplementedError)
def test_default_bits_unsupported():
    target = pwny.Target(arch=pwny.Target.Arch.unknown)
    # Attribute access alone triggers the error; the value is irrelevant.
    _ = target.bits
def test_set__bits():
    """An explicit bit width overrides the arch default."""
    target = pwny.Target(arch=pwny.Target.Arch.x86, bits=64)
    assert target.bits == 64
# Bit widths other than 32/64 are rejected with ValueError.
@pytest.mark.xfail(raises=ValueError)
def test_set_invalid_bits():
    pwny.Target(bits=33)
def test_target_assume():
    """assume() copies arch, endianness, bit width and mode from the donor."""
    donor = pwny.Target(arch=pwny.Target.Arch.arm,
                        endian=pwny.Target.Endian.little,
                        bits=64, mode=2)
    target = pwny.Target()
    target.assume(donor)
    assert target.arch is pwny.Target.Arch.arm
    assert target.endian == pwny.Target.Endian.little
    assert target.bits == 64
    assert target.mode == 2
| 2.203125 | 2 |
src/whylogs/features/__init__.py | cswarth/whylogs | 603 | 12758803 | <filename>src/whylogs/features/__init__.py
# Names of the per-image signal channels tracked by the image feature
# extractors in this package.
_IMAGE_FEATURES = ["Hue", "Brightness", "Saturation"]
| 1.296875 | 1 |
cpte/builder.py | fluendo/conan_package_tools_extender | 0 | 12758804 | import platform
import os
import sys
import yaml
from cpt.packager import ConanMultiPackager
from conans.util.files import load
from conans.errors import ConanException
from conans import tools
def _get_distro_linux():
    """Return the (name, version, codename) tuple of the running Linux distro.

    ``platform.linux_distribution`` was removed in Python 3.8, so the
    third-party ``distro`` package is used on newer interpreters.
    """
    if sys.version_info >= (3, 8, 0):
        import distro
        return distro.linux_distribution()
    import platform
    return platform.linux_distribution()
def _get_yaml_name(basename):
    """Pick the most specific config file for the current platform.

    Preference order:
      Linux:   <basename>_<distro-codename>.yml, then <basename>_Linux.yml
      Windows: <basename>_Windows.yml
      always falling back to plain <basename>.yml (all relative to the cwd).
    """
    cwd = os.getcwd()
    fallback = os.path.join(cwd, '{}.yml'.format(basename))
    per_os = os.path.join(cwd, '{}_{}.yml'.format(basename, platform.system()))
    if platform.system() == 'Windows' and os.path.exists(per_os):
        return per_os
    if platform.system() == 'Linux':
        codename = _get_distro_linux()[2]
        per_distro = os.path.join(cwd, '{}_{}.yml'.format(basename, codename))
        if os.path.exists(per_distro):
            return per_distro
        if os.path.exists(per_os):
            return per_os
    return fallback
def _load_yaml(data_path):
    """Parse a YAML file.

    Returns None when the file is missing, {} when it is empty, and raises
    ConanException when it cannot be parsed.
    """
    if not os.path.exists(data_path):
        return None
    try:
        parsed = yaml.safe_load(load(data_path))
    except Exception as exc:
        raise ConanException('Invalid yml format at {}: {}'.format(data_path, exc))
    return parsed or {}
def _load_tools(tools_info):
    """Make every configured tool available on PATH, downloading if needed.

    Each entry maps a tool file name to the kwargs for ``tools.get`` (which
    must include ``destination``).  The destination directory is always
    appended to PATH; the archive is only fetched when the tool binary is
    not already present.
    """
    for name, info in tools_info.items():
        destination = info['destination']
        os.environ['PATH'] += os.pathsep + destination
        if not os.path.exists(os.path.join(destination, name)):
            tools.get(**info)
class Builder(object):
    """Drive ConanMultiPackager builds from a per-platform YAML description.

    The YAML file (resolved via ``_get_yaml_name``) may contain:
      * ``shared_settings`` (legacy alias ``shared_setting``): per-OS build
        settings/build types, environment variables and default options,
      * ``windows_tools``: tools to download on Windows before building,
      * ``packages``: one entry per package to build.
    """

    _config_data = None
    _shared_settings = None
    _shared_options = None
    _shared_platform = None
    _remote = None
    _remote_user = None
    _remote_password = None

    def __init__(self, file_basename):
        data_path = _get_yaml_name(file_basename)
        self._config_data = _load_yaml(data_path)
        if not self._config_data:
            # _load_yaml returns None for a missing file and {} for an empty one.
            raise ConanException('Invalid yml format at {}: file does not exist or is empty'.format(data_path))
        if 'shared_settings' in self._config_data:
            self._shared_settings = self._config_data['shared_settings']
        if 'shared_setting' in self._config_data:
            # Legacy singular key; takes precedence when both are present.
            self._shared_settings = self._config_data['shared_setting']
        if self._shared_settings:
            if platform.system() in self._shared_settings:
                self._shared_platform = self._shared_settings[platform.system()]
            if 'options' in self._shared_settings:
                self._shared_options = self._shared_settings['options']
        print('Loading the {} configuration file '.format(data_path))

    def system(self):
        """Return the current operating system name (e.g. 'Linux')."""
        return platform.system()

    def set_remote(self, remote, remote_user, remote_password):
        """Configure the Conan remote and the credentials used for upload."""
        self._remote = remote
        self._remote_user = remote_user
        # BUG FIX: the password was previously assigned from an undefined
        # placeholder token (a SyntaxError); store the actual argument.
        self._remote_password = remote_password

    def run(self):
        """Download Windows tools (if configured) and build every package."""
        if platform.system() == 'Windows' and 'windows_tools' in self._config_data:
            tools_section = self._config_data['windows_tools']
            if tools_section:
                _load_tools(tools_section)
        packages_section = self._config_data['packages']
        if packages_section:
            self.build_packages(packages_section)

    def build_packages(self, packages_section):
        """Build each package described in the ``packages`` section."""
        for item in packages_section:
            self.build_package(packages_section[item])

    def build_package(self, package_section):
        """Build one package for every configured build type.

        Package-level ``options``, ``build_requires`` and
        ``settings.build_type`` override/narrow the shared configuration;
        an ``os`` list restricts the package to the listed platforms.
        """
        settings = self._shared_platform['settings']['settings']
        options = {}
        env_vars = {}
        build = package_section['package']
        build_requires = {}
        local_build_type = []
        if 'options' in package_section:
            options = package_section['options']
        else:
            options = self._shared_options
        if 'env_vars' in self._shared_platform:
            env_vars = self._shared_platform['env_vars']
        if 'build_requires' in package_section:
            build_requires = package_section['build_requires']
        if 'os' in package_section:
            if platform.system() not in package_section['os']:
                print('The {} package is not building for the {} platform'.format(
                    build['reference'], platform.system()))
                return
        if 'settings' in package_section and 'build_type' in package_section['settings']:
            local_build_type = package_section['settings']['build_type']
        for env_var in env_vars:
            os.environ[env_var] = env_vars[env_var]
        packager = ConanMultiPackager(**build, build_policy='outdated',
                                      login_username=self._remote_user,
                                      # BUG FIX: undefined placeholder replaced
                                      # with the stored remote password.
                                      password=self._remote_password,
                                      upload=self._remote)
        for build_type in self._shared_platform['settings']['build_type']:
            # An empty local list means "build every shared build type".
            if len(local_build_type) == 0 or build_type in local_build_type:
                settings['build_type'] = build_type
                packager.add(settings.copy(), options=options, build_requires=build_requires)
        packager.run()
| 2.234375 | 2 |
wikifolioPy.py | TheFakeStefan/WikifolioPy | 3 | 12758805 | <gh_stars>1-10
#WikifolioPy imports all submodules and calls them within its own methods. This is to allow a more conventient use of the bot.
from credentials import credentials
from activateSession import SessionActivator
from controlBrowser import BrowserController
from checkWikifolio import CheckWikifolio
class WikifolioPy:
    """Main class to be instantiated from; all modules are bundled into it.

    Methods are implemented and provided by the <modules>:
        login, logout          <controlBrowser.py>
        get_cash_amount        <checkAccountBalance.py>
        ...
    """

    def __init__(self, symbol):
        self.symbol = symbol
        self.credentials = credentials()
        # Activate an authenticated HTTP session for the wikifolio backend.
        self.s = SessionActivator(self.credentials).activateSession()
        self.session = self.s['session']
        self.connectionToken = self.s['connectionToken']
        self.browserController = BrowserController(self.credentials)
        self.checkWikifolio = CheckWikifolio(self.session, self.symbol)

    def login(self):
        """Log in through the controlled browser."""
        self.browserController.login()

    def logout(self):
        """Log out through the controlled browser."""
        self.browserController.logout()

    def get_portfolio_items(self):
        # BUG FIX: the fetched items were previously discarded; forward the
        # underlying result (assuming get_items returns it -- TODO confirm).
        return self.checkWikifolio.get_items()

    def get_cash_amount(self):
        # BUG FIX: same as above -- return the balance instead of dropping it.
        return self.checkWikifolio.check_balance()

    def enter_order(self):
        # Not implemented yet; kept as an explicit no-op placeholder.
        return None
| 2.796875 | 3 |
scripts/windows/journalist.py | CartmanORCamille/OPSVN | 0 | 12758806 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
'''
@Time : 2021/06/21 10:22:31
@Author : Camille
@Version : 1.0
'''
import logging
import os
import datetime
class BaseLogs():
    """Create log folders and configured ``logging`` handlers.

    Args:
        logName: file name of the log; when falsy, defaults to
            ``<today's date>.log``.
        mark: sub-folder under ``logs`` grouping this run's files.
        callerPath: root folder of the caller; logs are written to
            ``<callerPath>/logs/<mark>``.
    """

    def __init__(self, logName, mark, callerPath='..\\'):
        # BUG FIX: the default name was computed and then immediately
        # overwritten by the (possibly None) argument; only fall back to
        # today's date when no name was supplied.
        if not logName:
            todays = datetime.date.today()
            logName = '{}{}'.format(todays, '.log')
        self.logName = logName
        self.callerPath = callerPath
        # The main log folder path.
        self.callerLogsPath = os.path.join(callerPath, 'logs', mark)
        self.baseLogDir()

    def baseLogDir(self):
        """Create the main log folder if it does not exist yet."""
        if not os.path.exists(self.callerLogsPath):
            os.makedirs(self.callerLogsPath)

    def subLogDir(self, subLogPath):
        """Create an additional log folder below callerPath."""
        # os.path.join instead of a hard-coded '\\' so this also works on
        # non-Windows platforms.
        os.makedirs(os.path.join(self.callerPath, subLogPath))

    def logHandler(self, logName=None, w_logName=None):
        """Return a Logger that appends to this instance's log file.

        Args:
            logName: unused, kept for backward compatibility.
            w_logName: optional explicit logger name; defaults to the log
                file path.
        """
        logPath = os.path.join(self.callerLogsPath, self.logName)
        fileHandler = logging.FileHandler(logPath, 'a', encoding='utf-8')
        # The logs format.
        fmt = logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(module)s: %(message)s')
        fileHandler.setFormatter(fmt)
        # BUG FIX: w_logName was previously ignored because the logger was
        # unconditionally re-created with the path-based name.
        logger = logging.Logger(w_logName) if w_logName else logging.Logger(logPath)
        logger.addHandler(fileHandler)
        return logger
class BasicLogs(BaseLogs):
    """Convenience factory for BaseLogs instances."""

    @staticmethod
    def handler(mark, logName=None):
        # Build and return a BaseLogs rooted at the default caller path.
        return BaseLogs(logName, mark)
handlers/sugarGuideHandler.py | zhuxiyulu/sugar | 2 | 12758807 | from tornado.web import RequestHandler
from tornado.web import gen
from controller import sugarGuideController
import json
# 保存糖导的结果
# 保存糖导的结果 (store the sugar-guide questionnaire result)
class AddSugarGuideResult(RequestHandler):
    """POST endpoint that stores the result of the sugar-guide questionnaire."""

    # Request-body fields, in the exact positional order expected by
    # sugarGuideController.createHealthWeekly.
    _FIELDS = (
        'session_id', 'gender', 'age', 'height', 'weight', 'sugarType',
        'diseaseAge', 'akin', 'fm', 'manyDrinkWc', 'posion', 'thirsty',
        'visionDown', 'diseaseSpeed', 'verifyYear', 'cureWay', 'dsPlan',
        'complication',
    )

    @gen.coroutine
    def post(self):
        values = [self.get_argument(name) for name in self._FIELDS]
        data = sugarGuideController.createHealthWeekly(*values)
        self.write(json.dumps(data))
# 获取健康周报
# 获取健康周报 (fetch the weekly health report)
class GetHealthWeekly(RequestHandler):
    """POST endpoint rendering the weekly health report page."""

    @gen.coroutine
    def post(self):
        session_id = self.get_argument('session_id')
        data = sugarGuideController.retireveHealthWeekly(session_id)
        diet, sport, control = data['diet'], data['sport'], data['control']
        context = {}
        # Diet section: quantity/name pairs for each food group.
        for key in ('cerealsValue', 'cereals', 'fruitValue', 'fruit',
                    'meatValue', 'meat', 'milkValue', 'milk',
                    'fatValue', 'fat', 'vegetablesValue', 'vegetables'):
            context[key] = diet[key]
        # Sport section: four recommended activities with duration/frequency.
        for key in ('sport1', 'sport2', 'sport3', 'sport4',
                    'time1', 'time2', 'time3', 'time4',
                    'week1', 'week2', 'week3', 'week4'):
            context[key] = sport[key]
        # Control section: glucose targets and sleep recommendations.
        for key in ('min1', 'max1', 'min2', 'max2', 'sleep1', 'sleep2'):
            context[key] = control[key]
        self.render('healthWeekly.html', **context)
| 2.328125 | 2 |
tests/test_outliers/test_interactive_outlier.py | seyma-tas/human-learn | 1 | 12758808 | import pytest
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklego.datasets import load_penguins
from sklearn.pipeline import Pipeline
from sklearn.metrics import make_scorer, accuracy_score
from hulearn.preprocessing import PipeTransformer
from hulearn.outlier import InteractiveOutlierDetector
from hulearn.common import flatten
from tests.conftest import (
select_tests,
general_checks,
classifier_checks,
nonmeta_checks,
)
# Run every applicable scikit-learn estimator check against a detector built
# from hand-drawn polygons.
@pytest.mark.parametrize(
    "test_fn",
    select_tests(
        include=flatten([general_checks, classifier_checks, nonmeta_checks]),
        # Checks excluded because the drawn-polygon detector intentionally
        # violates generic estimator assumptions (fixed feature names, no
        # arbitrary numeric input, etc.).
        exclude=[
            "check_estimators_pickle",
            "check_estimator_sparse_data",
            "check_estimators_nan_inf",
            "check_pipeline_consistency",
            "check_complex_data",
            "check_fit2d_predict1d",
            "check_methods_subset_invariance",
            "check_fit1d",
            "check_dict_unchanged",
            "check_classifier_data_not_an_array",
            "check_classifiers_one_label",
            "check_classifiers_classes",
            "check_classifiers_train",
            "check_supervised_y_2d",
            "check_supervised_y_no_nan",
            "check_estimators_unfitted",
            "check_estimators_dtypes",
            "check_fit_score_takes_y",
            "check_dtype_object",
            "check_estimators_empty_data_messages",
        ],
    ),
)
def test_estimator_checks(test_fn):
    """
    We're skipping a lot of tests here mainly because this model is "bespoke"
    it is *not* general. Therefore a lot of assumptions are broken.
    """
    clf = InteractiveOutlierDetector.from_json(
        "tests/test_classification/demo-data.json"
    )
    test_fn(InteractiveOutlierDetector, clf)
def test_base_predict_usecase():
    """Fit on the penguins dataset and predict one label per input row."""
    detector = InteractiveOutlierDetector.from_json(
        "tests/test_classification/demo-data.json"
    )
    dataf = load_penguins(as_frame=True).dropna()
    X, y = dataf.drop(columns=["species"]), dataf["species"]
    predictions = detector.fit(X, y).predict(X)
    assert predictions.shape[0] == dataf.shape[0]
def identity(x):
    """Pass-through function used as a no-op step for PipeTransformer."""
    return x
def test_grid_predict():
    """The detector works inside a sklearn Pipeline wrapped by GridSearchCV."""
    detector = InteractiveOutlierDetector.from_json(
        "tests/test_classification/demo-data.json"
    )
    pipeline = Pipeline([
        ("id", PipeTransformer(identity)),
        ("mod", detector),
    ])
    grid = GridSearchCV(
        pipeline,
        cv=5,
        param_grid={},
        scoring={"acc": make_scorer(accuracy_score)},
        refit="acc",
    )
    dataf = load_penguins(as_frame=True).dropna()
    X = dataf.drop(columns=["species", "island", "sex"])
    # Random binary labels: we only care that the plumbing runs end-to-end.
    y = (np.random.random(dataf.shape[0]) < 0.1).astype(int)
    predictions = grid.fit(X, y).predict(X)
    assert predictions.shape[0] == dataf.shape[0]
def test_ignore_bad_data():
    """Degenerate drawings must be silently ignored.

    A quick double-click can produce a two-point "polygon" — Bokeh is
    "okeh" with it, but the point-in-polygon algorithm is not, so such
    shapes must not end up in ``poly_data``.
    """
    empty_chart = {
        "chart_id": "9ec8e755-2",
        "x": "bill_length_mm",
        "y": "bill_depth_mm",
        "polygons": {
            "Adelie": {"bill_length_mm": [], "bill_depth_mm": []},
            "Gentoo": {"bill_length_mm": [], "bill_depth_mm": []},
            "Chinstrap": {"bill_length_mm": [], "bill_depth_mm": []},
        },
    }
    # Only a degenerate two-point shape for "Adelie" — not a real polygon.
    bad_chart = {
        "chart_id": "11640372-c",
        "x": "flipper_length_mm",
        "y": "body_mass_g",
        "polygons": {
            "Adelie": {
                "flipper_length_mm": [[214.43261376806052, 256.2612913545137]],
                "body_mass_g": [[3950.9482324534456, 3859.9137496948247]],
            },
            "Gentoo": {"flipper_length_mm": [], "body_mass_g": []},
            "Chinstrap": {"flipper_length_mm": [], "body_mass_g": []},
        },
    }
    detector = InteractiveOutlierDetector(json_desc=[empty_chart, bad_chart])
    assert len(list(detector.poly_data)) == 0
| 2.078125 | 2 |
accounting/accounting/doctype/gl_entry/test_gl_entry.py | ChillarAnand/accounting | 2 | 12758809 | # Copyright (c) 2021, ac and Contributors
# See license.txt
import frappe
import unittest
from accounting.accounting.doctype.sales_invoice.test_sales_invoice import TestSalesInvoice
class TestGLEntry(unittest.TestCase):
    """GL entries are created when a Sales Invoice is submitted, and
    reversed (not deleted) when it is cancelled."""

    def setUp(self) -> None:
        # Doctype under test; use this everywhere instead of repeating the
        # literal (the original mixed `self.doctype` with hard-coded strings).
        self.doctype = 'GL Entry'

    def test_gl_entries_for_sales_invoice(self):
        """Submitting an invoice adds a debit/credit pair of GL entries."""
        gl_entry_count = frappe.db.count(self.doctype)
        invoice = TestSalesInvoice.create_sales_invoice('Frappe', 'Laptop', 2)
        invoice.submit()

        new_gl_entry_count = frappe.db.count(self.doctype)
        # One debit and one credit entry per submitted invoice.
        assert new_gl_entry_count == gl_entry_count + 2

        last_gl_entry = frappe.get_last_doc(self.doctype)
        assert not last_gl_entry.is_cancelled
        assert last_gl_entry.credit == invoice.total_amount

    def test_reverse_gl_entries_for_sales_invoice(self):
        """Cancelling an invoice appends two reversing entries."""
        gl_entry_count = frappe.db.count(self.doctype)
        invoice = TestSalesInvoice.create_sales_invoice('Frappe', 'Laptop', 2)
        invoice.submit()
        invoice.cancel()

        new_gl_entry_count = frappe.db.count(self.doctype)
        # Two original entries plus two reversal entries.
        assert new_gl_entry_count == gl_entry_count + 4

        last_gl_entry = frappe.get_last_doc(self.doctype)
        assert last_gl_entry.is_cancelled
        assert last_gl_entry.credit == invoice.total_amount
| 2.34375 | 2 |
py2diagrams/parsers/base.py | RomAviad/py2diagrams | 1 | 12758810 | <reponame>RomAviad/py2diagrams<filename>py2diagrams/parsers/base.py<gh_stars>1-10
class BaseAnalyzer(object):
    """Abstract base for analyzers that start from a single node."""

    def __init__(self, base_node):
        # The node the analysis is rooted at.
        self.base_node = base_node

    def analyze(self):
        """Run the analysis. Concrete subclasses must override this."""
        raise NotImplementedError()
tw2gif/tw2gif.py | pablo-moreno/tw2gif | 0 | 12758811 | <gh_stars>0
"""
Tw2Gif: Tweet gif downloader using FFMPEG
"""
import os
import json
import twitter
import requests
import subprocess
import re
from pathlib import Path
from tw2gif.settings import (
CONSUMER_KEY, CONSUMER_SECRET,
ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET,
FFMPEG_SCRIPT
)
class Tw2Gif:
    """Downloads the MP4 video embedded in a tweet and converts it to a GIF
    via the external FFMPEG script."""

    def __init__(self):
        # Authenticated Twitter client built from the module settings.
        self.api = twitter.Api(
            consumer_key=CONSUMER_KEY,
            consumer_secret=CONSUMER_SECRET,
            access_token_key=ACCESS_TOKEN_KEY,
            access_token_secret=ACCESS_TOKEN_SECRET
        )

    def get_video_url(self, tweet):
        """Return the first video variant URL of *tweet*, or '' if absent.

        Only the errors a missing/odd-shaped payload can raise are caught;
        the original bare ``except`` silently swallowed everything.
        """
        try:
            return tweet['media'][0]['video_info']['variants'][0]['url']
        except (KeyError, IndexError, TypeError):
            return ""

    def get_tweet_id(self, url):
        """Extract the status id: the last path segment of the tweet URL."""
        return re.split(r'\/', url)[-1]

    def download_gif(self, url, path='./'):
        """Fetch the tweet's MP4 and convert it to ``<tweet_id>.gif`` in *path*.

        Raises Exception when the tweet contains no video. The temporary
        MP4 file is removed afterwards (the original leaked it on disk).
        """
        print('Downloading gif...')
        tweet_id = self.get_tweet_id(url)
        tweet = self.api.GetStatus(tweet_id).AsDict()
        video_url = self.get_video_url(tweet)
        if not video_url:
            raise Exception('No video URL found in this tweet :(')

        req = requests.get(video_url)
        mp4_file = "aux-{}.mp4".format(tweet_id)
        gif_file = os.path.join(path, "{}.gif".format(tweet_id))

        if req.status_code == 200:
            try:
                with open(mp4_file, 'wb') as f:
                    f.write(req.content)
                result = subprocess.call([FFMPEG_SCRIPT, mp4_file, gif_file])
                if result == 0:
                    print('Gif succesfully created')
                else:
                    print('Ups, something went wrong :(')
            finally:
                # Clean up the temporary MP4 regardless of the outcome.
                if os.path.exists(mp4_file):
                    os.remove(mp4_file)
| 2.953125 | 3 |
flappy-bird.py | FernandaMakiHirose/flappy-bird | 3 | 12758812 | <reponame>FernandaMakiHirose/flappy-bird
# no python console digite: pip install pygame
# biblioteca para criar jogos
import pygame
# biblioteca permite integrar o código com os arquivos do computador
import os
# biblioteca que gera números aleatórios no python
import random
# Window dimensions (pixels).
TELA_LARGURA = 500
TELA_ALTURA = 800

# Sprites are doubled with scale2x so they are not too small on screen;
# pygame.image.load reads the file, os.path.join builds the "imgs" path.
IMAGEM_CANO = pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'pipe.png')))
IMAGEM_CHAO = pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'base.png')))
IMAGEM_BACKGROUND = pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bg.png')))

# The bird is a list of frames (wings at different angles) used for the
# flapping animation.
IMAGENS_PASSARO = [
    pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bird1.png'))),
    pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bird2.png'))),
    pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bird3.png'))),
]

# Score display: the font module must be initialised before SysFont.
pygame.font.init()
# Font family and size for the on-screen score.
FONTE_PONTOS = pygame.font.SysFont('arial', 50)
class Passaro:
    """The player-controlled bird: jump physics, tilt and flap animation."""
    IMGS = IMAGENS_PASSARO
    # Tilt animation tuning.
    ROTACAO_MAXIMA = 25        # max upward tilt, in degrees
    VELOCIDADE_ROTACAO = 20    # degrees added per frame when diving
    TEMPO_ANIMACAO = 5         # frames each wing image is shown
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.angulo = 0            # current tilt angle (degrees)
        self.velocidade = 0        # vertical velocity (negative = up)
        self.altura = self.y       # y at the moment of the last jump
        self.tempo = 0             # frames since the last jump
        self.contagem_imagem = 0   # animation frame counter
        # Start with the first wing frame.
        self.imagem = self.IMGS[0]
    def pular(self):
        # Jump: negative velocity moves up; restart the jump clock.
        self.velocidade = -10.5
        self.tempo = 0
        self.altura = self.y
    def mover(self):
        """Apply one frame of parabolic fall/jump physics and update tilt."""
        self.tempo += 1
        # Projectile-style displacement: s = a*t^2 + v*t.
        deslocamento = 1.5 * (self.tempo**2) + self.velocidade * self.tempo
        # Clamp the fall speed; boost upward movement slightly.
        if deslocamento > 16:
            deslocamento = 16
        elif deslocamento < 0:
            deslocamento -= 2
        self.y += deslocamento
        # Tilt: point up while rising (or just after a jump), otherwise
        # rotate gradually downwards until facing straight down (-90).
        if deslocamento < 0 or self.y < (self.altura + 50):
            if self.angulo < self.ROTACAO_MAXIMA:
                self.angulo = self.ROTACAO_MAXIMA
        else:
            if self.angulo > -90:
                self.angulo -= self.VELOCIDADE_ROTACAO
    def desenhar(self, tela):
        """Pick the flap frame for this tick and blit the rotated bird."""
        self.contagem_imagem += 1
        # Cycle frames 0 -> 1 -> 2 -> 1 -> 0, TEMPO_ANIMACAO ticks each.
        if self.contagem_imagem < self.TEMPO_ANIMACAO:
            self.imagem = self.IMGS[0]
        elif self.contagem_imagem < self.TEMPO_ANIMACAO*2:
            self.imagem = self.IMGS[1]
        elif self.contagem_imagem < self.TEMPO_ANIMACAO*3:
            self.imagem = self.IMGS[2]
        elif self.contagem_imagem < self.TEMPO_ANIMACAO*4:
            self.imagem = self.IMGS[1]
        elif self.contagem_imagem >= self.TEMPO_ANIMACAO*4 + 1:
            self.imagem = self.IMGS[0]
            self.contagem_imagem = 0
        # While diving steeply the bird does not flap: freeze on the
        # mid-flap frame and resume with a downstroke.
        if self.angulo <= -80:
            self.imagem = self.IMGS[1]
            self.contagem_imagem = self.TEMPO_ANIMACAO*2
        # Rotate around the image centre (not the top-left corner).
        imagem_rotacionada = pygame.transform.rotate(self.imagem, self.angulo)
        pos_centro_imagem = self.imagem.get_rect(topleft=(self.x, self.y)).center
        retangulo = imagem_rotacionada.get_rect(center=pos_centro_imagem)
        tela.blit(imagem_rotacionada, retangulo.topleft)
    def get_mask(self):
        # Pixel mask of the current frame, used for collision detection.
        return pygame.mask.from_surface(self.imagem)
class Cano:
    """A pipe pair (top + bottom) with a random vertical gap position."""
    # Vertical gap between the top and bottom pipes (pixels).
    DISTANCIA = 250
    # Horizontal scroll speed (pixels per frame).
    VELOCIDADE = 5
    def __init__(self, x):
        self.x = x
        self.altura = 0
        self.pos_topo = 0
        self.pos_base = 0
        # The top pipe is the pipe image flipped vertically.
        self.CANO_TOPO = pygame.transform.flip(IMAGEM_CANO, False, True)
        self.CANO_BASE = IMAGEM_CANO
        # Set once the bird has passed this pipe (scoring flag).
        self.passou = False
        self.definir_altura()
    def definir_altura(self):
        """Pick a random gap height and derive both pipes' y positions."""
        # Bounds keep the gap reachable (not too high or too low).
        self.altura = random.randrange(50, 450)
        self.pos_topo = self.altura - self.CANO_TOPO.get_height()
        self.pos_base = self.altura + self.DISTANCIA
    def mover(self):
        # Pipes only scroll horizontally, towards the bird.
        self.x -= self.VELOCIDADE
    def desenhar(self, tela):
        # Blit both halves of the pipe pair.
        tela.blit(self.CANO_TOPO, (self.x, self.pos_topo))
        tela.blit(self.CANO_BASE, (self.x, self.pos_base))
    def colidir(self, passaro):
        """Pixel-perfect collision test between *passaro* and either pipe."""
        passaro_mask = passaro.get_mask()
        topo_mask = pygame.mask.from_surface(self.CANO_TOPO)
        base_mask = pygame.mask.from_surface(self.CANO_BASE)
        # Offsets from the bird to each pipe (integers required by overlap).
        distancia_topo = (self.x - passaro.x, self.pos_topo - round(passaro.y))
        distancia_base = (self.x - passaro.x, self.pos_base - round(passaro.y))
        # overlap returns the first overlapping point, or None.
        topo_ponto = passaro_mask.overlap(topo_mask, distancia_topo)
        base_ponto = passaro_mask.overlap(base_mask, distancia_base)
        if base_ponto or topo_ponto:
            return True
        else:
            return False
class Chao:
    """Endlessly scrolling ground.

    Two copies of the floor image ride side by side; whenever one scrolls
    fully off the left edge it is repositioned right after the other one.
    """
    VELOCIDADE = 5
    LARGURA = IMAGEM_CHAO.get_width()
    IMAGEM = IMAGEM_CHAO

    def __init__(self, y):
        self.y = y
        # Tile positions: the second tile starts right after the first.
        self.x1 = 0
        self.x2 = self.LARGURA

    def mover(self):
        """Scroll both tiles left, wrapping whichever left the screen."""
        self.x1 -= self.VELOCIDADE
        self.x2 -= self.VELOCIDADE
        if self.x1 < -self.LARGURA:
            self.x1 = self.x2 + self.LARGURA
        if self.x2 < -self.LARGURA:
            self.x2 = self.x1 + self.LARGURA

    def desenhar(self, tela):
        """Blit both ground tiles at their current offsets."""
        for deslocamento_x in (self.x1, self.x2):
            tela.blit(self.IMAGEM, (deslocamento_x, self.y))
# Draws one complete frame of the game.
def desenhar_tela(tela, passaros, canos, chao, pontos):
    # Background first; everything else is layered on top of it.
    tela.blit(IMAGEM_BACKGROUND, (0, 0))
    for passaro in passaros:
        passaro.desenhar(tela)
    for cano in canos:
        cano.desenhar(tela)
    # Score in white, anchored 10px from the top-right corner.
    texto = FONTE_PONTOS.render(f"{pontos}", 1, (255, 255, 255))
    tela.blit(texto, (TELA_LARGURA - 10 - texto.get_width(), 10))
    # Ground last so it covers pipes that extend below it.
    chao.desenhar(tela)
    # Flip the finished frame onto the display.
    pygame.display.update()
# Main entry point: runs the game loop.
def main():
    """Run the game at 30 FPS until the window is closed.

    Fix over the original: birds were removed with ``passaros.pop(i)``
    while iterating the same list with ``enumerate`` (both on collision
    and on falling off-screen), which skips elements and can misindex.
    Removal is now done by rebuilding the list.
    """
    passaros = [Passaro(230, 350)]
    chao = Chao(730)
    canos = [Cano(700)]
    tela = pygame.display.set_mode((TELA_LARGURA, TELA_ALTURA))
    pontos = 0
    relogio = pygame.time.Clock()

    rodando = True
    while rodando:
        # Cap the loop at 30 frames per second.
        relogio.tick(30)

        # --- user input -------------------------------------------------
        for evento in pygame.event.get():
            if evento.type == pygame.QUIT:
                rodando = False
                pygame.quit()
                quit()
            # Space bar makes every live bird jump.
            if evento.type == pygame.KEYDOWN and evento.key == pygame.K_SPACE:
                for passaro in passaros:
                    passaro.pular()

        # --- physics ----------------------------------------------------
        for passaro in passaros:
            passaro.mover()
        chao.mover()

        # --- pipes: collisions, scoring, scrolling ----------------------
        adicionar_cano = False
        remover_canos = []
        for cano in canos:
            # Rebuild the list instead of popping while iterating.
            passaros = [p for p in passaros if not cano.colidir(p)]
            # Score once per pipe, when the first bird gets past it.
            for passaro in passaros:
                if not cano.passou and passaro.x > cano.x:
                    cano.passou = True
                    adicionar_cano = True
            cano.mover()
            # Pipe fully off the left edge: schedule it for removal
            # (never remove from a list while iterating it).
            if cano.x + cano.CANO_TOPO.get_width() < 0:
                remover_canos.append(cano)

        if adicionar_cano:
            pontos += 1
            canos.append(Cano(600))
        for cano in remover_canos:
            canos.remove(cano)

        # --- kill birds that hit the ground or flew off the top ---------
        passaros = [
            p for p in passaros
            if not ((p.y + p.imagem.get_height()) > chao.y or p.y < 0)
        ]

        desenhar_tela(tela, passaros, canos, chao, pontos)
# Run the game only when this file is executed directly (not when imported).
if __name__ == '__main__':
    main()
| 3.25 | 3 |
src/encoded/tests/fixtures/schemas/award.py | procha2/encoded | 102 | 12758813 | import pytest
@pytest.fixture
def ENCODE3_award(testapp):
    """A generic ENCODE3 award posted to the test app (expects 201)."""
    payload = {
        'name': 'ABC1234',
        'rfa': 'ENCODE3',
        'project': 'ENCODE',
        'title': 'A Generic ENCODE3 Award',
    }
    return testapp.post_json('/award', payload, status=201).json['@graph'][0]
@pytest.fixture
def award_a():
    """Minimal award properties: just the ENCODE2 name."""
    return {'name': 'ENCODE2'}
@pytest.fixture
def award_1(award_a):
    """award_a at schema version 1 with an ENCODE2 RFA."""
    return dict(award_a, schema_version='1', rfa='ENCODE2')
@pytest.fixture
def award_2(award_1):
    """award_1 at schema version 3 with the ENCODE viewing group."""
    return dict(award_1, schema_version='3', viewing_group='ENCODE')
@pytest.fixture
def award_5(award_2):
    """award_2 at schema version 6 with the ENCODE viewing group."""
    return dict(award_2, schema_version='6', viewing_group='ENCODE')
@pytest.fixture
def award(testapp):
    """Default ENCODE3 award used by most tests."""
    payload = {
        'name': 'encode3-award',
        'rfa': 'ENCODE3',
        'project': 'ENCODE',
        'title': 'A Generic ENCODE3 Award',
        'viewing_group': 'ENCODE3',
    }
    return testapp.post_json('/award', payload).json['@graph'][0]
@pytest.fixture
def award_modERN(testapp):
    """A generic modERN-project award."""
    payload = {
        'name': 'modERN-award',
        'rfa': 'modERN',
        'project': 'modERN',
        'title': 'A Generic modERN Award',
        'viewing_group': 'ENCODE3',
    }
    return testapp.post_json('/award', payload).json['@graph'][0]
@pytest.fixture
def remc_award(testapp):
    """A GGR award visible to the REMC viewing group."""
    payload = {
        'name': 'remc-award',
        'rfa': 'GGR',
        'project': 'GGR',
        'title': 'A Generic REMC Award',
        'viewing_group': 'REMC',
    }
    return testapp.post_json('/award', payload).json['@graph'][0]
@pytest.fixture
def encode2_award(testapp):
    """An ENCODE2 award with a fixed uuid.

    The uuid must match upgrade/shared.py ENCODE2_AWARDS.
    """
    payload = {
        'uuid': '1a4d6443-8e29-4b4a-99dd-f93e72d42418',
        'name': 'encode2-award',
        'rfa': 'ENCODE2',
        'project': 'ENCODE',
        'title': 'A Generic ENCODE2 Award',
        'viewing_group': 'ENCODE3',
    }
    return testapp.post_json('/award', payload).json['@graph'][0]
@pytest.fixture
def encode4_award(testapp):
    """An ENCODE4 award with the 'mapping' component."""
    payload = {
        'name': 'encode4-award',
        'rfa': 'ENCODE4',
        'project': 'ENCODE',
        'title': 'A Generic ENCODE4 Award',
        'viewing_group': 'ENCODE4',
        'component': 'mapping',
    }
    return testapp.post_json('/award', payload).json['@graph'][0]
@pytest.fixture
def award_encode4(testapp):
    """An ENCODE4 award without a component."""
    payload = {
        'name': 'encode4-award',
        'rfa': 'ENCODE4',
        'project': 'ENCODE',
        'title': 'A Generic ENCODE4 Award',
        'viewing_group': 'ENCODE4',
    }
    return testapp.post_json('/award', payload).json['@graph'][0]
@pytest.fixture
def roadmap_award(testapp):
    """A Roadmap-project award visible to the REMC viewing group."""
    payload = {
        'name': 'roadmap-award',
        'rfa': 'Roadmap',
        'project': 'Roadmap',
        'title': 'A Generic Roadmap Award',
        'viewing_group': 'REMC',
    }
    return testapp.post_json('/award', payload).json['@graph'][0]
@pytest.fixture
def award_8(award_1):
    """award_1 at schema version 8 with the ENCODE viewing group."""
    return dict(award_1, schema_version='8', viewing_group='ENCODE')
| 1.945313 | 2 |
discord/ext/timers/__init__.py | iagolirapasssos/discord-timers | 9 | 12758814 | <filename>discord/ext/timers/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Package metadata for the discord-timers extension.
__author__ = "Lorenzo"
__copyright__ = "Copyright 2019 Lorenzo"
__docformat__ = "restructuredtext en"
__license__ = "MIT"
__title__ = "discord-timers"
__version__ = "0.1.0"
# Public API: re-export the timer primitives at package level.
from .timers import Timer, TimerManager
| 1.492188 | 1 |
commands/werewolf.py | j4p/JeBB | 7 | 12758815 | # -*- coding: utf-8 -*-
from line2.models.command import ContinuousHybridCommand, Parameter, ParameterType, CommandResult, CommandResultType, CommandContinuousCallType
from line2.utils import IsEmpty, AddReverseDict, Lock, AddAtExit, DelAtExit, Acquire
from line2.models.messages import Buttons
from time import time, sleep
from threading import Timer, Condition
from random import randint, choice, shuffle
class RoomPhase:
    """Lifecycle states of a game room, in order of progression."""
    idling = 0
    waiting = 1
    starting = 2
    night = 3
    day = 4
    lynchVote = 5
    hunter = 6
    done = 7

    # Human-readable name for each phase value.
    toString = dict(enumerate((
        'idling', 'waiting', 'starting', 'night',
        'day', 'lynchVote', 'hunter', 'done',
    )))
class ActionPhase:
    """Phase in which a role may take its action.

    night/day/hunter mirror RoomPhase so a role's action phase can be
    compared directly to the current room phase; the extra values mark
    special timings (first night only, any day).
    """
    none=0
    night=RoomPhase.night
    day=RoomPhase.day
    hunter=RoomPhase.hunter
    firstNight=7
    anyday=8
class Role(object):
    """A game role. Instances self-register in the class-level tables.

    The named slots below (villager, werewolf, ...) are placeholders that
    module-level code fills in after the class is defined.
    """
    none = None
    villager = None
    werewolf = None
    drunk = None
    seer = None
    harlot = None
    beholder = None
    gunner = None
    traitor = None
    guardianAngel = None
    cursed = None
    detective = None
    apprenticeSeer = None
    cultist = None
    cultistHunter = None
    wildChild = None
    fool = None
    mason = None
    doppelganger = None
    cupid = None
    hunter = None
    serialKiller = None
    tanner = None
    mayor = None
    prince = None
    sorcerer = None
    clumsy = None
    blacksmith = None
    alphaWolf = None
    wolfCub = None

    # Shared registries and role groupings, populated at import time.
    werewolves = []
    unconvertible = []
    visitorKillers = []
    needRoleModel = []
    rolesById = {}
    rolesByName = {}
    validRoles = []
    seers = []

    def __init__(self, id, name, team, actionPhase, initMsg):
        """Create a role and register it in the lookup tables.

        id: unique numeric id (0 is reserved for the "None" placeholder
            and is excluded from validRoles).
        name: display name; initMsg: message sent to the player on assignment.
        """
        self.id = id
        if id:
            Role.validRoles.append(self)
        self.name = name
        self.team = team
        self.actionPhase = actionPhase
        self.initMsg = initMsg
        Role.rolesById[id] = self
        Role.rolesByName[name] = self

    def __eq__(self, rhs):
        # Roles compare equal by numeric id.
        return isinstance(rhs, Role) and rhs.id == self.id

    def __ne__(self, rhs):
        return not self.__eq__(rhs)

    def __hash__(self):
        # Roles are used as dict keys (e.g. room.playersByRole). Defining
        # __eq__ without __hash__ makes the class unhashable on Python 3
        # (and identity-hashed, inconsistently with __eq__, on Python 2),
        # so hash on the same id that __eq__ compares.
        return hash(self.id)
class Team:
    """Team identifiers. Several aliases map to the same numeric id so
    singular/plural spellings can be used interchangeably."""
    none = 0
    village = villager = villagers = 1
    werewolf = werewolves = 2
    cult = cultist = cultists = 3
    doppelganger = 4
    serialKiller = 5
    tanner = 6
    independant = 7

    # Display name for each team id.
    toString = {
        0: "None",
        1: "Villager",
        2: "Werewolf",
        3: "Cult",
        4: "Doppelganger",
        5: "Serial Killer",
        6: "Tanner",
        7: "Independant",
    }
# The seer-style roles share the same introduction text.
seerLine = "You're the Seer! Every night you can choose to look into someone's role."
# Instantiate every role; each registers itself in Role's lookup tables.
Role.none = Role(0, "None", Team.none, ActionPhase.none, "None")
Role.villager = Role(1, "Villager", Team.villager, ActionPhase.none, "You're a Villager. Go plow some field you ugly")
Role.werewolf = Role(2, "Werewolf", Team.werewolf, ActionPhase.night, "You're an awoo")
Role.drunk = Role(3, "Drunk", Team.villager, ActionPhase.none, "You're the Drunk. Werewolves will go drunk and skip one turn if they eat you")
Role.seer = Role(4, "Seer", Team.villager, ActionPhase.night, seerLine)
Role.harlot = Role(5, "Harlot", Team.villager, ActionPhase.night, "You're the Harlot(slut). Every night, you can choose to sneak into someone's bed. If a werewolf tries to kill you, you'll be safe cuz ur not home. However if you sneak into a werewolf's house, you're dead meat. Also, if a werewolf kills the house owner which you snucked into, you'll also be killed.")
Role.beholder = Role(6, "Beholder", Team.villager, ActionPhase.none, "It's nothing special. It's just that you know who the Seer is.")
Role.gunner = Role(7, "Gunner", Team.villager, ActionPhase.day, "You're the Gunner. Every day, you can choose to shoot someone. Your shooting will be revealed to all, as well as your role. You only have two bullets.")
Role.traitor = Role(8, "Traitor", Team.werewolf, ActionPhase.none, "You're a Traitor. You're on the werewolves' side. You will turn into a werewolf if all werewolves die.")
Role.guardianAngel = Role(9, "Guardian Angel", Team.villager, ActionPhase.night, "Your Guardian ANgle. Every night, you can choose to protect someone's house from Werewolves. You can't protect yours, though.")
Role.cursed = Role(10, "Cursed", Team.werewolf, ActionPhase.none, "You're the Cursed. If a werewolf tries to kill you, you will turn into one! Until then, you're on their side from the beginning, though. The Seer will see you as a Werewolf even when you haven't transformed yet.")
Role.detective = Role(11, "Detective", Team.villager, ActionPhase.day, "You're a Detective. Every day, you can choose to investigate someone. However, there are 40% chance the werewolves will notice.")
Role.apprenticeSeer = Role(12, "Apprentice Seer", Team.villager, ActionPhase.none, "You're an Apprentice Seer. If the Seer dies, you carry on his duty")
Role.cultist = Role(13, "Cultist", Team.cultist, ActionPhase.night, "You're a Cultist. You can invite someone over to your cult. If at the end of the game everyone is a cult member, the cult wins")
Role.cultistHunter = Role(14, "<NAME>", Team.villager, ActionPhase.night, "You're the Cultist Hunter. If a cultist tries to invite you, their cult's newest member will die. Every night, you can choose to hunt someone. If he's a cultist, he will die")
Role.wildChild = Role(15, "Wild Child", Team.villager, ActionPhase.firstNight, "You're a Wild Child. You can choose someone to be your role model. If he dies, you'll turn into a Werewolf.")
# The fool gets the genuine Seer intro: he believes he is the Seer.
Role.fool = Role(16, "Fool", Team.villager, ActionPhase.night, seerLine)
Role.mason = Role(17, "Mason", Team.villager, ActionPhase.none, "You're a Mason. All Mason knows all Masons")
Role.doppelganger = Role(18, "Doppelganger", Team.doppelganger, ActionPhase.firstNight, "Your ancestors had the ability to metamorph into others... while you don't have their full abilities, you can pick a player at the start of the game. If that player dies, you will become what they were.\nNote: If they were Wild Child and their role model died, you will become a wolf. Otherwise, you will inherit their role model.\nThe cult cannot convert the Doppelgänger (but can after the doppelganger switches roles). Also - the Doppelgänger can NOT win unless they have transformed. If at the end of the game, the Doppelgänger is still the same, they lose (exception: lover)")
Role.cupid = Role(19, "Cupid", Team.villager, ActionPhase.firstNight, "Love is in the air. As Cupid, you will choose two players at the start of the game. These two players will become madly in love! If one of them dies, the other will die of sorrow :(\nNote: Lovers will know who each other are, but not be told their roles. If the lovers are the last two alive, they win, regardless of teams. If the lovers are on different teams (villager + wolf), and one team wins (wolf), the lover (villager) wins as well. TL;DR if at least one of the lovers was on the winning team, they both win together.")
Role.hunter = Role(20, "Hunter", Team.villager, ActionPhase.hunter, "A trigger happy, vindictive player. As the hunter, you try to keep to yourself. However, when others come to visit you, they may find themselves dead, as your paranoia takes hold and you shoot. If the wolves attack you, you have a chance to take one of them with you. Otherwise, if you die, you will get a chance to shoot someone as you die.\nNote: For wolf attacks, the chance starts at 30%. If there is one wolf, the hunter has a 30% chance to kill the wolf (and survive). For each additional wolf, add 20% (2 wolves = 50%, 3 = 70%, etc). However - if there are multiple wolves, while you may kill one of them, you will still be outnumbered and killed.\nIf the cult tries to convert you, they have a 50% chance to fail. If they fail, you have a 50% chance to kill one of them!")
Role.serialKiller = Role(21, "Serial Killer", Team.serialKiller, ActionPhase.night, "That asylum was silly anyways. What a joke. You are free now however, back to business as usual - killing! The serial killer is a lone player, on their own team. They can win only if they are the last player alive (exception: lovers). As the serial killer, you can kill ANYONE - wolves, hunters, gunners, guardian angels, whatever. If the wolves try to attack you, you will kill one of them (random), and live.")
Role.tanner = Role(22, "Tanner", Team.tanner, ActionPhase.none, "The Tanners goal is simple: Get Lynched. If the Tanner gets lynched, he wins, period. Everyone else loses.")
Role.mayor = Role(23, "Mayor", Team.villager, ActionPhase.anyday, "As mayor, you are a lowly villager, until you reveal yourself. Then you are given twice the vote count for lynching (meaning that your vote is twice as powerful as everyone else's). Use that power wisely to help the Village Team.")
Role.prince = Role(24, "Prince", Team.villager, ActionPhase.none, "Once the prince gets lynched, their role as Prince is revealed, and they survive. However, this can only happen once: if the village insists on lynching them, they will die.")
Role.sorcerer = Role(25, "Sorcerer", Team.werewolf, ActionPhase.night, "Do you remember the good old seer? Well now, it has its Wolf Team counterpart. The sorcerer is the Wuff's Seer. They can only see if someone is Wolf or Seer, and they win with the Wolves.")
Role.clumsy = Role(26, "Clumsy", Team.villager, ActionPhase.none, "You are the Clumsy Guy. Maybe you should not have had so much alcohol for breakfast. You can't see a damn thing. Can you even vote for the person you want to? (You have a 50% chance to vote for someone random.)")
Role.blacksmith = Role(27, "Blacksmith", Team.villager, ActionPhase.anyday, "You are the BlackSmith. Through the years, no blades nor swords gave you as much satisfaction as the Silver Blades the elves ordered.\nYou might have some silverdust left. Who knows ? It might *prevent Wolves from eating tonight*")
Role.alphaWolf = Role(28, "Alpha Wolf", Team.werewolf, ActionPhase.night, "You are the Alpha Wolf, the origin of the curse, the bane of banes. Every night, there's 20% chance that you will bite your pack's meal, and they will join your ranks instead of dying!")
Role.wolfCub = Role(29, "Wolf Cub", Team.werewolf, ActionPhase.night, "What a cuuuute little wuff. _tickles tickles_ -cough cough- As i was saying, you are the Wolf Cub and you _drops the mic_ -I just can't resist that. I think if anyone killed you, I'd give the wuffs two victims. You're too cute to die. I wouldn't be able to tickle you anymore-")
# Roles that hunt at night as part of the wolf pack.
Role.werewolves.append(Role.werewolf)
Role.werewolves.append(Role.alphaWolf)
Role.werewolves.append(Role.wolfCub)
# Roles the cult can never convert.
Role.unconvertible.extend(Role.werewolves)
Role.unconvertible.append(Role.doppelganger)
Role.unconvertible.append(Role.serialKiller)
Role.unconvertible.append(Role.cultistHunter)
# Roles that kill players who visit them.
Role.visitorKillers.extend(Role.werewolves)
Role.visitorKillers.append(Role.hunter)
Role.visitorKillers.append(Role.serialKiller)
# Roles that must pick a role model on the first night.
Role.needRoleModel.append(Role.wildChild)
Role.needRoleModel.append(Role.doppelganger)
# Roles that receive seer-style vision prompts (the fool and sorcerer get
# fake/limited results — presumably handled by their consumers; confirm).
Role.seers.append(Role.seer)
Role.seers.append(Role.fool)
Role.seers.append(Role.sorcerer)
class Alive:
    """Tri-state life status: None means the player is not in the game."""
    notPlaying = None
    dead = False
    alive = True

    # Display text for each status value.
    toString = {None: 'not playing', False: 'dead', True: 'alive'}
class Player(object):
    def __init__(self, obj, room):
        """Register a new player in *room* and reset all per-game state.

        obj: underlying chat-user object (must expose .rObj).
        room: the game room this player joins.
        """
        with room.lock:
            self.lock = Lock()
            with self.lock:
                self._1role = None  # backing field for the `role` property
                self.obj = obj
                self.room = room
                # Player ids are allocated sequentially per room.
                self.room.lastPlayerId+=1
                self.id = self.room.lastPlayerId
                room.players.append(self)
                room.playersById[self.id] = self
                room.playersByObj[obj] = self
                # NOTE(review): duplicate of the line above (`room` is
                # `self.room`) — looks redundant; confirm before removing.
                self.room.playersByObj[obj] = self
                # NOTE(review): rObj is assigned but never used below —
                # leftover, or a deliberate attribute-access side effect? Confirm.
                rObj = self.obj.rObj
                self.originalRole = None
                self.alive = Alive.notPlaying  # None = not playing yet
                self.lover = None
                self.ammo = 0            # gunner bullets (set on role assignment)
                self.protection=0
                self.drunk=False
                self.dayLastSeen=0
                self.canAct=False
                self.houseOwner = self   # whose house this player is in tonight
                self.harlot = None
                self.cultistId = 0       # join order within the cult (0 = not a cultist)
                self.killerRole = None
                self.mayorRevealed = False
                self.princeRevealed = False
                self.master = None       # role model (wild child / doppelganger)
                self.apprentices = []
                self.freeloader = None
                self.done = False
                self.dayRoleSet = 0      # day the current role was granted
                self.getRole = None
                self.kill = None
    def Remove(self):
        """Detach this player from every room registry.

        NOTE(review): the id is reclaimed when ``lastPlayerId - id == 1``,
        i.e. when exactly one player was created after this one — the
        intent looks like "this is the newest player" (diff 0). Confirm
        before relying on id reuse.
        """
        with Acquire(self.lock, self.room.lock):
            if self.room.lastPlayerId - self.id == 1:
                self.room.lastPlayerId-=1
            # Guarded removals: the player may already be partially detached.
            if self in self.room.players:
                self.room.players.remove(self)
            if self.id in self.room.playersById:
                del self.room.playersById[self.id]
            if self.obj in self.room.playersByObj:
                del self.room.playersByObj[self.obj]
            return CommandResult.Done()
    @property
    def role(self):
        # Read-only view of the backing field; assignment goes through the
        # role setter, which keeps the room's per-role registries in sync.
        return self._1role
def GetTeamNames(self, group):
group = list(group)
if len(group) == 0:
return ''
name = group[0].role.name
if self in group:
group.remove(self)
groupLen = len(group)
if groupLen < 1:
return "You are a lone %s" % name
elif groupLen == 1:
return "You and %s are %ss" % (group[0].name, name)
else:
return "You, %s and %s are %ss" % (', '.join([x.name for x in group[:groupLen-1]]), group[-1].name, name)
else:
groupLen = len(group)
if groupLen < 1:
return "There is no %s" % name
elif groupLen == 1:
return "%s is a lone %s" % (group[0].name, name)
elif groupLen == 2:
return "%s and %s are %ss" % (group[0].name, group[1].name, name)
else:
return "%s and %s are %ss" % (', '.join([x.name for x in group[:groupLen-1]]), group[-1].name, name)
def SendTeamNames(self, group):
return self.SendText(self.GetTeamNames(group))
    @role.setter
    def role(self, value):
        """Assign a new role, keeping the room's registries in sync."""
        if value == self.role:
            return
        # De-register from the previous role's bucket.
        if self._1role:
            # Alpha Wolf / Wolf Cub are also tracked in the plain-werewolf bucket.
            if self._1role == Role.alphaWolf or self._1role == Role.wolfCub:
                if self in self.room.playersByRole[Role.werewolf]:
                    self.room.playersByRole[Role.werewolf].remove(self)
            if self in self.room.playersByRole[self._1role]:
                self.room.playersByRole[self._1role].remove(self)
        self._1role = value
        if self._1role not in self.room.playersByRole:
            self.room.playersByRole[self._1role] = []
        self.room.playersByRole[self._1role].append(self)
        if not value:
            return
        # Role-specific bookkeeping on the room.
        if value == Role.cultist:
            with self.room.lock, self.lock:
                # Cultist ids record join order (the newest member is last).
                self.room.lastCultistId += 1
                self.cultistId = self.room.lastCultistId
                self.room.cultists.append(self)
                if not self.room.hasCultist:
                    self.room.hasCultist = True
                if Role.cultist.id not in self.room.votes:
                    self.room.votes[Role.cultist.id] = Vote(self.room)
        elif value == Role.gunner:
            self.ammo = 2  # the gunner starts with two bullets
        elif value == Role.harlot:
            self.room.harlots.append(self)
        elif value == Role.beholder:
            self.room.beholders.append(self)
        elif value in Role.werewolves:
            self.room.werewolves.append(self)
            self.room.hasWerewolf = True
        elif value == Role.seer:
            self.room.seers.append(self)
        elif value == Role.mason:
            self.room.masons.append(self)
        elif value == Role.cupid:
            self.room.lovers[self] = []
        elif value == Role.traitor:
            self.room.traitors.append(self)
        elif value == Role.apprenticeSeer:
            self.room.apprenticeSeers.append(self)
        elif value == Role.guardianAngel:
            self.room.guardianAngels.append(self)
        # Record when the role was granted; a role set during the night
        # counts for the following day.
        self.dayRoleSet = self.room.day
        if self.room.realPhase == RoomPhase.night:
            self.dayRoleSet += 1
def Tell(self, to):
s = "%s is now a %s" % (self.name, self.role.name)
for x in to:
if x != self:
x.SendText(s)
	def TellBeholders(self):
		# Announce this player's role to all beholders and mark the room so
		# the beholder briefing is refreshed.
		self.Tell(self.room.beholders)
		self.room.shouldTellBeholders = True
	def TellWerewolves(self):
		# Announce this player's role to the werewolf pack and mark the room
		# so the pack briefing is refreshed.
		self.Tell(self.room.werewolves)
		self.room.shouldTellWerewolves = True
	def TellMasons(self):
		# Announce this player's role to the masons and mark the room so the
		# mason briefing is refreshed.
		self.Tell(self.room.masons)
		self.room.shouldTellMasons = True
	def TellCultists(self):
		# Announce this player's role to the cult and mark the room so the
		# cult briefing is refreshed.
		self.Tell(self.room.cultists)
		self.room.shouldTellCultists = True
	def TryTell(self, to):
		# Like Tell, but only once the game has actually started; returns
		# True when the announcement was sent, None otherwise.
		if self.room.phase > RoomPhase.starting:
			self.Tell(to)
			return True
	def TryTellBeholders(self):
		# Conditionally announce to beholders; always flag the room refresh.
		self.TryTell(self.room.beholders)
		self.room.shouldTellBeholders=True
	def TryTellWerewolves(self):
		# Conditionally announce to the pack; always flag the room refresh.
		self.TryTell(self.room.werewolves)
		self.room.shouldTellWerewolves=True
	def TryTellMasons(self):
		# Conditionally announce to the masons; always flag the room refresh.
		self.TryTell(self.room.masons)
		self.room.shouldTellMasons=True
	def TryTellCultists(self):
		# Conditionally announce to the cult; always flag the room refresh.
		self.TryTell(self.room.cultists)
		self.room.shouldTellCultists=True
def Die(self, killerRole=Role.none):
print("DIE")
if self.alive == Alive.alive:
print("WAS ALIVE")
self.alive = Alive.dead
self.killerRole = killerRole
self.room.playersByRole[self.role].remove(self)
with self.room.lock:
if self in self.room.alives:
self.room.alives.remove(self)
if self not in self.room.deads:
self.room.deads.append(self)
if self.role == Role.cultist:
with self.room.lock:
if self in self.room.cultists:
self.room.cultists.remove(self)
if len(self.room.cultists) == 0:
self.room.hasCultist = False
elif self.role == Role.hunter:
self.room.deadHunters.append(self)
elif self.role == Role.harlot:
with self.room.lock:
if self in self.room.harlots:
self.room.harlots.remove(self)
elif self.role in Role.werewolves:
with self.room.lock:
if self in self.room.werewolves:
self.room.werewolves.remove(self)
if len(self.room.werewolves) == 0:
if len(self.room.traitors) == 0:
self.room.hasWerewolf = False
else:
for x in self.room.traitors:
x.Inherit(self)
elif self.role == Role.seer:
with self.room.lock:
if self in self.room.seers:
self.room.seers.remove(self)
if len(self.room.seers) == 0:
for x in self.room.apprenticeSeers:
x.Inherit(self)
self.room.shouldTellBeholders = True
elif self.role == Role.mason:
with self.room.lock:
if self in self.room.masons:
self.room.masons.remove(self)
elif self.role == Role.traitor:
with self.room.lock:
if self in self.room.traitors:
self.room.traitors.remove(self)
elif self.role == Role.beholder:
with self.room.lock:
if self in self.room.beholders:
self.room.beholders.remove(self)
elif self.role == Role.apprenticeSeer:
with self.room.lock:
if self in self.room.apprenticeSeers:
self.room.apprenticeSeers.remove(self)
elif self.role == Role.guardianAngel:
with self.room.lock:
if self in self.room.guardianAngels:
self.room.guardianAngels.remove(self)
if killerRole == Role.villager:
self.room.SendText("Yall lynched %s the %s" % (self.name, self.role.name))
elif killerRole == self:
self.room.SendText("%s just can't live without %s", (self.name, self.lover.name))
elif killerRole == Role.none:
self.room.SendText("%s has been away for too long and considered dead" % self.name)
else:
k = "???"
if killerRole:
k = killerRole.name
self.room.SendText("%s the %s was killed by %s" % (self.name, self.role.name, k))
if len(self.apprentices) > 0:
for x in self.apprentices:
x.Inherit(self)
if self.lover and self.lover.alive:
self.lover.Die(self.lover)
	def Inherit(self, master):
		"""Take over *master*'s vacated role (apprentice -> seer,
		traitor/wild child -> werewolf, doppelganger -> master's role),
		then re-run InitRole for the new role."""
		if not self.alive:
			return
		role = self.role
		if self.role == Role.apprenticeSeer:
			role = Role.seer
			with self.room.lock:
				if self in self.room.apprenticeSeers:
					self.room.apprenticeSeers.remove(self)
		elif self.role == Role.traitor:
			role = Role.werewolf
			with self.room.lock:
				if self in self.room.traitors:
					self.room.traitors.remove(self)
		elif self.role == Role.wildChild:
			role = Role.werewolf
		elif self.role == Role.doppelganger:
			role = master.role
			# Chase chained role models so we never end up copying another
			# wild child / doppelganger directly.
			if role == Role.wildChild:
				if master.master:
					self.Inherit(master.master)
				else:
					role = Role.werewolf
			if role == Role.doppelganger:
				if master.master:
					return self.Inherit(master.master)
		if role == Role.seer:
			self.room.shouldTellBeholders = True
		self.role = role
		self.InitRole()
	def SendText(self, text):
		# Private message to this player, prefixed with the room tag unless
		# the text is already tagged.
		if text.startswith("[WW #"):
			return self.rObj.SendText(text)
		return self.rObj.SendText("[WW #%d : %s]\n%s" % (self.room.id, self.room.name, text))
	def SendButtons(self, buttons):
		# Private button menu to this player, prefixing both the column text
		# and the alt-text header with the room tag unless already tagged.
		if not buttons.columnText.startswith("[WW #"):
			buttons.SetColumnText("[WW #%d : %s]\n%s" % (self.room.id, self.room.name, buttons.columnText))
			buttons.SetAltTextHeader("[WW #%d : %s]\n%s" % (self.room.id, self.room.name, buttons.altTextHeader))
		return self.rObj.SendButtons(buttons)
	@property
	def name(self):
		# Display name delegated to the underlying chat user object.
		return self.obj.name
	@property
	def rObj(self):
		# Reply/messaging handle delegated to the underlying chat user object.
		return self.obj.rObj
	def InitRole(self):
		"""Send this player his role briefing and kick off any immediate
		role action (cupid pairing, role-model choice, team intros).

		The fool is briefed as a seer so he never learns he is the fool.
		"""
		role = self.role
		if role == Role.fool:
			role = Role.seer
		self.SendText("Role #%d : %s\nTeam : %s\n%s" % (role.id, role.name, Team.toString[role.team], role.initMsg))
		if role == Role.cupid:
			self.room.AddWaitingCommand(self)
			buts = Buttons("Choose who to pair up as lovers by typing '/ww room=%d pair=<id>' using the ids below" % self.room.id, "Choose who to pair up")
			options = list(self.room.alives)
			for option in options:
				buts.AddButton(
					option.name,
					"/ww room=%d pair=%d" % (self.room.id, option.id),
					"\n%s\t : %s" % (option.id, option.name)
				)
			self.SendButtons(buts)
		elif role in Role.needRoleModel:
			self.room.AddWaitingCommand(self)
			buts = Buttons("Choose your role model by typing '/ww room=%d master=<id>' using the ids below" % self.room.id, "\nChoose your role model")
			# A player may not pick himself as role model.
			options = [x for x in self.room.alives if x != self]
			for option in options:
				buts.AddButton(
					option.name,
					"/ww room=%d master=%d" % (self.room.id, option.id),
					"\n%s\t : %s" % (option.id, option.name)
				)
			self.SendButtons(buts)
		elif role == Role.mason:
			self.TryTellMasons()
		elif role == Role.werewolf:
			self.TryTellWerewolves()
		elif role == Role.seer:
			self.TryTellBeholders()
		elif role == Role.cultist:
			self.TryTellCultists()
def HandleCommand(self, action='', eat=0, kill=0, convert=0, shoot=0, pair=0, see=0, master=0, protect=0, lynch=0, *args, **kwargs):
self.dayLastSeen = self.room.day
if eat:
return self.Eat(eat)
elif kill:
return self.Kill(kill)
elif convert:
return self.Convert(convert)
elif shoot:
if self.role == Role.gunner:
return self.ShootGunner(shoot)
elif self.role == Role.hunter:
return self.ShootHunter(shoot)
elif self.role == Role.cultistHunter:
return self.ShootCH(shoot)
else:
self.SendText("You're neither a Gunner, a Hunter, nor a <NAME>.")
return CommandResult.Failed()
elif pair:
return self.Pair(pair)
elif see:
if self.role == Role.detective:
return self.Investigate(see)
else:
return self.SeeRole(see)
elif master:
return self.ChooseMaster(master)
elif protect:
return self.Protect(protect)
elif lynch:
return self.Lynch(lynch)
elif action == 'reveal':
return self.Reveal()
elif action == 'silver':
return self.SpreadDust()
self.SendText("Invalid command")
return CommandResult.Failed()
	def Lynch(self, lynchId):
		"""Cast this player's lynch vote for player *lynchId*.

		A revealed mayor's vote counts twice; a clumsy player votes at
		random regardless of his choice.
		"""
		if not self.alive:
			self.SendText("You're dead")
			return CommandResult.Failed()
		if self.room.phase != RoomPhase.lynchVote:
			self.SendText("It's not time to lynch")
			return CommandResult.Failed()
		if lynchId not in self.room.playersById:
			self.SendText("Invalid ID")
			return CommandResult.Failed()
		if self not in self.room.votes[Role.villager.id].haventVoted and not self.room.allowRevote:
			self.SendText("Either you have already choosen who to lynch or it's not your turn")
			return CommandResult.Failed()
		lynch = self.room.playersById[lynchId]
		if lynch == self:
			self.SendText("You can't lynch yourself")
			return CommandResult.Failed()
		if not lynch.alive:
			self.SendText("The one you want to lynch is already dead")
			return CommandResult.Failed()
		if self.role == Role.clumsy or self.originalRole == Role.clumsy:
			return self.room.votes[Role.villager.id].VoteRandom()
		voteCount=1
		if self.mayorRevealed:
			voteCount=2
		ret = self.room.votes[Role.villager.id].Vote(self, lynch, voteCount)
		self.room.SendText("%s voted to lynch %s" % (self.name, lynch.name))
		return ret
	def Reveal(self):
		"""Publicly reveal this player as the Mayor; his lynch votes count
		double from now on (see Lynch)."""
		if self.role != Role.mayor:
			self.SendText("You're not a Mayor")
			return CommandResult.Failed()
		if not self.alive:
			self.SendText("You're dead")
			return CommandResult.Failed()
		if self.mayorRevealed:
			self.SendText("You already revealed that you're a Mayor")
			return CommandResult.Failed()
		if not self.myTurn:
			self.SendText("It's not your turn")
			return CommandResult.Failed()
		self.mayorRevealed = True
		self.room.SendText("%s has revealed that he is a Mayor! His votes will count twice from now on." % self.name)
		self.SendText("You have revealed that you are a Mayor")
		return CommandResult.Done()
	def Eat(self, eatId):
		"""Werewolf night vote: choose player *eatId* as the pack's victim.

		A drunk wolf cannot vote; a player who started as clumsy votes at
		random.
		"""
		if self.role not in Role.werewolves:
			self.SendText("You're not a Werewolf")
			return CommandResult.Failed()
		if not self.alive:
			self.SendText("You're dead")
			return CommandResult.Failed()
		if not self.myTurn:
			self.SendText("It's not your turn")
			return CommandResult.Failed()
		if self.drunk:
			self.SendText("Go home you're drunk")
			return CommandResult.Failed()
		if eatId not in self.room.playersById:
			self.SendText("Invalid ID")
			return CommandResult.Failed()
		if self not in self.room.votes[Role.werewolf.id].haventVoted and not self.room.allowRevote:
			self.SendText("You have already choosen who to eat or it's not your turn.")
			return CommandResult.Failed()
		eat = self.room.playersById[eatId]
		if eat == self:
			self.SendText("You can't eat yourself")
			return CommandResult.Failed()
		if eat.role in Role.werewolves:
			self.SendText("You can't eat fellow Werewolf")
			return CommandResult.Failed()
		if not eat.alive:
			self.SendText("The one you want to eat is already dead")
			return CommandResult.Failed()
		if self.originalRole == Role.clumsy:
			return self.room.votes[Role.werewolf.id].VoteRandom()
		return self.room.votes[Role.werewolf.id].Vote(self, eat)
	def Kill(self, killId):
		"""Serial killer night action: mark player *killId* to die.

		The target is stored in self.kill; the actual death happens in
		DoKill when the night resolves.
		"""
		if self.role != Role.serialKiller:
			self.SendText("You're not a Serial Killer")
			return CommandResult.Failed()
		if not self.alive:
			self.SendText("You're dead")
			return CommandResult.Failed()
		if not self.myTurn:
			self.SendText("It's not your turn")
			return CommandResult.Failed()
		if killId not in self.room.playersById:
			self.SendText("Invalid ID")
			return CommandResult.Failed()
		if self.kill and not self.room.allowRevote:
			self.SendText("You have already choosen who to kill.")
			return CommandResult.Failed()
		kill = self.room.playersById[killId]
		if kill == self:
			self.SendText("You can't kill yourself")
			return CommandResult.Failed()
		if not kill.alive:
			self.SendText("The one you want to kill is already dead")
			return CommandResult.Failed()
		if kill == self.kill:
			self.SendText("It's the same guuuyyyyyyyyy")
			return CommandResult.Failed()
		self.kill = kill
		self.room.RemoveWaitingCommand(self)
		return CommandResult.Done()
	def Convert(self, convertId):
		"""Cultist night vote: choose player *convertId* for conversion.

		A player who started as clumsy votes at random.
		"""
		if self.role != Role.cultist:
			self.SendText("You're not a Cultist")
			return CommandResult.Failed()
		if not self.alive:
			self.SendText("You're dead")
			return CommandResult.Failed()
		if not self.myTurn:
			self.SendText("It's not your turn")
			return CommandResult.Failed()
		if convertId not in self.room.playersById:
			self.SendText("Invalid ID")
			return CommandResult.Failed()
		if self not in self.room.votes[Role.cultist.id].haventVoted and not self.room.allowRevote:
			self.SendText("Either you have already choosen who to convert or it's not your turn.")
			return CommandResult.Failed()
		convert = self.room.playersById[convertId]
		if convert == self:
			self.SendText("You can't convert yourself")
			return CommandResult.Failed()
		if convert.role == Role.cultist:
			self.SendText("You can't convert fellow Cultist")
			return CommandResult.Failed()
		if not convert.alive:
			self.SendText("The one you want to convert is already dead")
			return CommandResult.Failed()
		if self.originalRole == Role.clumsy:
			return self.room.votes[Role.cultist.id].VoteRandom()
		return self.room.votes[Role.cultist.id].Vote(self, convert)
	def ShootGunner(self, shootId):
		"""Gunner action: immediately shoot player *shootId* dead.

		Consumes one bullet and (via self.done) limits the gunner to one
		shot per phase.
		"""
		if self.role != Role.gunner:
			self.SendText("You're not a Gunner")
			return CommandResult.Failed()
		if not self.alive:
			self.SendText("You're dead")
			return CommandResult.Failed()
		if not self.myTurn:
			self.SendText("It's not your turn")
			return CommandResult.Failed()
		if self.ammo < 1:
			self.SendText("You're out of bullets")
			return CommandResult.Failed()
		if self.done:
			self.SendText("You've already shot someone")
			return CommandResult.Failed()
		if shootId not in self.room.playersById:
			self.SendText("Invalid ID")
			return CommandResult.Failed()
		shoot = self.room.playersById[shootId]
		if shoot == self:
			self.SendText("You can't shoot yourself")
			return CommandResult.Failed()
		if not shoot.alive:
			self.SendText("The one you want to shoot is already dead")
			return CommandResult.Failed()
		with self.lock:
			shoot.Die(self.role)
			self.ammo -= 1
			self.done = True
		self.room.SendText("%s the Gunner shot %s" % (self.name, shoot.name))
		self.room.RemoveWaitingCommand(self)
		return CommandResult.Done()
def ShootHunter(self, shootId):
if self.role != Role.hunter:
self.SendText("You're not a Hunter")
return CommandResult.Failed()
if self.alive:
self.SendText("You're not dying")
return CommandResult.Failed()
if not self.myTurn:
self.SendText("It's not your turn")
return CommandResult.Failed()
if shootId not in self.room.playersById:
self.SendText("Invalid ID")
return CommandResult.Failed()
shoot = self.room.playersById[shootId]
if shoot == self:
self.SendText("You can't shoot yourself. It will all be over soon anyway")
return CommandResult.Failed()
if not shoot.alive:
self.SendText("The one you want to shoot is already dead")
return CommandResult.Failed()
if not shoot.role.team != Team.cultist:
return CommandResult.Failed()
self.kill = shoot
self.room.RemoveWaitingCommand(self)
return CommandResult.Done()
	def ShootCH(self, shootId):
		"""Cultist hunter action: mark player *shootId* to hunt tonight.

		The target is stored in self.kill; DoKill later kills him only if
		he is actually on the cultist team.
		"""
		if self.role != Role.cultistHunter:
			self.SendText("You're not a Cultist Hunter")
			return CommandResult.Failed()
		if not self.alive:
			self.SendText("You're dead")
			return CommandResult.Failed()
		if not self.myTurn:
			self.SendText("It's not your turn")
			return CommandResult.Failed()
		if self.kill and not self.room.allowRevote:
			self.SendText("You have choosen who to hunt")
			return CommandResult.Failed()
		if shootId not in self.room.playersById:
			self.SendText("Invalid ID")
			return CommandResult.Failed()
		shoot = self.room.playersById[shootId]
		if shoot == self:
			self.SendText("You can't shoot yourself")
			return CommandResult.Failed()
		if not shoot.alive:
			self.SendText("The one you want to shoot is already dead")
			return CommandResult.Failed()
		if shoot == self.kill:
			self.SendText("It's the same guuuyyyyyyyyy")
			return CommandResult.Failed()
		self.kill = shoot
		self.room.RemoveWaitingCommand(self)
		return CommandResult.Done()
	def SpreadDust(self):
		"""Blacksmith action: give every living player protection level 1
		(silver dust) for the coming night.

		NOTE(review): self.ammo is checked but never decremented here --
		confirm whether the dust is meant to be single-use.
		"""
		if self.role != Role.blacksmith:
			self.SendText("You're not a Blacksmith")
			return CommandResult.Failed()
		if not self.alive:
			self.SendText("You're dead")
			return CommandResult.Failed()
		if not self.myTurn:
			self.SendText("It's not your turn")
			return CommandResult.Failed()
		if self.ammo < 1:
			self.SendText("You're out of silver dust")
			return CommandResult.Failed()
		for x in self.room.alives:
			x.protection = 1
		self.room.SendText("%s the Blacksmith spread Silver Dust all over the village!\nEveryone should be safe from Werewolves tonight as long as they don't do anything dangerous" % self.name)
		self.room.RemoveWaitingCommand(self)
		return CommandResult.Done()
def PairLovers(self, loverId):
if self.role != Role.cupid:
self.SendText("You're not a Cupid")
return CommandResult.Failed()
if not self.alive:
self.SendText("You're dead")
return CommandResult.Failed()
if not self.myTurn:
self.SendText("It's not your turn")
return CommandResult.Failed()
if loverId not in self.room.playersById:
self.SendText("Invalid ID")
return CommandResult.Failed()
pair = self.room.lovers[self]
if len(pair) > 1:
if self.room.allowRevote:
self.room.AddWaitingCommand(self)
self.room.lovers[self] = []
else:
self.SendText("You have already set a pair of lovers")
return CommandResult.Failed()
lover = self.room.playersById[loverId]
if not lover.alive:
self.SendText("The one you want to pair as Lovers is already dead")
return CommandResult.Failed()
if lover in pair:
self.SendText("You have already choosen %s" % lover.name)
if lover.lover:
self.SendText("%s already has someone he loves" % lover.name)
pair.append(lover)
if len(pair) == 1:
self.SendText("Please choose the second person")
else:
pair[0].lover = pair[1]
pair[1].lover = pair[0]
self.SendText("You have set %s and %s to be lovers!" % (pair[0].name, pair[1].name))
self.room.RemoveWaitingCommand(self)
return CommandResult.Done()
def SleepSomewhereElse(self, otherId):
if self.role != Role.harlot:
self.SendText("You're not a Harlot")
return CommandResult.Failed()
if not self.alive:
self.SendText("You're dead")
return CommandResult.Failed()
if not self.myTurn:
self.SendText("It's not your turn")
return CommandResult.Failed()
if self.houseOwner != self and not self.room.allowRevote:
self.SendText("You are already at someone else's house")
return CommandResult.Failed()
if otherId not in self.room.playersById:
self.SendText("Invalid ID")
return CommandResult.Failed()
other = self.room.playersById[otherId]
if other == self:
self.SendText("You can't shoot yourself")
return CommandResult.Failed()
if not other.alive:
self.SendText("Ya can't sleep with a dead body ya sicko")
return CommandResult.Failed()
if other == self.houseOwner:
self.SendText("It's the same guuuyyyyyyyyy")
return CommandResult.Failed()
self.houseOwner = other
other.freeloader = self
self.room.RemoveWaitingCommand(self)
return CommandResult.Done()
	def Protect(self, otherId):
		"""Guardian angel night action: guard player *otherId*'s house.

		Sets the target's protection to 2 (angel-level) and clears any
		angel protection left on the previous charge.
		"""
		if self.role != Role.guardianAngel:
			self.SendText("You're not a Guardian Angel")
			return CommandResult.Failed()
		if not self.alive:
			self.SendText("You're dead")
			return CommandResult.Failed()
		if not self.myTurn:
			self.SendText("It's not your turn")
			return CommandResult.Failed()
		if self.houseOwner != self and not self.room.allowRevote:
			self.SendText("You are already protecting someone else's house")
			return CommandResult.Failed()
		if otherId not in self.room.playersById:
			self.SendText("Invalid ID")
			return CommandResult.Failed()
		other = self.room.playersById[otherId]
		if other == self:
			self.SendText("You can't protect yourself")
			return CommandResult.Failed()
		if not other.alive:
			self.SendText("You can't protect a dead person.")
			return CommandResult.Failed()
		if other == self.houseOwner:
			self.SendText("It's the same guuuyyyyyyyyy")
			return CommandResult.Failed()
		if self.houseOwner and self.houseOwner.protection==2:
			self.houseOwner.protection = 0
		self.houseOwner = other
		other.protection = 2
		self.room.RemoveWaitingCommand(self)
		return CommandResult.Done()
	def ChooseMaster(self, masterId):
		"""Wild child / doppelganger first-night action: pick a role model.

		The chosen master is stored in self.master; Inherit uses it when
		the master dies.
		"""
		if self.role not in [Role.wildChild, Role.doppelganger]:
			self.SendText("You're neither a Wild Child nor a Doppelganger")
			return CommandResult.Failed()
		if not self.alive:
			self.SendText("You're dead")
			return CommandResult.Failed()
		if not self.myTurn:
			self.SendText("It's not your turn")
			return CommandResult.Failed()
		if self.master and not self.room.allowRevote:
			self.SendText("You have already choosen %s" % self.master.name)
			return CommandResult.Failed()
		if masterId not in self.room.playersById:
			self.SendText("Invalid ID")
			return CommandResult.Failed()
		master = self.room.playersById[masterId]
		if master == self:
			self.SendText("You can't choose yourself")
			return CommandResult.Failed()
		if not master.alive:
			self.SendText("The one you want to choose is already dead")
			return CommandResult.Failed()
		if master == self.master:
			self.SendText("It's the same guuuyyyyyyyyy")
			return CommandResult.Failed()
		self.master = master
		self.room.RemoveWaitingCommand(self)
		return CommandResult.Done()
def SeeRole(self, seeId):
if self.role not in Role.seers:
self.SendText("You're neither a Seer nor a Sorcerer")
return CommandResult.Failed()
if not self.alive:
self.SendText("You're dead")
return CommandResult.Failed()
if not self.myTurn:
self.SendText("It's not your turn")
return CommandResult.Failed()
if self.getRole != self and not self.room.allowRevote:
self.SendText("You've already choosen someone to see through")
return CommandResult.Failed()
if seeId not in self.room.playersById:
self.SendText("Invalid ID")
return CommandResult.Failed()
see = self.room.playersById[otherId]
if see == self:
self.SendText("You can't see through yourself")
return CommandResult.Failed()
if not see.alive:
self.SendText("The one you want to see through is already dead")
return CommandResult.Failed()
if see == self.getRole:
self.SendText("It's the same guuuyyyyyyyyy")
return CommandResult.Failed()
self.getRole = see
self.room.RemoveWaitingCommand(self)
return CommandResult.Done()
def Investigate(self, suspectId):
if self.role != Role.detective:
self.SendText("You're not a Detective")
return CommandResult.Failed()
if not self.alive:
self.SendText("You're dead")
return CommandResult.Failed()
if not self.myTurn:
self.SendText("It's not your turn")
return CommandResult.Failed()
if self.getRole != self and not self.room.allowRevote:
self.SendText("You've already choosen someone to investigate")
return CommandResult.Failed()
if suspectId not in self.room.playersById:
self.SendText("Invalid ID")
return CommandResult.Failed()
suspect = self.room.playersById[otherId]
if suspect == self:
self.SendText("You can't investigate yourself")
return CommandResult.Failed()
if not suspect.alive:
self.SendText("The one you want to investigate is already dead")
return CommandResult.Failed()
if suspect == self.getRole:
self.SendText("It's the same guuuyyyyyyyyy")
return CommandResult.Failed()
self.getRole = suspect
self.room.RemoveWaitingCommand(self)
return CommandResult.Done()
	def GetRole(self):
		"""Deliver this player's pending vision/investigation of
		self.getRole, then clear the target.

		Seer sees the true role; the fool gets a random one; the sorcerer
		only distinguishes seer/werewolf; the detective reports during the
		lynch vote and may tip off dangerous suspects.
		"""
		if self.getRole:
			# Nothing to report if either party died in the meantime.
			if not self.alive or not self.getRole.alive:
				self.getRole = None
				return
			if self.role in [Role.fool, Role.seer]:
				if self.room.phase == RoomPhase.day:
					if self.role == Role.seer:
						role = self.getRole.role
					else:
						# The fool gets a confident but random answer.
						role = choice(Role.validRoles)
					self.SendText("You have seen through %s and found out that he's a %s" % (self.getRole.name, role.name))
			elif self.role == Role.sorcerer:
				if self.room.phase == RoomPhase.day:
					role = self.getRole.role
					if role == Role.seer or role in Role.werewolves:
						self.SendText("You have seen through %s and found out that he's a %s" % (self.getRole.name, role.name))
					else:
						self.SendText("You couldn't see through %s. At least, you know that he's neither a Werewolf nor a Seer" % self.getRole.name)
			elif self.role == Role.detective:
				if self.room.phase == RoomPhase.lynchVote:
					role = self.getRole.role
					if role != Role.hunter and role in Role.visitorKillers:
						# 40% chance the suspect notices the tail.
						tell = randint(0, 9)
						#if tell < 1 and role != Role.werewolf:
						#	self.Die(role)
						#	return True
						if tell < 4:
							self.getRole.SendText("%s seems to be sneaking around you very much. What's his problem?" % self.name)
					self.SendText("You have investigated %s and found out that he's a %s" % (self.getRole.name, self.getRole.role.name))
			self.getRole = None
	def DoKill(self):
		"""Resolve this player's pending kill (serial killer or cultist
		hunter), then clear the target.

		The cultist hunter's shot only lands if the target really is on
		the cultist team; either way he learns the target's allegiance.
		"""
		if self.kill:
			if not self.kill.alive:
				self.kill = None
				return
			if self.role == Role.serialKiller:
				self.kill.Die(self.role)
			elif self.role == Role.cultistHunter:
				if self.kill.role.team == Team.cultist:
					self.SendText("%s is a Cultist" % self.kill.name)
					self.kill.Die(self.role)
				else:
					self.SendText("%s is not a Cultist" % self.kill.name)
			self.kill = None
	@property
	def myTurn(self):
		# True when the current room phase matches this role's action phase.
		# firstNight roles may only act on the night right after receiving
		# the role (dayRoleSet accounts for roles granted at night).
		if self.role.actionPhase == ActionPhase.firstNight:
			return self.room.phase == RoomPhase.night and self.room.day - self.dayRoleSet == 1
		elif self.role.actionPhase == ActionPhase.anyday:
			return True
		else:
			return self.role.actionPhase == self.room.phase
class Pending(object):
	"""A deferred call: stores a callable with its arguments and invokes
	it later, optionally skipping the call if a guarded player has died."""
	def __init__(self, method, args, kwargs, requireAlive=None, *args2, **kwargs2):
		# Fold any extra positional/keyword arguments into the stored ones.
		args.extend(args2)
		kwargs.update(kwargs2)
		self.method = method
		self.args = args
		self.kwargs = kwargs
		self.requireAlive = requireAlive
	def Call(self):
		"""Invoke the stored callable; a dead guard suppresses the call."""
		guard = self.requireAlive
		if guard and not guard.alive:
			return None
		return self.method(*self.args, **self.kwargs)
# Module-wide room registries shared by every Room instance; mutations are
# serialized through the module-level `lock`.
lastRoomId = 0
roomIdByObj = {}
roomByObj = {}
roomById = {}
lock = Lock()
class Room(object):
def __init__(self, obj, creator, nightDuration=90, dayDuration=90, lynchVoteDuration=60, hunterDeathDuration = 30, allowRevote=False, noVillager=True, autostart=300, quick=True, *args, **kwargs):
global lock
with lock:
if obj in roomIdByObj:
self.id = roomIdByObj[self]
else:
global lastRoomId
lastRoomId += 1
self.id = lastRoomId
global roomById
roomById[self.id] = self
global roomByObj
roomByObj[obj] = self
global roomIdByObj
roomIdByObj[obj] = self.id
self.lock = Lock()
self.obj = obj
self.players = []
self.playersByObj = {}
self.day=0
self._1phase = RoomPhase.waiting
self.realPhase = RoomPhase.waiting
self.votes = {}
self.lovers = {}
self.nightDuration=nightDuration
self.dayDuration=dayDuration
self.lynchVoteDuration=lynchVoteDuration
self.hunterDeathDuration = hunterDeathDuration
self.werewolfKill = 1
self.lastCultistId = 0
self.lastPlayerId = 0
self.playersByRole = {}
self.cultists = []
self.werewolves = []
self.deadHunters = []
self.harlots = []
self.guardianAngels = []
self.seers = []
self.traitors = []
self.beholders = []
self.apprenticeSeers = []
self.masons = []
self.kickeds = []
self.cond = Condition()
self.allowRevote = allowRevote
self.noVillager = noVillager
self.playersById = {}
self.playersByObj = {}
self.shouldTellBeholders = False
self.shouldTellWerewolves = False
self.shouldTellMasons = False
self.shouldTellCultists = False
self.quick = quick
self.hasCultist = False
self.waitingCommands = []
self.room = self
AddAtExit(self, self.__del__)
buts = Buttons("Werewolf game created", "Werewolf game created")
buts.AddButton("Join", "/ww join", "\nType '/ww join' to join")
buts.AddButton("Leave", "/ww leave", "\nType '/ww leave' to leave")
buts.AddButton("Force Start", "/ww forcestart", "\nType '/ww forcestart' to force start")
self.SendButtons(buts)
self.AddPlayer(creator)
if autostart:
self.DelayedStart(autostart, autostart)
	def __del__(self):
		# Teardown hook (also called via AddAtExit): mark an in-progress
		# game as idling and announce the shutdown.
		with self.lock:
			if self.phase > RoomPhase.idling and self.phase < RoomPhase.done:
				self.phase = RoomPhase.idling
				self.SendText("Shutting down")
		DelAtExit(self)
	@property
	def name(self):
		# Display name delegated to the underlying chatroom object.
		return self.obj.name
	def AddWaitingCommand(self, player):
		# Register *player* as someone the phase loop must wait on; no-op
		# in slow-revote games.
		if not self.quick and self.allowRevote:
			return
		with self.lock:
			if player not in self.waitingCommands:
				self.waitingCommands.append(player)
	def ExtendWaitingCommand(self, players):
		# Bulk variant of AddWaitingCommand; no-op in slow-revote games.
		if not self.quick and self.allowRevote:
			return
		with self.lock:
			for player in players:
				if player not in self.waitingCommands:
					self.waitingCommands.append(player)
	def RemoveWaitingCommand(self, player):
		# Mark *player*'s pending action as done; when nobody is left the
		# phase loop blocked on self.cond is woken early.
		if not self.quick and self.allowRevote:
			return
		with self.lock:
			if player in self.waitingCommands:
				self.waitingCommands.remove(player)
			if len(self.waitingCommands) == 0:
				with self.cond:
					self.cond.notifyAll()
	def DelayedStart(self, delay, autostart):
		# Run the autostart countdown on a background thread.
		return self.obj.client.Thread(self._1DelayedStart, [delay, autostart])
	def _1DelayedStart(self, delay, autostart):
		# Worker for DelayedStart: announce a countdown, sleep on the
		# condition (a force-start wakes us early), then start the game if
		# the room is still waiting.
		if self.phase != RoomPhase.waiting:
			return CommandResult.Failed()
		with self.cond:
			self.StartCountdown(time()+delay)
			self.cond.wait(delay)
		if self.phase == RoomPhase.waiting:
			return self.Start(autostart)
	def StartCountdown(self, end, s="Werewolf starting in %s", phase=RoomPhase.waiting):
		# Run the chatty Countdown announcer on a background thread.
		return self.obj.client.Thread(self.Countdown, [end, s, phase])
	def Countdown(self, end, s="Werewolf starting in %s", phase=RoomPhase.waiting):
		"""Periodically announce how long until *end* (a time() stamp),
		as long as the room stays in *phase*.

		Recurses with shorter waits as the deadline approaches; the +5/-5
		adjustments pad the displayed time against scheduling jitter.
		"""
		if self.phase != phase:
			return
		delay = end-time()
		delay+=5
		if delay > 60:
			mins = delay//60
			sec = delay%60
			sec -= 5
			delay-=5
			if mins > 1:
				if sec > 0:
					si = "%d minutes and %d seconds" % (mins, sec)
				else:
					si = "%d minutes" % mins
			else:
				if sec > 0:
					si = "a minute and %d seconds" % sec
				else:
					si = "a minute"
			self.SendText(s % si)
			with self.cond:
				if delay >= 120:
					self.cond.wait(60)
				elif delay > 60:
					self.cond.wait(delay-60)
				else:
					self.cond.wait(30)
		else:
			delay -= 5
			self.SendText(s % ("%d seconds" % delay))
			with self.cond:
				if delay > 30:
					self.cond.wait(30)
				elif delay > 20:
					self.cond.wait(20)
				elif delay > 5:
					self.cond.wait(delay)
		# Keep announcing until the phase changes or the deadline is near.
		if self.phase == phase and (end-time()) > 5:
			return self.Countdown(end, s, phase)
def HandleCommand(self, message, action='', eat=0, kill=0, convert=0, shoot=0, pair=0, see=0, master=0, protect=0, lynch=0, *args, **kwargs):
sender = message.sender
chatroom = message.chatroom
client = message.client
if not client.hasOA or not client.hasUser:
message.ReplyText("Sorry Werewolf needs both OAClient and UserClient")
return CommandResult.Failed()
elif not chatroom.hasUser:
message.ReplyText("Please invite the UserClient here first")
return CommandResult.Failed()
elif not chatroom.hasOA:
if client.oAClient.obj:
client.oAClient.obj.InviteInto(chatroom)
message.ReplyText("Please retry the command after the OAClient joined")
else:
message.ReplyText("Please invite the UserClient here first")
return CommandResult.Failed()
elif not sender or not sender.hasUser or (not sender.name and not sender.GetName()):
message.ReplyText("Sorry we can't identify you.")
return CommandResult.Failed()
elif not sender.rObj:
message.ReplyText("%s, please accept the group invitation" % sender.name)
return CommandResult.Failed()
elif action == 'join':
self.AddPlayer(sender)
return CommandResult.Done()
elif sender not in self.playersByObj:
message.ReplyText("Please join the game first.")
return CommandResult.Failed()
elif action == 'forcestart':
return self.ForceStart(sender)
elif action == 'leave':
return CommandResult.Done()
else:
return self.playersByObj[sender].HandleCommand(action=action, eat=eat, kill=kill, convert=convert, shoot=shoot, pair=pair, see=see, master=master, protect=protect, lynch=lynch, *args, **kwargs)
	def Remove(self):
		# Unregister this room from the module-level registries and mark it
		# idle so background threads stop acting on it.
		with self.lock:
			self.phase = RoomPhase.idling
			del roomByObj[self.obj]
			del roomById[self.id]
			del roomIdByObj[self.obj]
	def Tell(self, about, to=None):
		# Send the membership sentence for group *about* to each player in
		# *to* (defaults to the group itself).
		if not to:
			to = about
		for x in to:
			x.SendTeamNames(about)
	@property
	def phase(self):
		# Publicly visible room phase; backed by _1phase.
		return self._1phase
	@phase.setter
	def phase(self, value):
		# Setting the phase also updates realPhase; elsewhere the two may
		# diverge (see Player.role setter's night handling).
		self._1phase = value
		self.realPhase = value
	def TellBeholders(self):
		# Brief the beholders on who the seers are and clear the flag.
		self.Tell(self.seers, self.beholders)
		self.shouldTellBeholders=False
	def TellWerewolves(self):
		# Brief the pack on its own membership and clear the flag.
		self.Tell(self.werewolves)
		self.shouldTellWerewolves=False
	def TellMasons(self):
		# Brief the masons on their own membership and clear the flag.
		self.Tell(self.masons)
		self.shouldTellMasons=False
	def TellCultists(self):
		# Brief the cult on its own membership and clear the flag.
		self.Tell(self.cultists)
		self.shouldTellCultists=False
	def TryTellBeholders(self):
		# Re-brief beholders only when something changed.
		if self.shouldTellBeholders:
			self.TellBeholders()
	def TryTellWerewolves(self):
		# Re-brief the pack only when something changed.
		if self.shouldTellWerewolves:
			self.TellWerewolves()
	def TryTellMasons(self):
		# Re-brief the masons only when something changed.
		if self.shouldTellMasons:
			self.TellMasons()
	def TryTellCultists(self):
		# Re-brief the cult only when something changed.
		if self.shouldTellCultists:
			self.TellCultists()
def TryTellAll(self):
self.TryTellBeholders()
self.TryTellWerewolves()
self.TryTellMasons()
self.TryTellCultists()
def AddPlayer(self, obj):
if self.phase != RoomPhase.waiting:
self.SendText("%s, the game already started" % obj.name)
return
if obj in self.playersByObj:
self.SendText("%s, You have already joined" % obj.name)
return
p = Player(obj, self)
p.SendText("You have just joined")
self.SendText("%s have successfully joined" % p.name)
return p
def Leave(self, obj):
if self.phase != RoomPhase.waiting:
self.SendText("%s, the game has already begun" % obj.name)
return
if obj not in self.playersByObj:
self.SendText("%s, You haven't even joined" % obj.name)
return
p = self.playersByObj[obj]
self.SendText("%s has left the game" % p.name)
return p.Remove()
def SendText(self, text):
if text.startswith("[WW #"):
return self.obj.SendText(text)
return self.obj.SendText("[WW #%d]\n%s" % (self.id, text))
def SendButtons(self, buttons):
if not buttons.columnText.startswith("[WW #"):
buttons.SetColumnText("[WW #%d]\n%s" % (self.id, buttons.columnText))
buttons.SetAltTextHeader("[WW #%d]\n%s" % (self.id, buttons.altTextHeader))
return self.obj.SendButtons(buttons)
def FreeloaderDie(self):
for harlot in list(self.harlots):
if harlot.houseOwner != harlot:
if harlot.houseOwner.alive:
if harlot.houseOwner.role in Role.visitorKillers:
harlot.Die(None)
self.SendText("%s the Harlot's dead body was found outside this morning. What could've happened?" % harlot.name)
harlot.houseOwner.SendText("The Harlot visited you last night. Defend yourself." % harlot.name)
else:
self.SendText("%s the Harlot was also in %s's house last night. Guess what?" % harlot.name)
harlot.Die(harlot.houseOwner.killerRole)
for angel in list(self.guardianAngels):
if angel.houseOwner != angel:
if angel.houseOwner.role in Role.werewolves:
tell = randint(0, 4)
if tell < 2:
angel.houseOwner.SendText("%s the Guardian Angel tried to protect you lol" % angel.name)
if tell < 1:
self.SendText("%s the Guardian Angel unknowingly tried to protect a Werewolf! The Werewolf found out and killed him" % angle.name)
angel.Die(Role.werewolves)
elif angel.houseOwner.alive:
angel.houseOwner.SendText("A Guardian Angel protected your house from Werewolves last night")
def InitRoles(self):
for player in self.alives:
player.InitRole()
self.TryTellAll()
def Eat(self):
eats = self.votes[Role.werewolf.id].votees
if len(eats) == 0:
print("EATS IS EMPTY")
return
print("EATS0 %s" % eats.items())
eats = [(v, k) for k, v in eats.items() if v>0 and k.alive]
print("EATS1 %s" % eats)
self.votes[Role.werewolf.id].Clear()
if len(eats) == 0:
print("EATS IS EMPTY 2 ")
return
eats.sort(reverse=True)
m = eats[0][0]
eatsMost = [x for x in eats if x[0] == m]
lenEatsMost = len(eatsMost)
if lenEatsMost < self.werewolfKill:
eats2 = eats[len(eatsMost)]
m = eats2[0][0]
eats2 = [x for x in eats2 if x[0] == m]
eatsMost.append(choice(eats2))
self.werewolfKill=1
for werewolf in self.werewolves:
werewolf.drunk = False
hasAlpha = len([x for x in self.werewolves if x.alive and x.role == Role.alphaWolf]) > 0
for eat in eatsMost:
eat = eat[1]
if eat.protection:
self.SendText(eat.name + " was about to be attacked by a werewolf, but he got some protection")
s = "Yall went to %s's home to eat her but he got some protection lol go home." % eat.name
for ww in self.werewolves:
ww.SendText(s)
continue
elif eat.role == Role.harlot and eat.houseOwner and eat.houseOwner != eat:
s = "Yall went to %s's home to eat her but she wasn't home." % eat.name
for ww in self.werewolves:
ww.SendText(s)
continue
elif eat.role == Role.cursed:
eat.role = Role.werewolf
eat.SendText('The Werewolf tried to kill you! You, who were a Cursed, are now a Werewolf!')
eat.InitRole()
s = "Yall tried to eat %s who is actually the Cursed!\nHe is now a fellow Werewolf." % eat.name
for ww in self.werewolves:
ww.SendText(s)
continue
elif eat.role == Role.hunter:
wwLen = len(self.werewolves)
if randint(0, 9) < 3 + (wwLen-1)*2:
randomWw = self.werewolves.pop(randint(0, wwLen-1))
randomWw.Die(eat.role)
if wwLen > 1:
eat.Die(Role.werewolf)
s = 'The Werewolf attacked %s the Hunter! He managed to get %s, one of them, down, but he was outnumbered' % (eat.name, randomWw.name)
self.SendText(s)
continue
else:
self.SendText('%s the werewolf tried to attack the Hunter! However, he had the [Quickdraw] ability not on cooldown. Death to Werewolves!' % randomWw.name)
continue
else:
eat.Die(Role.werewolf)
s = "Yall ate %s who is actually the Hunter!\nFortunately, his [Quickdraw] ability is on cooldown." % eat.name
for ww in self.werewolves:
ww.SendText(s)
continue
continue
elif eat.role == Role.serialKiller:
randomWw = self.werewolves.pop(randint(0, len(self.werewolves)-1))
randomWw.Die(eat.role)
self.SendText("The Werewolves tried to attack the Serial Killer! That was a bad move. %s the Werewolf got killed instead." % randomWw.name)
continue
elif hasAlpha and randint(0,4) < 1:
add = ''
if eat.role == Role.drunk:
add = '\nBtw he was the Drunk so yall will skip one turn'
for ww in self.werewolves:
ww.drunk = True
eat.role = Role.werewolf
eat.SendText("You were bitten by the Alpha Wolf, and thus, turned into a Werewolf!")
eat.InitRole()
s = "%s was bitten by the Alpha Wolf and turned into a fellow Werewolf.%s" % (eat.name, add)
for ww in self.werewolves:
ww.SendText(s)
continue
else:
eat.Die(Role.werewolf)
if eat.role == Role.drunk:
s = "Yall ate %s the Drunk so now you're all drunk and will skip one turn" % eat.name
for ww in self.werewolves:
ww.drunk = True
ww.SendText(s)
continue
def GetRole(self):
for player in list(self.alives):
player.GetRole()
def DoKill(self):
for player in list(self.alives):
player.DoKill()
    def Lynch(self):
        """Resolve the day lynch vote: pick the top-voted living player and kill
        them, honouring the Prince reveal and the Tanner win condition."""
        print("LYNCH")
        lynches = self.votes[Role.villager.id].votees
        if len(lynches) == 0:
            self.SendText("Vote lah kampret")
            return
        print("LYNCH0 %s" % lynches)
        # Keep only living targets with at least one vote, as (count, player).
        lynches = [(v, k) for k, v in lynches.items() if v>0 and k.alive]
        print("LYNCH1 %s" % lynches)
        self.votes[Role.villager.id].Clear()
        lynchesLen = len(lynches)
        if lynchesLen == 0:
            self.SendText("Somehow people yall voted for are dead")
            return
        elif lynchesLen > 1:
            lynches.sort(reverse=True)
            # A tie for first place cancels the lynch entirely.
            if lynches[0][0] == lynches[1][0]:
                self.SendText("MICIN")
                return
        lynch = lynches[0][1]
        if lynch.role == Role.prince:
            # First lynch attempt on the Prince only reveals him; he survives once.
            if not lynch.princeRevealed:
                self.SendText("Yall were gonna lynch %s but then he revealed that he's the Prince! Yall can rethink your decision" % lynch.name)
                lynch.princeRevealed = True
                return
        elif lynch.role == Role.tanner:
            # Lynching the Tanner is his win condition — ends the game immediately.
            self.SendText("YALL LYNCHED %s THE TANNER" % lynch.name.upper())
            return self.Win(Team.tanner)
        lynch.Die(Role.villager)
        print("LYNCHDONE")
        return
    def Convert(self):
        """Resolve the cultists' night conversion vote.

        Ties are broken randomly. Converting the Cultist Hunter backfires
        (kills the newest cultist); converting the Hunter may cost a random
        cultist; roles in Role.unconvertible are immune.
        """
        converts = self.votes[Role.cultist.id].votees
        if len(converts) == 0:
            return
        # Keep only living targets with at least one vote, as (count, player).
        converts = [(v, k) for k, v in converts.items() if v>0 and k.alive]
        self.votes[Role.cultist.id].Clear()
        if len(converts) == 0:
            return
        converts.sort(reverse=True)
        # Restrict to the top-voted tier, then break the tie at random.
        converts = [x for x in converts if x[0] == converts[0][0]]
        convert = choice(converts)[1]
        cultistLen = len(self.cultists)
        if convert.role == Role.cultistHunter:
            if cultistLen > 0:
                # NOTE(review): the first assignment is redundant — newestCultist
                # is immediately rebound to self.cultists[-1].
                newestCultist = None
                newestCultist = self.cultists[-1]
                # Highest cultistId = most recently converted member.
                for cultist in self.cultists:
                    if cultist.cultistId > newestCultist.cultistId:
                        newestCultist = cultist
                newestCultist.Die(convert.role)
                self.SendText(newestCultist.name + " was killed by a Cultist Hunter because the cult unknowingly tried to convert the Cultist Hunter lol")
                return
        elif convert.role == Role.hunter and cultistLen > 0 and randint(0,3) < 1:
            randomCultist = self.cultists.pop(randint(0, cultistLen-1))
            randomCultist.Die(convert.role)
            self.SendText("The cult tried to convert the Hunter. They failed and even got %s, one of their members, down." % randomCultist.name)
            return
        elif convert.role not in Role.unconvertible:
            convert.role = Role.cultist
            convert.SendText("You have been converted into a Cultist.")
            convert.InitRole()
            msg = convert.name + " is now a fellow Cultist."
            for cultist in self.cultists:
                cultist.SendText(msg)
            return
def Status(self):
#alives = [x for x in self.players if x.alive]
#deads = [x for x in self.players if x.alive == False]
alives = self.alives
deads = self.deads
s = 'Day : %d\nPhase : %s\nPlayers:' % (self.day, RoomPhase.toString[self.phase].title())
for x in alives:
s = s + "\n%s, alive" % x.obj.name
for x in deads:
s = s + "\n%s, %s, dead" % (x.obj.name, x.role.name)
self.SendText(s)
return CommandResult.Done()
def Win(self, winningTeam):
#alives = [x for x in self.players if x.alive]
#deads = [x for x in self.players if x.alive == False]
alives = self.alives
deads = self.deads
if winningTeam:
aliveWinners = [x for x in alives if x.role.team == winningTeam]
aliveLosers = [x for x in alives if x.role.team != winningTeam]
deadWinners = [x for x in deads if x.role.team == winningTeam]
deadLosers = [x for x in deads if x.role.team != winningTeam]
s = 'Game over\nDay : %d\nPhase : %s\nWinners:' % (self.day, RoomPhase.toString[self.phase].title())
for x in aliveWinners:
s = s + "\n%s, %s, alive, won" % (x.obj.name, x.role.name)
for x in deadWinners:
s = s + "\n%s, %s, dead, won" % (x.obj.name, x.role.name)
s = s + "\nLosers:"
for x in aliveLosers:
s = s + "\n%s, %s, alive, lost" % (x.obj.name, x.role.name)
for x in deadLosers:
s = s + "\n%s, %s, dead, lost" % (x.obj.name, x.role.name)
self.SendText(s)
else:
s = 'Game over\nDay : %d\nPhase : %s\nLosers:' % (self.day, RoomPhase.toString[self.phase])
for x in alives:
s = s + "\n%s, %s, alive, lost" % (x.obj.name, x.role.name)
for x in deads:
s = s + "\n%s, %s, dead, lost" % (x.obj.name, x.role.name)
self.SendText(s)
for x in self.kickeds:
self.x.obj.InviteInto(self.obj)
self.phase = RoomPhase.done
self.Remove()
return True
    def HunterDeathVote(self):
        """Give dying hunters a timed chance to shoot someone before they die.

        Temporarily switches the visible phase to RoomPhase.hunter without
        touching realPhase, waits out the timer, then applies the kills.
        Returns True when any hunters were processed, None otherwise.
        """
        if len(self.deadHunters) == 0:
            return
        realPhase = self.phase
        # Deliberately bypasses the `phase` setter so realPhase stays untouched.
        self._1phase = RoomPhase.hunter
        with self.cond:
            self.cond.notifyAll()
        #something2
        players = list(self.deadHunters)
        self.deadHunters = []
        # NOTE(review): `self.room` is used as the waiting-command owner here and
        # elsewhere — presumably self.room is this Room (or a delegate); confirm.
        self.room.ExtendWaitingCommand(players)
        for player in players:
            buts = Buttons("You can choose shoot someone by typing '/ww room=%d shoot=<id>' with the ids below." % self.id, "You can choose to shoot someone.")
            candidates = [x for x in self.alives if x != player]
            for x in candidates:
                buts.AddButton(
                    x.name,
                    "/ww room=%d shoot=%d" % (self.id, x.id),
                    "\n%s\t : %s" % (x.id, x.name)
                )
            player.SendButtons(buts)
        with self.cond:
            self.SendText("Some hunters are gonna die! They have %g seconds to shoot as death comes closer" % self.hunterDeathDuration)
            self.StartCountdown(time()+self.hunterDeathDuration, "Dying hunters have %s left", RoomPhase.hunter)
            self.cond.wait(self.hunterDeathDuration)
        self.DoKill()
        self.phase = realPhase
        return True
def Night(self):
with self.lock:
self.phase = RoomPhase.night
self.waitingCommands = []
with self.cond:
self.cond.notifyAll()
if self.hasCultist:
self.room.ExtendWaitingCommand(self.cultists)
candidates = [x for x in self.alives if x.role != Role.cultist]
self.votes[Role.cultist.id].Set(self.cultists, candidates)
buts = Buttons("You can vote to convert someone into a cultist by typing '/ww room=%d convert=<id>' with the ids below" % self.id, "You can vote to convert someone into a cultist")
for x in candidates:
buts.AddButton(
x.name,
"/ww room=%d convert=%d" % (self.id, x.id),
"\n%s\t : %s" % (x.id, x.name)
)
for x in self.cultists:
x.SendButtons(buts)
if self.hasWerewolf:
self.room.ExtendWaitingCommand(self.werewolves)
candidates = [x for x in self.alives if x.role not in Role.werewolves]
self.votes[Role.werewolf.id].Set(self.werewolves, candidates)
buts = Buttons("You can vote to eat someone by typing '/ww room=%d eat=<id>' with the ids below" % self.id, "You can vote to eat someone")
for x in candidates:
buts.AddButton(
x.name,
"/ww room=%d eat=%d" % (self.id, x.id),
"\n%s\t : %s" % (x.id, x.name)
)
for x in self.werewolves:
x.SendButtons(buts)
for player in self.alives:
player.getRole = None
player.done = False
if player.myTurn:
if player.role == Role.cultist:
pass
elif player.role == Role.guardianAngel:
self.room.AddWaitingCommand(player)
buts = Buttons("You can choose to protect someone's house from Werewolves by typing '/ww room=%d protect=<id>' with the ids below" % self.id, "You can choose to protect someone's house from Werewolves")
candidates = [x for x in self.alives if x != player]
for x in candidates:
buts.AddButton(
x.name,
"/ww room=%d protect=%d" % (self.id, x.id),
"\n%s\t : %s" % (x.id, x.name)
)
player.SendButtons(buts)
elif player.role == Role.harlot:
self.room.AddWaitingCommand(player)
buts = Buttons("You can choose to sleep in someone's house by typing '/ww room=%d sleep=<id>' with the ids below" % self.id, "You can choose to sleep in someone's house")
if self.allowRevote:
candidates = [x for x in self.alives]
else:
candidates = [x for x in self.alives if x != player]
for x in candidates:
buts.AddButton(
x.name,
"/ww room=%d sleep=%d" % (self.id, x.id),
"\n%s\t : %s" % (x.id, x.name)
)
player.SendButtons(buts)
elif player.role == Role.cultistHunter:
self.room.AddWaitingCommand(player)
buts = Buttons("You can choose hunt someone by typing '/ww room=%d shoot=<id>' with the ids below. If he's a cultist, he will die." % self.id, "You can choose to hunt someone. If he's a cultist, he will die.")
candidates = [x for x in self.alives if x != player]
for x in candidates:
buts.AddButton(
x.name,
"/ww room=%d shoot=%d" % (self.id, x.id),
"\n%s\t : %s" % (x.id, x.name)
)
player.SendButtons(buts)
elif player.role == Role.serialKiller:
self.room.AddWaitingCommand(player)
buts = Buttons("You can choose kill someone by typing '/ww room=%d kill=<id>' with the ids below" % self.id, "You can choose to kill someone")
candidates = [x for x in self.alives if x != player]
for x in candidates:
buts.AddButton(
"\n%s\t : %s" % (x.id, x.name),
x.name,
"/ww room=%d kill=%d" % (self.id, x.id)
)
player.SendButtons(buts)
elif player.role in Role.seers:
self.room.AddWaitingCommand(player)
buts = Buttons("You can choose see through someone's role by typing '/ww room=%d see=<id>' with the ids below" % self.id, "You can choose to see through someone's role")
candidates = [x for x in self.alives if x != player]
for x in candidates:
buts.AddButton(
x.name,
"/ww room=%d see=%d" % (self.id, x.id),
"\n%s\t : %s" % (x.id, x.name)
)
player.SendButtons(buts)
elif player.role in Role.werewolves:
pass
else:
self.SendText("MISSED NIGHT ROLE %s" % player.role.name)
self.Status()
with self.cond:
self.cond.notifyAll()
with self.cond:
self.SendText("Yall night players have %g seconds to do your stuff" % self.nightDuration)
self.StartCountdown(time()+self.nightDuration, "Night players have %s left", RoomPhase.night)
self.cond.wait(self.nightDuration)
if self.phase == RoomPhase.night:
with self.lock:
self.Eat()
self.DoKill()
if self.CheckWin():
return True
self.Convert()
self.GetRole()
self.DoKill()
if self.CheckWin():
return True
while len(self.deadHunters) > 0:
if self.HunterDeathVote() and self.CheckWin():
return True
return self.Day()
    def Day(self):
        """Run one day phase.

        Clears night protections, kills players inactive for more than two
        days, opens day-role actions (mayor reveal, blacksmith silver, gunner
        shots, detective investigation), waits out the day timer, then
        resolves kills. Returns True when the game ended, otherwise chains
        into LynchVote().
        """
        with self.lock:
            self.phase = RoomPhase.day
            self.day+=1
            self.waitingCommands = []
            with self.cond:
                self.cond.notifyAll()
            for player in list(self.alives):
                player.protection = 0
                # Inactivity kill: gone for more than two days.
                if self.day - player.dayLastSeen > 2:
                    player.Die(Role.none)
                    continue
                if player.role == Role.harlot:
                    # Harlot returns home each morning.
                    player.houseOwner = player
                    player.freeloader = None
                elif player.myTurn:
                    if player.role == Role.mayor and not player.mayorRevealed:
                        self.room.AddWaitingCommand(player)
                        buts = Buttons("You can choose to reveal your role as a Mayor by typing '/ww room=%d action=reveal'" % self.id, "You can choose to reveal your role as a Mayor")
                        buts.AddButton(
                            "Reveal",
                            "/ww room=%d action=reveal" % self.id,
                            ""
                        )
                        player.SendButtons(buts)
                    elif player.role == Role.blacksmith:
                        if player.ammo:
                            self.room.AddWaitingCommand(player)
                            player.done = False
                            buts = Buttons("You can choose to spread silver dust all over the village by typing '/ww room=%d action=silver'. You can do it %d times" % (self.id, player.ammo), "You can choose to spread silver dust all over the village. You can do it %d times" % player.ammo)
                            # NOTE(review): button label says "Reveal" but triggers
                            # the silver action — confirm intended.
                            buts.AddButton(
                                "Reveal",
                                "/ww room=%d action=silver" % self.id,
                                ""
                            )
                            player.SendButtons(buts)
                    elif player.role == Role.gunner:
                        if player.ammo:
                            self.room.AddWaitingCommand(player)
                            player.done = False
                            buts = Buttons("You can choose shoot someone by typing '/ww room=%d shoot=<id>' with the ids below. You have %d bullets" % (self.id, player.ammo), "You can choose to shoot someone. You have %d bullets" % player.ammo)
                            candidates = [x for x in self.alives if x != player]
                            for x in candidates:
                                buts.AddButton(
                                    x.name,
                                    "/ww room=%d shoot=%d" % (self.id, x.id),
                                    "\n%s\t : %s" % (x.id, x.name)
                                )
                            player.SendButtons(buts)
                    elif player.role == Role.detective:
                        self.room.AddWaitingCommand(player)
                        buts = Buttons("You can choose investigate someone's role by typing '/ww room=%d see=<id>' with the ids below" % self.id, "You can choose to investigate someone's role")
                        candidates = [x for x in self.alives if x != player]
                        for x in candidates:
                            buts.AddButton(
                                x.name,
                                "/ww room=%d see=%d" % (self.id, x.id),
                                "\n%s\t : %s" % (x.id, x.name)
                            )
                        player.SendButtons(buts)
                    else:
                        print("MISSED DAY ROLE %s" % player.role.name)
            self.Status()
        # Wait for the day timer.
        with self.cond:
            self.SendText("Yall day players have %g seconds to do your stuff" % self.dayDuration)
            self.StartCountdown(time()+self.dayDuration, "Day players have %s left", RoomPhase.day)
            self.cond.wait(self.dayDuration)
        if self.phase == RoomPhase.day:
            with self.lock:
                self.DoKill()
                if self.CheckWin():
                    return True
                self.GetRole()
                self.DoKill()
                if self.CheckWin():
                    return True
                while len(self.deadHunters) > 0:
                    if self.HunterDeathVote() and self.CheckWin():
                        return True
        return self.LynchVote()
    def LynchVote(self):
        """Run the lynch-vote phase: open the village-wide ballot, wait out the
        timer, apply the lynch and queued kills, then chain into Night().
        Returns True when the game ended."""
        with self.lock:
            self.phase = RoomPhase.lynchVote
            self.waitingCommands = []
            with self.cond:
                self.cond.notifyAll()
            # Everyone alive votes; everyone alive is a candidate.
            self.votes[Role.villager.id].Set(self.alives, self.alives)
            buts = Buttons("You can vote to lynch someone by typing '/ww room=%d lynch=<id>' with the ids below. Though you can't be dumb enough to vote for yourself, right?" % self.id, "You can vote to lynch someone. Though you can't be dumb enough to vote for yourself, right?")
            for x in self.alives:
                buts.AddButton(
                    x.name,
                    "/ww room=%d lynch=%d" % (self.id, x.id),
                    "\n%s\t : %s" % (x.id, x.name)
                )
            for x in self.alives:
                x.SendButtons(buts)
            self.Status()
        # Wait for the vote timer.
        with self.cond:
            self.SendText("Yall have %g seconds to vote to lynch someone" % self.lynchVoteDuration)
            self.StartCountdown(time()+self.lynchVoteDuration, "Yall have %s left", RoomPhase.lynchVote)
            self.cond.wait(self.lynchVoteDuration)
        if self.phase == RoomPhase.lynchVote:
            with self.lock:
                self.Lynch()
                self.DoKill()
                if self.CheckWin():
                    return True
                self.GetRole()
                self.DoKill()
                if self.CheckWin():
                    return True
                while len(self.deadHunters) > 0:
                    if self.HunterDeathVote() and self.CheckWin():
                        return True
        return self.Night()
def ForceStart(self, starter):
with self.lock:
if starter not in self.playersByObj:
starter.SendText("%s, you haven't joined, thus, have no authority to force start the game" % starter.name)
return CommandResult.Failed()
return self.Start(False)
    def Start(self, autostart=True):
        """Start the game: validate the room state, assign roles to every
        player (werewolf quota first, then random support roles with caps on
        werewolf-team non-wolves, masons, seers and cultists), initialise the
        ballots and spawn the first Night on a worker thread.
        """
        with self.lock:
            if self.phase == RoomPhase.idling:
                self.SendText("No Werewolf game session")
                return CommandResult.Failed()
            elif self.phase != RoomPhase.waiting:
                self.SendText("Werewolf game already started")
                return CommandResult.Failed()
            count = len(self.players)
            # Auto-start needs 5+ players; ForceStart (autostart=False) skips the check.
            if count < 5 and autostart:
                self.SendText('Need at least 5 players to start')
                return CommandResult.Failed()
            if count < 1:
                self.SendText("No players. I should've removed the room though. Removing it.")
                self.phase = RoomPhase.idling
                return CommandResult.Failed()
            self.phase = RoomPhase.starting
            with self.cond:
                self.cond.notifyAll()
            self.SendText('Starting werewolf game')
            # Build the role pool; alpha wolf / wolf cub are handled separately below.
            roles = list(Role.validRoles)
            roles.extend([Role.mason, Role.mason])
            roles.remove(Role.alphaWolf)
            roles.remove(Role.wolfCub)
            if self.noVillager:
                roles.remove(Role.villager)
            else:
                roles.extend([Role.villager, Role.villager, Role.villager, Role.villager, Role.mason])
            shuffle(roles)
            self.alives = list(self.players)
            self.deads = []
            players = list(self.players)
            shuffle(players)
            # One werewolf per 7 players (rounded down) plus one.
            wwCount = int(count//7)+1
            hasSeer = False
            hasCultist = False
            specialWW = [Role.alphaWolf, Role.wolfCub]
            specialWWLen = 2
            wwTeamNonWWCount = int(count//6)
            mason = 0
            for player in players:
                player.alive = Alive.alive
                role = Role.villager
                arg = 0
                if wwCount > 0:
                    role = Role.werewolf
                    # NOTE(review): specialWWLen starts at 2 and only decreases,
                    # so this `> 2` branch never runs — special wolves are
                    # effectively disabled. Confirm whether `> 0` was intended.
                    if specialWWLen > 2:
                        a = randint(0, 14)
                        if a < 2:
                            if specialWWLen == 1:
                                role = specialWW[0]
                            else:
                                role = specialWW.pop(randint (0, 1))
                            specialWWLen -= 1
                    wwCount -= 1
                else:
                    rlen = len(roles)
                    if rlen == 0:
                        if self.noVillager:
                            role = Role.mason
                        else:
                            role = choice([Role.mason, Role.villager])
                    else:
                        role = roles[randint(0,rlen-1)]
                        # Cap the number of non-wolf werewolf-team members.
                        if role.team == Team.werewolf:
                            if wwTeamNonWWCount>1:
                                wwTeamNonWWCount -= 1
                            else:
                                roles = [x for x in roles if x.team != Team.werewolf]
                                rlen = len(roles)
                                if rlen == 0:
                                    if self.noVillager:
                                        role = Role.mason
                                    else:
                                        role = choice([Role.mason, Role.villager])
                                while role.team == Team.werewolf:
                                    role = choice(roles)
                    # Masons come in pairs; beholder requires a seer; cultist
                    # hunter requires a cultist.
                    if mason==1:
                        role=Role.mason
                    if role==Role.mason:
                        mason+=1
                    if role==Role.beholder and not hasSeer:
                        role=Role.seer
                    if role==Role.seer:
                        hasSeer=True
                    if role==Role.cultistHunter and not hasCultist:
                        role=Role.cultist
                    if role==Role.cultist:
                        hasCultist=True
                    # NOTE(review): if `role` was overridden above it may not be
                    # in `roles` any more — this remove can raise ValueError.
                    if rlen > 0:
                        roles.remove(role)
                player.role = role
                player.originalRole = role
            self.hasCultist = self.hasCultist or hasCultist
            self.day = 0
            self.votes[Role.villager.id] = Vote(self)
            self.votes[Role.werewolf.id] = Vote(self)
            # NOTE(review): uses self.room.votes while the two lines above use
            # self.votes — confirm self.room is this Room, else this is a bug.
            self.room.votes[Role.cultist.id] = Vote(self)
            self.InitRoles()
            # First night runs on a worker thread owned by the client.
            self.obj.client.Thread(self.Night)
            return CommandResult.Done()
def CheckWin(self):
ww = len(self.werewolves)
nonww = len(self.alives) - ww
if ww == 0:
wwteamnonww = len([x for x in self.alives if x.role.team == Team.werewolf and x.role not in Role.werewolves])
if nonww == 0:
if wwteamnonww == 0:
return self.Win(Team.none)
else:
return self.Win(Team.werewolf)
elif wwteamnonww > 0:
return False
else:
teams = list(set(x.role.team for x in self.alives))
if len(teams) == 1:
if teams[0] == Team.doppelganger or teams[0] == Team.tanner:
return self.Win(Team.none)
return self.Win(teams[0])
hasSK = False
for team in teams:
if team == Team.serialKiller:
hasSK = True
break
if hasSK:
if nonww < 3:
for alive in self.alive:
if alive.role != Role.serialKiller:
alive.SendText("You're left alone with the Serial Killer. You know what comes next, right?")
alive.Die(Role.serialKiller)
return self.Win(Team.serialKiller)
return False
if len(self.cultists) == nonww:
return self.Win(Team.cultist)
else:
return self.Win(Team.villager)
return True
if nonww - ww < 1:
if ww == 1:
nonwwteam = [x for x in self.alives if x.role.team != Team.werewolf]
if len(nonwwteam) == 0:
return self.Win(Team.werewolf)
role = nonwwteam[0].role
if role == Role.hunter:
for alive in list(self.alives):
alive.SendText("Only a Hunter and a Werewolf is left. Yall engaged in a deadly battle, which brought death to both of you.")
if alive.role == Role.hunter:
alive.Die(Role.werewolf)
else:
alive.Die(Role.hunter)
self.SendText("The werewolf attacks the Hunter! However, the Hunter doesn't let go of his gun and keep on shooting him. Both eventually dies.")
return self.Win(Team.none)
elif role == Role.serialKiller:
cur.execute("UPDATE WerewolfPlayers SET alive=FALSE WHERE roomId=%s AND role!=21 and alive RETURNING lineId", (roomId,))
deads = cur.fetchall()
conn.commit()
cur.execute("SELECT lineId FROM WerewolfPlayers WHERE roomId=%s AND alive", (roomId,))
alives = cur.fetchall()
for alive in list(self.alives):
if alive.role == Role.serialKiller:
alive.SendText("ONE STEP TOWARDS WORLD PEACE")
else:
alive.SendText("THE KILLER KILLER WILL RID THE WORLD OF MURDER")
alive.Die(Role.serialKiller)
self.SendText("THE KILLER KILLER WILL RID THE WORLD OF MURDER")
return self.Win(Team.serialKiller)
elif role == Role.gunner and nonwwteam[0].ammo and self.realPhase == RoomPhase.day:
cur.execute("UPDATE WerewolfPlayers SET alive=FALSE WHERE roomId=%s AND role!=7 and alive RETURNING lineId", (roomId,))
deads = cur.fetchall()
conn.commit()
for alive in list(self.alives):
if alive.role == Role.gunner:
alive.SendText("You are left alone with someone whom you are very sure to be a Werewolf. You are really lucky that it's daytime and you still have some bullets")
else:
alive.SendText("You were left alone with the Gunner. It was obvious to him that you were the Werewolf. Lucky for him that he still have some bullets and it was the day.")
self.SendText("The Gunner is our hero now.")
return self.Win(Team.villager)
if len([x for x in self.alives if x.role == Role.serialKiller]) > 0:
return False
for alive in list(self.alives):
if alive.role not in Role.werewolves:
alive.SendText("You're all out of time, and number.")
alive.Die(Role.werewolf)
return self.Win(Team.werewolf)
return False
class Vote(object):
    """One ballot: who may vote, who the candidates are, and running tallies."""
    def __init__(self, room):
        self.canVote = []        # players allowed to cast a vote
        self.haventVoted = []    # voters who have not voted yet
        self.candidates = []     # valid vote targets
        self.voters = {}         # voter -> votee they chose
        self.votees = {}         # candidate -> accumulated vote count
        self.room = room
        self.lock = Lock()
    @property
    def allowRevote(self):
        # Delegates to the room-wide setting.
        return self.room.allowRevote
    def VoteRandom(self, voter):
        """Cast *voter*'s ballot for a uniformly random candidate."""
        return self.Vote(voter, choice(self.candidates))
    def Vote(self, voter, votee, voteCount=1):
        """Record *voter*'s vote for *votee* (worth *voteCount* votes).

        Handles authorization, invalid targets and (optionally) re-voting.
        """
        with self.lock:
            if voter not in self.canVote:
                voter.SendText("You have no authority to vote this one")
                return CommandResult.Failed()
            if votee not in self.candidates:
                voter.SendText("You can't vote for %s" % votee.name)
                return CommandResult.Failed()
            elif voter in self.voters:
                if self.voters[voter] == votee:
                    voter.SendText("It's the same guuuyyyyyyyyy")
                    return CommandResult.Failed()
                elif self.allowRevote:
                    # Retract the previous choice before recording the new one.
                    self.votees[self.voters[voter]] -= voteCount
                else:
                    voter.SendText("You have already voted for %s" % self.voters[voter].name)
                    return CommandResult.Failed()
            self.voters[voter] = votee
            self.votees[votee] += voteCount
            if voter in self.haventVoted:
                self.haventVoted.remove(voter)
                self.room.RemoveWaitingCommand(voter)
            voter.SendText("You chose %s" % votee.name)
            return CommandResult.Done()
    @property
    def everyoneVoted(self):
        return len(self.haventVoted) == 0
    def _reset_locked(self):
        # Caller must already hold self.lock. Extracted so Set() can reuse the
        # reset without re-acquiring the lock: threading.Lock is non-reentrant,
        # and the original Set() -> Clear() call deadlocked on it.
        self.voters.clear()
        self.votees.clear()
        self.haventVoted = []
        self.canVote = []
        self.candidates = []
    def Clear(self):
        """Wipe all ballot state."""
        with self.lock:
            self._reset_locked()
    def Set(self, voters, candidates):
        """Open a fresh ballot for *voters* over *candidates*."""
        with self.lock:
            # BUGFIX: was self.Clear(), which re-acquired the same
            # non-reentrant lock and deadlocked.
            self._reset_locked()
            self.haventVoted = list(voters)
            self.canVote = list(voters)
            self.candidates = list(candidates)
            for x in self.candidates:
                self.votees[x] = 0
            self.room.ExtendWaitingCommand(voters)
def Werewolf(message, options, continuous=CommandContinuousCallType.notContinuous, images=None, text='', room=0, action='', start=True, night=90, day=90, lynchvote=60, hunter=30, revote=False, villager=False, quick=True, autostart=300, eat=0, kill=0, convert=0, shoot=0, pair=0, see=0, master=0, protect=0, lynch=0, *args, **kwargs):
    """Entry point for the '/ww' command.

    Validates that both bot clients are present and the sender is
    identifiable, then either creates a Room ('create'), forwards a
    room-targeted action via the 'room' argument, or dispatches a basic
    action (join/leave/forcestart/status) on the chatroom's room.
    Continuous (follow-up) calls are rejected outright.
    """
    if continuous == CommandContinuousCallType.notContinuous:
        if IsEmpty(action):
            action = text
        sender = message.sender
        chatroom = message.chatroom
        client = message.client
        if not client.hasOA or not client.hasUser:
            message.ReplyText("Sorry Werewolf needs both OAClient and UserClient")
            return CommandResult.Failed()
        elif not chatroom.hasUser:
            message.ReplyText("Please invite the UserClient here first")
            return CommandResult.Failed()
        elif not chatroom.hasOA:
            if client.oAClient.obj:
                client.oAClient.obj.InviteInto(chatroom)
                message.ReplyText("Please retry the command after the OAClient joined")
            else:
                # NOTE(review): this branch handles a missing OAClient but the
                # message says "UserClient" — likely a copy-paste slip; confirm.
                message.ReplyText("Please invite the UserClient here first")
            return CommandResult.Failed()
        elif not sender or not sender.hasUser or (not sender.name and not sender.GetName()):
            message.ReplyText("Sorry we can't identify you.")
            return CommandResult.Failed()
        elif not sender.rObj:
            message.ReplyText("%s, please type '/robj' in a room consisting of only you, our UserClient, and our OAClient" % sender.name)
            #message.ReplyText("%s, please accept the group invitation" % sender.name)
            return CommandResult.Failed()
        if action == 'create':
            # Refuse to create over an existing waiting/running room.
            if chatroom in roomByObj:
                room = roomByObj[chatroom]
                if room.phase == RoomPhase.waiting:
                    room.SendText("WW already created. To forcestart, type '/ww forcestart'")
                    return CommandResult.Done()
                elif room.phase > RoomPhase.waiting and room.phase < RoomPhase.done:
                    room.SendText("WW is running")
                    return CommandResult.Done()
            room = Room(message.chatroom, message.sender, nightDuration=night, dayDuration=day, lynchVoteDuration=lynchvote, hunterDeathDuration=hunter, allowRevote=revote, noVillager=not villager, autostart=autostart, quick=quick, *args, **kwargs)
            return CommandResult.Done()
        elif room:
            # Explicit room id: delegate the whole action to that Room.
            if room not in roomById:
                message.ReplyText("Invalid room id")
                return CommandResult.Failed()
            else:
                return roomById[room].HandleCommand(message=message, action=action, eat=eat, kill=kill, convert=convert, shoot=shoot, pair=pair, see=see, master=master, protect=protect, lynch=lynch, *args, **kwargs)
        else:
            # No room id: resolve the room from the chatroom itself.
            if chatroom not in roomByObj:
                message.ReplyText("No Werewolf game session or you need to provide 'room' argument")
                return CommandResult.Failed()
            room = roomByObj[chatroom]
            if action == 'join':
                room.AddPlayer(sender)
                return CommandResult.Done()
            elif action == 'leave':
                room.Leave(sender)
                return CommandResult.Done()
            elif action == 'forcestart':
                return room.ForceStart(sender)
            elif action == 'status':
                return room.Status()
            else:
                message.ReplyText("Invalid command or you need to provide 'room' argument")
                return CommandResult.Failed()
    else:
        # Continuous follow-up calls are not supported.
        return CommandResult.Failed()
# Register '/ww' as a continuous hybrid command backed by the Werewolf dispatcher.
werewolfCmd = ContinuousHybridCommand(
    'ww',
    Werewolf,
    desc='Awoo',
    images=['the image']
)
| 2.296875 | 2 |
auto_package.py | znsoooo/IDLE-Advance | 4 | 12758816 | # TODO 增加右键菜单和拖拽启动打包
import os
import glob
import time
import zipfile
def mark(target):
    """Archive *target* in place by renaming it to '<base>.<YYYYmmdd_HHMMSS><ext>'."""
    stamp = time.strftime('.%Y%m%d_%H%M%S')
    root, ext = os.path.splitext(target)
    os.rename(target, root + stamp + ext)
def compress(paths, except_key=()):
    """Zip *paths* (files or directories) into '<first path base>.<timestamp>.zip'.

    Directory entries whose path contains any substring in *except_key*
    are skipped.
    """
    save_name = os.path.splitext(paths[0])[0] + time.strftime('.%Y%m%d_%H%M%S.zip')
    with zipfile.ZipFile(save_name, 'w', zipfile.ZIP_DEFLATED) as archive:
        for path in paths:
            if os.path.isfile(path):
                archive.write(path, path)
                continue
            for entry in glob.iglob('%s/**' % path, recursive=True):
                if not any(key in entry for key in except_key):
                    archive.write(entry, entry)
# mark('idlealib.zip')
# Collect every top-level file except test scripts and previous archives.
lst = [file for file in os.listdir() if os.path.isfile(file) and not file.startswith('test') and not file.endswith('.zip')]
print(lst)
# Bundle the 'idlealib' package directory plus the selected files,
# skipping bytecode caches.
compress(('idlealib', *lst),
         except_key=('__pycache__',))
| 3.453125 | 3 |
unittests/test_data_table.py | building-energy/ukds | 2 | 12758817 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import unittest
import ukds, os
from fairly import CreateRDF
# Paths into the UK Time Use Survey deposit (data table + its RTF data dictionary).
# NOTE(review): these are machine-specific relative paths; the tests only run
# where this dataset is checked out alongside the repository.
base_dir=os.path.join(*[os.pardir]*4,r'_Data\United_Kingdom_Time_Use_Survey_2014-2015\UKDA-8128-tab')
dt_fp=os.path.join(base_dir,r'tab\uktus15_household.tab')
dd_fp=os.path.join(base_dir,r'mrdoc\allissue\uktus15_household_ukda_data_dictionary.rtf')
dt=ukds.DataTable(dt_fp,dd_fp)
# Trim to 5 rows so the RDF-generation tests stay fast.
dt.tab=dt.tab.head(5)
class Test_data_table(unittest.TestCase):
    """Smoke tests for ukds.DataTable's BSO (RDF) export methods."""
    def test_to_bso_variable(self):
        # Export a single variable ('hh_wt') and print the serialized RDF.
        c=dt.to_bso_variable(base_prefix='eg',
                             base_uri='http://example.com/',
                             variable='hh_wt',
                             )
        print(c.serialize_rdf())
    def test_to_bso_survey(self):
        # Export the whole survey; serialization print is left disabled.
        c=dt.to_bso_survey(base_prefix='eg',
                           base_uri='http://example.com/',
                           )
        #print(c.serialize_rdf())
# general query methods
#def test_read_tab(self):
#dt=ukds.DataTable(dt_fp)
#df=dt.tab
#print(df[df.columns[:5]])
#print(df.dtypes)
#def test_get_dataframe(self):
#dt=ukds.DataTable(dt_fp,dd_fp)
#df=dt.get_dataframe()
#df.head()
# def test_to_rdf_data_cube(self):
#
# c=dt.to_rdf_data_cube(base_prefix='ukds8128',
# base_uri='<http://www.purl.org/berg/ukds8128/>',
# dimension_columns=['serial'],
# column='strata',
# )
# #print(c.serialize())
# def test_to_rdf_bdo_observation_datum(self):
#
# st=dt.to_rdf_bdo_observation_datum(base_prefix='household',
# base_uri='http://www.purl.org/berg/ukds8128/uktus15_household/',
# column_name='strata',
# row_index=0
# )
# print(st)
#
# def test_to_rdf_bdo_observation(self):
#
# st=dt.to_rdf_bdo_observation(base_prefix='household',
# base_uri='http://www.purl.org/berg/ukds8128/uktus15_household/',
# row_index=0
# )
# print(st)
#
# def test_to_rdf_bdo_observation_set(self):
#
# st=dt.to_rdf_bdo_observation_set(base_prefix='household',
# base_uri='http://www.purl.org/berg/ukds8128/uktus15_household/',
# )
# print(st)
#
# def test_to_rdf_bdo_observation_data_set(self):
#
# st=dt.to_rdf_bdo_observation_data_set(base_prefix='household',
# base_uri='http://www.purl.org/berg/ukds8128/uktus15_household/',
# column_name='strata'
# )
# print(st)
if __name__ == '__main__':
    # BUGFIX: unittest.main()'s first argument is a module (or module name);
    # the original passed a TestCase *instance*, so the loader discovered no
    # tests. Plain unittest.main() discovers Test_data_table in this module.
    unittest.main()
| 2.265625 | 2 |
test.py | DSTGNN/DSTGNN_ | 0 | 12758818 | from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import argparse
import math
from lib import utils
from lib.utils import log_string
from model.DSTGNN import DSTGNN
# ------------------------------------------------------------------
# Command-line configuration: model size, data-split ratios, training
# hyper-parameters, and input/output file locations.
# ------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--P', type = int, default = 12,
                    help = 'history steps')
parser.add_argument('--Q', type = int, default = 12,
                    help = 'prediction steps')
parser.add_argument('--L', type = int, default = 5,
                    help = 'number of STAtt Blocks')
parser.add_argument('--K', type = int, default = 8,
                    help = 'number of attention heads')
parser.add_argument('--d', type = int, default = 8,
                    help = 'dims of each head attention outputs')
parser.add_argument('--train_ratio', type = float, default = 0.7,
                    help = 'training set [default : 0.7]')
parser.add_argument('--val_ratio', type = float, default = 0.1,
                    help = 'validation set [default : 0.1]')
parser.add_argument('--test_ratio', type = float, default = 0.2,
                    help = 'testing set [default : 0.2]')
parser.add_argument('--batch_size', type = int, default = 16,
                    help = 'batch size')
parser.add_argument('--max_epoch', type = int, default = 15,
                    help = 'epoch to run')
# parser.add_argument('--patience', type = int, default = 10,
#                     help = 'patience for early stop')
parser.add_argument('--learning_rate', type=float, default = 0.001,
                    help = 'initial learning rate')
# parser.add_argument('--decay_epoch', type=int, default = 5,
#                     help = 'decay epoch')
parser.add_argument('--traffic_file', default = 'data/METR-LA/metr-la.h5',
                    help = 'traffic file')
parser.add_argument('--SE_file', default = 'data/METR-LA/SE(METR).txt',
                    help = 'spatial emebdding file')
parser.add_argument('--model_file', default = 'data/METR-LA/METR',
                    help = 'save the model to disk')
parser.add_argument('--log_file', default = 'data/METR-LA/log(METR)',
                    help = 'log file')
args = parser.parse_args()
# NOTE: the log file is opened in write mode at import time and shared by all
# helper functions below.
log = open(args.log_file, 'w')
# NOTE(review): hard-coded to GPU index 6 — confirm this matches the machine.
device = torch.device("cuda:6" if torch.cuda.is_available() else "cpu")
log_string(log, "loading data....")
trainX, trainTE, trainY, valX, valTE, valY, testX, testTE, testY, SE, mean, std = utils.loadData(args)
# adj = np.load('./data/metr_adj.npy')
log_string(log, "loading end....")
def res(model, valX, valTE, valY, mean, std):
    """Evaluate `model` on one dataset and log per-horizon and average metrics.

    Runs batched, gradient-free forward passes, de-normalizes the predictions
    with `mean`/`std`, logs MAE/RMSE/MAPE for each of the 12 prediction steps
    plus the overall average, and returns the average MAE.
    Relies on the module-level `args`, `device` and `log`.
    """
    model.eval()  # evaluation mode: disables dropout
    # it = test_iter.get_iterator()
    num_val = valX.shape[0]
    pred = []
    label = []
    num_batch = math.ceil(num_val / args.batch_size)
    with torch.no_grad():
        for batch_idx in range(num_batch):
            if isinstance(model, torch.nn.Module):
                start_idx = batch_idx * args.batch_size
                end_idx = min(num_val, (batch_idx + 1) * args.batch_size)
                X = torch.from_numpy(valX[start_idx : end_idx]).float().to(device)
                y = valY[start_idx : end_idx]
                te = torch.from_numpy(valTE[start_idx : end_idx]).to(device)
                y_hat = model(X, te)
                # De-normalize before computing metrics.
                pred.append(y_hat.cpu().numpy()*std+mean)
                label.append(y)
                # Free GPU memory eagerly between batches.
                del X, te, y_hat
    pred = np.concatenate(pred, axis = 0)
    label = np.concatenate(label, axis = 0)
    # print(pred.shape, label.shape)
    for i in range(12):
        mae, rmse, mape = metric(pred[:,i,:], label[:,i,:])
        # if i == 11:
        log_string(log,'step %d, mae: %.4f, rmse: %.4f, mape: %.4f' % (i+1, mae, rmse, mape))
        # print('step %d, mae: %.4f, rmse: %.4f, mape: %.4f' % (i+1, mae, rmse, mape))
    mae, rmse , mape = metric(pred, label)
    log_string(log, 'average, mae: %.4f, rmse: %.4f, mape: %.4f' % (mae, rmse, mape))
    # print('average, mae: %.4f, rmse: %.4f, mape: %.4f' % (mae, rmse, mape))
    return mae
def test(model, valX, valTE, valY, mean, std):
    """Load the checkpoint at args.model_file and evaluate it via res().

    Note: the `model` argument is kept only for interface compatibility — it
    is immediately replaced by the model loaded from disk.
    Returns the average MAE (previously discarded; returning it is
    backward-compatible since callers ignored the old None result).
    """
    model = torch.load(args.model_file)
    mae = res(model, valX, valTE, valY, mean, std)
    return mae
def _compute_loss(y_true, y_predicted):
    """Masked MAE between predictions and ground truth; zero labels are ignored."""
    return masked_mae(y_predicted, y_true, 0.0)
def masked_mae(preds, labels, null_val=np.nan):
    """Mean absolute error that ignores label entries equal to `null_val`.

    The validity mask is normalized by its mean so that the final mean over
    all elements equals the mean over the valid elements only. NaNs produced
    along the way are zeroed out defensively.
    """
    if np.isnan(null_val):
        valid = ~torch.isnan(labels)
    else:
        valid = labels != null_val
    weights = valid.float()
    weights = weights / torch.mean(weights)
    weights = torch.where(torch.isnan(weights), torch.zeros_like(weights), weights)
    abs_err = torch.abs(preds - labels) * weights
    abs_err = torch.where(torch.isnan(abs_err), torch.zeros_like(abs_err), abs_err)
    return torch.mean(abs_err)
def masked_mae_loss(y_pred, y_true, flag):
    """MAE over entries where y_true != 0, normalized by the mask mean.

    NOTE(review): when `flag` is True this multiplies by `mask_l`, which is
    not defined anywhere in this module — that branch raises NameError.
    Presumably a precomputed label mask was intended; confirm before use.
    """
    mask = (y_true != 0).float()
    mask /= mask.mean()
    loss = torch.abs(y_pred - y_true)
    loss = loss * mask
    # trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
    loss[loss != loss] = 0
    if flag == True:
        loss = loss * mask_l
    return loss.mean()
def metric(pred, label):
    """Compute (MAE, RMSE, MAPE) over entries where label != 0.

    The validity mask is normalized by its mean so each statistic is the
    mean over valid entries; NaNs from zero labels are zeroed out.
    """
    with np.errstate(divide = 'ignore', invalid = 'ignore'):
        valid = np.not_equal(label, 0).astype(np.float32)
        valid /= np.mean(valid)
        abs_err = np.abs(np.subtract(pred, label)).astype(np.float32)
        sq_err = np.square(abs_err)
        rel_err = np.divide(abs_err, label)
        mae = np.mean(np.nan_to_num(abs_err * valid))
        rmse = np.sqrt(np.mean(np.nan_to_num(sq_err * valid)))
        mape = np.mean(np.nan_to_num(rel_err * valid))
    return mae, rmse, mape
if __name__ == '__main__':
    # Build the DSTGNN model on the configured device, then evaluate the
    # checkpoint stored at args.model_file on the held-out test split.
    log_string(log, "model constructed begin....")
    model = DSTGNN(SE, 1, args.K*args.d, args.K, args.d, args.L).to(device)
    log_string(log, "model constructed end....")
    log_string(log, "test begin....")
    test(model, testX, testTE, testY, mean, std)
    log_string(log, "test end....")
| 2.046875 | 2 |
main.py | PandaraWen/NeuralStyleTransfer | 0 | 12758819 | import os
import sys
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt
from PIL import Image
from nst_utils import *
from loss_function import *
import numpy as np
import tensorflow as tf
import time
# Per-layer weights used when accumulating the style cost.
STYLE_LAYERS = [
    ('conv1_1', 0.2),
    ('conv2_1', 0.2),
    ('conv3_1', 0.2),
    ('conv4_1', 0.2),
    ('conv5_1', 0.2)
]
# NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this script
# requires an old SciPy (with Pillow installed) to run as written.
content_image = scipy.misc.imread("resources/content.jpg")
# Remember the content image's shape so the generated image matches it.
generate_config = np.array(content_image).shape
content_image = reshape_and_normalize_image(content_image)
style_image = scipy.misc.imread("resources/style.jpg")
style_image = reshape_and_normalize_image(style_image)
# Start from the content image blended with noise (noise_ratio = 0.6).
generated_image = generate_noise_image(content_image, 0.6, generate_config)
# plt.imshow(generated_image[0])
# plt.show()
# Reset the graph (TensorFlow 1.x graph/session API).
tf.reset_default_graph()
# Start interactive session
sess = tf.InteractiveSession()
model = load_vgg_model("resources/imagenet-vgg-verydeep-19.mat", generate_config)
# Assign the content image to be the input of the VGG model.
sess.run(model['input'].assign(content_image))
# Select the output tensor of layer conv4_2
out = model['conv4_2']
# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
# Assign the input of the model to be the "style" image
sess.run(model['input'].assign(style_image))
# Compute the style cost
J_style = compute_style_cost(sess, model, STYLE_LAYERS)
# Total cost with content weight alpha=10 and style weight beta=40.
# Default is 10, 40
J = total_cost(J_content, J_style, 10, 40)
# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(2.0)
# define train_step (1 line)
train_step = optimizer.minimize(J)
def model_nn(sess, input_image, num_iterations = 200):
    """Run the style-transfer optimization loop.

    Initializes the graph variables, feeds `input_image` into the network,
    runs `num_iterations` Adam steps on the total cost, saves an intermediate
    image every 20 iterations, and finally saves and returns the result.
    Relies on the module-level `model`, `train_step`, `J`, `J_content`,
    `J_style` and `save_image` defined above.
    """
    sess.run(tf.global_variables_initializer())
    # Feed the noisy initial image into the network.
    sess.run(model['input'].assign(input_image))
    for step in range(num_iterations):
        # One optimizer step on the total cost.
        sess.run(train_step)
        # Read back the current state of the generated image.
        generated_image = sess.run(model['input'])
        if step % 20 == 0:
            Jt, Jc, Js = sess.run([J, J_content, J_style])
            print("Iteration " + str(step) + " :")
            print(time.asctime(time.localtime(time.time())))
            print("total cost = " + str(Jt))
            print("content cost = " + str(Jc))
            print("style cost = " + str(Js))
            # save current generated image in the "/output" directory
            save_image("output/" + str(step) + ".png", generated_image)
    # save last generated image
    save_image('output/generated_image.jpg', generated_image)
    return generated_image
# Kick off the optimization, logging the wall-clock start time.
print("start:" + time.asctime(time.localtime(time.time())))
model_nn(sess, generated_image)
teleprox/processspawner.py | campagnola/teleprox | 1 | 12758820 | <reponame>campagnola/teleprox
# -*- coding: utf-8 -*-
# Copyright (c) 2016, French National Center for Scientific Research (CNRS)
# Distributed under the (new) BSD License. See LICENSE for more info.
import sys
import json
import subprocess
import atexit
import zmq
import logging
import threading
import time
from .client import RPCClient
from .log import get_logger_address, LogSender
logger = logging.getLogger(__name__)
class ProcessSpawner(object):
    """Utility for spawning and bootstrapping a new process with an :class:`RPCServer`.
    Automatically creates an :class:`RPCClient` that is connected to the remote
    process (``spawner.client``).
    Parameters
    ----------
    name : str | None
        Optional process name that will be assigned to all remote log records.
    address : str
        ZMQ socket address that the new process's RPCServer will bind to.
        Default is ``'tcp://127.0.0.1:*'``.
        **Note:** binding RPCServer to a public IP address is a potential
        security hazard (see :class:`RPCServer`).
    qt : bool
        If True, then start a Qt application in the remote process, and use
        a :class:`QtRPCServer`.
    log_addr : str
        Optional log server address to which the new process will send its log
        records. This will also cause the new process's stdout and stderr to be
        captured and forwarded as log records.
    log_level : int
        Optional initial log level to assign to the root logger in the new
        process.
    executable : str | None
        Optional python executable to invoke. The default value is `sys.executable`.
    Examples
    --------
    ::
        # start a new process
        proc = ProcessSpawner()
        # ask the child process to do some work
        mod = proc._import('my.module')
        mod.do_work()
        # close the child process
        proc.close()
        proc.wait()
    """
    def __init__(self, name=None, address="tcp://127.0.0.1:*", qt=False, log_addr=None,
                 log_level=None, executable=None, shell=False):
        #logger.warning("Spawning process: %s %s %s", name, log_addr, log_level)
        # Validate arguments up front; failures here are programmer errors.
        assert qt in (True, False)
        assert isinstance(address, (str, bytes))
        assert name is None or isinstance(name, str)
        assert log_addr is None or isinstance(log_addr, (str, bytes)), "log_addr must be str or None; got %r" % log_addr
        if log_addr is None:
            # Inherit this process's log server address, if one is configured.
            log_addr = get_logger_address()
        assert log_level is None or isinstance(log_level, int)
        if log_level is None:
            log_level = logger.getEffectiveLevel()
        self.qt = qt
        self.name = name
        # temporary socket to allow the remote process to report its status.
        bootstrap_addr = 'tcp://127.0.0.1:*'
        bootstrap_sock = zmq.Context.instance().socket(zmq.PAIR)
        bootstrap_sock.setsockopt(zmq.RCVTIMEO, 10000)
        bootstrap_sock.bind(bootstrap_addr)
        bootstrap_sock.linger = 1000 # don't let socket deadlock when exiting
        # Re-read the endpoint: binding to port '*' picks a concrete port.
        bootstrap_addr = bootstrap_sock.last_endpoint
        # Spawn new process
        class_name = 'QtRPCServer' if qt else 'RPCServer'
        args = {'address': address}
        # Configuration handed to teleprox.bootstrap via the child's stdin.
        bootstrap_conf = dict(
            class_name=class_name,
            args=args,
            bootstrap_addr=bootstrap_addr.decode(),
            loglevel=log_level,
            logaddr=log_addr.decode() if log_addr is not None else None,
            qt=qt,
        )
        if executable is None:
            executable = sys.executable
        cmd = (executable, '-m', 'teleprox.bootstrap')
        if name is not None:
            cmd = cmd + (name,)
        if shell is True:
            cmd = ' '.join(cmd)
        if log_addr is not None:
            # start process with stdout/stderr piped
            self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE,
                                         stdout=subprocess.PIPE, shell=shell)
            self.proc.stdin.write(json.dumps(bootstrap_conf).encode())
            self.proc.stdin.close()
            # create a logger for handling stdout/stderr and forwarding to log server
            self.logger = logging.getLogger(__name__ + '.' + str(id(self)))
            self.logger.propagate = False
            self.log_handler = LogSender(log_addr, self.logger)
            if log_level is not None:
                self.logger.level = log_level
            # create threads to poll stdout/stderr and generate / send log records
            self.stdout_poller = PipePoller(self.proc.stdout, self.logger.info, '[%s.stdout] '%name)
            self.stderr_poller = PipePoller(self.proc.stderr, self.logger.warning, '[%s.stderr] '%name)
        else:
            # don't intercept stdout/stderr
            self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, shell=shell)
            self.proc.stdin.write(json.dumps(bootstrap_conf).encode())
            self.proc.stdin.close()
        logger.info("Spawned process: %d", self.proc.pid)
        # Receive status information (especially the final RPC address)
        try:
            status = bootstrap_sock.recv_json()
        except zmq.error.Again:
            raise TimeoutError("Timed out waiting for response from spawned process.")
        logger.debug("recv status %s", status)
        # Acknowledge receipt so the child can continue starting up.
        bootstrap_sock.send(b'OK')
        bootstrap_sock.close()
        if 'address' in status:
            self.address = status['address']
            #: An RPCClient instance that is connected to the RPCServer in the remote process
            self.client = RPCClient(self.address.encode())
        else:
            # Child failed to start; surface its traceback and clean it up.
            err = ''.join(status['error'])
            self.kill()
            raise RuntimeError("Error while spawning process:\n%s" % err)
        # Automatically shut down process when we exit.
        atexit.register(self.stop)
    def wait(self, timeout=10):
        """Wait for the process to exit and return its return code.
        Raises TimeoutError if the process has not exited after `timeout`
        seconds.
        """
        # Using proc.wait() can deadlock; use communicate() instead.
        # see: https://docs.python.org/2/library/subprocess.html#subprocess.Popen.wait
        try:
            self.proc.communicate()
        except (AttributeError, ValueError):
            # Python bug: http://bugs.python.org/issue30203
            # Calling communicate on process with closed i/o can generate
            # exceptions.
            pass
        start = time.time()
        sleep = 1e-3
        while True:
            rcode = self.proc.poll()
            if rcode is not None:
                return rcode
            if time.time() - start > timeout:
                raise TimeoutError("Timed out waiting on process exit for %s" % self.name)
            time.sleep(sleep)
            # Exponential backoff between polls, capped at 100 ms.
            sleep = min(sleep*2, 100e-3)
    def kill(self):
        """Kill the spawned process immediately.
        """
        if self.proc.poll() is not None:
            # Already exited; nothing to do.
            return
        logger.info("Kill process: %d", self.proc.pid)
        self.proc.kill()
        self.wait()
    def stop(self):
        """Stop the spawned process by asking its RPC server to close.
        """
        if self.proc.poll() is not None:
            return
        logger.info("Close process: %d", self.proc.pid)
        closed = self.client.close_server()
        assert closed is True, "Server refused to close. (reply: %s)" % closed
        self.wait()
    def poll(self):
        """Return the spawned process's return code, or None if it has not
        exited yet.
        """
        return self.proc.poll()
class PipePoller(threading.Thread):
    """Daemon thread that forwards lines read from *pipe* to *callback*.

    Each line is decoded, stripped of its trailing newline, prefixed with
    *prefix*, and handed to *callback*. The thread starts itself on
    construction and exits when the pipe reaches EOF (readline returns b'').
    """
    def __init__(self, pipe, callback, prefix):
        super().__init__(daemon=True)
        self.pipe = pipe
        self.callback = callback
        self.prefix = prefix
        self.start()
    def run(self):
        for raw in iter(self.pipe.readline, b''):
            text = raw.decode()
            if text == '':
                break
            self.callback(self.prefix + text[:-1])
| 2.296875 | 2 |
src/programa.py | thiagopiassigit/Tumor-Maligno-ou-Benigno | 0 | 12758821 | <filename>src/programa.py
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from keras import Sequential
from keras.layers import Dense
# Shared scaler: fitted in preparar_dados and reusable for new samples.
transformador = StandardScaler()
def criar_classes(dados):
    """Label-encode the 'diagnosis' column (M/B) into integers, in place."""
    dados['diagnosis'] = LabelEncoder().fit_transform(dados['diagnosis'])
    return dados
def repartir_dados(dados):
    """Split the dataframe into features and target.

    Returns (entradas, resultados): every column after the first, and the
    first column (the diagnosis) respectively.
    """
    resultados = dados.iloc[:, :1]
    entradas = dados.iloc[:, 1:]
    return entradas, resultados
def deletar_dados_desnecessários(dados):
    """Drop the columns that carry no predictive information, in place.

    Raises KeyError (as before) if either column is missing.
    """
    for coluna in ('Unnamed: 32', 'id'):
        del dados[coluna]
    return dados
def preparar_dados(dados):
    """Full preprocessing pipeline: clean, encode, scale and split.

    Drops unused columns and NaN rows, label-encodes the diagnosis,
    standardizes the features with the module-level `transformador`, and
    returns train_test_split(..., test_size=0.2), i.e.
    (X_train, X_test, y_train, y_test).
    """
    dados = deletar_dados_desnecessários(dados)
    dados.dropna(inplace=True)
    dados = criar_classes(dados)
    entradas, resultados = repartir_dados(dados)
    entradas = transformador.fit_transform(entradas)
    return train_test_split(entradas, resultados, test_size=0.2)
if __name__ == '__main__':
    dados = read_csv('dados.csv')
    entradas_treino, entradas_teste, resultados_treino, resultados_teste = preparar_dados(dados)
    # Single sigmoid unit: logistic regression over the 30 standardized features.
    modelo = Sequential()
    modelo.add(Dense(1, activation="sigmoid", input_dim=30))
    modelo.compile(optimizer='adam', loss='binary_crossentropy', metrics=['binary_accuracy'])
    modelo.fit(entradas_treino, resultados_treino, epochs=500)
    # evaluate() returns [loss, binary_accuracy]; the reported "assertividade"
    # is (accuracy - loss) * 100, not plain accuracy.
    score = modelo.evaluate(entradas_teste, resultados_teste)
    conta = "%.2f" % ((-score[0] + score[1]) * 100)
    print(f"assertividade: {conta}% ")
doodad/easy_sweep/__init__.py | charlesjsun/doodad | 36 | 12758822 | <reponame>charlesjsun/doodad
from .launcher import DoodadSweeper
| 1.023438 | 1 |
Code/Simple_plots/random_networks.py | Basvdbrink1998/Influencing-social-networks | 0 | 12758823 | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
"""
Plots random networks with a varying chance of connections between nodes
for figure 2.3.
"""
# Node/edge styling shared by every subplot.
node_color = 'red'
node_border_color = 'black'
node_border_width = .6
edge_color = 'black'
# N nodes per graph; num_graphs panels laid out on an N_columns x N_rows grid.
N = 10
num_graphs = 6
N_columns = 3
N_rows = 2
# One edge probability per panel, evenly spaced over [0, 1].
P = np.linspace(0.0, 1.0, num=num_graphs)
print(P)
def draw(G, pos, ax):
    """Draw graph G on axis `ax` using the module-level styling constants."""
    node_artist = nx.draw_networkx_nodes(G, pos=pos, node_color=node_color, ax=ax)
    node_artist.set_edgecolor(node_border_color)
    node_artist.set_linewidth(node_border_width)
    nx.draw_networkx_edges(G, pos, edge_color=edge_color, alpha=.8, ax=ax)
    ax.axis('off')
    return ax
fig, axs = plt.subplots(N_columns, N_rows)
# Compute the node layout once (from the P[0] graph) so node positions are
# identical across panels and only the edges vary.
G = nx.fast_gnp_random_graph(N, P[0], seed=0)
pos = nx.spring_layout(G)
c = 0
for i in range(N_columns):
    for j in range(N_rows):
        G = nx.fast_gnp_random_graph(N, P[c], seed=0)
        draw(G, pos, axs[i, j])
        # Label each panel with its connection probability.
        axs[i, j].text(0.5, -0.3, "P = " + str(round(P[c], 1)), size=12,
                       ha="center", transform=axs[i, j].transAxes)
        c += 1
plt.subplots_adjust(hspace=0.3)
plt.show()
arraytool/documentation/other_examples/comparison_of_polarization_definitions.py | markuskreitzer/arraytool | 1 | 12758824 | <gh_stars>1-10
#! /usr/bin/env python
# Author: <NAME> (srinivas . zinka [at] gmail . com)
# Copyright (c) 2011 <NAME>
# License: New BSD License.
""" Simple script for comparing different antenna polarization definitions """
import numpy as np
import matplotlib.pyplot as plt
# from enthought.mayavi import mlab
# import sys
# ==============================================================================
# My custom settings
# ==============================================================================
# eps = sys.float_info.min # to avoid zero in the denominator of s1, c1, etc.
# eps = 10e-16
# NOTE(review): eps = 0 disables the guard against 0/0 in the s1/c1
# normalizations below; any NaNs produced there propagate into the plots.
eps = 0
np.set_printoptions(precision=2, edgeitems=None,
                    linewidth=None, suppress=None, nanstr=None, infstr=None)
# ==============================================================================
# Basic parameters
# ==============================================================================
# Parameters of the two polarization definitions being compared; presumably
# e* are ellipticity-like factors and phi_R_* are reference roll angles —
# confirm against the accompanying derivation.
e1 = 0.2
e2 = 0
phi_R_1 = 0
phi_R_2 = 0
# ==============================================================================
# Grid generation with "N" number of samples (denoted by the Nj)
# ==============================================================================
[tht, phi] = np.mgrid[0:(1 * np.pi):100j, 0:(2 * np.pi):200j]
# [tht, phi] = np.mgrid[(1e-3*np.pi):(1 * np.pi):100j, 0:(2 * np.pi):200j]
# ==============================================================================
# sin(zeta1), cos(zeta1), sin(zeta2) and cos(zeta2) values
# ==============================================================================
# Unnormalized components first, then normalized so that s*^2 + c*^2 = 1.
sc1 = (np.sin(phi - phi_R_1) * (np.cos(tht) + e1))
cc1 = (np.cos(phi - phi_R_1) * (1 + e1 * np.cos(tht)))
s1 = sc1 / np.sqrt(eps + sc1 ** 2 + cc1 ** 2)
c1 = cc1 / np.sqrt(eps + sc1 ** 2 + cc1 ** 2)
sc2 = (np.sin(phi - phi_R_2) * (np.cos(tht) + e2))
cc2 = (np.cos(phi - phi_R_2) * (1 + e2 * np.cos(tht)))
s2 = sc2 / np.sqrt(eps + sc2 ** 2 + cc2 ** 2)
c2 = cc2 / np.sqrt(eps + sc2 ** 2 + cc2 ** 2)
# ==============================================================================
# sin(zeta1-zeta2) and cos(zeta1-zeta2) values
# ==============================================================================
# Standard angle-difference identities.
s12 = s1 * c2 - s2 * c1
c12 = c1 * c2 + s1 * s2
# ==============================================================================
# (zeta1-zeta2) value: 0 <= (zeta1-zeta2) <= 2pi
# ==============================================================================
# z12 = ((s12 > 0) * np.arccos(c12)) + ((s12 < 0) * (2 * np.pi - np.arccos(c12)))
# levels = np.linspace(0, 360, 19)
# ==============================================================================
# (zeta1-zeta2) value: -pi <= (zeta1-zeta2) <= pi
# ==============================================================================
# Signed angle difference: arccos(c12) gives the magnitude in [0, pi] and the
# sign of s12 supplies the direction. This vectorized form is equivalent to
# the original element-wise double loop (s12 == 0 keeps the non-negative
# arccos branch, exactly as before).
z12 = np.where(s12 < 0, -np.arccos(c12), np.arccos(c12))
levels = np.linspace(-180, 180, 37)
# ==============================================================================
# (zeta1-zeta2) value: 0 <= (zeta1-zeta2) <= pi
# ==============================================================================
# z12 = np.arccos(c12)
# levels = np.linspace(0, 180, 19)
# ==============================================================================
# (zeta1-zeta2) value: -pi/2 <= (zeta1-zeta2) <= pi/2
# ==============================================================================
# z12 = np.arcsin(s12)
# levels = np.linspace(-90, 90, 19)
# ==============================================================================
# (zeta1-zeta2) value: 0 <= (zeta1-zeta2) <= pi/2
# ==============================================================================
# To be done
# ==============================================================================
# Converting angles from "radians" to "degrees"
# ==============================================================================
phi = phi * (180 / np.pi)
tht = tht * (180 / np.pi)
z12 = z12 * (180 / np.pi)
# ==============================================================================
# Contour plot
# ==============================================================================
fi1 = plt.figure(1)
pl1 = plt.subplot(111)
# Contour with filled area
cpf1 = pl1.contourf(tht, phi, z12, levels, cmap=plt.cm.jet, alpha=1)
cbar = plt.colorbar(cpf1)
cbar.ax.set_ylabel(r'$\Delta\zeta=\zeta_1-\zeta_2$ (degrees)')
## Contour without filled area
# plt.rcParams['contour.negative_linestyle'] = 'solid'
# Overlay unfilled black contour lines for readability.
cp1 = plt.contour(tht, phi, z12, levels, colors='k', alpha=1, linewidths=1)
# plt.clabel(cp1, fontsize=9, inline=True, fmt='%1.0f', manual=False)
# pl1.axis([0, 360, 0, 180])
pl1.axis('tight')
pl1.grid(True)
# plt.title('Directional Error (degrees)')
plt.xlabel(r'$\theta$ (degrees)')
plt.ylabel(r'$\phi$ (degrees)')
plt.show()
# ==============================================================================
# 3D surface plot
# ==============================================================================
# fi1 = mlab.figure()
# me1 = mlab.mesh(tht, phi, z12)
# a1 = mlab.axes(xlabel="Theta", ylabel="Phi", zlabel="Error")
# mlab.show()
| 2 | 2 |
lib/structures/my_list.py | THU-DA-6D-Pose-Group/self6dpp | 33 | 12758825 | <gh_stars>10-100
import numpy as np
import torch
class MyList(list):
    """A list that also accepts torch tensors, numpy arrays, lists and tuples
    as indices, either as boolean masks or as integer index collections."""

    def __getitem__(self, index):
        """Support boolean-mask and integer-sequence indexing.

        Boolean indices select the elements where the mask is True; integer
        indices gather the elements at those positions. Any other index
        (int, slice, ...) is delegated to list.__getitem__.
        """
        if isinstance(index, torch.Tensor):
            # dtype check also covers CUDA tensors, unlike
            # isinstance(index, torch.BoolTensor) which is CPU-only.
            if index.dtype == torch.bool:
                return [self[i] for i, idx in enumerate(index) if idx]
            else:
                return [self[int(i)] for i in index]
        elif isinstance(index, (list, tuple)):
            if len(index) > 0 and isinstance(index[0], bool):
                return [self[i] for i, idx in enumerate(index) if idx]
            else:
                return [self[int(i)] for i in index]
        elif isinstance(index, np.ndarray):
            # np.bool (a deprecated alias) was removed in numpy >= 1.24;
            # compare against the scalar type np.bool_ instead.
            if index.dtype == np.bool_:
                return [self[i] for i, idx in enumerate(index) if idx]
            else:
                return [self[int(i)] for i in index]
        return list.__getitem__(self, index)
if __name__ == "__main__":
    # Manual smoke test: exercise every supported index type (torch tensors,
    # numpy arrays, lists and tuples; both boolean masks and integer indices).
    a = [None, "a", 1, 2.3]
    a = MyList(a)
    print(a)
    print(type(a), isinstance(a, list))
    print("\ntorch bool index")
    index = torch.tensor([True, False, True, False])
    print(index)
    print(a[index])
    print("torch int index")
    index = torch.tensor([0, 2, 3])
    print(index)
    print(a[index])
    print("\nnumpy bool index")
    index = np.array([True, False, True, False])
    print(index)
    print(a[index])
    print("numpy int index")
    index = np.array([0, 2, 3])
    print(index)
    print(a[index])
    print("\nlist bool index")
    index = [True, False, True, False]
    print(index)
    print(a[index])
    print("list int index")
    index = [0, 2, 3]
    print(index)
    print(a[index])
    print("\ntuple bool index")
    index = (True, False, True, False)
    print(index)
    print(a[index])
    print("tuple int index")
    index = (0, 2, 3)
    print(index)
    print(a[index])
    # print(a[1:-1])
tools/train.py | jaeho-lee/batch_oce | 5 | 12758826 | import torch
import torch.nn.functional as F
def train(model,train_loader,test_loader,
          optimizer,target_loss,test_losses,
          num_steps,print_steps=10000):
    """Train `model` for `num_steps` optimizer steps.

    Parameters
    ----------
    model : torch.nn.Module
    train_loader, test_loader : iterables yielding (x, y) batches
    optimizer : callable mapping model.parameters() to a torch optimizer
    target_loss : callable reducing a per-sample loss vector to a scalar
    test_losses : list of callables applied to the per-sample loss log
    num_steps : total optimizer steps to run (epochs are implicit)
    print_steps : evaluate and record metrics every this many steps

    Returns (train_history, test_history) as FloatTensors with one row per
    evaluation point; each row is [accuracy, *test_losses outputs].
    """
    model.train()
    opt = optimizer(model.parameters())
    # Run on whatever device the model's parameters already live on.
    device = next(model.parameters()).device
    test_losslist = []
    train_losslist = []
    current_step = 0
    while True:
        for i, (x,y) in enumerate(train_loader):
            current_step += 1
            x = x.to(device)
            y = y.to(device)
            opt.zero_grad()
            yhat = model(x)
            # Per-sample losses reduced to a scalar by the caller-supplied
            # target_loss (e.g. mean, CVaR, ...).
            lossvec = F.cross_entropy(yhat,y,reduction='none')
            loss = target_loss(lossvec)
            loss.backward()
            opt.step()
            if (current_step%print_steps == 0):
                test_results = test(model,test_loader,test_losses)
                train_results = test(model,train_loader,test_losses)
                print(f'Steps: {current_step}/{num_steps} \t Test acc: {test_results[0]:.2f}', end='\r')
                test_losslist.append(test_results)
                train_losslist.append(train_results)
            if current_step >= num_steps:
                break
        if current_step >= num_steps:
            break
    print(f'Train acc: {train_losslist[-1][0]:.2f}\t Test acc: {test_losslist[-1][0]:.2f}')
    return torch.FloatTensor(train_losslist), torch.FloatTensor(test_losslist)
def test(model,loader,test_losses):
    """Evaluate `model` on `loader`.

    Returns a list [accuracy_percent, *extras] where each extra entry is the
    result of applying one callable from `test_losses` to the per-sample
    cross-entropy loss log over the whole dataset. The model is switched to
    eval mode for the duration and restored to train mode before returning.
    """
    model.eval()
    device = next(model.parameters()).device
    total = len(loader.dataset)
    correct = 0
    count = 0
    # Pre-allocated per-sample loss log for the whole dataset.
    losslog = torch.zeros(total).to(device)
    for i, (x,y) in enumerate(loader):
        x = x.to(device)
        y = y.to(device)
        with torch.no_grad():
            yhat = model(x)
            _,pred = yhat.max(1)
            losslog[count:count+len(x)] = F.cross_entropy(yhat,y,reduction='none')
            correct += pred.eq(y).sum().item()
            count += len(x)
    losslist = []
    losslist.append(correct/total*100.0)
    for test_loss in test_losses:
        losslist.append(test_loss(losslog))
    model.train()
    return losslist
| 2.75 | 3 |
digits/model/tasks/test_caffe_train.py | PhysicsTeacher13/Digits-NVIDIA | 111 | 12758827 | # Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from digits import test_utils
def test_caffe_imports():
    """Skip unless the caffe framework is configured, then verify that the
    Python packages caffe depends on (numpy, protobuf) are importable."""
    test_utils.skipIfNotFramework('caffe')
    import numpy # noqa
    import google.protobuf # noqa
| 1.34375 | 1 |
src/__init__.py | oskart20/PythonServer-lifty | 1 | 12758828 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import datetime
import logging
import logging.handlers
import threading
from time import sleep
import Communication
import Time
import Algorithm, createDistanceMatrix
import Json
import SQLHandler
import url_constructer
# Module-level SQL handler used by run_thread (main() creates its own).
one = SQLHandler.SQLHandler()
LOG_FORMAT = "%(name)2s %(levelname)2s %(asctime)2s - %(message)2s"
# Log to file (truncated on each start) at DEBUG level.
logging.basicConfig(filename='PythonServer.log', level=logging.DEBUG, format=LOG_FORMAT, filemode='w')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Additionally stream records to a local log viewer over TCP (default port).
handler = logging.handlers.SocketHandler(host="", port=logging.handlers.DEFAULT_TCP_LOGGING_PORT)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
# parameters: day, schools
# format: str, list(int)
def run_thread(day, schools):
    """Compute and upload lift routes for every school on `day`.

    For each school and each of its time groups: fetch addresses, build the
    distance/time matrices, solve the routing problem, construct Google Maps
    URLs, serialize everything to JSON and upload it via SFTP. Sleeps two
    minutes between schools, presumably to pace external API usage.
    """
    for i in schools:
        group = one.build_time_pool(day, i)
        for y in group:
            locations = one.select_all_addresses(i, day, y)
            vehicle_data, location_data, driver_indices, passenger_indices, drivers, passengers = one.locations(i, day, y)
            # potential bug when requests to google distance matrix api are synchronized (DDoS attack)
            # https://developers.google.com/maps/documentation/distance-matrix/web-service-best-practices#synchronized-requests
            matrix, time_matrix = createDistanceMatrix.main(one.select_all_addresses(i, day, y))
            routes, dropped_nodes, durations = Algorithm.main(vehicle_data, location_data, matrix, time_matrix)
            routes_temp = copy.deepcopy(routes)
            urls = url_constructer.construct_route_url(locations, routes_temp)
            for u in urls:
                print(u)
            temp1, temp2 = Json.build_list(urls, routes, dropped_nodes, driver_indices, passenger_indices, drivers, passengers, day, y, durations)
            filepath, filename = Json.fill_data_matrix(i, day, y, temp1, temp2)
            Communication.sftp_upload(filepath, filename)
        sleep(120)
def main():
    """Scheduling loop: spawn route-computation threads near the deadline.

    Maps each weekday to the school day whose routes must be computed the
    evening before (Friday/Saturday have no following school day). For each
    timezone whose local time falls in the two-minute window after the
    20:00 deadline, a worker thread is started per timezone's school pool.

    NOTE(review): the loop has no sleep, so it spins at full speed between
    deadline windows — presumably a throttle was intended; confirm.
    """
    days = {}
    days['Sunday'] = 'monday'
    days['Monday'] = 'tuesday'
    days['Tuesday'] = 'wednesday'
    days['Wednesday'] = 'thursday'
    days['Thursday'] = 'friday'
    days['Friday'] = None
    days['Saturday'] = None
    deadline = datetime.time(20, 0, 0)
    while True:
        # BUGFIX: SQLHandler is the imported *module*; the handler class is
        # SQLHandler.SQLHandler (as used at module level). Calling the module
        # itself raised TypeError.
        one = SQLHandler.SQLHandler()
        threads = []
        timezones = one.build_timezone_pool()
        already_run = False
        logger.debug("here")
        for t in timezones:
            logger.info(t)
            time_in_timezone = Time.add_timezone(deadline, t)
            if days[datetime.date.today().strftime("%A")] is not None and Time.time_in_range(deadline, datetime.time(20, 2, 0), time_in_timezone) and already_run is False:
                day = days[datetime.date.today().strftime("%A")]
                schools = one.build_school_pool(t)
                thread = threading.Thread(target=run_thread, args=(day, schools))
                thread.start()
                threads.append(thread)
                already_run = True
already_run = True
if __name__ == '__main__':
    # Run the scheduler loop (blocks forever).
    main()
Exercicios_propostos_60.py | Alex-Francisco/Python-3 | 0 | 12758829 | <gh_stars>0
## Exercício 60 do livro Python 3 - Conceitos e Aplicações - Uma Abordagem Didática
""" Escreva um programa que leia um número inteiro Q e exiba na tela os Q primeiros termos da sequência de Fibonacci,
utilizando uma função recursiva para determinar o elemento da sequência a ser exibido."""
print("Os dez primeiros termos da sequência de Fibonacci são: 0, 1, 1, 2, 3, 5, 8, 13, 21 e 34.")
# Keep prompting until the user supplies a valid integer Q >= 2.
Q = 0
while Q < 2:
    try:
        Q = int(input("\nDigite Q(>1): "))
        if Q < 2:
            print("\nDigite Q >= 2")
    except:
        # Non-integer input: warn and prompt again.
        print("O dado digitado deve ser um número inteiro.")
def Fibonacci(QuantTermos):
    """Return the first `QuantTermos` terms of the Fibonacci sequence.

    The previous version ignored its parameter and read the global Q
    (``range(Q-2)``); this version depends only on its argument — identical
    results for existing callers (which pass Q) — and also handles 0 or 1
    requested terms.
    """
    if QuantTermos <= 0:
        return []
    L = [0, 1][:QuantTermos]
    while len(L) < QuantTermos:
        L.append(L[-1] + L[-2])
    return L
# Display the first Q terms of the sequence.
print(Fibonacci(Q))
| 4.15625 | 4 |
scripts/divide_lista.py | netlabufjf/Geo-Twitter-Scripts | 0 | 12758830 | <filename>scripts/divide_lista.py
"""
Sao passados 2 parametros para esse arquivo
o primeiro e a cidade e o segundo e a quantidade pela qual se quer dividir o arquivo
"""
import os
import sys
# Command-line arguments: city name and the number of output partitions.
cidade = sys.argv[1]
qtd = int(sys.argv[2])
def divide_arquivo(cidade_param, quantidade):
    """Split a city's user-id list into `quantidade` round-robin partitions.

    Reads ../data/grafos/<city>.complete.id_users.list.csv (relative to the
    current working directory's parent) and distributes its lines cyclically
    over <i>.id_users.list.csv files (0 <= i < quantidade) inside
    ../data/<city>/, creating that directory if needed.

    NOTE(review): the partition files are opened in append mode ("a"), so
    running the script twice duplicates lines — presumably intentional;
    confirm.
    """
    from contextlib import ExitStack

    dir_base = os.path.abspath(os.getcwd()) + "/.."
    dir_cidade = "{}/data/{}".format(dir_base, cidade_param)
    if not os.path.exists(dir_cidade):
        os.makedirs(dir_cidade)
    entrada = '{}/data/grafos/{}.complete.id_users.list.csv'.format(dir_base, cidade_param)
    with ExitStack() as stack:
        # Context managers guarantee every handle is closed even on error
        # (the previous version leaked all handles if a write failed).
        arq_principal = stack.enter_context(open(entrada, 'r'))
        arquivos = [
            stack.enter_context(
                open("{}/{}.id_users.list.csv".format(dir_cidade, i), "a"))
            for i in range(quantidade)
        ]
        for numero, line in enumerate(arq_principal):
            # Round-robin: line k goes to partition k % quantidade.
            arquivos[numero % quantidade].write(line)
# Run the split with the command-line parameters.
divide_arquivo(cidade, qtd)
| 3.328125 | 3 |
GetNextLyric.py | Zxmax/Get-Next-Lyrics | 0 | 12758831 | #!/usr/bin/python
# -*- coding:utf-8 -*-
from pymongo import MongoClient
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
import datetime
import json
from fuzzywuzzy import fuzz
# 一次同步的数据量,批量同步
syncCountPer = 100000
# Es 数据库地址
es_url = 'localhost:9200'
# mongodb 数据库地址
mongo_url = 'localhost:27017'
# mongod 需要同步的数据库名
DB = 'song'
# mongod 需要同步的表名
COLLECTION = 'songLyricFull'
es = Elasticsearch(es_url, port=9200)
conn = MongoClient(mongo_url, 27017)
def connect_db():
    """Bulk-copy every document of the Mongo lyric collection into
    Elasticsearch, in batches of `syncCountPer`.

    NOTE(review): reads from conn['Song']['songLyricFull'] literals rather
    than the DB/COLLECTION constants ('song'/'songLyricFull') — confirm
    the case mismatch is intentional (Mongo database names are
    case-sensitive).
    """
    count = 0
    db = conn['Song']
    sl = db['songLyricFull']
    syncDataLst = []
    mongoRecordRes = sl.find()
    for record in mongoRecordRes:
        count += 1
        # MongoDB and ES differ slightly in supported data types, so some
        # fields are massaged before indexing.
        # Drop the 'url' field.
        record.pop('url', '')
        # ES cannot index float('inf'), hence the (disabled) rank clamp.
        #if record['rank'] == float('inf'):
        #record['rank'] = 999999999999
        syncDataLst.append({
            "_index": DB,  # Mongo database name == ES index
            "_type": COLLECTION,  # Mongo collection name == ES doc type
            "_id": str(record.pop('_id')),
            "_source": record,
        })
        if len(syncDataLst) == syncCountPer:
            # Bulk-sync to ES; larger batches need a longer request_timeout.
            bulk(es, syncDataLst, request_timeout=180)
            # Reset the batch buffer.
            syncDataLst[:] = []
            print(f"Had sync {count} records at {datetime.datetime.now()}")
    # Sync the remaining partial batch.
    if syncDataLst:
        bulk(es, syncDataLst, request_timeout=180)
        print(f"Had sync {count} records rest at {datetime.datetime.now()}")
def search(lyric):
    """Look up `lyric` in Elasticsearch and return the lyric line that
    follows it in the matched song.

    Tries an exact phrase match first, then falls back to a fuzzy match.
    Returns "无匹配" (no match) when nothing is found.
    """
    phrase_query = {'query': {"match_phrase": {'lyric': lyric}}}
    fuzzy_query = {'query': {"match": {'lyric': lyric}}}
    result = es.search(index=DB, doc_type=COLLECTION, body=phrase_query)
    if (len(result['hits']['hits']) == 0):
        result = es.search(index=DB, doc_type=COLLECTION, body=fuzzy_query)
    if (len(result['hits']['hits']) > 0):
        # Split the full lyric on '[' and strip the LRC-style "mm:ss]"
        # timestamp remainders, leaving one text line per entry.
        lyricF = result['hits']['hits'][0]['_source']['lyric'].split('[')
        for i in range(len(lyricF)):
            lyricC = lyricF[i]
            if (']' in lyricC and not lyricC.endswith(']')):
                lyricF[i] = lyricC[lyricC.index(']') + 1:]
        lyricF = [i for i in lyricF if i != '\n']
        # Fuzzy-match the query against every remaining line.
        lyricF_r = [fuzz.ratio(lyric, line) for line in lyricF]
        if (lyricF_r.index(max(lyricF_r)) == len(lyricF_r) - 1):
            # Best match is the last line, so there is no "next" line;
            # fall back to the best remaining candidate.
            lyricF_r[lyricF_r.index(max(lyricF_r))
                     ] = lyricF_r[lyricF_r.index(min(lyricF_r))]
            res = lyricF[lyricF_r.index(max(lyricF_r))]
            # NOTE(review): the original computed res.replace(' ', '') and
            # discarded the result; kept as a no-op for behavioural parity.
            res.replace(' ', '')
            if (res.startswith(lyric) and len(res) - 1 > len(lyric)):
                res = res.replace(lyric, '', 1)
            return res
        else:
            res = lyricF[lyricF_r.index(max(lyricF_r)) + 1]
            if (lyricF[lyricF_r.index(max(lyricF_r))] != lyric + '\n'):
                res = lyricF[lyricF_r.index(max(lyricF_r))] + res
            # Bug fix: the HTML apostrophe entity was mangled in the source
            # into a bare quote run (a syntax error); restore the intended
            # "&#39;" -> "'" replacement.
            if ('&#39;' in res):
                res = res.replace('&#39;', "\'")
            print(res)
            return res
    return "无匹配"
#connect_db()
# Ad-hoc manual test: look up the line following this lyric.
search('更怕你永远停留在这里')
| 2.46875 | 2 |
libs/python_scripts/MetaPathways_create_reports_fast.py | ariahahn/MetaPathways_Python.3.0 | 0 | 12758832 | <reponame>ariahahn/MetaPathways_Python.3.0
#!/usr/bin/python
# File created on Nov 27 Jan 2012
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "<NAME>"
__status__ = "Release"
try:
from os import makedirs, sys, remove, path, _exit
import re, traceback, gc, resource
from optparse import OptionParser, OptionGroup
from glob import glob
from libs.python_modules.taxonomy.LCAComputation import *
from libs.python_modules.taxonomy.MeganTree import *
from libs.python_modules.utils.metapathways_utils import parse_command_line_parameters,\
fprintf, printf, eprintf, GffFileParser, exit_process, getShortORFId, getSampleNameFromContig, ShortenORFId, ShortenContigId
from libs.python_modules.utils.sysutil import getstatusoutput, pathDelim
from libs.python_modules.utils.utils import *
from libs.python_modules.utils.errorcodes import *
except:
print(""" Could not load some user defined module functions""")
print(""" Make sure your typed 'source MetaPathwaysrc'""")
print(traceback.print_exc(10))
sys.exit(3)
# CLI usage string shown when argument validation fails.
usage= sys.argv[0] + """ -d dbname1 -b parsed_blastout_for_database1 [-d dbname2 -b parsed_blastout_for_database2 ] --input-annotated-gff input.gff """
# Platform-specific path delimiter ('/' or '\').
PATHDELIM = pathDelim()
# Error code reported to the MetaPathways error framework for this step.
errorcode=16
# Module-level OptionParser, populated by createParser().
parser=None
def createParser():
    """Populate the module-level OptionParser with all CLI options for the
    create-reports step: input B/LAST outputs, score/identity cutoffs,
    output-table map files, LCA parameters and compact-IO flags."""
    global parser
    epilog = """Report tables summarizing and listing the functional and taxonomic annotation for all the ORFs in a sample are computed.The results are dropped in the folder <output_dir>"""
    epilog = re.sub(r'\s+', ' ',epilog)
    parser = OptionParser(usage = usage, epilog = epilog)
    parser.add_option("-a", "--algorithm", dest="algorithm", default="BLAST", help="algorithm BLAST or LAST" )
    parser.add_option("-b", "--blastoutput", dest="input_blastout", action='append', default=[],
                      help='blastout files in TSV format [at least 1 REQUIRED]')
    parser.add_option("-d", "--dbasename", dest="database_name", action='append', default=[],
                      help='the database names [at least 1 REQUIRED]')
    parser.add_option("-D", "--blastdir", dest="blastdir", default=None,
                      help='the blast dir where all the BLAST outputs are located')
    parser.add_option("-s", "--samplename", dest="sample_name", default=None,
                      help='the sample name')
    # Cutoffs applied to each parsed hit (see isWithinCutoffs).
    cutoffs_group = OptionGroup(parser, 'Cuttoff Related Options')
    cutoffs_group.add_option("--min_score", dest="min_score", type='float', default=20,
                             help='the minimum bit score cutoff [default = 20 ] ')
    cutoffs_group.add_option("--max_evalue", dest="max_evalue", type='float', default=1e-6,
                             help='the maximum E-value cutoff [ default = 1e-6 ] ')
    cutoffs_group.add_option("--min_length", dest="min_length", type='float', default=30,
                             help='the minimum length of query cutoff [default = 30 ] ')
    cutoffs_group.add_option("--max_length", dest="max_length", type='float', default=10000,
                             help='the maximum length of query cutoff [default = 10000 ] ')
    cutoffs_group.add_option("--min_identity", dest="min_identity", type='float', default=20,
                             help='the minimum identity of query cutoff [default 30 ] ')
    cutoffs_group.add_option("--max_identity", dest="max_identity", type='float', default=100,
                             help='the maximum identity of query cutoff [default = 100 ] ')
    cutoffs_group.add_option("--limit", dest="limit", type='float', default=5,
                             help='max number of hits per query cutoff [default = 5 ] ')
    cutoffs_group.add_option("--min_bsr", dest="min_bsr", type='float', default=0.0,
                             help='minimum BIT SCORE RATIO [default = 0.30 ] ')
    parser.add_option_group(cutoffs_group)
    # Map files used to build the functional summary tables.
    output_options_group = OptionGroup(parser, 'Output table Options')
    output_options_group.add_option("--ncbi-taxonomy-map", dest="ncbi_taxonomy_map", action='append', default=[],
                                    help='add the ncbi taxonomy map ')
    output_options_group.add_option("--ncbi-megan-map", dest="ncbi_megan_map", default=None,
                                    help='megan map file of preferred taxonomy names')
    output_options_group.add_option( "--input-cog-maps", dest="input_cog_maps",
                                    help='input cog maps file')
    output_options_group.add_option( "--subsystems2peg-file", dest="subsystems2peg_file", default = False,
                                    help='the subsystems to peg file from fpt.theseed.org')
    output_options_group.add_option( "--input-kegg-maps", dest="input_kegg_maps",
                                    help='input kegg maps file')
    output_options_group.add_option( "--input-cazy-maps", dest="input_cazy_maps",
                                    help='input cazy maps file')
    output_options_group.add_option( "--input-seed-maps", dest="input_seed_maps",
                                    help='input seed maps file')
    output_options_group.add_option('--input-annotated-gff', dest='input_annotated_gff',
                                    metavar='INPUT', help='Annotated gff file [REQUIRED]')
    output_options_group.add_option('--output-dir', dest='output_dir',
                                    metavar='INPUT', help='Output directory [REQUIRED]')
    output_options_group.add_option("-v", "--verbose",
                                    action="store_true", dest="verbose", default=False,
                                    help="print lots of information to the stdout [default Off]")
    parser.add_option_group(output_options_group)
    # Parameters of the MEGAN-style lowest-common-ancestor computation.
    lca_options_group = OptionGroup(parser, 'LCA algorithm Options')
    lca_options_group.add_option("--lca-min-score", dest="lca_min_score", type='float', default=50,
                                 help='minimum BLAST/LAST score to consider as for LCA rule')
    lca_options_group.add_option("--lca-top-percent", dest="lca_top_percent", type='float', default=10,
                                 help='set of considered matches are within this percent of the highest score hit')
    lca_options_group.add_option("--lca-min-support", dest="lca_min_support", type='int', default=2,
                                 help='minimum number of reads that must be assigned to a taxon for ' +\
                                 'that taxon to be present otherwise move up the tree until there ' +
                                 'is a taxon that meets the requirement')
    lca_options_group.add_option("--lca-gi-to-taxon-map", dest="accession_to_taxon_map", type='str', default=None,
                                 help='accession to taxon map')
    parser.add_option_group(lca_options_group)
    compact_io_options_group = OptionGroup(parser, 'Compact Input/Output Options')
    compact_io_options_group.add_option( "--compact_output", dest="compact_output", action='store_true', default=False,
                                         help='compact output [OPTIONAL]')
    compact_io_options_group.add_option( "--compact_input", dest="compact_input", action='store_true', default=False,
                                         help='compact input [OPTIONAL]')
    parser.add_option_group(compact_io_options_group)
def printlist(list, lim):
    """Consume at most `lim` + 1 items of `list` without producing output.

    NOTE(review): despite its name this helper never prints anything; it
    looks like a silenced debug utility.  That (no-op) behaviour is
    deliberately preserved here.
    """
    for position, item in enumerate(list, start=1):
        if position > lim:
            break
def check_arguments(opts, args):
    """Validate CLI options; currently always returns True.

    NOTE(review): the unconditional `return True` below short-circuits the
    function, so none of the validation that follows ever runs.  It is
    kept as dead code — presumably because B/LAST outputs may now be
    discovered via --blastdir/-s instead of explicit -b/-d pairs; confirm
    before re-enabling.
    """
    return True
    if len(opts.input_blastout) == 0:
        print("There sould be at least one blastoutput file")
        return False
    if len(opts.database_name) == 0:
        print("There sould be at least one database name")
        return False
    if len(opts.input_blastout) != len(opts.database_name) :
        print("The number of database names, blastoutputs files should be equal")
        return False
    if opts.input_annotated_gff == None:
        print("Must specify the input annotated gff file")
        return False
    if opts.output_dir == None:
        print("Must specify the output dir")
        return False
    return True
def process_gff_file(gff_file_name, orf_dictionary):
    """Parse `gff_file_name`, feeding every non-comment line to
    insert_orf_into_dict() to populate `orf_dictionary`.

    Prints a message and returns (no exception) when the file cannot
    be read.
    """
    try:
        gfffile = open(gff_file_name, 'r')
    except IOError:
        print("Cannot read file " + gff_file_name + " !")
        # Bug fix: the original fell through after the failed open and
        # then raised NameError on the unbound `gfffile`.
        return
    gff_lines = gfffile.readlines()
    gff_beg_pattern = re.compile("^#")
    gfffile.close()
    for line in gff_lines:
        line = line.strip()
        if gff_beg_pattern.search(line):
            continue
        insert_orf_into_dict(line, orf_dictionary)
def create_dictionary(databasemapfile, annot_map):
    """Load a FASTA-style database map into `annot_map`.

    Each line containing '>' is treated as a header: the first word
    (minus one leading '>') becomes the sequence name, and the remaining
    words joined by spaces become its annotation.
    """
    # Bug fix/idiom: the file is now closed via a context manager even
    # if reading raises, and lines are streamed instead of slurped.
    with open(databasemapfile, 'r') as dbmapfile:
        for line in dbmapfile:
            # NOTE: like the original regex search, any line *containing*
            # '>' (not only lines starting with it) counts as a header.
            if '>' in line:
                words = line.rstrip().split()
                name = words[0].replace('>', '', 1)
                annot_map[name] = ' '.join(words[1:])
def copyList(a, b):
    """Append every element of iterable `a` to list `b` in place.

    Idiom fix: the original used a list comprehension purely for its
    side effect; list.extend expresses the intent directly.
    """
    b.extend(a)
def get_species(hit):
    """Return the list of bracketed organism names found in
    hit['product'], or None when the key is missing, no '[...]' group is
    present, or the product is not string-like.
    """
    if 'product' not in hit:
        return None
    try:
        species = re.findall(r'\[([^\[]+)\]', hit['product'])
    except TypeError:
        # Non-string product.  (The original bare `except` swallowed
        # everything; only the type failure is expected here.  The
        # original `m != None` check was dead code — findall always
        # returns a list.)
        return None
    return species if species else None
def create_annotation(results_dictionary, dbname, annotated_gff, output_dir, Taxons, orfsPicked, orfToContig, lca, compact_output= False, sample_name = ""):
    """Append one row per picked ORF to
    <output_dir>/<sample>.functional_and_taxonomic_table.txt.

    For each ORF in `annotated_gff` that appears in `orfsPicked`, writes
    its coordinates, EC number, product, and the (MEGAN-style preferred)
    taxonomy resolved through `lca` from `Taxons`; ORFs without an entry
    in `Taxons` fall back to 'root'.  Also fills `orfToContig`
    (short ORF id -> contig).  The table file is opened in APPEND mode —
    the caller is expected to have truncated/created it beforehand.
    """
    meganTree = None
    #lca.set_results_dictionary(results_dictionary)
    if not path.exists(output_dir):
        makedirs(output_dir)
    orf_dictionary={}
    #process_gff_file(annotated_gff, orf_dictionary)
    gffreader = GffFileParser(annotated_gff)
    output_table_name = output_dir + PATHDELIM + sample_name + ".functional_and_taxonomic_table.txt"
    output_table_file = open(output_table_name, 'a')
    count = 0
    for contig in gffreader:
        # shortORFId = getShortORFId(orf['id'])
        for orf in gffreader.orf_dictionary[contig]:
            shortORFId = getShortORFId(orf['id'])
            count +=1
            #shortORFId = ShortenORFId(orf['id'])
            if shortORFId not in orfsPicked:
                continue
            orfToContig[shortORFId] = contig
            taxonomy = None
            #_results = re.search(r'refseq', opts_global.database_name, re.I)
            if shortORFId in Taxons:
                taxonomy1=Taxons[shortORFId]
                #print taxonomy1, shortORFId
                # Resolve the raw taxon through the LCA tables and prefer
                # the MEGAN display name when one exists.
                taxonomy_id=lca.get_supported_taxon(taxonomy1, return_id=True)
                # print taxonomy_id
                preferred_taxonomy = lca.get_preferred_taxonomy(taxonomy_id)
                if preferred_taxonomy:
                    taxonomy = preferred_taxonomy
                else:
                    taxonomy = Taxons[shortORFId]
            else:
                taxonomy = 'root'
            product = orf['product'] # leave product as it is
            # product = re.sub(r'\[{1,2}.+?\]{1,2}','', orf['product']).strip()
            # product = re.sub(r'\[[^\[]+?\]','', orf['product']).strip()
            # if "partial" in orf['product']:
            #     print orf['product'].strip()
            #     print product
            orf_id = orf['id']
            seqname = orf['seqname']
            if compact_output:
                orf_id = ShortenORFId(orf_id)
                seqname = ShortenContigId(seqname)
            fprintf(output_table_file, "%s", orf_id)
            fprintf(output_table_file, "\t%s", orf['orf_length'])
            fprintf(output_table_file, "\t%s", orf['start'])
            fprintf(output_table_file, "\t%s", orf['end'])
            fprintf(output_table_file, "\t%s", seqname)
            fprintf(output_table_file, "\t%s", orf['contig_length'])
            fprintf(output_table_file, "\t%s", orf['strand'])
            fprintf(output_table_file, "\t%s", orf['ec'])
            # fprintf(output_table_file, "\t%s", str(species))
            fprintf(output_table_file, "\t%s", taxonomy)
            fprintf(output_table_file, "\t%s\n", product)
            # adding taxons to the megan tree
            #if meganTree and taxonomy != '':
            #    meganTree.insertTaxon(taxonomy)
            #print meganTree.getChildToParentMap()
    output_table_file.close()
    # this prints out the megan tree
    # if meganTree:
    #     megan_tree_file = open(output_dir + '/megan_tree.tre', 'w')
    #     fprintf(megan_tree_file, "%s;", meganTree.printTree('1'))
    #     megan_tree_file.close()
    #write_annotation_for_orf(outputgff_file, candidatedbname, dbname_weight, results_dictionary, orf_dictionary, contig, candidate_orf_pos, orf['id'])
def remove_repeats(filtered_words):
    """Join `filtered_words` with spaces, dropping duplicates (keeping the
    first occurrence) and a fixed set of English stop words."""
    stopwords = {'', 'is', 'have', 'has', 'will', 'can', 'should', 'in',
                 'at', 'upon', 'the', 'a', 'an', 'on', 'for', 'of', 'by',
                 'with', 'and', '>'}
    seen = set()
    kept = []
    for token in filtered_words:
        if token in seen or token in stopwords:
            continue
        seen.add(token)
        kept.append(token)
    return ' '.join(kept)
class BlastOutputTsvParser(object):
    """Buffered, forward-only iterator over a parsed B/LAST TSV file.

    The first line of the file must be a '#'-prefixed tab-separated
    header naming the columns; every subsequent line is surfaced as a
    dict keyed by those column names.  Lines are read in batches of SIZE
    to bound memory use.  Works as an iterator under both Python 2
    (``next``) and Python 3 (``__next__``).
    """

    def __init__(self, dbname, blastoutput):
        self.lineToProcess = ""
        self.dbname = dbname
        self.blastoutput = blastoutput
        self.i=0
        self.SIZE = 10000           # lines per buffered batch
        self._size = 0              # number of lines currently buffered
        self.data = {}
        self.fieldmap={}            # column name -> column index
        self.seq_beg_pattern = re.compile("^#")
        self.lines = []
        self.headerline = None
        self.MAX_READ_ERRORS_ALLOWED = 0
        self.ERROR_COUNT = 0
        self.STEP_NAME = 'CREATE_REPORT_FILES' #PARSE_BLAST'
        self.error_and_warning_logger = None
        try:
            self.blastoutputfile = open( blastoutput,'r')
            line = self.blastoutputfile.readline()
            if not self.seq_beg_pattern.search(line) :
                eprintf("First line must have field header names and begin with \"#\"\n")
                exit_process()
            self.headerline = line.strip()
            self.lineToProcess = self.headerline
            header = re.sub('^#','',line)
            fields = [ x.strip() for x in header.rstrip().split('\t')]
            k = 0
            for x in fields:
                self.fieldmap[x] = k
                k += 1
        except AttributeError:
            print("Cannot read the map file for database :" + dbname)
            sys.exit(0)

    def setMaxErrorsLimit(self, max):
        # Number of ill-formatted lines tolerated before aborting.
        self.MAX_READ_ERRORS_ALLOWED = max

    def setErrorAndWarningLogger(self, logger):
        self.error_and_warning_logger = logger

    def setSTEP_NAME(self, step_name):
        self.STEP_NAME = step_name

    def getHeaderLine(self):
        return self.headerline

    def getProcessedLine(self):
        # Raw text of the line most recently returned by next().
        return self.lineToProcess

    def refillBuffer(self):
        """Read the next batch of (up to SIZE) non-comment lines.

        NOTE(review): a blank line ends the batch early, which would
        truncate files containing empty lines mid-stream — confirm the
        parsed inputs never do.
        """
        i = 0
        self.lines = []
        while i < self.SIZE:
            line = self.blastoutputfile.readline().strip()
            if not line:
                break
            if self.seq_beg_pattern.search(line):
                continue
            self.lines.append(line)
            i += 1
        self._size = len(self.lines)

    def rewind(self):
        self.i = self.i - 1

    def __iter__(self):
        return self

    def next(self):
        """Return the next parsed row as a dict; raise StopIteration at EOF."""
        if self.i % self.SIZE == 0:
            self.refillBuffer()
        if len(self.lines)==0:
            raise StopIteration()
        if self.i % self.SIZE < self._size:
            fields = [ x.strip() for x in self.lines[self.i % self.SIZE].split('\t')]
            try:
                self.data = {}
                self.data['query'] = fields[self.fieldmap['query']]
                self.data['q_length'] = int(fields[self.fieldmap['q_length']])
                self.data['bitscore'] = float(fields[self.fieldmap['bitscore']])
                self.data['bsr'] = float(fields[self.fieldmap['bsr']])
                self.data['target'] = fields[self.fieldmap['target']]
                self.data['aln_length'] = float(fields[self.fieldmap['aln_length']])
                self.data['expect'] = float(fields[self.fieldmap['expect']])
                self.data['identity'] = float(fields[self.fieldmap['identity']])
                self.data['ec'] = fields[self.fieldmap['ec']]
                self.data['product'] = re.sub(r'=',' ',fields[self.fieldmap['product']])
                self.lineToProcess = self.lines[self.i % self.SIZE]
            except:
                self.ERROR_COUNT += 1
                if self.MAX_READ_ERRORS_ALLOWED > self.ERROR_COUNT:
                    eprintf("%s\tWARNING\till-formatted line \"%s\" \t %s\n", self.STEP_NAME, self.lines[self.i % self.SIZE], self.blastoutput)
                    if self.error_and_warning_logger != None:
                        self.error_and_warning_logger.write("%s\tWARNING\till-formatted line :\"%s\" \t source : %s\n" %(self.STEP_NAME, re.sub(r'\t', '<tab>', self.lines[self.i % self.SIZE]) , self.blastoutput))
                    self.i = self.i + 1
                    # Bug fix: return the recursively-fetched row directly.
                    # The original fell through after the recursive call and
                    # advanced self.i a second time, silently skipping the
                    # line that followed a malformed one.
                    return self.next()
                else:
                    if self.error_and_warning_logger != None:
                        # Bug fix: the format string has three placeholders
                        # but was given only two arguments (TypeError at
                        # runtime); STEP_NAME is now supplied first.
                        self.error_and_warning_logger.write("%s\tERROR\tThe number of lines in file %s exceeded the max tolerance %d\n" %(self.STEP_NAME, self.blastoutput, self.MAX_READ_ERRORS_ALLOWED) )
                    exit_process()
            self.i = self.i + 1
            return self.data
        else:
            self.lineToProcess = None
            self.blastoutputfile.close()
            raise StopIteration()

    # Bug fix: Python 3's iterator protocol requires __next__; without it,
    # `for data in parser` and next(parser) raise TypeError under py3.
    __next__ = next
def isWithinCutoffs(data, cutoffs):
    """Return True when a parsed hit `data` satisfies every user cutoff
    in `cutoffs` (min length/score/identity/bsr, max e-value).

    On malformed input this mirrors the original behaviour: dump a
    traceback and terminate the process.  The checks are evaluated in
    the same order as before, short-circuiting at the first failure.
    """
    try:
        if (data['q_length'] < cutoffs.min_length
                or data['bitscore'] < cutoffs.min_score):
            return False
        if data['expect'] > cutoffs.max_evalue:
            return False
        if (data['identity'] < cutoffs.min_identity
                or data['bsr'] < cutoffs.min_bsr):
            return False
    except:
        print(traceback.print_exc())
        sys.exit(0)
    return True
def process_parsed_blastoutput(dbname, blastparser, cutoffs, annotation_results, pickorfs, callnum=0):
    """Scan `blastparser` and collect, per picked ORF, the hits that pass
    `cutoffs` into `annotation_results` (short ORF id -> list of
    annotation dicts, each tagged with 'dbname').

    Hits whose (short) query id is absent from `pickorfs` are skipped, so
    repeated calls can process the ORF set in strides.  Any parsing error
    aborts the scan after printing a traceback; always returns None.
    """
    fields = ['target', 'q_length', 'bitscore', 'bsr', 'expect', 'identity', 'ec', 'query' ]
    fields.append('product')
    try:
        hits = 0
        for data in blastparser:
            # Debug hook (disabled via the 'xxrefseq' sentinel prefix).
            if re.search(r'xxrefseq', dbname) and callnum==2:
                print('refseq process', data)
            if data!=None and isWithinCutoffs(data, cutoffs) :
                #if dbname=='refseq-nr-2014-01-18':
                #    print 'refseq process', data
                # if dbname=='refseq':
                #if "partial" in data['product']:
                #    print data['query'] + '\t' + str(data['q_length']) +'\t' + str(data['bitscore']) +'\t' + str(data['expect']) +'\t' + str(data['identity']) + '\t' + str(data['bsr']) + '\t' + data['ec'] + '\t' + data['product']
                annotation = {}
                shortORFId = None
                for field in fields:
                    if field in data:
                        if field == 'query':
                            # Queries are keyed by their shortened ORF id.
                            shortORFId = getShortORFId(data[field])
                            annotation[field] = shortORFId
                        else:
                            annotation[field] = data[field]
                if not shortORFId in pickorfs:
                    continue
                # blastparser.rewind()
                # return None
                annotation['dbname'] = dbname
                if not shortORFId in annotation_results:
                    annotation_results[shortORFId] = []
                annotation_results[shortORFId].append(annotation)
                #if callnum==2 and 'refseq' in dbname:
                #    print(shortORFId, annotation)
    except:
        print (traceback.print_exc())
    #if dbname=='refseq-nr-2014-01-18':
    #    print 'annot refseq process', len(annotation_results)
    return None
def beginning_valid_field(line):
    """Return the index of the first non-empty tab-separated field of
    `line` (after stripping whitespace), or -1 when all fields are empty.

    Idiom fix: uses enumerate instead of a manual counter.
    """
    for index, field in enumerate(x.strip() for x in line.split('\t')):
        if field:
            return index
    return -1
# creates an empty hierarchical tree with zeros at the lowest count
def read_map_file(dbname_map_filename, field_to_description, hierarchical_map) :
    """Read an indentation-by-tab-column map file, filling
    `field_to_description` (id -> description) and building the nested
    `hierarchical_map` tree whose leaves are initialised to 0.

    The tab column at which a line's first non-empty field appears
    encodes its depth; `tempfields` remembers the most recent id seen at
    each depth so children can be attached under their ancestors.
    Aborts the process when the file cannot be opened.
    """
    try:
        map_file = open(dbname_map_filename, 'r')
        map_filelines = map_file.readlines()
    except:
        eprintf("ERROR: Cannot open file %s\n", dbname_map_filename)
        exit_process()
    tempfields = [ '', '', '', '', '', '', '' ]
    for line in map_filelines:
        pos = beginning_valid_field(line)
        if pos==-1:
            continue
        fields = [ x.strip() for x in line.split('\t') ]
        tempfields[pos] = fields[pos]
        # The field after the id is its description; fall back to the id.
        if len(fields) > pos + 1:
            field_to_description[fields[pos]] = fields[pos+1]
        else:
            field_to_description[fields[pos]] = fields[pos]
        # Walk down the tree along the remembered ancestor ids and
        # attach this node at depth `pos`.
        i=0
        temp_hierarchical_map = hierarchical_map
        while i < pos :
            temp_hierarchical_map = temp_hierarchical_map[ tempfields[i] ]
            i+=1
        temp_hierarchical_map[ tempfields[i] ] = {}
    fill_hierarchy_with_zeroes(hierarchical_map)
def fill_hierarchy_with_zeroes(dictionary):
    """Recursively replace every empty-dict leaf of `dictionary` with the
    integer counter 0, leaving inner nodes as nested dicts."""
    for key, subtree in dictionary.items():
        if len(subtree) == 0:
            dictionary[key] = 0
        else:
            fill_hierarchy_with_zeroes(subtree)
def cog_id(product):
    """Return the first COG accession (e.g. 'COG0123') found in
    `product`, or '' when none is present."""
    match = re.search(r'COG[0-9][0-9][0-9][0-9]', product)
    return match.group(0) if match else ''
def seed_id(product):
    """Return `product` with every bracketed '[...]' annotation removed."""
    return re.sub(r'\[[^\[]+\]', '', product)
def cazy_id(product):
    """Extract the CAZy family token from a product of the form
    '# <id> # ...', returning '' when the pattern does not match."""
    match = re.search(r'^#\s+(\S+)\s+#', product)
    return match.group(1) if match else ''
def kegg_id(product):
    """Return the first KEGG ortholog id (K#####) found in `product`,
    or '' when none is present."""
    match = re.search(r'K[0-9][0-9][0-9][0-9][0-9]', product)
    return match.group(0) if match else ''
def create_table(results, std_dbname, output_dir, hierarchical_map, field_to_description):
    """Count, per functional-orthology id, how many annotated ORFs in
    `results` map to it, then fold those counts into the leaves of
    `hierarchical_map[std_dbname]`.

    `std_dbname` selects the extraction rule ('seed', 'cog' or 'kegg');
    ids not present in the map file are ignored.

    NOTE(review): the seed branch strips brackets with r'\[.*\]' while
    seed_id() uses r'\[[^\[]+\]' — the two disagree on products with
    multiple bracket groups; confirm which is intended.
    """
    if not path.exists(output_dir):
        makedirs(output_dir)
    #print field_to_description
    orthology_count = {}
    for key in field_to_description[std_dbname]:
        orthology_count[key] = 0
    #print hierarchical_map
    for seqname in results:
        for orf in results[seqname]:
            if std_dbname =='seed':
                seed = re.sub(r'\[.*\]','', orf['product']).strip()
                if seed in orthology_count:
                    orthology_count[seed]+=1
            if std_dbname =='cog':
                cog = cog_id(orf['product'])
                if cog in orthology_count:
                    orthology_count[cog]+=1
            if std_dbname =='kegg':
                kegg = kegg_id(orf['product'])
                if kegg in orthology_count:
                    orthology_count[kegg]+=1
    # print orthology_count.keys()
    add_counts_to_hierarchical_map(hierarchical_map[std_dbname], orthology_count)
def print_kegg_cog_tables(dbname, output_dir, hierarchical_map, field_to_description, filePermType = 'w', sample_name = "" ):
    """Write the per-level COG or KEGG summary tables
    (<sample>.COG_stats_1..3.txt or <sample>.KEGG_stats_1..4.txt) by
    flattening `hierarchical_map[dbname]` at each hierarchy depth.

    `filePermType` lets the caller append ('a') instead of overwrite.
    """
    if dbname=='cog':
        outputfile = open( output_dir + PATHDELIM + sample_name + '.COG_stats_1.txt', filePermType)
        print_counts_at_level(hierarchical_map[dbname], field_to_description[dbname], 0, 0, outputfile, printKey=False,\
                              header="Functional Category\tGene Count")
        outputfile.close()
        outputfile = open( output_dir + PATHDELIM + sample_name +'.COG_stats_2.txt', filePermType)
        print_counts_at_level(hierarchical_map[dbname], field_to_description[dbname], 0, 1, outputfile,\
                              header="Function Abbr\tFunctional Category\tGene Count")
        outputfile.close()
        outputfile = open( output_dir + PATHDELIM + sample_name +'.COG_stats_3.txt', filePermType)
        print_counts_at_level(hierarchical_map[dbname], field_to_description[dbname], 0, 2, outputfile,\
                              header="COGID\tFunction\tGene Count")
        outputfile.close()
    if dbname=='kegg':
        outputfile = open( output_dir + PATHDELIM + sample_name +'.KEGG_stats_1.txt', filePermType)
        print_counts_at_level(hierarchical_map[dbname], field_to_description[dbname], 0, 0, outputfile, printKey=False,\
                              header="Function Category Level 1\tGene Count")
        outputfile.close()
        outputfile = open( output_dir + PATHDELIM + sample_name +'.KEGG_stats_2.txt', filePermType)
        print_counts_at_level(hierarchical_map[dbname], field_to_description[dbname], 0, 1, outputfile, printKey=False,\
                              header="Function Category Level 2a\tGene Count")
        outputfile.close()
        outputfile = open( output_dir + PATHDELIM + sample_name +'.KEGG_stats_3.txt', filePermType)
        print_counts_at_level(hierarchical_map[dbname], field_to_description[dbname], 0, 2, outputfile,\
                              header="ID\tFunction Category Level 3\tGene Count" )
        outputfile.close()
        outputfile = open( output_dir + PATHDELIM + sample_name +'.KEGG_stats_4.txt', filePermType)
        print_counts_at_level(hierarchical_map[dbname], field_to_description[dbname], 0, 3, outputfile,\
                              header="KO\tFunction Category Level 4\tGene Count")
        outputfile.close()
def print_counts_at_level(hierarchical_map, field_to_description, depth, level, outputfile, printKey=True, header=None):
    """Recursively sum the integer leaves of `hierarchical_map` and, for
    every node sitting exactly at depth `level`, write a TSV row
    ([key\t]description-or-key\tcount) to `outputfile`.

    Returns the total count of the subtree, which the recursion uses to
    aggregate child counts upward.  `printKey=False` drops the id column.
    """
    # Leaf: the node itself is the count.
    if type(hierarchical_map) is type(0):
        return hierarchical_map
    if header:
        fprintf(outputfile, "%s\n",header )
    count = 0
    for key in hierarchical_map:
        tempcount = print_counts_at_level(hierarchical_map[key],field_to_description, depth+1, level, outputfile, printKey=printKey)
        if depth==level:
            if key in field_to_description:
                if printKey:
                    fprintf(outputfile, "%s\n", key + '\t' + field_to_description[key] + '\t' + str(tempcount) )
                else:
                    fprintf(outputfile, "%s\n", field_to_description[key] + '\t' + str(tempcount) )
            else:
                # No description known for this id; emit a blank column.
                if printKey:
                    fprintf(outputfile, "%s\n", key + '\t' + ' ' + '\t' + str(tempcount))
                else:
                    fprintf(outputfile, "%s\n", key + '\t' + str(tempcount))
        count+=tempcount
    return count
# this function adds the count into the hierarchical map at the lowest level
def add_counts_to_hierarchical_map(hierarchical_map, orthology_count):
    """Recursively add the per-orthology counts in `orthology_count`
    into the integer leaves of `hierarchical_map` (the tree built by
    read_map_file); leaves absent from `orthology_count` are untouched.

    Bug fix: the original wrapped the loop in a broad try/except whose
    handler dereferenced `hierarchical_map[key]` again (and `key` itself
    could be unbound), masking the real error before sys.exit(0).
    Errors now propagate with their original traceback.
    """
    for key, node in hierarchical_map.items():
        if isinstance(node, int):
            if key in orthology_count:
                hierarchical_map[key] = node + int(orthology_count[key])
        else:
            add_counts_to_hierarchical_map(node, orthology_count)
def get_list_of_queries(annotated_gff):
    """Return a list of the unique (short) ORF ids present in the
    annotated GFF file, in first-seen order.

    Bug fix: the original returned dict.keys(); under Python 3 that is a
    view object and the caller's subsequent .sort() raises
    AttributeError.  A real list is returned now.
    """
    orfList = {}
    gffreader = GffFileParser(annotated_gff)
    for contig in gffreader:
        for orf in gffreader.orf_dictionary[contig]:
            orfList[getShortORFId(orf['id'])] = 1
    return list(orfList.keys())
def Heapify(A, i, S):
    """Sift A[i] down within A[0:S] to restore the min-heap property,
    ordering entries by their second tuple element (A[x][1])."""
    while True:
        left = 2 * i + 1
        right = 2 * i + 2
        smallest = i
        if left < S and A[left][1] < A[smallest][1]:  # was >
            smallest = left
        if right < S and A[right][1] < A[smallest][1]:  # was >
            smallest = right
        if smallest == i or i >= S:
            return
        A[i], A[smallest] = A[smallest], A[i]
        i = smallest
def BuildHeap(S, A):
    """Arrange A[0:S] into a min-heap (keyed on A[x][1]) bottom-up, by
    sifting down every internal node from the middle to the root."""
    for index in range(int(S / 2), -1, -1):
        Heapify(A, index, S)
def writeParsedLines(fieldmapHeaderline, parsedLines, list, names, outputfilename):
    """Write the header line followed by the parsed lines referenced by
    `list` (sequence of (seqid, name) tuples, already sorted) to
    `outputfilename`, flushing in chunks of 1000 lines.

    `names` is unused here but kept for call-site compatibility.
    Exits the process when the output file cannot be created.
    """
    try:
        outputfile = open(outputfilename, 'w')
    except OSError:
        print("ERROR: Cannot create sequence file : " + outputfilename)
        sys.exit(0)
    outputStr=fieldmapHeaderline + "\n"
    fprintf(outputfile, "%s", outputStr)
    outputStr=""
    i = 0
    for item in list:
        outputStr += parsedLines[item[0]]+'\n'
        # Flush every 1000 accumulated lines to bound memory.
        if i% 1000==0 and i > 0:
            fprintf(outputfile, "%s", outputStr)
            outputStr=""
        i += 1
    if len(outputStr) > 0:
        fprintf(outputfile, "%s", outputStr)
    outputfile.close()
def merge_sorted_parsed_files(dbname, filenames, outputfilename, verbose=False, errorlogger = None):
    """K-way merge of several individually-sorted parsed B/LAST files
    into one file sorted by short ORF id, using a min-heap of one
    "current line" per input (ordered by Heapify/BuildHeap on the id).

    Control flow relies on StopIteration/parse failures inside the broad
    `except` blocks to detect input exhaustion: an exhausted reader is
    swapped out by shrinking the heap.  Exits the process when no input
    files are given or one cannot be read.
    """
    linecount = 0
    readerhandles = []
    if verbose:
        eprintf("Processing database : %s\n", dbname)
    if len(filenames)==0:
        eprintf("WARNING : Cannot find any B/LAST output file for database : %\n", dbname)
        exit_process()
    try:
        for i in range(len(filenames)):
            readerhandles.append(BlastOutputTsvParser(dbname, filenames[i]) )
    except OSError:
        eprintf("ERROR: Cannot read sequence file : %s\n", filenames[i])
        exit_process()
    # set error and warning parameters
    for readerhandle in readerhandles:
        readerhandle.setMaxErrorsLimit(5)
        readerhandle.setErrorAndWarningLogger(errorlogger)
        readerhandle.setSTEP_NAME('PARSE BLAST')
    try:
        outputfile = open(outputfilename, 'w')
        fieldmapHeaderLine = readerhandles[0].getHeaderLine()
        fprintf(outputfile, "%s\n",fieldmapHeaderLine)
    except OSError:
        eprintf("ERROR: Cannot create sequence file : %s\n", outputfilename)
        exit_process()
    # Prime the heap with the first line of every input; if any input is
    # empty the merge aborts (original behaviour).
    values = []
    for i in range(len(filenames)):
        iterate = iter(readerhandles[i])
        try :
            next(iterate)
            line = readerhandles[i].getProcessedLine()
            fields = [ x.strip() for x in line.split('\t') ]
            shortORFId = getShortORFId(fields[0])
            values.append( (i, shortORFId, line) )
        except:
            outputfile.close()
            return
    S = len(filenames)
    BuildHeap(S, values)
    missingcount =0
    count =0
    while S>0:
        try:
            # Emit the smallest current line, then advance its reader.
            iterate = iter(readerhandles[values[0][0]])
            line = readerhandles[values[0][0]].getProcessedLine()
            fields = [ x.strip() for x in line.split('\t') ]
            #print fields[0], orfRanks[fields[0]]
            fprintf(outputfile, "%s\n",line)
            next(iterate)
            line = readerhandles[values[0][0]].getProcessedLine()
            fields = [ x.strip() for x in line.split('\t') ]
            shortORFId = getShortORFId(fields[0])
            values[0] = (values[0][0], shortORFId, line)
            count+=1
        except:
            # Reader exhausted: drop it from the heap.
            values[0] = values[S-1]
            S = S - 1
        if S>0:
            Heapify(values, 0, S)
    #print 'line count ' + str(linecount)
    outputfile.close()
def create_sorted_parse_blast_files(dbname, blastoutput, listOfOrfs, size = 100000, verbose=False, errorlogger= None):
    """External sort of a parsed B/LAST file by query id: split
    `blastoutput` into sorted runs of `size` lines
    (<blastoutput>.tmp.<batch>), then k-way merge the runs into
    <blastoutput>.tmp and delete them.

    `listOfOrfs` is only used to build `orfRanks` (ORF id -> rank),
    which is currently unused beyond construction.
    """
    orfRanks = {}
    count = 0
    for orf in listOfOrfs:
        orfRanks[orf] = count
        count += 1
    sorted_parse_file = blastoutput + ".tmp"
    currSize = 0
    parsedLines={}
    list = []
    names = {}
    seqid =0
    batch = 0
    filenames = []
    if verbose:
        eprintf("\n\n\n")
        eprintf("dbname : %s\n", dbname)
        eprintf("Parsed file : %s\n", blastoutput)
    blastparser = BlastOutputTsvParser(dbname, blastoutput)
    blastparser.setMaxErrorsLimit(5)
    blastparser.setErrorAndWarningLogger(errorlogger)
    fieldmapHeaderLine = blastparser.getHeaderLine()
    for data in blastparser:
        # query = getShortORFId(data['query'])
        query = data['query']
        #names[seqid] = data['query']
        #print query, data['query']
        names[seqid] = query
        parsedLines[seqid] = blastparser.getProcessedLine()
        list.append( (seqid, names[seqid]) )
        seqid +=1
        currSize += 1
        # Run is full: sort it by query id and flush it to a batch file.
        if currSize % size ==0:
            list.sort(key=lambda tup: tup[1], reverse=False)
            #print "Num of lines writing to file " + sorted_parse_file + "." + str(batch) + " : " + str(len(list))
            writeParsedLines(fieldmapHeaderLine, parsedLines, list, names, sorted_parse_file + "." + str(batch))
            filenames.append(sorted_parse_file + "." + str(batch))
            batch += 1
            list = []
            names = {}
            seqid =0
            parsedLines = {}
    # Flush the final (possibly empty) partial run.
    if currSize == 0:
        list.sort(key=lambda tup: tup[1], reverse=False)
        writeParsedLines(fieldmapHeaderLine, parsedLines, list, names, sorted_parse_file + "." + str(batch))
        filenames.append(sorted_parse_file + "." + str(batch))
    else :
        if currSize % size !=0 :
            list.sort(key=lambda tup: tup[1], reverse=False)
            writeParsedLines(fieldmapHeaderLine, parsedLines, list, names, sorted_parse_file + "." + str(batch))
            filenames.append(sorted_parse_file + "." + str(batch))
    if verbose:
        eprintf("Number of lines : %s\n", str(currSize))
        eprintf("Merge the files : %s\n" , sorted_parse_file)
        eprintf("Number of files to merge : %s\n", str(len(filenames)))
    try:
        merge_sorted_parsed_files(dbname, filenames, sorted_parse_file, verbose = verbose, errorlogger = errorlogger)
    except:
        # NOTE(review): merge failures are silently swallowed here;
        # downstream code will then read a missing/partial .tmp file.
        pass
    # if dbname=='kegg-pep-2011-06-18':
    #     print 'finished ' , line
    #     print traceback.print_exc(10)
    #if dbname=='kegg-pep-2011-06-18':
    #    sys.exit(0)
    #remove the split files
    for file in filenames:
        remove(file)
def getBlastFileNames(opts) :
    """Discover parsed B/LAST output files under opts.blastdir that match
    '<blastdir>*<sample>*.<dbname>.<ALGORITHM>out.parsed.txt'.

    Returns parallel lists (database_names, parsed_blastouts, weight_dbs)
    with every weight fixed at 1; the database name is recovered from the
    file name via the capture group.
    """
    database_names = []
    parsed_blastouts = []
    weight_dbs = []
    dbnamePATT = re.compile(r'' + opts.blastdir + '*' + opts.sample_name + '*[.](.*)[.]' + opts.algorithm.upper() + 'out.parsed.txt')
    blastOutNames = glob(opts.blastdir + '*' + opts.algorithm.upper() + 'out.parsed.txt')
    for blastoutname in blastOutNames :
        result = dbnamePATT.search(blastoutname)
        if result:
            dbname = result.group(1)
            database_names.append(dbname)
            parsed_blastouts.append(blastoutname)
            weight_dbs.append(1)
    return database_names, parsed_blastouts, weight_dbs
# Module-level holder for the parsed CLI options; assigned inside main().
opts_global = ""
# the main function
def main(argv, errorlogger = None, runstatslogger = None):
    """Create the functional and taxonomic annotation tables for one sample.

    Workflow:
      1. parse options and prepare/clean the output folder,
      2. sort/parse the per-database B/LAST outputs for all ORFs,
      3. first pass over the RefSeq hits to train the MEGAN-style
         min-support LCA tree and fix each ORF's taxonomy,
      4. second pass over all databases to collect the functional hits,
      5. write the annotation tables and remove the temporary files.

    :param argv: command-line argument list (parsed with the module-level parser)
    :param errorlogger: optional logger that records parse errors/warnings
    :param runstatslogger: optional run-statistics logger (unused here)
    """
    global parser
    (opts, args) = parser.parse_args(argv)
    global opts_global
    opts_global = opts
    if not check_arguments(opts, args):
        print(usage)
        sys.exit(0)
    db_to_map_Maps = {'cog':opts.input_cog_maps, 'seed':opts.input_seed_maps, 'kegg':opts.input_kegg_maps, 'cazy':opts.input_cazy_maps}
    results_dictionary={}
    dbname_weight={}
    checkOrCreateFolder(opts.output_dir)
    # remove result tables left over from a previous run
    output_table_name = opts.output_dir + PATHDELIM +'functional_and_taxonomic_table.txt'
    if path.exists(output_table_name):
        remove(output_table_name)
    output_table_name = opts.output_dir + PATHDELIM + opts.sample_name + ".functional_and_taxonomic_table.txt"
    if path.exists(output_table_name):
        remove(output_table_name)
    output_table_file = open(output_table_name, 'w')
    fprintf(output_table_file, "ORF_ID\tORF_length\tstart\tend\tContig_Name\tContig_length\tstrand\tec\ttaxonomy\t product\n")
    output_table_file.close()
    # print "memory used = %s" %(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss /1000000)
    listOfOrfs = get_list_of_queries(opts.input_annotated_gff)
    listOfOrfs.sort(key=lambda tup: tup, reverse=False)
    if opts.blastdir !=None and opts.sample_name != None:
        try:
            database_names, input_blastouts, weight_dbs = getBlastFileNames(opts)
        except:
            print(traceback.print_exc(10))
            pass
    else:
        database_names = opts.database_name
        input_blastouts = opts.input_blastout
        weight_dbs = opts.weight_db
    ##### uncomment the following lines
    for dbname, blastoutput in zip(database_names, input_blastouts):
        create_sorted_parse_blast_files(dbname, blastoutput, listOfOrfs, verbose= opts.verbose, errorlogger = errorlogger)
    #####
    # process in blocks of size _stride
    lca = LCAComputation(opts.ncbi_taxonomy_map, opts.ncbi_megan_map)
    lca.setParameters(opts.lca_min_score, opts.lca_top_percent, opts.lca_min_support)
    print(opts.accession_to_taxon_map)
    #if opts.accession_to_taxon_map:
    #    lca.load_accession_to_taxon_map(opts.accession_to_taxon_map)
    blastParsers={}
    for dbname, blastoutput in zip( database_names, input_blastouts):
        blastParsers[dbname] = BlastOutputTsvParser(dbname, blastoutput + '.tmp')
        blastParsers[dbname].setMaxErrorsLimit(5)
        blastParsers[dbname].setErrorAndWarningLogger(errorlogger)
    # this part of the code computes the occurence of each of the taxons
    # which is use in the later stage is used to evaluate the min support
    # as used in the MEGAN software
    start = 0
    Length = len(listOfOrfs)
    _stride = 5000000
    Taxons = {}
    while start < Length:
        pickorfs= {}
        last = min(Length, start + _stride)
        for i in range(start, last):
            pickorfs[listOfOrfs[i]]= 'root'
        start = last
        #print 'Num of Min support orfs ' + str(start)
        results_dictionary={}
        for dbname, blastoutput in zip(database_names, input_blastouts):
            results = re.search(r'refseq', dbname, re.I)
            if results:
                #if True:
                try:
                    results_dictionary[dbname]={}
                    eprintf("Scanning database : %s...", dbname)
                    process_parsed_blastoutput(dbname, blastParsers[dbname], opts, results_dictionary[dbname], pickorfs, callnum=1)
                    lca.set_results_dictionary(results_dictionary)
                    lca.compute_min_support_tree(opts.input_annotated_gff, pickorfs, dbname = dbname )
                    # bug fix: dict.iteritems() does not exist in Python 3; the old
                    # call raised AttributeError which the bare except below
                    # silently swallowed, so Taxons was never populated.
                    for key, taxon in pickorfs.items():
                        Taxons[key] = taxon
                except:
                    eprintf("ERROR: while training for min support tree %s\n", dbname)
                    insert_error(errorcode)
                    traceback.print_exc()
    for dbname in results_dictionary.keys():
        print("number of collected in block hits in {}: {}".format(dbname, len(results_dictionary[dbname].keys())))
    # this loop determines the actual/final taxonomy of each of the ORFs
    # taking into consideration the min support
    blastParsers={}
    for dbname, blastoutput in zip(database_names, input_blastouts):
        blastParsers[dbname] = BlastOutputTsvParser(dbname, blastoutput + '.tmp')
    filePermTypes= {}
    start = 0
    outputfile = open( opts.output_dir + PATHDELIM + opts.sample_name + '.ORF_annotation_table.txt', 'w')
    # map the canonical short database keys to the actual (versioned) names
    short_to_long_dbnames = {}
    for dbname in database_names:
        results = re.search(r'^seed', dbname, re.IGNORECASE)
        if results:
            short_to_long_dbnames['seed'] = dbname
        results = re.search(r'^eggnog', dbname, re.IGNORECASE)
        if results:
            short_to_long_dbnames['cog'] = dbname
        results = re.search(r'^kegg', dbname, re.IGNORECASE)
        if results:
            short_to_long_dbnames['kegg'] = dbname
        results = re.search(r'^cazy', dbname, re.IGNORECASE)
        if results:
            short_to_long_dbnames['cazy'] = dbname
    standard_dbs = ['cog', 'seed', 'kegg', 'cazy' ]
    standard_db_maps = [opts.input_cog_maps, opts.input_seed_maps, opts.input_kegg_maps, opts.input_cazy_maps]
    field_to_description = {}
    hierarchical_map = {}
    for db in standard_dbs:
        if db in short_to_long_dbnames:
            field_to_description[db] = {}
            hierarchical_map[db] = {}
    for dbname in standard_dbs:
        if dbname in short_to_long_dbnames:
            try:
                read_map_file(db_to_map_Maps[dbname], field_to_description[dbname], hierarchical_map[dbname])
            except:
                raise
                pass
    while start < Length:
        pickorfs= {}
        last = min(Length, start + _stride)
        for i in range(start, last):
            pickorfs[listOfOrfs[i]]= True
        start = last
        gc.collect()
        eprintf("\nMemory used = %s MB\n", str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1000000))
        results_dictionary={}
        for dbname, blastoutput in zip( database_names, input_blastouts):
            try:
                results_dictionary[dbname]={}
                eprintf("Processing database : %s...", dbname)
                process_parsed_blastoutput(dbname, blastParsers[dbname], opts, results_dictionary[dbname], pickorfs, callnum=2)
                eprintf("done\n")
            except:
                traceback.print_exc()
                eprintf("ERROR: %s\n", dbname)
                pass
        # print dbname + ' ' + str(len(results_dictionary[dbname]))
        for dbname in results_dictionary.keys():
            print("number of collected hits in {}: {}".format(dbname, len(results_dictionary[dbname].keys())))
        eprintf("Num orfs processed : %s\n", str(start))
        # create the annotations now
        orfToContig = {}
        create_annotation(results_dictionary, database_names, opts.input_annotated_gff, opts.output_dir, Taxons, pickorfs, orfToContig, lca, compact_output=opts.compact_output, sample_name =opts.sample_name )
        for std_dbname, db_map_filename in zip(standard_dbs, standard_db_maps):
            if std_dbname in short_to_long_dbnames:
                create_table(results_dictionary[short_to_long_dbnames[std_dbname]], std_dbname, opts.output_dir, hierarchical_map, field_to_description)
        # create_table(results_dictionary[dbname], opts.input_kegg_maps, 'kegg', opts.output_dir, filePermType)
        print_orf_table(results_dictionary, orfToContig, opts.output_dir, outputfile, compact_output= opts.compact_output)
    # comment these lines out if you want to generate the KEGG and COG reports
    #for std_dbname, db_map_filename in zip(standard_dbs, standard_db_maps):
    #    if std_dbname in short_to_long_dbnames:
    #        print_kegg_cog_tables(std_dbname, opts.output_dir, hierarchical_map, field_to_description, filePermType = 'w', sample_name = opts.sample_name)
    outputfile.close()
    # now remove the temporary files
    for dbname, blastoutput in zip( database_names, input_blastouts):
        try:
            remove( blastoutput + '.tmp')
            pass
        except:
            pass
def refseq_id(product):
    """Extract the leading 'gi|<digits>' accession from a product string.

    Returns the matched accession (e.g. ``'gi|12345'``) or the empty
    string when *product* carries no GI-style identifier.
    """
    match = re.search(r'gi\|[0-9.]*', product)
    return match.group(0) if match else ''
def process_subsys2peg_file(subsystems2peg, subsystems2peg_file):
    """Load the SEED subsystems-to-peg mapping file into *subsystems2peg*.

    Every tab-separated line with at least three fields maps field 3 (the
    subsystem key, kept verbatim) to field 2 (the peg). On an unreadable
    file the error is recorded via ``insert_error`` and the mapping is
    left untouched.

    :param subsystems2peg: dict to populate in place
    :param subsystems2peg_file: path of the mapping file
    """
    try:
        orgfile = open(subsystems2peg_file,'r')
    except IOError:
        print("Cannot open " + str(subsystems2peg_file))
        insert_error(errorcode)
        # bug fix: previously execution fell through and crashed with a
        # NameError on the unbound 'orgfile'
        return
    lines = orgfile.readlines()
    orgfile.close()  # was closed a second time before; once is enough
    for line in lines:
        hits = line.split('\t')
        if len(hits) > 2:
            subsystems2peg[hits[2]] = hits[1]


halt = 0  # module-level leftover flag (unused here); kept for compatibility
def print_orf_table(results, orfToContig, output_dir, outputfile, compact_output=False):
    """Write the per-ORF annotation table: one row per ORF, one column per
    reference database (standard columns first, then any non-standard ones).

    :param results: {dbname: {orfname: [hit-dicts with 'query'/'product']}}
    :param orfToContig: maps orf names to their contig name
    :param output_dir: created if missing (the table itself goes to *outputfile*)
    :param outputfile: open file handle the rows are written to
    :param compact_output: shorten contig ids via ShortenContigId when True
    """
    addHeader = True
    if not path.exists(output_dir):
        makedirs(output_dir)

    # Collect, per ORF and per database, the best (first seen) annotation.
    orf_dict = {}
    for dbname in results.keys():
        print("number of hits in {}: {}".format(dbname, len(results[dbname].keys())))
        for orfname in results[dbname]:
            for orf in results[dbname][orfname]:
                if not orf['query'] in orf_dict:
                    orf_dict[orf['query']] = {}
                if dbname in orf_dict[orf['query']]:  # only the best hit prevails
                    continue
                orf_dict[orf['query']]['contig'] = orfToContig[orfname]
                product = orf['product'].strip()
                # Extract the database-specific functional identifier.
                if re.search(r'cog', dbname, re.I):
                    orf_dict[orf['query']][dbname] = cog_id(product)
                elif re.search(r'eggnog', dbname, re.I):
                    orf_dict[orf['query']][dbname] = cog_id(product)
                elif re.search(r'kegg', dbname, re.I):
                    orf_dict[orf['query']][dbname] = kegg_id(product)
                elif re.search(r'cazy', dbname, re.I):
                    orf_dict[orf['query']][dbname] = cazy_id(product)
                elif re.search(r'metacyc', dbname, re.I):
                    orf_dict[orf['query']][dbname] = product
                elif re.search(r'refseq', dbname, re.I):
                    orf_dict[orf['query']][dbname] = product
                elif re.search(r'seed', dbname, re.I):
                    orf_dict[orf['query']][dbname] = seed_id(product)
                else:
                    # unrecognised database: keep the raw product string
                    orf_dict[orf['query']][dbname] = product

    # Map canonical short keys ('cog', 'kegg', ...) to the actual database
    # names found in *results*; 'eggnog' databases fill the COG column.
    database_maps = {}
    for dbname in results.keys():
        for short in ('cog', 'eggnog', 'kegg', 'cazy', 'seed', 'metacyc', 'refseq'):
            if re.search(short, dbname, re.I):
                database_maps['cog' if short == 'eggnog' else short] = dbname
                break
        else:
            database_maps[dbname] = dbname
    print('database maps', database_maps)

    std_dbnames = ['cog', 'kegg', 'seed', 'cazy', 'metacyc', 'refseq']
    # bug fix: 'dbnames = std_dbnames' aliased the list, so appending the
    # non-standard names below also mutated std_dbnames while it was still
    # being matched against; take a copy instead.
    dbnames = list(std_dbnames)
    headers = ["ORF_ID", "CONTIG_ID"]
    for std_dbname in std_dbnames:
        headers.append(std_dbname.upper())
    # append any non-standard databases after the standard columns
    for dbname in sorted(results.keys()):
        non_std = True
        for std_dbname in std_dbnames:
            if re.search(std_dbname, dbname.lower(), re.I):
                non_std = False
        if non_std:
            dbnames.append(dbname)
            # bug fix: was headers.append(std_dbname), which labelled the new
            # column with the leftover loop variable instead of the db name
            headers.append(dbname)

    sampleName = None
    for orfn in orf_dict:
        if not sampleName:
            sampleName = getSampleNameFromContig(orf_dict[orfn]['contig'])
        orfName = orfn
        contigName = orf_dict[orfn]['contig']
        if compact_output:
            contigName = ShortenContigId(contigName)
        row = [orfName, contigName]
        for dbname in dbnames:
            if dbname in database_maps and database_maps[dbname] in orf_dict[orfn]:
                row.append(orf_dict[orfn][database_maps[dbname]])
            else:
                row.append("")
        if addHeader:
            fprintf(outputfile, "# %s\n", '\t'.join(headers))
            addHeader = False
        fprintf(outputfile, "%s\n", '\t'.join(row))
def MetaPathways_create_reports_fast(argv, errorlogger = None, runstatslogger = None):
    """Pipeline entry point: build the annotation reports for one sample.

    Always returns ``(0, '')``; any failure inside :func:`main` is recorded
    via ``insert_error(16)`` rather than propagated to the caller.
    """
    createParser()
    errorlogger.write("#STEP\tCREATE_ANNOT_REPORTS\n")
    try:
        main(argv, errorlogger=errorlogger, runstatslogger=runstatslogger)
    except:  # noqa: E722 -- deliberately swallows everything, incl. SystemExit
        insert_error(16)
    return (0, '')
# the main function of metapaths
if __name__ == "__main__":
    # Allow running this module stand-alone (outside the pipeline wrapper).
    createParser()
    main(sys.argv[1:])
| 1.703125 | 2 |
day-13-solution.py | nityansuman/30-days-of-code | 0 | 12758833 | # Import packages
from abc import ABCMeta, abstractmethod
class Book(object, metaclass=ABCMeta):
    """Abstract base class describing a book with a title and an author."""

    def __init__(self, title, author):
        """Remember the book's title and author."""
        self.title = title
        self.author = author

    @abstractmethod
    def display():
        """Print the book's details; concrete subclasses must implement this."""
# Create sub-class by inheriting the above parent class
class MyBook(Book):
    """Concrete :class:`Book` that additionally carries a price."""

    def __init__(self, title, author, price):
        """Initialise title/author via the base class and store the price."""
        super(MyBook, self).__init__(title, author)
        self.price = price

    def display(self):
        """Print the book's title, author and price, one per line."""
        for label, value in (("Title:", self.title),
                             ("Author:", self.author),
                             ("Price:", self.price)):
            print(label, value)
if __name__ == "__main__":
    # Read the book's title from stdin
    title = input()
    # Read the book's author from stdin
    author = input()
    # Read the price from stdin; must parse as an integer
    price = int(input())
    # Create an instance of the custom class `MyBook`
    new_novel = MyBook(title,author,price)
    # Call `display` to print the details of the book
    new_novel.display()
| 4.28125 | 4 |
examples/script_archive/01_minimal_example_2d.py | petuum/tuun | 33 | 12758834 | <reponame>petuum/tuun<filename>examples/script_archive/01_minimal_example_2d.py
from tuun import AcqOptDesigner, AcqOptimizer, SimpleGp
import numpy as np
# Surrogate model: a simple Gaussian process with fixed hyperparameters.
gp_model = SimpleGp({'ls': 3.0, 'alpha': 1.5, 'sigma': 1e-5})

# Acquisition function: expected improvement, 100 candidate generations.
acq_fn_config = {'acq_str': 'ei', 'n_gen': 100}

# Acquisition optimizer over the 2-d box [-5, 5] x [-5, 5].
acq_opt = AcqOptimizer(domain={'min_max': [(-5, 5), (-5, 5)]})

# Initial observations: five 2-d points with their objective values.
initial_data = {
    'x': [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.5, 0.5]],
    'y': [6.0, 3.0, 4.0, 5.0, 2.0],
}

# Designer ties model, acquisition function and optimizer together.
bo_designer = AcqOptDesigner(gp_model, acq_fn_config, acq_opt, initial_data)

# Query the designer for the acquisition optimum and report it.
acq_optima = bo_designer.get()
print('acq_optima: {}'.format(acq_optima))
| 2.125 | 2 |
reservations/migrations/0002_auto_20181005_1022.py | marcusaj0114/aion | 0 | 12758835 | # Generated by Django 2.1.2 on 2018-10-05 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: makes ``Profile.bio``, ``Profile.phone`` and
    ``Profile.room_no`` optional (``blank=True, null=True``)."""
    dependencies = [
        ('reservations', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='bio',
            field=models.TextField(blank=True, max_length=500, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='phone',
            field=models.CharField(blank=True, max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='room_no',
            field=models.CharField(blank=True, max_length=30, null=True),
        ),
    ]
| 1.742188 | 2 |
ebcpy/simulationapi/fmu.py | RWTH-EBC/ebcpy | 2 | 12758836 | <reponame>RWTH-EBC/ebcpy
"""Module for classes using a fmu to
simulate models."""
import os
import logging
import pathlib
import atexit
import shutil
from typing import List, Union
import fmpy
from fmpy.model_description import read_model_description
from pydantic import Field
import pandas as pd
import numpy as np
from ebcpy import simulationapi, TimeSeriesData
from ebcpy.simulationapi import SimulationSetup, SimulationSetupClass, Variable
# pylint: disable=broad-except
class FMU_Setup(SimulationSetup):
    """
    Add's custom setup parameters for simulating FMU's
    to the basic `SimulationSetup`
    """

    # Wall-clock limit forwarded to fmpy.simulate_fmu; np.inf means no limit.
    timeout: float = Field(
        title="timeout",
        default=np.inf,
        description="Timeout after which the simulation stops."
    )

    # Consumed by the SimulationSetup solver validation.
    _default_solver = "CVode"
    _allowed_solvers = ["CVode", "Euler"]
class FMU_API(simulationapi.SimulationAPI):
    """
    Class for simulation using the fmpy library and
    a functional mockup interface as a model input.

    :keyword bool log_fmu:
        Whether to print fmu messages or not.

    Example:

    >>> import matplotlib.pyplot as plt
    >>> from ebcpy import FMU_API
    >>> # Select any valid fmu. Replace the line below if
    >>> # you don't have this file on your device.
    >>> model_name = "Path to your fmu"
    >>> fmu_api = FMU_API(model_name)
    >>> fmu_api.sim_setup = {"stop_time": 3600}
    >>> result_df = fmu_api.simulate()
    >>> fmu_api.close()
    >>> # Select an exemplary column
    >>> col = result_df.columns[0]
    >>> plt.plot(result_df[col], label=col)
    >>> _ = plt.legend()
    >>> _ = plt.show()

    .. versionadded:: 0.1.7
    """
    _sim_setup_class: SimulationSetupClass = FMU_Setup
    # NOTE(review): class-level mutable dicts -- they are shared between all
    # FMU_API instances in one process and mutated in place. Looks intentional
    # for the multiprocessing workers, but confirm two live FMU_API objects
    # never coexist in the same (worker) process.
    _fmu_instances: dict = {}
    _unzip_dirs: dict = {}
    # Python type -> numpy dtype used when building the structured input
    # array that is passed to fmpy.simulate_fmu.
    _type_map = {
        float: np.double,
        bool: np.bool_,
        int: np.int_
    }

    def __init__(self, cd, model_name, **kwargs):
        """Instantiate class parameters"""
        # Init instance attributes
        self._model_description = None
        self._fmi_type = None
        self.log_fmu = kwargs.get("log_fmu", True)
        self._single_unzip_dir: str = None
        if isinstance(model_name, pathlib.Path):
            model_name = str(model_name)
        if not model_name.lower().endswith(".fmu"):
            raise ValueError(f"{model_name} is not a valid fmu file!")
        # Default the working directory to the fmu's own folder.
        if cd is None:
            cd = os.path.dirname(model_name)
        super().__init__(cd, model_name, **kwargs)
        # Register exit option
        atexit.register(self.close)

    def _update_model(self):
        # Setup the fmu instance
        self.setup_fmu_instance()

    def close(self):
        """
        Closes the fmu.

        :return: bool
            True on success
        """
        # Close MP of super class
        super().close()
        # Close if single process
        if not self.use_mp:
            if not self._fmu_instances:
                return  # Already closed
            self._single_close(fmu_instance=self._fmu_instances[0],
                               unzip_dir=self._unzip_dirs[0])
            self._unzip_dirs = {}
            self._fmu_instances = {}

    def _single_close(self, **kwargs):
        # Terminate and free the native FMU instance; failures are only
        # logged, because close() must never abort a shutdown sequence.
        fmu_instance = kwargs["fmu_instance"]
        unzip_dir = kwargs["unzip_dir"]
        try:
            fmu_instance.terminate()
        except Exception as error:  # This is due to fmpy which does not yield a narrow error
            self.logger.error(f"Could not terminate fmu instance: {error}")
        try:
            fmu_instance.freeInstance()
        except OSError as error:
            self.logger.error(f"Could not free fmu instance: {error}")
        # Remove the extracted files
        if unzip_dir is not None:
            try:
                shutil.rmtree(unzip_dir)
            except FileNotFoundError:
                pass  # Nothing to delete
            except PermissionError:
                self.logger.error("Could not delete unzipped fmu "
                                  "in location %s. Delete it yourself.", unzip_dir)

    def _close_multiprocessing(self, _):
        """Small helper function"""
        idx_worker = self.worker_idx
        if idx_worker not in self._fmu_instances:
            return  # Already closed
        # NOTE(review): logged at ERROR level although this is a regular
        # shutdown message -- consider info level.
        self.logger.error(f"Closing fmu for worker {idx_worker}")
        self._single_close(fmu_instance=self._fmu_instances[idx_worker],
                           unzip_dir=self._unzip_dirs[idx_worker])
        self._unzip_dirs = {}
        self._fmu_instances = {}

    def simulate(self,
                 parameters: Union[dict, List[dict]] = None,
                 return_option: str = "time_series",
                 **kwargs):
        """
        Perform the single simulation for the given
        unzip directory and fmu_instance.
        See the docstring of simulate() for information on kwargs.

        Additional kwargs:

        :keyword str result_file_suffix:
            Suffix of the result file. Supported options can be extracted
            from the TimeSeriesData.save() function.
            Default is 'csv'.
        """
        return super().simulate(parameters=parameters, return_option=return_option, **kwargs)

    def _single_simulation(self, kwargs):
        """
        Perform the single simulation for the given
        unzip directory and fmu_instance.
        See the docstring of simulate() for information on kwargs.

        The single argument kwarg is to make this
        function accessible by multiprocessing pool.map.
        """
        # Unpack kwargs:
        parameters = kwargs.pop("parameters", None)
        return_option = kwargs.pop("return_option", "time_series")
        inputs = kwargs.get("inputs", None)
        fail_on_error = kwargs.get("fail_on_error", True)
        # Pick the FMU instance belonging to this (worker) process and
        # instantiate it lazily on first use.
        if self.use_mp:
            idx_worker = self.worker_idx
            if idx_worker not in self._fmu_instances:
                self._setup_single_fmu_instance(use_mp=True)
        else:
            idx_worker = 0
        fmu_instance = self._fmu_instances[idx_worker]
        unzip_dir = self._unzip_dirs[idx_worker]
        if inputs is not None:
            if not isinstance(inputs, (TimeSeriesData, pd.DataFrame)):
                raise TypeError("DataFrame or TimeSeriesData object expected for inputs.")
            inputs = inputs.copy()  # Create save copy
            if isinstance(inputs, TimeSeriesData):
                inputs = inputs.to_df(force_single_index=True)
            if "time" in inputs.columns:
                raise IndexError(
                    "Given inputs contain a column named 'time'. "
                    "The index is assumed to contain the time-information."
                )
            # Convert df to structured numpy array for fmpy: simulate_fmu
            inputs.insert(0, column="time", value=inputs.index)
            inputs_tuple = [tuple(columns) for columns in inputs.to_numpy()]
            # Try to match the type, default is np.double.
            # 'time' is not in inputs and thus handled separately.
            dtype = [(inputs.columns[0], np.double)] + \
                    [(col,
                      self._type_map.get(self.inputs[col].type, np.double)
                      ) for col in inputs.columns[1:]]
            inputs = np.array(inputs_tuple, dtype=dtype)
        if parameters is None:
            parameters = {}
        else:
            self.check_unsupported_variables(variables=list(parameters.keys()),
                                             type_of_var="parameters")
        try:
            # reset the FMU instance instead of creating a new one
            fmu_instance.reset()
            # Simulate
            res = fmpy.simulate_fmu(
                filename=unzip_dir,
                start_time=self.sim_setup.start_time,
                stop_time=self.sim_setup.stop_time,
                solver=self.sim_setup.solver,
                step_size=self.sim_setup.fixedstepsize,
                relative_tolerance=None,
                output_interval=self.sim_setup.output_interval,
                record_events=False,  # Used for an equidistant output
                start_values=parameters,
                apply_default_start_values=False,  # As we pass start_values already
                input=inputs,
                output=self.result_names,
                timeout=self.sim_setup.timeout,
                step_finished=None,
                model_description=self._model_description,
                fmu_instance=fmu_instance,
                fmi_type=self._fmi_type,
            )
        except Exception as error:
            self.logger.error(f"[SIMULATION ERROR] Error occurred while running FMU: \n {error}")
            if fail_on_error:
                raise error
            return None
        # Reshape result:
        df = pd.DataFrame(res).set_index("time")
        # Round the time index to the decimal precision of output_interval so
        # the grid is exactly equidistant despite floating-point noise.
        df.index = np.round(df.index.astype("float64"),
                            str(self.sim_setup.output_interval)[::-1].find('.'))
        if return_option == "savepath":
            result_file_name = kwargs.get("result_file_name", "resultFile")
            result_file_suffix = kwargs.get("result_file_suffix", "csv")
            savepath = kwargs.get("savepath", None)
            if savepath is None:
                savepath = self.cd
            os.makedirs(savepath, exist_ok=True)
            filepath = os.path.join(savepath, f"{result_file_name}.{result_file_suffix}")
            TimeSeriesData(df).droplevel(1, axis=1).save(
                filepath=filepath,
                key="simulation"
            )
            return filepath
        if return_option == "last_point":
            return df.iloc[-1].to_dict()
        # Else return time series data
        tsd = TimeSeriesData(df, default_tag="sim")
        return tsd

    def setup_fmu_instance(self):
        """
        Manually set up and extract the data to
        avoid this step in the simulate function.
        """
        self.logger.info("Extracting fmu and reading fmu model description")
        # First load model description and extract variables
        self._single_unzip_dir = os.path.join(self.cd,
                                              os.path.basename(self.model_name)[:-4] + "_extracted")
        os.makedirs(self._single_unzip_dir, exist_ok=True)
        self._single_unzip_dir = fmpy.extract(self.model_name,
                                              unzipdir=self._single_unzip_dir)
        self._model_description = read_model_description(self._single_unzip_dir,
                                                         validate=True)
        if self._model_description.coSimulation is None:
            self._fmi_type = 'ModelExchange'
        else:
            self._fmi_type = 'CoSimulation'

        def _to_bound(value):
            # Missing or non-numeric bounds are treated as unbounded.
            if value is None or \
                    not isinstance(value, (float, int, bool)):
                return np.inf
            return value
        self.logger.info("Reading model variables")
        _types = {
            "Enumeration": int,
            "Integer": int,
            "Real": float,
            "Boolean": bool,
            "String": str
        }
        # Extract inputs, outputs & tuner (lists from parent classes will be appended)
        for var in self._model_description.modelVariables:
            if var.start is not None:
                var.start = _types[var.type](var.start)
            # NOTE(review): `min=-_to_bound(var.min)` also negates a *present*
            # lower bound (min=2 becomes -2); presumably only the
            # missing-bound case (-inf) was intended -- confirm.
            _var_ebcpy = Variable(
                min=-_to_bound(var.min),
                max=_to_bound(var.max),
                value=var.start,
                type=_types[var.type]
            )
            if var.causality == 'input':
                self.inputs[var.name] = _var_ebcpy
            elif var.causality == 'output':
                self.outputs[var.name] = _var_ebcpy
            elif var.causality == 'parameter' or var.causality == 'calculatedParameter':
                self.parameters[var.name] = _var_ebcpy
            elif var.causality == 'local':
                self.states[var.name] = _var_ebcpy
            else:
                self.logger.error(f"Could not map causality {var.causality}"
                                  f" to any variable type.")
        if self.use_mp:
            self.logger.info("Extracting fmu %s times for "
                             "multiprocessing on %s processes",
                             self.n_cpu, self.n_cpu)
            self.pool.map(
                self._setup_single_fmu_instance,
                [True for _ in range(self.n_cpu)]
            )
            self.logger.info("Instantiated fmu's on all processes.")
        else:
            self._setup_single_fmu_instance(use_mp=False)

    def _setup_single_fmu_instance(self, use_mp):
        """Extract (per worker, when multiprocessing) and instantiate one FMU.

        Returns True once an instance exists for this worker index.
        """
        if not use_mp:
            wrk_idx = 0
        else:
            wrk_idx = self.worker_idx
        if wrk_idx in self._fmu_instances:
            return True
        if use_mp:
            unzip_dir = self._single_unzip_dir + f"_worker_{wrk_idx}"
            unzip_dir = fmpy.extract(self.model_name,
                                     unzipdir=unzip_dir)
        else:
            unzip_dir = self._single_unzip_dir
        self.logger.info("Instantiating fmu for worker %s", wrk_idx)
        self._fmu_instances.update({wrk_idx: fmpy.instantiate_fmu(
            unzipdir=unzip_dir,
            model_description=self._model_description,
            fmi_type=self._fmi_type,
            visible=False,
            debug_logging=False,
            logger=self._custom_logger,
            fmi_call_logger=None)})
        self._unzip_dirs.update({
            wrk_idx: unzip_dir
        })
        return True

    def _custom_logger(self, component, instanceName, status, category, message):
        """ Print the FMU's log messages to the command line (works for both FMI 1.0 and 2.0) """
        # pylint: disable=unused-argument, invalid-name
        label = ['OK', 'WARNING', 'DISCARD', 'ERROR', 'FATAL', 'PENDING'][status]
        _level_map = {'OK': logging.INFO,
                      'WARNING': logging.WARNING,
                      'DISCARD': logging.WARNING,
                      'ERROR': logging.ERROR,
                      'FATAL': logging.FATAL,
                      'PENDING': logging.FATAL}
        if self.log_fmu:
            self.logger.log(level=_level_map[label], msg=message.decode("utf-8"))
| 2.609375 | 3 |
server/server.py | cattlepi/cattlepi | 257 | 12758837 | <reponame>cattlepi/cattlepi<filename>server/server.py<gh_stars>100-1000
import falcon
import json
import os
import hashlib
class ServerUtils(object):
    """Static helpers for locating build artifacts and the local SSH key."""

    @staticmethod
    def get_file_location(filename):
        """Return the absolute path of *filename* inside the build output dir."""
        here = os.path.dirname(__file__)
        return os.path.abspath(os.path.join(here, '../builder/latest/output', filename))

    @staticmethod
    def get_file_dir(filename):
        """Return the directory that contains *filename*'s build artifact."""
        return os.path.dirname(ServerUtils.get_file_location(filename))

    @staticmethod
    def get_my_rsa_key():
        """Return the current user's public RSA key, stripped of whitespace."""
        key_path = os.path.join(os.environ['HOME'], '.ssh/id_rsa.pub')
        with open(key_path) as key_file:
            return key_file.read().strip()
class DeviceConfigResource(object):
    """Falcon resource serving the boot configuration JSON for a device."""

    def md5(self, fname):
        """Return the hex MD5 digest of the file at *fname*, read in 4 KiB chunks."""
        digest = hashlib.md5()
        with open(fname, "rb") as f:
            while True:
                chunk = f.read(4096)
                if not chunk:
                    break
                digest.update(chunk)
        return digest.hexdigest()

    def get_filedescriptor(self, filename):
        """Return the download URL and MD5 checksum for one build artifact."""
        return {
            'url': "http://%s/images/global/%s" % (os.environ['CATTLEPI_LOCALAPI'], filename),
            'md5sum': self.md5(ServerUtils.get_file_location(filename))
        }

    def on_get(self, req, resp, deviceid):
        """GET handler: respond with the boot configuration for *deviceid*."""
        resp.status = falcon.HTTP_200
        body = {
            'initfs': self.get_filedescriptor('initramfs.tgz'),
            'rootfs': self.get_filedescriptor('rootfs.sqsh'),
            'bootcode': '',
            'usercode': '',
            'config': {
                'ssh': {
                    'pi': {
                        'authorized_keys': [ServerUtils.get_my_rsa_key()]
                    }
                }
            }
        }
        resp.body = json.dumps(body)
class TrackAllResource(object):
    """Falcon resource for /track; placeholder until tracking is implemented."""
    def on_get(self, req, resp):
        # Always acknowledge with a dummy body for now.
        resp.status = falcon.HTTP_200
        resp.body = "Ok: dummy response"
class TrackResource(object):
    """Falcon resource for /track/{deviceid}; placeholder implementation."""
    def on_get(self, req, resp):
        # Always acknowledge with a dummy body for now.
        resp.status = falcon.HTTP_200
        resp.body = "Ok: dummy response"
# Wire up the WSGI application and its routes.
# bug fix: an extraction artifact ("| 2.15625 | 2 |") was fused onto the last
# line, making it a syntax error; removed.
app = falcon.API()
app.add_route('/boot/{deviceid}/config', DeviceConfigResource())
app.add_route('/track', TrackAllResource())
app.add_route('/track/{deviceid}', TrackResource())
# Serve the build artifacts (initramfs/rootfs) as static files.
app.add_static_route('/images/global', ServerUtils.get_file_dir('initramfs.tgz'))
src/0064.minimum-path-sum/minimum-path-sum.py | lyphui/Just-Code | 782 | 12758838 | <filename>src/0064.minimum-path-sum/minimum-path-sum.py
class Solution:
    def minPathSum(self, grid: List[List[int]]) -> int:
        """Return the minimal path sum from the top-left to the bottom-right
        of *grid*, moving only right or down.

        Uses a rolling 1-D DP row: O(m*n) time, O(n) extra space.
        (Also removes an extraction artifact that was fused onto the
        original return line.)
        """
        m, n = len(grid), len(grid[0])
        dp = grid[0][:]  # dp[j] = best sum reaching column j of the current row
        # first row: only reachable by moving right
        for j in range(1, n):
            dp[j] += dp[j - 1]
        for i in range(1, m):
            # first column of this row: only reachable from above
            dp[0] += grid[i][0]
            for j in range(1, n):
                dp[j] = grid[i][j] + min(dp[j], dp[j - 1])
        return dp[-1]
python/example100/13.py | claviering/code | 1 | 12758839 | <filename>python/example100/13.py<gh_stars>1-10
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 claviering <<EMAIL>>
#
# Distributed under terms of the WTFPL license.
for n in range(100,1000):
i = n % 10
j = n / 10 % 10
k = n / 100
if n == i**3 + j**3 + k**3:
print n
| 3.15625 | 3 |
var/spack/repos/builtin/packages/sloccount/package.py | jeanbez/spack | 0 | 12758840 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Sloccount(MakefilePackage):
    """SLOCCount is a set of tools for counting physical Source Lines of Code
    (SLOC) in a large number of languages of a potentially large set of
    programs."""
    homepage = "https://dwheeler.com/sloccount/"
    url = "https://dwheeler.com/sloccount/sloccount-2.26.tar.gz"
    version('2.26', sha256='fa7fa2bbf2f627dd2d0fdb958bd8ec4527231254c120a8b4322405d8a4e3d12b')
    # md5sum needed at run-time
    depends_on('coreutils', type=('build', 'run'))
    def edit(self, spec, prefix):
        # Point the makefile at Spack's install prefix and compiler.
        makefile = FileFilter('makefile')
        makefile.filter('^PREFIX=.*', 'PREFIX=' + prefix)
        makefile.filter('^CC=.*', 'CC=' + spack_cc)
        # Needed for `make test` to pass
        makefile.filter('PATH=.:${PATH}', 'PATH=$(CURDIR):${PATH}',
                        string=True)
    def install(self, spec, prefix):
        # Create the bin dir and run the makefile's install target.
        mkdir(prefix.bin)
        make('install')
| 2.015625 | 2 |
campusLogin.py | forAllBright/mbpTools | 0 | 12758841 | <reponame>forAllBright/mbpTools<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: forAllBright
# @Date: 2018-12-22 17:58:16
# @Last Modified by: forAllBright
# @Last Modified time: 2019-03-13 16:40:54
########################################################
# 用来配合 Alfred 快速认证登录校园网
########################################################
import requests
from bs4 import BeautifulSoup
import socket
import sys
import json
import argparse
# Command-line interface: -l/--log selects the action ('login' or 'logout').
parser = argparse.ArgumentParser(
    description="seu campus wlan login command line tool")
parser.add_argument(
    "-l", "--log", required=True, help="option login or logout")
args = parser.parse_args()
class bcolors:
    """ANSI terminal escape sequences used to colour the CLI output."""
    # Colors
    PURPLE = '\033[1;35;1m'
    RED = '\033[1;31;1m'
    BLUE = '\033[1;34;1m'
    GREEN = '\033[1;32;1m'
    CYAN = "\033[1;36;1m"
    YELLOW = "\033[1;33;1m"
    BLACK = "\033[1;30;1m"
    WHITE = "\033[1;37;1m"
    # Font effects
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    NEFATIVE1 = '\033[3m'
    NEFATIVE2 = '\033[5m'
    # Background colors
    BACKGROUND_BLACK = "\033[;;40m"
    BACKGROUND_RED = "\033[;;41m"
    BACKGROUND_GREEN = "\033[;;42m"
    BACKGROUND_YELLOW = "\033[;;43m"
    BACKGROUND_BLUE = "\033[;;44m"
    BACKGROUND_PURPLE = "\033[;;45m"
    BACKGROUND_CYAN = "\033[;;46m"
    BACKGROUND_WHITE = "\033[;;47m"
def login_request(login_url, username, password):
    """POST the credentials to the captive portal and return the parsed body.

    :param login_url: full login/logout endpoint URL
    :param username: account name ('' for logout)
    :param password: account password ('' for logout)
    :return: BeautifulSoup tree of the portal's (JSON) response body
    """
    form_data = {
        'username': username,
        'password': password,
    }
    request_headers = {
        "User-Agent":
        r"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
    }
    # verify=False: the portal serves a certificate that fails SSL checks.
    response = requests.post(login_url, form_data,
                             headers=request_headers, verify=False)
    return BeautifulSoup(response.content, "html.parser")
def pre_login(main_url):
    """Resolve the portal hostname and verify it is reachable on port 80.

    Exits the process with status 1 when the host cannot be reached or its
    name cannot be resolved.

    :param main_url: hostname of the campus login portal
    :return: the portal's IPv4 address as a string
    """
    try:
        # 2-second timeout so a dead network fails fast; the probe socket
        # was previously leaked -- close it explicitly.
        probe = socket.create_connection((main_url, 80), 2)
        probe.close()
    except socket.error:
        # bug fix: the '{}' placeholder was never filled in
        print("{} connect error".format(main_url))
        sys.exit(1)
    try:
        main_ip = socket.gethostbyname(main_url)
    except socket.error:
        print("{} ip parse error".format(main_url))
        sys.exit(1)
    return main_ip
def parse_response(resp):
    """Decode the captive-portal JSON response into an (info, status) pair.

    :param resp: response object/string whose str() form is a JSON document
    :return: tuple of the JSON 'info' and 'status' fields
    """
    payload = json.loads(str(resp))
    return payload['info'], payload['status']
if __name__ == "__main__":
    main_url = "w.seu.edu.cn"
    # Credentials are placeholders -- fill in real values before use.
    username = "你的账号"
    password = "<PASSWORD>"
    main_ip = pre_login(main_url)
    resp = ''
    if args.log == 'login':
        resp = login_request("http://{}/index.php/index/login".format(main_ip),
                             username, password)
    elif args.log == 'logout':
        # Logout requires no credentials.
        resp = login_request("http://{}/index.php/index/logout".format(main_ip),
                             '', '')
    info, status = parse_response(resp)
    if status == 1 and args.log == 'login':
        print(bcolors.GREEN + " **** " + info + " **** " + bcolors.ENDC)
        # NOTE(review): the session address appears under the 'logout_ip' key
        # even on login -- confirm against the portal's API.
        print(bcolors.GREEN + "Login IP: {}".format(json.loads(str(resp))['logout_ip']) + bcolors.ENDC)
    elif status == 1 and args.log == 'logout':
        print(bcolors.GREEN + " **** " + info + " **** " + bcolors.ENDC)
    elif status == 0:
        print(bcolors.RED + info + bcolors.ENDC)
venv/lib/python2.7/site-packages/dotmap/test.py | Shanka123/snips_assistant | 0 | 12758842 | import unittest
from __init__ import DotMap
class ReadmeTestCase(unittest.TestCase):
    """Exercises the usage examples shown in the DotMap README."""

    def test_basic_use(self):
        # DotMap supports attribute access while remaining a real dict.
        m = DotMap()
        self.assertIsInstance(m, DotMap)
        m.name = 'Joe'
        self.assertEqual(m.name, 'Joe')
        self.assertEqual('Hello ' + m.name, 'Hello Joe')
        self.assertIsInstance(m, dict)
        self.assertTrue(issubclass(m.__class__, dict))
        self.assertEqual(m['name'], 'Joe')
        m.name += ' Smith'
        m['name'] += ' Jr'
        # NOTE(review): '<NAME>' below looks like an anonymisation artifact;
        # the value actually built above is 'Joe Smith Jr' -- confirm.
        self.assertEqual(m.name, '<NAME> Jr')

    def test_automatic_hierarchy(self):
        # Intermediate DotMaps are created on first attribute access.
        m = DotMap()
        m.people.steve.age = 31
        self.assertEqual(m.people.steve.age, 31)

    def test_key_init(self):
        # Keyword arguments become keys.
        m = DotMap(a=1, b=2)
        self.assertEqual(m.a, 1)
        self.assertEqual(m.b, 2)

    def test_dict_conversion(self):
        # toDict() converts back to plain nested dicts.
        d = {'a': 1, 'b': 2, 'c': {'d': 3, 'e': 4}}
        m = DotMap(d)
        self.assertEqual(m.a, 1)
        self.assertEqual(m.b, 2)
        d2 = m.toDict()
        self.assertIsInstance(d2, dict)
        self.assertNotIsInstance(d2, DotMap)
        self.assertEqual(len(d2), 3)
        self.assertEqual(d2['a'], 1)
        self.assertEqual(d2['b'], 2)
        self.assertNotIsInstance(d2['c'], DotMap)
        self.assertEqual(len(d2['c']), 2)
        self.assertEqual(d2['c']['d'], 3)
        self.assertEqual(d2['c']['e'], 4)

    def test_ordered_iteration(self):
        # Iteration preserves insertion order.
        m = DotMap()
        m.people.john.age = 32
        m.people.john.job = 'programmer'
        m.people.mary.age = 24
        m.people.mary.job = 'designer'
        m.people.dave.age = 55
        m.people.dave.job = 'manager'
        expected = [
            ('john', 32, 'programmer'),
            ('mary', 24, 'designer'),
            ('dave', 55, 'manager'),
        ]
        for i, (k, v) in enumerate(m.people.items()):
            self.assertEqual(expected[i][0], k)
            self.assertEqual(expected[i][1], v.age)
            self.assertEqual(expected[i][2], v.job)
class BasicTestCase(unittest.TestCase):
    """Core dict-compatibility behaviour of DotMap.

    Several assertions below depend on insertion order being preserved
    across successive updates, so the statement sequence matters.
    """
    def setUp(self):
        # Shared fixture: two scalars plus one nested mapping.
        self.d = {
            'a': 1,
            'b': 2,
            'subD': {'c': 3, 'd': 4}
        }
    def test_dict_init(self):
        """Constructing from a dict converts nested dicts recursively."""
        m = DotMap(self.d)
        self.assertIsInstance(m, DotMap)
        self.assertEqual(m.a, 1)
        self.assertEqual(m.b, 2)
        self.assertIsInstance(m.subD, DotMap)
        self.assertEqual(m.subD.c, 3)
        self.assertEqual(m.subD.d, 4)
    def test_copy(self):
        """copy() yields another DotMap with equal contents."""
        m = DotMap(self.d)
        dm_copy = m.copy()
        self.assertIsInstance(dm_copy, DotMap)
        self.assertEqual(dm_copy.a, 1)
        self.assertEqual(dm_copy.b, 2)
        self.assertIsInstance(dm_copy.subD, DotMap)
        self.assertEqual(dm_copy.subD.c, 3)
        self.assertEqual(dm_copy.subD.d, 4)
    def test_fromkeys(self):
        """fromkeys() mirrors dict.fromkeys semantics."""
        m = DotMap.fromkeys([1, 2, 3], 'a')
        self.assertEqual(len(m), 3)
        self.assertEqual(m[1], 'a')
        self.assertEqual(m[2], 'a')
        self.assertEqual(m[3], 'a')
    def test_dict_functionality(self):
        """get/has_key/update/membership/iteration/del all behave like dict."""
        m = DotMap(self.d)
        self.assertEqual(m.get('a'), 1)
        self.assertEqual(m.get('f', 33), 33)
        self.assertIsNone(m.get('f'))
        # has_key is a py2-style API that DotMap still provides.
        self.assertTrue(m.has_key('a'))
        self.assertFalse(m.has_key('f'))
        # update accepts pairs and keyword arguments together.
        m.update([('rat', 5), ('bum', 4)], dog=7, cat=9)
        self.assertEqual(m.rat, 5)
        self.assertEqual(m.bum, 4)
        self.assertEqual(m.dog, 7)
        self.assertEqual(m.cat, 9)
        m.update({'lol': 1, 'ba': 2})
        self.assertEqual(m.lol, 1)
        self.assertEqual(m.ba, 2)
        # Expected key order follows the exact insertion sequence above.
        ordered_keys = [
            'a',
            'b',
            'subD',
            'rat',
            'bum',
            'dog',
            'cat',
            'lol',
            'ba',
        ]
        for i, k in enumerate(m):
            self.assertEqual(ordered_keys[i], k)
        self.assertTrue('a' in m)
        self.assertFalse('c' in m)
        ordered_values = [1, 2, DotMap(c=3, d=4), 5, 4, 7, 9, 1, 2]
        for i, v in enumerate(m.values()):
            self.assertEqual(ordered_values[i], v)
        self.assertTrue('c' in m.subD)
        self.assertTrue(len(m.subD), 2)
        del m.subD.c
        self.assertFalse('c' in m.subD)
        self.assertTrue(len(m.subD), 1)
    def test_list_comprehension(self):
        """DotMaps inside lists support attribute access in comprehensions."""
        parentDict = {
            'name': 'Father1',
            'children': [
                {'name': 'Child1'},
                {'name': 'Child2'},
                {'name': 'Child3'},
            ]
        }
        parent = DotMap(parentDict)
        ordered_names = ['Child1', 'Child2', 'Child3']
        comp = [x.name for x in parent.children]
        self.assertEqual(ordered_names, comp)
class PickleTestCase(unittest.TestCase):
    """A DotMap must survive a pickle round trip with its type intact."""

    def setUp(self):
        self.d = {
            'a': 1,
            'b': 2,
            'subD': {'c': 3, 'd': 4}
        }

    def test(self):
        import pickle
        restored = pickle.loads(pickle.dumps(DotMap(self.d)))
        self.assertIsInstance(restored, DotMap)
        self.assertEqual(restored.a, 1)
        self.assertEqual(restored.b, 2)
        # Nested maps must come back as DotMaps, not plain dicts.
        self.assertIsInstance(restored.subD, DotMap)
        self.assertEqual(restored.subD.c, 3)
        self.assertEqual(restored.subD.d, 4)
class EmptyTestCase(unittest.TestCase):
    """empty() reports whether the map currently holds any entries."""

    def test(self):
        dm = DotMap()
        self.assertTrue(dm.empty())
        dm.a = 1
        self.assertFalse(dm.empty())
        # Touching a missing key dynamically creates an empty child DotMap.
        self.assertTrue(dm.b.empty())
        self.assertIsInstance(dm.b, DotMap)
class DynamicTestCase(unittest.TestCase):
    """_dynamic=False disables automatic creation of missing keys."""

    def test(self):
        dynamic = DotMap()
        # Dynamic maps silently create intermediate children.
        dynamic.still.works
        dynamic.sub.still.works

        static = DotMap(_dynamic=False)
        with self.assertRaises(KeyError):
            static.no

        # Seeding a static map from an existing one keeps its entries
        # readable, but still forbids creating new ones.
        static_seeded = DotMap(dynamic, _dynamic=False)
        static_seeded.still.works
        static_seeded.sub.still.works
        with self.assertRaises(KeyError):
            static_seeded.no.creation
class RecursiveTestCase(unittest.TestCase):
    """Self-referencing maps must not recurse infinitely in str()/toDict()."""
    def test(self):
        m = DotMap()
        m.a = 5
        m_id = id(m)
        # Create a cycle: the map contains itself.
        m.recursive = m
        # Following the cycle any number of times yields the same object.
        self.assertEqual(id(m.recursive.recursive.recursive), m_id)
        # str() renders the cycle as 'DotMap(...)' instead of recursing.
        self.assertEqual(str(m), '''DotMap(a=5, recursive=DotMap(...))''')
        d = m.toDict()
        d_id = id(d)
        # Rebuild the same cycle on the plain-dict conversion.
        d['a'] = 5
        d['recursive'] = d
        d['recursive']['recursive']['recursive']
        self.assertEqual(id(d['recursive']['recursive']['recursive']), d_id)
        # dict's own repr uses '{...}' for self-references.
        self.assertEqual(str(d), '''{'a': 5, 'recursive': {...}}''')
        # Converting the cyclic dict back to a DotMap preserves the cycle.
        m2 = DotMap(d)
        m2_id = id(m2)
        self.assertEqual(id(m2.recursive.recursive.recursive), m2_id)
        self.assertEqual(str(m2), '''DotMap(a=5, recursive=DotMap(...))''')
class kwargTestCase(unittest.TestCase):
    """toDict() output can be splatted back into **kwargs unchanged."""

    def test(self):
        def capture(**kwargs):
            return kwargs

        source = {'1': 'a', '2': 'b'}
        dm = DotMap(source, _dynamic=False)
        self.assertEqual(source, capture(**dm.toDict()))
class DeepCopyTestCase(unittest.TestCase):
    """copy.deepcopy detaches a DotMap; plain assignment merely aliases it."""

    def test(self):
        import copy
        original = DotMap()
        original.a = 1
        original.b = 3
        alias = original
        detached = copy.deepcopy(original)
        self.assertEqual(original, alias)
        self.assertEqual(id(original), id(alias))
        self.assertEqual(original, detached)
        self.assertNotEqual(id(original), id(detached))
        # A mutation is visible through the alias but not the deep copy.
        original.a = 2
        self.assertEqual(original, alias)
        self.assertNotEqual(original, detached)

    def test_order_preserved(self):
        import copy
        original = DotMap()
        original.a = 1
        original.b = 2
        original.c = 3
        clone = copy.deepcopy(original)
        # The deep copy must iterate in the same insertion order.
        expected = []
        for key, value in original.iteritems():
            expected.append((key, value))
        for i, (key, value) in enumerate(clone.iteritems()):
            self.assertEqual(key, expected[i][0])
            self.assertEqual(value, expected[i][1])
class DotMapTupleToDictTestCase(unittest.TestCase):
    """toDict() converts DotMaps nested inside tuples back to dicts."""

    def test(self):
        nested = DotMap({'a': 1, 'b': (11, 22, DotMap({'c': 3}))})
        self.assertEqual(nested.toDict(), {'a': 1, 'b': (11, 22, {'c': 3})})
class OrderedDictInitTestCase(unittest.TestCase):
    """DotMap accepts an OrderedDict, converting nested mappings too."""

    def test(self):
        from collections import OrderedDict
        source = OrderedDict([('a', 1), ('b', 2), ('c', [OrderedDict([('d', 3)])])])
        dm = DotMap(source)
        self.assertIsInstance(dm, DotMap)
        # Mappings inside lists are converted as well.
        self.assertIsInstance(dm.c[0], DotMap)
class EmptyAddTestCase(unittest.TestCase):
    """Augmented assignment on a missing key treats the empty DotMap as a
    neutral element: ``m.counter += 1`` starts counting from scratch."""
    def test_base(self):
        """+= on a missing key accumulates, replacing the empty child map."""
        m = DotMap()
        for i in range(7):
            m.counter += 1
        self.assertNotIsInstance(m.counter, DotMap)
        self.assertIsInstance(m.counter, int)
        self.assertEqual(m.counter, 7)
    def test_various(self):
        """Accumulating keys coexist with ordinary assignments."""
        m = DotMap()
        m.a.label = 'test'
        m.a.counter += 2
        self.assertIsInstance(m.a, DotMap)
        self.assertEqual(m.a.label, 'test')
        self.assertNotIsInstance(m.a.counter, DotMap)
        self.assertIsInstance(m.a.counter, int)
        self.assertEqual(m.a.counter, 2)
        m.a.counter += 1
        self.assertEqual(m.a.counter, 3)
    def test_proposal(self):
        """The counting idiom from the feature proposal."""
        my_counters = DotMap()
        pages = [
            'once upon a time',
            'there was like this super awesome prince',
            'and there was this super rad princess',
            'and they had a mutually respectful, egalitarian relationship',
            'the end'
        ]
        for stuff in pages:
            my_counters.page += 1
        self.assertIsInstance(my_counters, DotMap)
        self.assertNotIsInstance(my_counters.page, DotMap)
        self.assertIsInstance(my_counters.page, int)
        self.assertEqual(my_counters.page, 5)
    def test_string_addition(self):
        """+= also works for string accumulation on a missing key."""
        m = DotMap()
        m.quote += 'lions'
        m.quote += ' and tigers'
        m.quote += ' and bears'
        m.quote += ', oh my'
        self.assertEqual(m.quote, 'lions and tigers and bears, oh my')
    def test_strange_addition(self):
        """Adding to an empty DotMap itself replaces it with the operand."""
        m = DotMap()
        m += "I'm a string now"
        self.assertIsInstance(m, str)
        self.assertNotIsInstance(m, DotMap)
        self.assertEqual(m, "I'm a string now")
        m2 = DotMap() + "I'll replace that DotMap"
        self.assertEqual(m2, "I'll replace that DotMap")
    def test_protected_hierarchy(self):
        """A non-empty DotMap refuses numeric addition."""
        m = DotMap()
        m.protected_parent.key = 'value'
        def protectedFromAddition():
            m.protected_parent += 1
        self.assertRaises(TypeError, protectedFromAddition)
    def test_type_error_raised(self):
        """Mismatched operand types raise TypeError as usual."""
        m = DotMap()
        def badAddition():
            m.a += 1
            m.a += ' and tigers'
        self.assertRaises(TypeError, badAddition)
| 3.3125 | 3 |
src/boogie/configurations/tools.py | pencil-labs/django-boogie | 0 | 12758843 | import base64
import importlib.util
from hashlib import md5
def secret_hash(data):
"""
Create a secret hash from data.
"""
strings = []
for key, value in sorted(data.items()):
strings.append(key)
try:
if isinstance(value, dict):
value = sorted(value.items())
if isinstance(value, list):
value = tuple(value)
data = hash(value)
if data != -1:
strings.append(str(data))
except TypeError:
pass
data = "".join(strings)
hash_value = md5(data.encode("utf8")).digest()
return base64.b85encode(hash_value).decode("ascii")
def module_exists(mod, package=None):
    """Return True if *mod* can be located without importing it."""
    return importlib.util.find_spec(mod, package=package) is not None
def module_path(mod, package=None):
    """Return the filesystem origin recorded in *mod*'s import spec."""
    found = importlib.util.find_spec(mod, package=package)
    return found.origin
| 2.90625 | 3 |
softmax.py | drsstein/PyRat | 0 | 12758844 | <reponame>drsstein/PyRat
# short lecture on learning linear neurons by <NAME>:
# https://www.youtube.com/watch?v=yqsI-X40OBY
import numpy as np
class softmax:
    """Softmax output layer trained with cross-entropy loss.

    Inputs are column-major: ``x`` is expected to be shaped
    (n_inputs, n_samples) -- each column is one sample (consistent with
    ``x.shape[1]`` being used as the sample count below).
    """
    def __init__(self, n_units, n_inputs):
        self.n_outputs = n_units
        self.n_inputs = n_inputs
        # Small random weights around zero; the extra +1 row holds the bias.
        self.w = (np.random.rand(self.n_inputs+1, self.n_outputs)- 0.5)/self.n_inputs
    def forward(self, x):
        # Append a row of ones so the bias is folded into the weight matrix.
        self.x = np.vstack([x, np.ones(x.shape[1])])
        #estimate logit
        z = np.dot(self.w.T, self.x)
        #estimate output
        # y_i = e^(z_i) / sum_i(e^(z_i))
        # (builtin sum over the first axis normalizes each column/sample;
        # NOTE(review): no max-subtraction, so large logits can overflow)
        self.y = np.exp(z)
        self.y /= sum(self.y)
        return self.y
    #as the softmax is always the last layer in the network, and the cross-entropy
    #error is computed with respect to the logit (y - t), we do both
    #forward and backpropagation in a single function call
    def evaluate(self, x, targets, learning_rate):
        """One forward/backward pass; returns (cross-entropy, dE/dx).

        `targets` must be one-hot columns matching `x`'s sample columns.
        """
        self.forward(x)
        #estimate error derivative
        # dEdz is (t - y), i.e. the NEGATIVE loss gradient w.r.t. z, so the
        # weight update below (w += lr * dEdw) performs gradient descent.
        dEdz = targets - self.y
        #estimate cross entropy error across samples
        #it is important to normalize here wrt. the number of training cases
        n_samples = dEdz.shape[1]
        C = sum(-sum(targets*np.log(self.y)))/n_samples
        #propagate backwards through weights and inputs
        dEdw = np.dot(self.x, dEdz.T)/n_samples
        #dE/dx for each training sample
        # The bias row of w is excluded: inputs have no bias component.
        dEdx = np.dot(self.w[0:self.n_inputs,:], dEdz)
        #update weights
        self.w += learning_rate * dEdw
        return C, dEdx
| 3.90625 | 4 |
digihel/wagtail_hooks.py | HotStew/digihel | 21 | 12758845 | from wagtail.core import hooks
def allow_blindly(tag):
    """Whitelister rule that passes the element through unchanged."""
    return tag
# See: http://docs.wagtail.io/en/v1.6/reference/hooks.html#construct-whitelister-element-rules
# See: http://docs.wagtail.io/en/v1.6/reference/hooks.html#construct-whitelister-element-rules
@hooks.register('construct_whitelister_element_rules')
def whitelister_element_rules():
    """Allow table and common structural/inline tags through the whitelister."""
    table_tags = ('table', 'thead', 'tbody', 'tfoot', 'tr', 'th', 'td')
    structural_tags = ('div', 'span', 'a', 'code', 'pre', 'blockquote', 'section')
    return {tag: allow_blindly for tag in table_tags + structural_tags}
| 1.992188 | 2 |
Arena_Test.py | Robotics-Club-IIT-BHU/Vision-2.0-2020-Arena | 11 | 12758846 | import gym
import vision_arena
import time
import pybullet as p
import pybullet_data
import cv2
if __name__ == "__main__":
    # Create the Vision arena simulation environment.
    env = gym.make("vision_arena-v0")
    step_count = 0
    while True:
        p.stepSimulation()
        # Drive all four wheels with identical commands.
        env.move_husky(5, 5, 5, 5)
        # Capture a single camera frame on step 100 only.
        if step_count == 100:
            frame = env.camera_feed()
            cv2.imwrite('media/testrun' + str(step_count) + '.png', frame)
        step_count += 1
        # NOTE(review): sleeping 100 s per simulation step looks
        # unintentionally long -- confirm (PyBullet demos typically
        # sleep ~0.01 s between steps).
        time.sleep(100)
StringSearch/BruteForce.py | ywkpl/DataStructuresAndAlgorithms | 1 | 12758847 | import time
class BruteForce:
    """Naive substring search: try every alignment of the pattern."""

    def __init__(self, mainStr, searchStr):
        self._mainStr = mainStr
        self._searchStr = searchStr

    def search(self) -> int:
        """Return the first index where the pattern occurs, or -1.

        An empty main string (or one shorter than the pattern) yields -1.
        """
        text = self._mainStr
        pattern = self._searchStr
        text_len, pat_len = len(text), len(pattern)
        if text_len == 0 or text_len < pat_len:
            return -1
        # Slide the pattern over every viable start position.
        for start in range(text_len - pat_len + 1):
            if text[start:start + pat_len] == pattern:
                return start
        return -1
def test_BruteForce():
    """Ad-hoc demo: search a Chinese news snippet and report the time taken."""
    mainStr='神光财经表示,今日上午沪指震荡下行,小幅下跌,农业种植板块逆势大涨,领涨两市,神农科技等4股涨停,稀土板块继续大涨,板块掀涨停潮,金力永磁等9股涨停,带动小金属、有色、黄金等板块大涨,建筑装饰板块继续保持强势,建研院等7股涨停,5G概念表现活跃,欣天科技等6股涨停,分散染料领跌两市,汽车整车、白酒、医药、家电等白马股集体下跌,跌幅居前。从盘面表现看,股市目前在低位震荡盘整,下跌空间有限,所以大家不必太过担心,短期随时都可能会走出一轮反弹行情,所以大家可以选择优质个股逢低买入,但是要控制好仓位,耐心持股待涨。'
    searchStr='持股XX待涨。'
    bf=BruteForce(mainStr, searchStr)
    # Fixed: the timer previously wrapped only the constructor, so the
    # printed duration never measured the search itself.
    start=time.time()
    index=bf.search()
    end=time.time()
    print(end-start)
    # The pattern deliberately does not occur, so this prints -1.
    print('查找到位置是:'+str(index))
# Run the ad-hoc demo when executed directly.
if __name__=="__main__":
    test_BruteForce()
| 3.640625 | 4 |
scripts/standalone_blob_server.py | nishp77/lbry-sdk | 4,996 | 12758848 | <filename>scripts/standalone_blob_server.py<gh_stars>1000+
import sys
import os
import asyncio
from lbry.blob.blob_manager import BlobManager
from lbry.blob_exchange.server import BlobServer
from lbry.schema.address import decode_address
from lbry.extras.daemon.storage import SQLiteStorage
async def main(address: str):
    """Run a standalone blob server bound to 0.0.0.0:4444.

    Validates *address* as an lbrycrd payment address, opens the local
    lbrynet SQLite database and blob directory, then serves blobs until
    cancelled. Returns 1 (without raising) on an invalid address.
    """
    try:
        decode_address(address)
    # Fixed: was a bare `except:`, which would also swallow SystemExit
    # and KeyboardInterrupt.
    except Exception:
        print(f"'{address}' is not a valid lbrycrd address")
        return 1
    loop = asyncio.get_running_loop()
    storage = SQLiteStorage(os.path.expanduser("~/.lbrynet/lbrynet.sqlite"))
    await storage.open()
    blob_manager = BlobManager(loop, os.path.expanduser("~/.lbrynet/blobfiles"), storage)
    await blob_manager.setup()
    server = await loop.create_server(
        lambda: BlobServer(loop, blob_manager, address),
        '0.0.0.0', 4444)
    try:
        async with server:
            await server.serve_forever()
    finally:
        # Always close the database, even when serve_forever is cancelled.
        await storage.close()
if __name__ == "__main__":
    # Usage: python standalone_blob_server.py <lbrycrd address>
    asyncio.run(main(sys.argv[1]))
| 2.65625 | 3 |
benchmarks/dataset_selectivity_benchmark.py | JayjeetAtGithub/benchmarks | 8 | 12758849 | <filename>benchmarks/dataset_selectivity_benchmark.py
import conbench.runner
import pyarrow.dataset as ds
from benchmarks import _benchmark
@conbench.runner.register_benchmark
class DatasetSelectivityBenchmark(_benchmark.Benchmark):
    """Read and filter a dataset with different selectivity."""

    name = "dataset-selectivity"
    arguments = ["source"]
    # Full-size datasets used in real benchmark runs.
    sources = [
        "nyctaxi_multi_parquet_s3",
        "nyctaxi_multi_ipc_s3",
        "chi_traffic_2020_Q1",
    ]
    # Small samples used when running in test mode.
    sources_test = [
        "nyctaxi_multi_parquet_s3_sample",
        "nyctaxi_multi_ipc_s3_sample",
        "chi_traffic_sample",
    ]
    valid_cases = (["selectivity"], ["1%"], ["10%"], ["100%"])
    # Per-dataset filter expressions tuned so each keeps roughly 1%, 10%,
    # or 100% of the rows; the inline counts are the resulting row totals.
    filters = {
        "nyctaxi_multi_parquet_s3": {
            "1%": ds.field("pickup_longitude") < -74.013451,  # 561384
            "10%": ds.field("pickup_longitude") < -74.002055,  # 5615432
            "100%": None,  # 56154689
        },
        "nyctaxi_multi_ipc_s3": {
            "1%": ds.field("pickup_longitude") < -74.014053,  # 596165
            "10%": ds.field("pickup_longitude") < -74.002708,  # 5962204
            "100%": None,  # 59616487
        },
        "chi_traffic_2020_Q1": {
            "1%": ds.field("END_LONGITUDE") < -87.807262,  # 124530
            "10%": ds.field("END_LONGITUDE") < -87.7624,  # 1307565
            "100%": None,  # 13038291
        },
        # The two taxi samples share identical thresholds.
        **dict.fromkeys(
            ["nyctaxi_multi_parquet_s3_sample", "nyctaxi_multi_ipc_s3_sample"],
            {
                "1%": ds.field("pickup_longitude") < -74.0124,  # 20
                "10%": ds.field("pickup_longitude") < -74.00172,  # 200
                "100%": None,  # 2000
            },
        ),
        "chi_traffic_sample": {
            "1%": ds.field("END_LONGITUDE") < -87.80726,  # 10
            "10%": ds.field("END_LONGITUDE") < -87.76148,  # 100
            "100%": None,  # 1000
        },
    }

    def run(self, source, case=None, **kwargs):
        """Yield one benchmark result per (source, selectivity) combination."""
        cases = self.get_cases(case, kwargs)
        # NOTE(review): the loop variable shadows the `source` parameter
        # (a name string becomes a source object) -- presumably intentional,
        # matching the base-class get_sources() API; confirm.
        for source in self.get_sources(source):
            source.download_source_if_not_exists()
            tags = self.get_tags(kwargs, source)
            format_str = source.format_str
            # Read the schema once from the first file so every case uses it.
            schema = ds.dataset(source.source_paths[0], format=format_str).schema
            for case in cases:
                (selectivity,) = case
                dataset = ds.dataset(
                    source.source_paths, schema=schema, format=format_str
                )
                f = self._get_benchmark_function(dataset, source.name, selectivity)
                yield self.benchmark(f, tags, kwargs, case)

    def _get_benchmark_function(self, dataset, source, selectivity):
        """Return a zero-arg callable that materializes the filtered table."""
        return lambda: dataset.to_table(
            filter=self.filters[source][selectivity]
        ).num_rows
| 2.328125 | 2 |
blueapps/account/backends.py | jin-cc/bastion-test | 42 | 12758850 | <filename>blueapps/account/backends.py
# -*- coding: utf-8 -*-
from blueapps.account.conf import ConfFixture
from blueapps.account.utils import load_backend
# Each backend class is bound at module level only when the corresponding
# setting exists on ConfFixture, so importers must probe with
# hasattr/getattr rather than importing the names unconditionally.
if hasattr(ConfFixture, 'USER_BACKEND'):
    UserBackend = load_backend(ConfFixture.USER_BACKEND)

if hasattr(ConfFixture, 'WEIXIN_BACKEND'):
    WeixinBackend = load_backend(ConfFixture.WEIXIN_BACKEND)

if hasattr(ConfFixture, 'RIO_BACKEND'):
    RioBackend = load_backend(ConfFixture.RIO_BACKEND)

if hasattr(ConfFixture, 'BK_JWT_BACKEND'):
    BkJwtBackend = load_backend(ConfFixture.BK_JWT_BACKEND)
| 1.945313 | 2 |