hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
50d8d110901dc8d422a6e7035234a1d64625b3dc | 1,854 | py | Python | raspi/test/bork_dprc.py | lukaschoebel/bed | 4f3619809adab9156e530bfa71c426871749d53b | [
"MIT"
] | null | null | null | raspi/test/bork_dprc.py | lukaschoebel/bed | 4f3619809adab9156e530bfa71c426871749d53b | [
"MIT"
] | 1 | 2020-09-11T13:11:16.000Z | 2020-09-11T13:11:16.000Z | raspi/test/bork_dprc.py | lukaschoebel/bed | 4f3619809adab9156e530bfa71c426871749d53b | [
"MIT"
] | null | null | null | #!/usr/bin/python
import time
import random
import RPi.GPIO as GPIO
import firebase_admin
from firebase_admin import credentials, firestore
# Authenticate against Firebase with the service-account key and open Firestore.
cred = credentials.Certificate("secrets/firestore-creds.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
# Configure the board: BCM pin numbering, pin 18 as an input pulled low.
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# reference to the firestore document
doc_ref = db.collection(u'current_measure').document(u'0')
# count: number of button presses seen so far.
# prev_inp: last sampled pin level; starts at 1 so a button held down at
# boot does not register as a press until it has been released once.
count = 0
prev_inp = 1
def random_number(infested):
    """Mock the detection step by producing a fake infestation score.

    Args:
        infested (bool): whether the tree should look infested.

    Returns:
        int: a value in 51-100 for an infested tree, 0-50 otherwise.
    """
    low, high = (51, 100) if infested else (0, 50)
    return random.randint(low, high)
def trigger_detection(PIN_NO):
    """
    On button press, trigger the send process of the message.
    Args:
        PIN_NO (int): Pin number on raspi zero board
    """
    global prev_inp
    global count
    # NOTE(review): `duration` only times the GPIO.input() call itself
    # (microseconds), not how long the button is held, so the
    # `duration > 0.8` branch below is effectively unreachable.
    # Presumably the intent was to measure press length — confirm.
    t1 = time.time()
    inp = GPIO.input(PIN_NO)
    duration = time.time() - t1
    # Rising edge: previous sample was low and current sample is high.
    if ((not prev_inp) and inp):
        count = count + 1
        print("Button pressed")
        print(round(duration, 2))
        if duration > 0.8:
            print("befallen")
        else:
            print("cool")
        print(count)
        # only update degree of infestiation and duration
        doc_ref.update({
            u'duration': 5,
            u'infestation': random_number(infested=True),
            u'status': u'completed'
        })
    # Remember the level so the next call can detect an edge.
    prev_inp = inp
    # Debounce / polling interval.
    time.sleep(0.05)
# BUG FIX: the original code placed `@DeprecationWarning` directly above this
# `if` statement. Decorators may only precede `def` or `class` statements, so
# the file failed to parse with a SyntaxError; the decorator is removed.
if __name__ == "__main__":
    print("+++ borki initialized +++")
    try:
        # Poll the button on pin 18 until interrupted from the keyboard.
        while True:
            trigger_detection(18)
    except KeyboardInterrupt:
        # Release the GPIO pins on exit.
        GPIO.cleanup()
| 22.888889 | 62 | 0.639159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.347896 |
50d94d09fea5fca32eea005cff0447eb0f2d233c | 461 | py | Python | src/test/when.py | cgjones/rr | dcdde9c497cad27618b27f6754c11b0693894178 | [
"MIT"
] | 3 | 2015-03-01T23:26:27.000Z | 2021-01-14T03:31:36.000Z | src/test/when.py | rogerwang/rr | 619b8951d392b46c6f62b2bad7c0861a05d9f32e | [
"MIT"
] | null | null | null | src/test/when.py | rogerwang/rr | 619b8951d392b46c6f62b2bad7c0861a05d9f32e | [
"MIT"
] | null | null | null | from rrutil import *
import re

# Ask rr/gdb for the current event number and sanity-check it.
send_gdb('when\n')
expect_gdb(re.compile(r'= (\d+)'))
# The capture group matches digits only, so parse it with int();
# the original used eval(), which is needless and unsafe on principle.
t = int(last_match().group(1))
if t < 1 or t > 10000:
    failed('ERROR in first "when"')

# Run to main, then query the event number again.
send_gdb('b main\n')
expect_gdb('Breakpoint 1')
send_gdb('c\n')
send_gdb('when\n')
expect_gdb(re.compile(r'= (\d+)'))
t2 = int(last_match().group(1))
if t2 < 1 or t2 > 10000:
    failed('ERROR in second "when"')

# Event time must move forward as execution advances.
if t2 <= t:
    failed('ERROR ... "when" failed to advance')
ok()
| 20.043478 | 48 | 0.624729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.321041 |
50d96de220acc9a59d7ab8f8ff91b286fe24be92 | 1,137 | py | Python | tests/test_invex.py | xridge/invex | 05cbdea29e486a82965d6798d2482296654939bd | [
"Apache-2.0"
] | 1 | 2019-09-08T19:30:28.000Z | 2019-09-08T19:30:28.000Z | tests/test_invex.py | xridge/invex | 05cbdea29e486a82965d6798d2482296654939bd | [
"Apache-2.0"
] | 7 | 2020-03-24T17:02:53.000Z | 2021-12-13T19:59:03.000Z | tests/test_invex.py | otto-von-bivouac/invex | 05cbdea29e486a82965d6798d2482296654939bd | [
"Apache-2.0"
] | 1 | 2019-04-24T21:20:37.000Z | 2019-04-24T21:20:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `invex` package."""
import os
import py
from click.testing import CliRunner
import pytest
from flexfolio.utils import run
from invex import cli
from .reports import ALL_FLEX_REPORTS
@pytest.mark.parametrize("flex_report", ALL_FLEX_REPORTS)  # type: ignore
def test_to_pdf_command(flex_report: str,
                        tmpdir: py.path.local  # pylint: disable=no-member
                        ) -> None:
    """End-to-end check that `invex to_pdf` converts a flex report into a PDF."""
    # Given the invex app with an input flex report and desired output pdf
    runner = CliRunner()
    output_filename = os.path.basename(flex_report).replace('xml', 'pdf')
    # BUG FIX: the format string contained the literal text "(unknown)" where
    # the {filename} placeholder belongs, so the `filename=` keyword argument
    # was silently ignored and every parametrized case wrote to the same path.
    output_pdf = \
        '{tmpdir}/{filename}'.format(tmpdir=tmpdir, filename=output_filename)

    # When we call the to_pdf command
    result = runner.invoke(
        cli.main,
        ['to_pdf',
         flex_report,
         '--pdf-result-path', output_pdf])

    # Then it should create pdf file
    assert result.exit_code == 0
    file_type_result = run('file -b {output_pdf}'.format(
        output_pdf=output_pdf))
    assert file_type_result.output.startswith('PDF document')
| 28.425 | 77 | 0.663149 | 0 | 0 | 0 | 0 | 888 | 0.781003 | 0 | 0 | 359 | 0.315743 |
50d99798c529ede9378d5878470651f778e5c271 | 2,078 | py | Python | lista_ex3.1.py/exercicio23.py | robinson-1985/mentoria_exercises | 8359cead6ee5351851b04cb45f252e3881b79117 | [
"MIT"
] | null | null | null | lista_ex3.1.py/exercicio23.py | robinson-1985/mentoria_exercises | 8359cead6ee5351851b04cb45f252e3881b79117 | [
"MIT"
] | null | null | null | lista_ex3.1.py/exercicio23.py | robinson-1985/mentoria_exercises | 8359cead6ee5351851b04cb45f252e3881b79117 | [
"MIT"
] | null | null | null | ''' 23. Faça um programa que receba o valor do salário mínimo, o turno de trabalho (M
— matutino; V — vespertino; ou N — noturno), a categoria (O — operário; G —
gerente) e o número de horas trabalhadas no mês de um funcionário. Suponha a
digitação apenas de dados válidos e, quando houver digitação de letras, utilize
maiúsculas. Calcule e mostre:
■ O coeficiente do salário, de acordo com a tabela a seguir.
Turno de trabalho Valor do coeficiente
M - matutino 10% do salário mínimo
V - Vespertino 15% do salário mínimo
N - Noturno 12% do salário mínimo
■ O valor do salário bruto, ou seja, o número de horas trabalhadas multiplicado pelo
valor do coeficiente do salário.
■ O imposto, de acordo com a tabela a seguir.
Categoria Salário Bruto Imposto sobre o salário bruto
O - Operário >= R$ 300,00 5%
O - Operário < R$ 300,00 3%
G - Gerente >= R$ 400,00 6%
G - Gerente < R$ 400,00 4%
■ A gratificação, de acordo com as regras a seguir.
Se o funcionário preencher todos os requisitos a seguir, sua gratificação será de
R$ 50,00; caso contrário, será de R$ 30,00. Os requisitos são:
Turno: Noturno
Número de horas trabalhadas: Superior a 80 horas
■ O auxílio alimentação, de acordo com as seguintes regras.
Se o funcionário preencher algum dos requisitos a seguir, seu auxílio alimentação será
de um terço do seu salário bruto; caso contrário, será de metade do seu salário bruto.
Os requisitos são:
Categoria: Operário
Coeficiente do salário: < = 25
■ O salário líquido, ou seja, salário bruto menos imposto mais gratificação mais auxílio
alimentação.
■ A classificação, de acordo com a tabela a seguir:
Salário líquido Mensagem
Menor que R$ 350,00 Mal remunerado
Entre R$ 350,00 e R$ 600,00 Normal
Maior que R$ 600,00 Bem remunerado ''' | 50.682927 | 88 | 0.63667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,167 | 1 |
50db4e1da37404b4b48c1ff5cba8f3fb72458aa5 | 538 | py | Python | how-to-use-azureml/training-with-deep-learning/how-to-use-estimator/dummy_train.py | faxu/MachineLearningNotebooks | ea1b7599c3e6903873aa152dc5829afa080e885a | [
"MIT"
] | 1 | 2021-01-18T16:19:04.000Z | 2021-01-18T16:19:04.000Z | how-to-use-azureml/training-with-deep-learning/how-to-use-estimator/dummy_train.py | faxu/MachineLearningNotebooks | ea1b7599c3e6903873aa152dc5829afa080e885a | [
"MIT"
] | 1 | 2019-03-18T04:33:24.000Z | 2019-03-18T04:33:24.000Z | MachineLearningNotebooks/how-to-use-azureml/training-with-deep-learning/how-to-use-estimator/dummy_train.py | raina777/bp-demo | 064848299731d8388c3709b5b809788860b63fc5 | [
"MIT"
] | 2 | 2020-09-07T01:41:49.000Z | 2020-10-01T18:16:28.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
print("*********************************************************")
print("Hello Azure ML!")

try:
    from azureml.core import Run
    run = Run.get_context()
    print("Log Fibonacci numbers.")
    run.log_list('Fibonacci numbers', [0, 1, 1, 2, 3, 5, 8, 13, 21, 34])
    run.complete()
except Exception:
    # BUG FIX: the original used a bare `except:`, which also swallows
    # KeyboardInterrupt/SystemExit. `Exception` keeps the best-effort
    # "metrics are optional" behaviour without hiding those signals.
    print("Warning: you need to install Azure ML SDK in order to log metrics.")

print("*********************************************************")
| 31.647059 | 79 | 0.524164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.628253 |
50dcec222e6e08eebcd423b5a1becc6f5cb978b4 | 5,916 | py | Python | sequential_inference/envs/mujoco/rand_param_envs/gym/scoreboard/client/api_requestor.py | cvoelcker/sequential_inference | acdc23aa8fdbfc76ded771e82a4abcdd081a3280 | [
"MIT"
] | null | null | null | sequential_inference/envs/mujoco/rand_param_envs/gym/scoreboard/client/api_requestor.py | cvoelcker/sequential_inference | acdc23aa8fdbfc76ded771e82a4abcdd081a3280 | [
"MIT"
] | null | null | null | sequential_inference/envs/mujoco/rand_param_envs/gym/scoreboard/client/api_requestor.py | cvoelcker/sequential_inference | acdc23aa8fdbfc76ded771e82a4abcdd081a3280 | [
"MIT"
] | null | null | null | import json
import platform
import gym
import six.moves.urllib as urlparse
from six import iteritems
from environments.mujoco.rand_param_envs.gym import error, version
from environments.mujoco.rand_param_envs.gym.scoreboard.client import http_client
verify_ssl_certs = True # [SECURITY CRITICAL] only turn this off while debugging
http_client = http_client.RequestsClient(verify_ssl_certs=verify_ssl_certs)
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlparse.urlsplit(url)
if base_query:
query = "%s&%s" % (base_query, query)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def _strip_nulls(params):
if isinstance(params, dict):
stripped = {}
for key, value in iteritems(params):
value = _strip_nulls(value)
if value is not None:
stripped[key] = value
return stripped
else:
return params
class APIRequestor(object):
    """Issues authenticated HTTP requests against the OpenAI Gym scoreboard API
    and translates error responses into the package's exception types."""

    def __init__(self, key=None, api_base=None):
        # Fall back to module-level defaults when no explicit key/base given.
        self.api_base = api_base or gym.scoreboard.api_base
        self.api_key = key
        self._client = http_client

    def request(self, method, url, params=None, headers=None):
        """Issue a request and return ``(parsed_response, api_key_used)``."""
        rbody, rcode, rheaders, my_api_key = self.request_raw(
            method.lower(), url, params, headers
        )
        resp = self.interpret_response(rbody, rcode, rheaders)
        return resp, my_api_key

    def handle_api_error(self, rbody, rcode, resp, rheaders):
        """Raise the exception type matching the HTTP status code in *rcode*."""
        # Rate limits were previously coded as 400's with code 'rate_limit'
        if rcode == 429:
            raise error.RateLimitError(resp.get("detail"), rbody, rcode, resp, rheaders)
        elif rcode in [400, 404]:
            # NOTE: shadows the builtin `type` within this branch.
            type = resp.get("type")
            if type == "about:blank":
                type = None
            raise error.InvalidRequestError(
                resp.get("detail"), type, rbody, rcode, resp, rheaders
            )
        elif rcode == 401:
            raise error.AuthenticationError(
                resp.get("detail"), rbody, rcode, resp, rheaders
            )
        else:
            detail = resp.get("detail")
            # This information will only be returned to developers of
            # the OpenAI Gym Scoreboard.
            dev_info = resp.get("dev_info")
            if dev_info:
                detail = "{}\n\n<dev_info>\n{}\n</dev_info>".format(
                    detail, dev_info["traceback"]
                )
            raise error.APIError(detail, rbody, rcode, resp, rheaders)

    def request_raw(self, method, url, params=None, supplied_headers=None):
        """
        Issue the HTTP call and return the raw ``(body, code, headers, key)``.

        Raises AuthenticationError when no API key is configured and
        APIConnectionError for unsupported HTTP methods.
        """
        if self.api_key:
            my_api_key = self.api_key
        else:
            my_api_key = gym.scoreboard.api_key
        if my_api_key is None:
            raise error.AuthenticationError(
                """You must provide an OpenAI Gym API key.
(HINT: Set your API key using "gym.scoreboard.api_key = .." or "export OPENAI_GYM_API_KEY=..."). You can find your API key in the OpenAI Gym web interface: https://gym.openai.com/settings/profile."""
            )
        abs_url = "%s%s" % (self.api_base, url)
        # Parameters are JSON-encoded; None-valued entries are stripped first.
        if params:
            encoded_params = json.dumps(_strip_nulls(params))
        else:
            encoded_params = None
        if method == "get" or method == "delete":
            # GET/DELETE carry the (JSON-encoded) params in the query string.
            if params:
                abs_url = _build_api_url(abs_url, encoded_params)
            post_data = None
        elif method == "post":
            post_data = encoded_params
        else:
            raise error.APIConnectionError(
                "Unrecognized HTTP method %r. This may indicate a bug in the "
                "OpenAI Gym bindings. Please contact gym@openai.com for "
                "assistance." % (method,)
            )
        # Build a user-agent descriptor with client/runtime details.
        ua = {
            "bindings_version": version.VERSION,
            "lang": "python",
            "publisher": "openai",
            "httplib": self._client.name,
        }
        for attr, func in [
            ["lang_version", platform.python_version],
            ["platform", platform.platform],
        ]:
            try:
                val = func()
            except Exception as e:
                # Never let platform introspection break the request.
                val = "!! %s" % (e,)
            ua[attr] = val
        headers = {
            "Openai-Gym-User-Agent": json.dumps(ua),
            "User-Agent": "Openai-Gym/v1 PythonBindings/%s" % (version.VERSION,),
            "Authorization": "Bearer %s" % (my_api_key,),
        }
        if method == "post":
            headers["Content-Type"] = "application/json"
        # Caller-supplied headers override/extend the defaults.
        if supplied_headers is not None:
            for key, value in supplied_headers.items():
                headers[key] = value
        rbody, rcode, rheaders = self._client.request(
            method, abs_url, headers, post_data
        )
        return rbody, rcode, rheaders, my_api_key

    def interpret_response(self, rbody, rcode, rheaders):
        """Decode the response body (plain text or JSON) and raise on non-2xx."""
        content_type = rheaders.get("Content-Type", "")
        if content_type.startswith("text/plain"):
            # Pass through plain text
            resp = rbody
            if not (200 <= rcode < 300):
                self.handle_api_error(rbody, rcode, {}, rheaders)
        else:
            # TODO: Be strict about other Content-Types
            try:
                if hasattr(rbody, "decode"):
                    rbody = rbody.decode("utf-8")
                resp = json.loads(rbody)
            except Exception:
                raise error.APIError(
                    "Invalid response body from API: %s "
                    "(HTTP response code was %d)" % (rbody, rcode),
                    rbody,
                    rcode,
                    rheaders,
                )
            if not (200 <= rcode < 300):
                self.handle_api_error(rbody, rcode, resp, rheaders)
        return resp
| 34 | 199 | 0.560514 | 4,961 | 0.838573 | 0 | 0 | 0 | 0 | 0 | 0 | 1,181 | 0.199628 |
50dd6bc0abebb9b9d1eabf20579e458a1fd16adf | 3,835 | py | Python | FinanceDicBuilder/DicBuilder.py | dukechain2333/Finance-Text-Analysis-System | 12c55e5298fb6d5420acae6d361db4506fef0d64 | [
"MIT"
] | 2 | 2021-05-25T02:36:13.000Z | 2021-09-11T01:44:16.000Z | FinanceDicBuilder/DicBuilder.py | dukechain2333/Finance-Text-Analysis-System | 12c55e5298fb6d5420acae6d361db4506fef0d64 | [
"MIT"
] | null | null | null | FinanceDicBuilder/DicBuilder.py | dukechain2333/Finance-Text-Analysis-System | 12c55e5298fb6d5420acae6d361db4506fef0d64 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
from pyhanlp import *
from DataBaseOperator import DBConnector
class DicBuilder:
    """Builds a finance-domain word dictionary from crawled article text."""

    def __init__(self, id=-1):
        """
        Prepare a builder for the finance dictionary.

        :param id: database row index of the text to use; -1 (default) selects all rows.
        """
        self.id = id
        self.stopWords_path = r'EmotionBasedDic/stopwords.txt'
        self.negativeWords_path = r'EmotionBasedDic/TsingHua/tsinghua.negative.gb.txt'
        self.positiveWords_path = r'EmotionBasedDic/TsingHua/tsinghua.positive.gb.txt'
        self.financeDic_path = r'EmotionBasedDic/FinanceWordDic.txt'

    def loadContent(self):
        """
        Load the article text from the database.

        :return: rows of the form ((text,), ...)
        """
        dbConnector = DBConnector.DBConnector()
        data = dbConnector.selectContent(self.id)
        return data

    def split_word(self, data):
        """
        Tokenize text with HanLP.

        :param data: raw text to segment
        :return: list of [word, part-of-speech] pairs
        """
        splitWords = HanLP.segment(data)
        return [str(token).split('/') for token in splitWords]

    def remove_attribute(self, data):
        """
        Drop the part-of-speech tags from tokenizer output.

        :param data: list of [word, pos] pairs
        :return: list of bare words
        """
        return [pair[0] for pair in data]

    def rubbish_dic(self):
        """
        Build the list of tokens to discard (stop words, sentiment words,
        punctuation, and the empty string).

        :return: list of unwanted tokens
        """
        def read_stripped(path, encoding):
            # One word per line in each source file; strip the newline.
            with open(path, encoding=encoding) as file:
                return [line.strip() for line in file]

        stopWords = read_stripped(self.stopWords_path, 'utf8')
        negativeWords = read_stripped(self.negativeWords_path, 'gbk')
        positiveWords = read_stripped(self.positiveWords_path, 'gbk')
        punctuationList = list(r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~“”?,!【】()、。:;’‘……¥·""")
        emptyList = ['']
        return stopWords + negativeWords + positiveWords + punctuationList + emptyList

    def remove_rubbish(self, data, rubbishList):
        """
        Remove rubbish tokens and numeric tokens from a word list.

        BUG FIX: the previous implementation removed items from the list while
        iterating over it, which skipped the element following every removal
        and therefore left rubbish words in the result.

        :param data: word list to clean (mutated in place, as callers rely on)
        :param rubbishList: tokens to discard
        :return: the cleaned word list
        """
        kept = [word for word in data
                if word.strip() not in rubbishList and not self.is_number(word.strip())]
        # Preserve the original in-place-mutation contract via slice assignment.
        data[:] = kept
        return data

    def is_number(self, n):
        """
        Check whether the argument parses as a number.

        :param n: value to test
        :return: True if ``n`` converts to float, else False
        """
        try:
            float(n)
        except (TypeError, ValueError):
            # Narrowed from a bare except: only conversion failures mean "not a number".
            return False
        return True

    def remove_duplicate(self, data):
        """
        Remove duplicate entries (element order is not preserved).

        :param data: list to deduplicate
        :return: deduplicated list
        """
        return list(set(data))

    def write_dic(self, data):
        """
        Write the finance dictionary to FinanceWordDic.txt, one word per line.

        :param data: cleaned word list
        """
        with open(self.financeDic_path, 'w', encoding='utf8') as file:
            for word in data:
                file.write(word + '\n')

    def build_dic(self):
        """
        Build the finance dictionary end to end: load text, tokenize,
        deduplicate, strip rubbish, and write the result to disk.

        :return: the word list that was written
        """
        data = self.loadContent()
        rubbishDic = self.rubbish_dic()
        wordList = []
        for d in data:
            print(d)
            wordList += self.split_word(d[0])
        wordList = self.remove_attribute(wordList)
        wordList = self.remove_duplicate(wordList)
        wordList = self.remove_rubbish(wordList, rubbishDic)
        self.write_dic(wordList)
        print('字典构建已完成!')
        return wordList
# if __name__ == '__main__':
# test = DicBuilder()
# test.build_dic()
| 24.741935 | 90 | 0.538722 | 4,143 | 0.960807 | 0 | 0 | 0 | 0 | 0 | 0 | 1,685 | 0.39077 |
50dda263e8c9ba571f0162aea021e38ba614e662 | 6,781 | py | Python | adalam/ransac.py | happylin0427/AdaLAM | 5d7748fda7f76683e60c7053a0792a2ae9ef8800 | [
"BSD-3-Clause"
] | 1 | 2021-08-12T15:56:33.000Z | 2021-08-12T15:56:33.000Z | adalam/ransac.py | happylin0427/AdaLAM | 5d7748fda7f76683e60c7053a0792a2ae9ef8800 | [
"BSD-3-Clause"
] | null | null | null | adalam/ransac.py | happylin0427/AdaLAM | 5d7748fda7f76683e60c7053a0792a2ae9ef8800 | [
"BSD-3-Clause"
] | null | null | null | from .utils import draw_first_k_couples, batch_2x2_inv, batch_2x2_ellipse, arange_sequence, piecewise_arange
import torch
def stable_sort_residuals(residuals, ransidx):
logres = torch.log(residuals + 1e-10)
minlogres = torch.min(logres)
maxlogres = torch.max(logres)
sorting_score = ransidx.unsqueeze(0).float() + 0.99 * (logres - minlogres) / (maxlogres - minlogres)
sorting_idxes = torch.argsort(sorting_score, dim=-1) # (niters, numsamples)
iters_range = torch.arange(residuals.shape[0], device=residuals.device)
return residuals[iters_range.unsqueeze(-1), sorting_idxes], sorting_idxes
def group_sum_and_cumsum(scores_mat, end_group_idx, group_idx=None):
cumulative_scores = torch.cumsum(scores_mat, dim=1)
ending_cumusums = cumulative_scores[:, end_group_idx]
shifted_ending_cumusums = torch.cat(
[torch.zeros(size=(ending_cumusums.shape[0], 1), dtype=ending_cumusums.dtype, device=scores_mat.device),
ending_cumusums[:, :-1]], dim=1)
grouped_sums = ending_cumusums - shifted_ending_cumusums
if group_idx is not None:
grouped_cumsums = cumulative_scores - shifted_ending_cumusums[:, group_idx]
return grouped_sums, grouped_cumsums
return grouped_sums, None
def confidence_based_inlier_selection(residuals, ransidx, rdims, idxoffsets, dv, min_confidence):
numransacs = rdims.shape[0]
numiters = residuals.shape[0]
sorted_res, sorting_idxes = stable_sort_residuals(residuals, ransidx)
sorted_res_sqr = sorted_res ** 2
too_perfect_fits = sorted_res_sqr <= 1e-8
end_rans_indexing = torch.cumsum(rdims, dim=0)-1
_, inv_indices, res_dup_counts = torch.unique_consecutive(sorted_res_sqr.half().float(), dim=1, return_counts=True, return_inverse=True)
duplicates_per_sample = res_dup_counts[inv_indices]
inlier_weights = (1./duplicates_per_sample).repeat(numiters, 1)
inlier_weights[too_perfect_fits] = 0.
balanced_rdims, weights_cumsums = group_sum_and_cumsum(inlier_weights, end_rans_indexing, ransidx)
progressive_inl_rates = weights_cumsums.float() / (balanced_rdims.repeat_interleave(rdims, dim=1)).float()
good_inl_mask = (sorted_res_sqr * min_confidence <= progressive_inl_rates) | too_perfect_fits
inlier_weights[~good_inl_mask] = 0.
inlier_counts_matrix, _ = group_sum_and_cumsum(inlier_weights, end_rans_indexing)
inl_counts, inl_iters = torch.max(inlier_counts_matrix.long(), dim=0)
relative_inl_idxes = arange_sequence(inl_counts)
inl_ransidx = torch.arange(numransacs, device=dv).repeat_interleave(inl_counts)
inl_sampleidx = sorting_idxes[inl_iters.repeat_interleave(inl_counts),
idxoffsets[inl_ransidx] + relative_inl_idxes]
highest_accepted_sqr_residuals = sorted_res_sqr[inl_iters, idxoffsets + inl_counts - 1]
expected_extra_inl = balanced_rdims[inl_iters, torch.arange(numransacs, device=dv)].float() * highest_accepted_sqr_residuals
return inl_ransidx, inl_sampleidx, inl_counts, inl_iters, inl_counts.float()/expected_extra_inl
def sample_padded_inliers(xsamples, ysamples, inlier_counts, inl_ransidx, inl_sampleidx, numransacs, dv):
maxinliers = torch.max(inlier_counts).item()
padded_inlier_x = torch.zeros(size=(numransacs, maxinliers, 2), device=dv)
padded_inlier_y = torch.zeros(size=(numransacs, maxinliers, 2), device=dv)
padded_inlier_x[inl_ransidx, piecewise_arange(inl_ransidx)] = xsamples[inl_sampleidx]
padded_inlier_y[inl_ransidx, piecewise_arange(inl_ransidx)] = ysamples[inl_sampleidx]
return padded_inlier_x, padded_inlier_y
def ransac(xsamples, ysamples, rdims, config, iters=128, refit=True):
DET_THR = config['detected_scale_rate_threshold']
MIN_CONFIDENCE = config['min_confidence']
dv = config['device']
numransacs = rdims.shape[0]
numsamples = xsamples.shape[0]
ransidx = torch.arange(numransacs, device=dv).repeat_interleave(rdims)
idxoffsets = torch.cat([torch.tensor([0], device=dv), torch.cumsum(rdims[:-1], dim=0)], dim=0)
rand_samples_rel = draw_first_k_couples(iters, rdims, dv)
rand_samples_abs = rand_samples_rel + idxoffsets
sampled_x = torch.transpose(xsamples[rand_samples_abs], dim0=1,
dim1=2) # (niters, 2, numransacs, 2) -> (niters, numransacs, 2, 2)
sampled_y = torch.transpose(ysamples[rand_samples_abs], dim0=1, dim1=2)
# minimal fit for sampled_x @ A^T = sampled_y
affinities_fit = torch.transpose(batch_2x2_inv(sampled_x, check_dets=True) @ sampled_y, -1, -2)
if not refit:
eigenvals, eigenvecs = batch_2x2_ellipse(affinities_fit)
bad_ones = (eigenvals[..., 1] < 1/DET_THR**2) | (eigenvals[..., 0] > DET_THR**2)
affinities_fit[bad_ones] = torch.eye(2, device=dv)
y_pred = (affinities_fit[:, ransidx] @ xsamples.unsqueeze(-1)).squeeze(-1)
residuals = torch.norm(y_pred - ysamples, dim=-1) # (niters, numsamples)
inl_ransidx, inl_sampleidx, \
inl_counts, inl_iters, \
inl_confidence = confidence_based_inlier_selection(residuals, ransidx,
rdims, idxoffsets, dv=dv, min_confidence=MIN_CONFIDENCE)
if len(inl_sampleidx) == 0:
# If no inliers have been found, there is nothing to re-fit!
refit = False
if not refit:
return inl_sampleidx, \
affinities_fit[inl_iters, torch.arange(inl_iters.shape[0], device=dv)], \
inl_confidence, inl_counts
# Organize inliers found into a matrix for efficient GPU re-fitting.
# Cope with the irregular number of inliers per sample by padding with zeros
padded_inlier_x, padded_inlier_y = sample_padded_inliers(xsamples, ysamples, inl_counts, inl_ransidx, inl_sampleidx,
numransacs, dv)
# A @ pad_x.T = pad_y.T
# A = pad_y.T @ pad_x @ (pad_x.T @ pad_x)^-1
refit_affinity = padded_inlier_y.transpose(-2, -1) @ padded_inlier_x @ batch_2x2_inv(
padded_inlier_x.transpose(-2, -1) @ padded_inlier_x, check_dets=True)
# Filter out degenerate affinities with large scale changes
eigenvals, eigenvecs = batch_2x2_ellipse(refit_affinity)
bad_ones = (eigenvals[..., 1] < 1/DET_THR**2) | (eigenvals[..., 0] > DET_THR**2)
refit_affinity[bad_ones] = torch.eye(2, device=dv)
y_pred = (refit_affinity[ransidx] @ xsamples.unsqueeze(-1)).squeeze(-1)
residuals = torch.norm(y_pred - ysamples, dim=-1)
inl_ransidx, inl_sampleidx, \
inl_counts, inl_iters, inl_confidence = confidence_based_inlier_selection(residuals.unsqueeze(0), ransidx,
rdims, idxoffsets, dv=dv, min_confidence=MIN_CONFIDENCE)
return inl_sampleidx, refit_affinity, inl_confidence, inl_counts
| 48.092199 | 140 | 0.720543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 532 | 0.078455 |
50dfeb4080e6537a7028d39ac5a866d9104a49fa | 1,705 | py | Python | 85/ninja.py | jsh/pybites | 73c79ed962c15247cead173b17f69f248ea51b96 | [
"MIT"
] | null | null | null | 85/ninja.py | jsh/pybites | 73c79ed962c15247cead173b17f69f248ea51b96 | [
"MIT"
] | null | null | null | 85/ninja.py | jsh/pybites | 73c79ed962c15247cead173b17f69f248ea51b96 | [
"MIT"
] | null | null | null | """Bite 85. Write a score property."""
from typing import Optional
SCORES = [10, 50, 100, 175, 250, 400, 600, 800, 1000]
RANKS = "white yellow orange green blue brown black paneled red".split()
BELTS = {score: rank for score, rank in zip(SCORES, RANKS)}


class NinjaBelt:
    """Track a Ninja's points and the belt those points have earned."""

    def __init__(self, score: int = 0) -> None:
        """Start with an optional initial score and no earned belt."""
        self._score = score
        self._last_earned_belt: Optional[str] = None

    def _get_belt(self, new_score: int) -> Optional[str]:
        """Return the highest belt whose score cutoff new_score reaches."""
        earned = self._last_earned_belt
        for cutoff in SCORES:
            if new_score < cutoff:
                break
            earned = BELTS[cutoff]
        return earned

    def _get_score(self) -> int:
        """Return the current score."""
        return self._score

    def _set_score(self, new_score: int) -> None:
        """Validate and store new_score, announcing any newly earned belt."""
        if not isinstance(new_score, int):
            raise ValueError("Score takes an int")
        if new_score < self._score:
            raise ValueError("Cannot lower score")
        self._score = new_score
        belt = self._get_belt(new_score)
        if belt == self._last_earned_belt:
            message = f"Set new score to {new_score}"
        else:
            self._last_earned_belt = belt
            message = f"Congrats, you earned {new_score} points"
            if belt is not None:
                message += f" obtaining the PyBites Ninja {belt.title()} Belt"
        print(message)

    score = property(_get_score, _set_score, None, "I am the 'score' property.")
| 34.1 | 80 | 0.606452 | 1,473 | 0.86393 | 0 | 0 | 0 | 0 | 0 | 0 | 432 | 0.253372 |
50dff37f03053b54880a5058a34de49805426364 | 671 | py | Python | python_base/binary_array_to_number.py | GlacierBo/python_learn | f604029a1ec449d8d24bb5d40215f25df2a2b027 | [
"MIT"
] | 1 | 2018-02-12T04:56:08.000Z | 2018-02-12T04:56:08.000Z | python_base/binary_array_to_number.py | GlacierBo/python_learn | f604029a1ec449d8d24bb5d40215f25df2a2b027 | [
"MIT"
] | null | null | null | python_base/binary_array_to_number.py | GlacierBo/python_learn | f604029a1ec449d8d24bb5d40215f25df2a2b027 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/12/15 11:26
# @Author : glacier
# @Site : 二进制计算
# @File : binary_array_to_number.py
# @Software: PyCharm Edu
# def binary_array_to_number(arr):
# sum = 0
# index = len(arr)-1
# i = 0
# while(index >= 0):
# sum += mul(arr[i],index)
# i += 1
# index -=1
# print(sum)
#
# def mul(index,arg2):
# print(index,arg2)
# for i in range(arg2):
# index = index * 2
# return index
def binary_array_to_number(arr):
    """Convert a list of binary digits (most significant bit first) to an int.

    BUG FIX: the previous body printed a debug tuple and returned None; the
    intended conversion (visible in the commented-out line) is restored.

    Args:
        arr: sequence of 0/1 ints, MSB first; empty input yields 0.

    Returns:
        int: the decimal value, e.g. [0, 0, 1, 1] -> 3.
    """
    if not arr:
        return 0
    return int("".join(map(str, arr)), 2)
binary_array_to_number([0,0,1,1])
| 19.171429 | 42 | 0.535022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 535 | 0.785609 |
50e1a65ca98824ba44c4c4b031d3410bbf590547 | 5,630 | py | Python | py_reportit/shared/config/container.py | fedus/py_reportit | 46422cabb652571d8cce6c8e91a229009dcca141 | [
"MIT"
] | 1 | 2021-12-05T19:16:16.000Z | 2021-12-05T19:16:16.000Z | py_reportit/shared/config/container.py | fedus/py_reportit | 46422cabb652571d8cce6c8e91a229009dcca141 | [
"MIT"
] | null | null | null | py_reportit/shared/config/container.py | fedus/py_reportit | 46422cabb652571d8cce6c8e91a229009dcca141 | [
"MIT"
] | null | null | null | from dependency_injector import containers, providers
from pytz import timezone as pytz_timezone
from py_reportit.shared.config import config
from py_reportit.shared.config.db import Database
from py_reportit.shared.config.requests_session import get_requests_session
from py_reportit.shared.repository.category import CategoryRepository
from py_reportit.shared.repository.category_vote import CategoryVoteRepository
from py_reportit.shared.repository.crawl import CrawlRepository
from py_reportit.shared.repository.crawl_item import CrawlItemRepository
from py_reportit.shared.repository.report import ReportRepository
from py_reportit.shared.repository.meta import MetaRepository
from py_reportit.shared.repository.report_answer import ReportAnswerRepository
from py_reportit.shared.repository.user import UserRepository
from py_reportit.crawler.service.crawler import CrawlerService
from py_reportit.crawler.service.reportit_api import ReportItService
from py_reportit.crawler.service.geocoder import GeocoderService
from py_reportit.crawler.service.photo import PhotoService
from py_reportit.crawler.post_processors.abstract_pp import PostProcessorDispatcher
from py_reportit.crawler.post_processors.twitter_pp import Twitter
from py_reportit.crawler.post_processors.geocode_pp import Geocode
from py_reportit.shared.service.vote_service import VoteService
from py_reportit.shared.service.cache_service import CacheService
post_processors = [Geocode, Twitter]
class Container(containers.DeclarativeContainer):
    """Dependency-injection container wiring the database, repositories,
    services and post-processors used by the py_reportit crawler."""
    # Populated at runtime via .config.from_dict(...).
    config = providers.Configuration()
    # Database & session
    db = providers.Singleton(
        Database,
        db_user=config.DB_USER,
        db_password=config.DB_PASSWORD,
        db_host=config.DB_HOST,
        db_port=config.DB_PORT,
        db_database=config.DB_DATABASE,
        log_db=config.LOG_DB
    )
    sessionmaker = providers.Singleton(db.provided.sqlalchemy_sessionmaker)
    # Resource provider: the session's lifecycle is managed by the container.
    requests_session = providers.Resource(get_requests_session, config=config)
    timezone = providers.Factory(pytz_timezone, zone=config.TIMEZONE)
    # Repositories
    report_repository = providers.Factory(ReportRepository)
    meta_repository = providers.Factory(MetaRepository)
    report_answer_repository = providers.Factory(ReportAnswerRepository)
    crawl_repository = providers.Factory(CrawlRepository)
    crawl_item_repository = providers.Factory(CrawlItemRepository)
    category_repository = providers.Factory(CategoryRepository)
    category_vote_repository = providers.Factory(CategoryVoteRepository)
    user_repository = providers.Factory(UserRepository)
    # Services
    cache_service = providers.Singleton(CacheService)
    reportit_service = providers.Factory(
        ReportItService,
        config=config,
        requests_session=requests_session,
        cache_service=cache_service
    )
    geocoder_service = providers.Factory(GeocoderService, config=config, requests_session=requests_session)
    photo_service = providers.Factory(PhotoService, config=config)
    vote_service = providers.Factory(
        VoteService,
        config=config,
        meta_repository=meta_repository,
        category_vote_repository=category_vote_repository,
        category_repository=category_repository
    )
    # Helper function to work around scope limitations with class variables and list comprehension
    # see https://stackoverflow.com/questions/13905741/accessing-class-variables-from-a-list-comprehension-in-the-class-definition
    def inflate_post_processors(
        config,
        reportit_service,
        geocoder_service,
        report_repository,
        meta_repository,
        report_answer_repository,
    ):
        """Create one Factory provider per class in the module-level
        ``post_processors`` list, sharing the given dependencies."""
        return [providers.Factory(
            pp,
            config=config,
            api_service=reportit_service,
            geocoder_service=geocoder_service,
            report_repository=report_repository,
            meta_repository=meta_repository,
            report_answer_repository=report_answer_repository,
        ) for pp in post_processors]
    # PostProcessors
    post_processor_dispatcher = providers.Factory(
        PostProcessorDispatcher,
        post_processors=providers.List(
            *inflate_post_processors(
                config=config,
                reportit_service=reportit_service,
                geocoder_service=geocoder_service,
                report_repository=report_repository,
                meta_repository=meta_repository,
                report_answer_repository=report_answer_repository,
            )
        ),
    )
    # Needed separately because specifically required in bulk geocoding util
    geocode_pp = providers.Factory(
        Geocode,
        config=config,
        api_service=reportit_service,
        geocoder_service=geocoder_service,
        report_repository=report_repository,
        meta_repository=meta_repository,
        report_answer_repository=report_answer_repository,
    )
    crawler_service = providers.Factory(
        CrawlerService,
        config=config,
        api_service=reportit_service,
        photo_service=photo_service,
        report_repository=report_repository,
        meta_repository=meta_repository,
        report_answer_repository=report_answer_repository,
        crawl_repository=crawl_repository,
        crawl_item_repository=crawl_item_repository,
        timezone=timezone
    )
def build_container_for_crawler() -> Container:
    """Create, configure and wire a Container for the crawler entry points.

    Loads the module-level `config` dict into the container and wires the
    crawler package modules so @inject decorators resolve against it.
    """
    container = Container()
    container.config.from_dict(config)
    container.wire(modules=["__main__", ".py_reportit", ".celery.tasks"], from_package="py_reportit.crawler")
    return container
| 39.097222 | 130 | 0.758615 | 3,910 | 0.694494 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.073179 |
50e348bba044ee5e4a0d44816e957948a33e2934 | 4,837 | py | Python | event_loop/select_loop.py | mapogolions/pyreact-event-loop | 6bfdf34c73a8ef1ce89adb3af69ac07314a0ae3e | [
"MIT"
] | null | null | null | event_loop/select_loop.py | mapogolions/pyreact-event-loop | 6bfdf34c73a8ef1ce89adb3af69ac07314a0ae3e | [
"MIT"
] | 2 | 2019-05-21T14:46:25.000Z | 2021-06-01T23:54:24.000Z | event_loop/select_loop.py | mapogolions/pyreact | 8d91f8397e6bec3fb02140015c68b50f6cb8ea62 | [
"MIT"
] | null | null | null | import select
import signal
import time
import event_loop.tick
import event_loop.timer
import event_loop.signal
# Timer intervals handled by time_to_sleep() arrive in microseconds; used to
# convert them to the seconds expected by time.sleep()/select().
MICROSECONDS_PER_SECOND = 10 ** 6
# Cap on the sleep timeout (seconds): 2**63 nanoseconds expressed in seconds.
MAX_TIMEOUT = int(2 ** 63 / 10 ** 9)
class SelectLoop:
    """A select()-based event loop: future ticks, timers, stream I/O and
    POSIX signals, dispatched in that priority order by run().
    """
    def __init__(self):
        self.future_tick_queue = event_loop.tick.FutureTickQueue()
        self.timers = event_loop.timer.Timers()
        # Streams registered for readability/writability; listeners are keyed
        # by hash(stream) so add/remove stay consistent per stream object.
        self.read_streams = []
        self.read_listeners = {}
        self.write_streams = []
        self.write_listeners = {}
        self.running = False
        self.signals = event_loop.signal.Signals()
        # Signal numbers received since the last dispatch (filled by the
        # lightweight C-level handler installed in add_signal).
        self.pcntl_signals = []
    def add_read_stream(self, stream, listener):
        """Invoke `listener(stream)` whenever `stream` becomes readable."""
        if stream.fileno() == -1:
            raise ValueError
        key = hash(stream)
        # Only the first listener per stream is kept; re-adding is a no-op.
        if key not in self.read_listeners:
            self.read_streams.append(stream)
            self.read_listeners[key] = listener
    def add_write_stream(self, stream, listener):
        """Invoke `listener(stream)` whenever `stream` becomes writable."""
        if stream.fileno() == -1:
            raise ValueError
        key = hash(stream)
        if key not in self.write_listeners:
            self.write_streams.append(stream)
            self.write_listeners[key] = listener
    def remove_read_stream(self, stream):
        """Stop watching `stream` for readability (no-op if unknown)."""
        key = hash(stream)
        if key in self.read_listeners:
            self.read_streams.remove(stream)
            del self.read_listeners[key]
    def remove_write_stream(self, stream):
        """Stop watching `stream` for writability (no-op if unknown)."""
        key = hash(stream)
        if key in self.write_listeners:
            self.write_streams.remove(stream)
            del self.write_listeners[key]
    def add_timer(self, interval, callback):
        """Schedule `callback` once after `interval`; returns the Timer."""
        timer = event_loop.timer.Timer(interval, callback, periodic=False)
        self.timers.add(timer)
        return timer
    def add_periodic_timer(self, interval, callback):
        """Schedule `callback` every `interval`; returns the Timer."""
        timer = event_loop.timer.Timer(interval, callback, periodic=True)
        self.timers.add(timer)
        return timer
    def cancel_timer(self, timer):
        """Cancel a previously added timer."""
        return self.timers.cancel(timer)
    def future_tick(self, listener):
        """Queue `listener` to run on the next loop iteration."""
        self.future_tick_queue.add(listener)
    def pcntl_signal_dispatch(self):
        """Run listeners for every signal received since the last dispatch."""
        for signum in self.pcntl_signals:
            self.signals.call(signum)
        self.pcntl_signals = []
    def add_signal(self, signum, listener):
        """Register `listener` for `signum`, installing the OS handler on
        the first registration for that signal."""
        self.signals.add(signum, listener)
        if self.signals.count(signum) == 1:
            # The handler only records the signal; actual listeners run later
            # on the loop thread via pcntl_signal_dispatch().
            signal.signal(
                signum,
                lambda *args: self.pcntl_signals.append(signum)
            )
    def remove_signal(self, signum, listener):
        """Unregister `listener`; restores the default OS handler when the
        last listener for `signum` is removed."""
        if not self.signals.count(signum):
            return
        self.signals.remove(signum, listener)
        if not self.signals.count(signum):
            signal.signal(signum, signal.SIG_DFL)
    def stop(self):
        """Request the loop to exit after the current iteration."""
        self.running = False
    def next_tick(self):
        """Run exactly one loop iteration (a stop is queued as a tick)."""
        self.future_tick(lambda *args: self.stop())
        self.run()
    def run(self):
        """Run until stop() is called or nothing remains to wait for."""
        self.running = True
        while self.running:
            self.future_tick_queue.tick()
            self.timers.tick()
            self.pcntl_signal_dispatch()
            has_pending_callbacks = not self.future_tick_queue.empty()
            was_just_stopped = not self.running
            has_pending_timer = self.timers.get_first()
            has_pending_io = self.read_streams or self.write_streams
            has_pending_signals = not self.signals.empty()
            # Priority: don't block if work is already queued (timeout=0);
            # otherwise block until the nearest timer, then on I/O, then on
            # signals alone; exit when the loop has nothing left to wait for.
            if was_just_stopped or has_pending_callbacks:
                self.notify(self.select_stream(timeout=0))
            elif has_pending_timer:
                self.wait_for_timers(has_pending_timer)
            elif has_pending_io:
                self.notify(self.select_stream(timeout=None))
            elif has_pending_signals:
                signal.pause()
            else:
                break
    def wait_for_timers(self, pending_timer):
        """Sleep (or select on streams) until the earliest timer is due.

        `pending_timer` is a (scheduled_at, timer) pair from Timers.get_first().
        """
        scheduled_at, _ = pending_timer
        timeout = self.time_to_sleep(scheduled_at - self.timers.get_time())
        if self.read_streams or self.write_streams:
            self.notify(self.select_stream(timeout=timeout))
        else:
            time.sleep(timeout)
    def time_to_sleep(self, timeout):
        """Convert a microsecond delta to a clamped, non-negative seconds value."""
        if timeout < 0:
            return 0
        timeout /= MICROSECONDS_PER_SECOND
        return MAX_TIMEOUT if timeout > MAX_TIMEOUT else timeout
    def select_stream(self, timeout):
        """select() over the registered streams; timeout semantics as select's."""
        return select.select(
            self.read_streams,
            self.write_streams,
            [],
            timeout
        )
    def notify(self, streams):
        """Fire the registered listeners for every ready stream."""
        if streams:
            ready_to_read, ready_to_write, _ = streams
            for stream in ready_to_read:
                listener = self.read_listeners[hash(stream)]
                listener(stream)
            for stream in ready_to_write:
                listener = self.write_listeners[hash(stream)]
                listener(stream)
| 31.822368 | 75 | 0.612777 | 4,648 | 0.960926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
50e3b3228607ed7e01edb42284e3a1cc6116992e | 2,393 | py | Python | scripts/publisher.py | kavyadevd/Toy_Car_Simulation | 56609f0b1ae6eb0bb9578615a83bab9ae01d6247 | [
"MIT"
] | null | null | null | scripts/publisher.py | kavyadevd/Toy_Car_Simulation | 56609f0b1ae6eb0bb9578615a83bab9ae01d6247 | [
"MIT"
] | null | null | null | scripts/publisher.py | kavyadevd/Toy_Car_Simulation | 56609f0b1ae6eb0bb9578615a83bab9ae01d6247 | [
"MIT"
] | null | null | null | #!/usr/bin/python2
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float64
def pub_sub(left_speed,right_speed):
    """Publish the current wheel speeds on the 'left_speed'/'right_speed' topics.

    NOTE(review): new Publisher objects are created on every call; normally
    these would be created once and reused — confirm this is intended.
    """
    pub_right = rospy.Publisher('right_speed', Float64, queue_size=10)
    pub_left = rospy.Publisher('left_speed', Float64, queue_size=10)
    # NOTE(review): the status line logs only left_speed, not both speeds.
    send = "The robot status: %s" % left_speed
    rospy.loginfo(send)
    pub_right.publish(right_speed)
    pub_left.publish(left_speed)
def move():
    """Drive the simulated car forward a fixed distance, then stop.

    Publishes a constant wheel speed to the four joint-controller command
    topics until `speed * elapsed_time` reaches `distance`, then publishes
    zero to halt the car.

    BUG FIX: the original inner `while(True)` loop had no exit condition
    (the distance check was never applied), so the stop commands after it
    were unreachable and the node spun forever.  Unused state variables
    (status, target_speed, control_speed, run, mode) were removed.
    """
    # Starts a new node
    rospy.init_node('Motion')
    pub_movfr = rospy.Publisher('/car_control/joint_right_controller/command', Float64, queue_size=10)
    pub_movfl = rospy.Publisher('/car_control/joint_left_controller/command', Float64, queue_size=10)
    pub_movbr = rospy.Publisher('/car_control/joint_back_left_controller/command', Float64, queue_size=10)
    pub_movbl = rospy.Publisher('/car_control/joint_back_right_controller/command', Float64, queue_size=10)
    rate = rospy.Rate(10) # 10hz
    stop = 0.0
    # Motion parameters (units as interpreted by the controllers).
    speed = 5
    distance = 100
    left_dir = 1    # +1 forward, -1 reverse
    right_dir = 1
    left_speed = left_dir * speed
    right_speed = right_dir * speed
    print("Let's move the robot")
    # Reference time for the distance-travelled estimate.
    t0 = rospy.Time.now().to_sec()
    current_distance = 0
    # Drive until the estimated distance is covered (or ROS shuts down).
    while not rospy.is_shutdown() and current_distance < distance:
        pub_movfl.publish(left_speed)
        pub_movfr.publish(right_speed)
        pub_movbl.publish(left_speed)
        pub_movbr.publish(right_speed)
        pub_sub(left_speed, right_speed)
        rate.sleep()  # throttle publishing to the configured 10 Hz
        t1 = rospy.Time.now().to_sec()
        current_distance = speed * (t1 - t0)
    # Force the robot to stop once the target distance is reached.
    pub_movfl.publish(stop)
    pub_movfr.publish(stop)
    pub_movbl.publish(stop)
    pub_movbr.publish(stop)
    pub_sub(stop, stop)
if __name__ == '__main__':
    try:
        # Run the motion routine when executed as a script.
        move()
    # A ROSInterruptException on shutdown (e.g. Ctrl-C) is expected, not an error.
    except rospy.ROSInterruptException: pass
| 30.679487 | 107 | 0.692018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 720 | 0.300878 |
50e48167c59bb5236d8e48f7dc69c203da793f28 | 2,638 | py | Python | asdf/commands/extension.py | zanecodes/asdf | c1f6cf915409da5372c47ac725dc922b4bd52f7d | [
"BSD-3-Clause"
] | 66 | 2020-06-24T14:10:32.000Z | 2022-03-18T11:46:11.000Z | asdf/commands/extension.py | zanecodes/asdf | c1f6cf915409da5372c47ac725dc922b4bd52f7d | [
"BSD-3-Clause"
] | 179 | 2020-06-22T18:48:31.000Z | 2022-03-31T22:52:19.000Z | asdf/commands/extension.py | zanecodes/asdf | c1f6cf915409da5372c47ac725dc922b4bd52f7d | [
"BSD-3-Clause"
] | 13 | 2020-07-21T16:11:20.000Z | 2022-03-18T20:41:41.000Z | """
Implementation of command for reporting information about installed extensions.
"""
from .main import Command
from ..entry_points import get_extensions
__all__ = ['find_extensions']
class QueryExtension(Command): # pragma: no cover
    """This class is the plugin implementation for the asdftool runner."""
    @classmethod
    def setup_arguments(cls, subparsers):
        """Register the `extensions` subcommand and its mutually exclusive
        display options (-s/--summary vs -t/--tags-only) on `subparsers`."""
        parser = subparsers.add_parser(
            "extensions", help="Show information about installed extensions",
            description="""Reports information about installed ASDF extensions""")
        display_group = parser.add_mutually_exclusive_group()
        display_group.add_argument(
            "-s", "--summary", action="store_true",
            help="Display only the installed extensions themselves")
        display_group.add_argument(
            "-t", "--tags-only", action="store_true",
            help="Display tags from installed extensions, but no other information")
        # Dispatch parsed args to cls.run (asdftool runner convention).
        parser.set_defaults(func=cls.run)
        return parser
    @classmethod
    def run(cls, args):
        """Entry point invoked by the runner with the parsed arguments."""
        return find_extensions(args.summary, args.tags_only)
def _format_extension(ext):
    """Render a one-line, human-readable summary of an installed extension."""
    # A missing extension URI is shown as the literal placeholder "(none)";
    # a real URI is wrapped in single quotes.
    uri = "(none)" if ext.extension_uri is None else "'{}'".format(ext.extension_uri)
    summary = "Extension URI: {} package: {} ({}) class: {}"
    return summary.format(uri, ext.package_name, ext.package_version, ext.class_name)
def _format_type_name(typ):
    """Return a sortable display name: the string itself, or module.qualname
    for a class object."""
    if not isinstance(typ, str):
        return "{}.{}".format(typ.__module__, typ.__name__)
    return typ
def _print_extension_details(ext, tags_only):
    """Print the tags (and, unless tags_only, the types) of one extension."""
    # Collect tag URIs from both the extension's tag list and its legacy
    # types (which may carry one name or a list of names).
    tag_uris = [t.tag_uri for t in ext.tags]
    for typ in ext.types:
        if isinstance(typ.name, list):
            for name in typ.name:
                tag_uris.append(typ.make_yaml_tag(name))
        elif typ.name is not None:
            tag_uris.append(typ.make_yaml_tag(typ.name))
    if len(tag_uris) > 0:
        print("tags:")
        for tag_uri in sorted(tag_uris):
            print("  - " + tag_uri)
    if not tags_only:
        # Merge types handled by converters with those of legacy extension types.
        types = []
        for converter in ext.converters:
            for typ in converter.types:
                types.append(typ)
        for typ in ext.types:
            types.extend(typ.types)
        if len(types) > 0:
            print("types:")
            for typ in sorted(types, key=_format_type_name):
                print("  - " + _format_type_name((typ)))
def find_extensions(summary, tags_only):
    """Print every installed extension; with summary=True only the one-line
    header per extension, otherwise tag/type details as well (tags_only
    restricts the details to tags)."""
    for ext in get_extensions():
        print(_format_extension(ext))
        if not summary:
            _print_extension_details(ext, tags_only)
            print()
| 30.321839 | 84 | 0.623578 | 947 | 0.358984 | 0 | 0 | 812 | 0.307809 | 0 | 0 | 590 | 0.223654 |
50e4cae3441c9a1ac3fa340462a6470844f434cf | 7,956 | py | Python | workflow/Analytical_return_flow_generator.py | IMMM-SFA/ferencz-tidwell_2022_frontiers | bfd4961cc8376e99f1f56d23ee3c43d94818df4b | [
"BSD-2-Clause"
] | null | null | null | workflow/Analytical_return_flow_generator.py | IMMM-SFA/ferencz-tidwell_2022_frontiers | bfd4961cc8376e99f1f56d23ee3c43d94818df4b | [
"BSD-2-Clause"
] | null | null | null | workflow/Analytical_return_flow_generator.py | IMMM-SFA/ferencz-tidwell_2022_frontiers | bfd4961cc8376e99f1f56d23ee3c43d94818df4b | [
"BSD-2-Clause"
] | 1 | 2021-12-02T22:48:55.000Z | 2021-12-02T22:48:55.000Z | import math
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
# Plot styling and working directory for the analytical-solution comparison.
mpl.rc('axes', titlesize=14)
plt.rc('font', size= 16)
os.chdir('C:/Users/sbferen/Documents/Frontiers_Paper/Analytical_solution_comparison')
data = pd.read_csv("Modflow_statemod_comparison.csv")
params = pd.read_csv("Analytical_params.csv")
# Day-index bin edges for 12 thirty-day months.
bins = np.asarray([np.arange(0,390,30)])
# Fraction of the applied water returning in each month, for the two MODFLOW
# scenarios (columns 2 and 3 of `data`); 1/(8640*30) normalises the sum.
monthly_ret_fraction = np.zeros([12,2])
for i in range(2):
    for j in range(12):
        # NOTE(review): the slice spans 31 rows for a 30-day month (the first
        # day of the following month is included) — confirm intended.
        monthly_ret_fraction[j,i] = 1/(8640*30) * \
        np.sum(data.iloc[bins[0,j]:bins[0,j]+31, i+2])
# NOTE(review): [0:11] sums months 0-10 (11 of the 12 months) — confirm that
# excluding the final month is intended.
frac_slow = np.sum(monthly_ret_fraction[0:11,1])
frac_fast = np.sum(monthly_ret_fraction[0:11,0])
#### Analytical Solution Script ####
mpl.rc('axes', titlesize=14)
plt.rc('font', size= 14)
# Simulation horizon: `length` years of 360-day years, daily resolution.
length = 20
# Parameters
t = np.arange(360*length)+1
# Analytical erfc solution per parameter set (12 rows from `params`):
# `base` is the unbounded-aquifer term erfc(a / (2*sqrt(D*t))); the image
# terms with alternating signs account for the valley (no-flow) boundary at
# distance c.  The original code spelled out six near-identical term_1..term_6
# arrays; they are consolidated into one loop — term n carries sign
# (-1)**(n+1), matching the original (-1)**2 ... (-1)**7 coefficients.
base = np.zeros([12,360*length])
image_sum = np.zeros([12,360*length])
N_IMAGE_TERMS = 6
for j in range(12):
    for i in range(360*length):
        base[j,i] = math.erfc(params.a[j]/(2*(params.D[j]*t[i])**.5))
        for n in range(1, N_IMAGE_TERMS + 1):
            image_sum[j,i] += (-1)**(n+1)*(
                math.erfc((2*n*params.c[j]-params.a[j])/(2*(params.D[j]*t[i])**0.5))
                - math.erfc((2*n*params.c[j]+params.a[j])/(2*(params.D[j]*t[i])**0.5)))
# Negative sign: depletion is expressed as a (negative) return flow.
total = -1*(base+image_sum)
# Convert the cumulative response into the incremental response to a 31-day
# pulse by differencing against a 31-day lag.
return_flow = np.copy(total)
return_flow[:,31:] = return_flow[:,31:]-return_flow[:,0:360*length-31]
# Aggregate the daily response into monthly (30-day) mean return flows.
bins = np.asarray([np.arange(0,360*length+30,30)])
monthly_return_flow = np.zeros([12,12*length])
for i in range(12):
    for j in range(12*length):
        monthly_return_flow[i,j] = np.sum(return_flow[i,bins[0,j]:bins[0,j+1]])/30
five_yr_return_frac= np.sum(monthly_return_flow, axis = 1)
# Figures for analytical solutions
# Daily incremental return flow, 4 parameter sets (colors) per panel.
fig, axs = plt.subplots(nrows=3, ncols=1, figsize=(12, 10))
c = ['blue','green','orange','red']
for i in range(3):
    for j in range(4):
        axs[i].plot(-1*return_flow[i*4+j,:], color = c[j])
    axs[i].grid(axis = 'y',color = 'grey')
    axs[i].set_xbound(0,365)
    axs[i].set_xticks([0,30,60,90,120,150,180,210,240,270,300,330, \
                       360]) #,390,420,450,480,510,540,570,600,630,660, \
                       #690,720])
#axs[0].legend(['3','10','30','100'], title = 'm/d', loc = 'right')
# Return flow with valley boundary
fig, axs = plt.subplots(nrows=3, ncols=1, figsize=(12, 10))
c = ['blue','green','orange','red']
for i in range(3):
    for j in range(4):
        axs[i].plot(-1*total[i*4+j,:], color = c[j])
    axs[i].grid(axis = 'y',color = 'grey')
# Return flow without valley boundary
fig, axs = plt.subplots(nrows=3, ncols=1, figsize=(12, 10))
c = ['blue','green','orange','red']
for i in range(3):
    for j in range(4):
        axs[i].plot(base[i*4+j,:], color = c[j])
    axs[i].grid(axis = 'y',color = 'grey')
    axs[i].set_xbound(0,length*12*30)
    #axs[i].set_xticks([0,30,60,90,120,150,180,210,240,270,300,330, \
    #                   360,390,420,450,480,510,540,570,600,630,660, \
    #                   690,720])
# Monthly lag function
fig, axs = plt.subplots(nrows=3, ncols=1, figsize=(12, 10))
c = ['black','green','orange','red']
for i in range(3):
    for j in range(1):
        axs[i].plot(np.arange(240)+1,-1*monthly_return_flow[i*4+j+1,:], color = c[j])
    axs[i].grid(axis = 'y',color = 'grey')
    axs[i].set_xbound(1,12*1)
    axs[i].set_xticks([1,2,3,4,5,6,7,8,9,10,11,12]) #, \
    #                  360,390,420,450,480,510,540,570,600,630,660, \
    #                  690,720])
# Lag function example
# Top panel: an illustrative irrigation pulse; middle: each month's lagged
# return-flow response (scaled by `coef`); bottom: their superposed sum.
fig, axs = plt.subplots(nrows=3, ncols=1, figsize=(12, 10))
c = ['blue','gold','orange','purple','cyan']
coef = [500,1000, 2000, 1000, 500,]
axs[0].plot(np.arange(24)+1, [0,0,0,500,1000,2000,1000,500,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], color = 'k')
axs[0].set_xticks([2,4,6,8,10,12,14,16,18,20,22,24])
axs[0].set_xticks([1,3,5,7,9,11,13,15,17,19,21,23], minor = True)
axs[0].set_xbound(1,24*1)
axs[0].grid(axis = 'y',color = 'grey')
ret_sum = np.zeros([1,240])
for j in range(5):
    axs[1].plot(np.arange(240)+j+4,-1*coef[j]*monthly_return_flow[1,0:240], color = c[j])
    axs[1].grid(axis = 'y',color = 'grey')
    axs[1].set_xbound(1,24*1)
    axs[1].set_xticks([2,4,6,8,10,12,14,16,18,20,22,24])
    axs[1].set_xticks([1,3,5,7,9,11,13,15,17,19,21,23], minor = True)
    ret_sum[0,j+4:] += -1*coef[j]*monthly_return_flow[1,0:240-4-j]
axs[2].plot(np.arange(240),ret_sum[0,:], color = 'k')
axs[2].set_xticks([2,4,6,8,10,12,14,16,18,20,22,24])
axs[2].set_xticks([1,3,5,7,9,11,13,15,17,19,21,23], minor = True)
axs[2].set_xbound(1,24*1)
axs[2].grid(axis = 'y',color = 'grey')
########## Analytical return flows calculations using superposition #########
a = np.zeros([36,144+120]) # empty array to populate of dimensions unused_V data (minus field headers)
ret_pattern = np.array([1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7,8,8,8, \
                        9,9,9,10,10,10,11,11,11,12,12,12])-1 # return flow pattern code # for each DIV, from dds
# Define fractional return patterns - 12 patterns from 'return_flow' array
bins = pd.read_csv("Modflow_stress_periods.csv")
# Monthly irrigation volume time series for 9 scenarios: 3 field areas x 3
# irrigation-intensity scalings, over 144 months (12 years).
irrigation_ts = pd.DataFrame(np.zeros([9,12*12]))
geom_area = np.array([50000, 100000, 200000]).astype('float')
irr_scaling_coef = [1/3, 2/3, 1]
for i in range(3):
    for k in range(3):
        for j in range(144):
            irrigation_ts.iloc[3*i+k,j] = irr_scaling_coef[k]*geom_area[i] \
            *bins.Irrigation_baseline[j]*(bins.End[j]-bins.Start[j])
# Annual total for year 10 (months 108-119) of each scenario.
annual_irr_total = np.sum(irrigation_ts.iloc[:,9*12:10*12], axis = 1)
irr_pattern = np.array([1,2,3,1,2,3,1,2,3,1,2,3,4,5,6,4,5,6,4,5,6,4,5,6,7,8,9, \
                        7,8,9,7,8,9,7,8,9])-1
# Superpose a lagged 120-month return-flow response for every month's
# irrigation, per scenario.  NOTE(review): monthly_return_flow has 12*length
# columns (240 with length=20 above), but ret.shape=(1,120) assumes 120 —
# this only works when length==10; confirm which horizon is intended.
for i in range (36): # iterate across scenarios (rows)
    for j in range(144): # iterate through time (columns) # rows of unused_V array
        b = np.zeros([1,144+120]) # number of columns in irrigation array
        # NOTE(review): local `c` shadows the plotting color list defined above.
        ret = (irrigation_ts.iloc[irr_pattern[i],j] * -1 * \
               monthly_return_flow[ret_pattern[i],:]) # return time series for month j
        ret.shape = (1,120)
        b[0,j:(j+120)] = ret # assign returns from month j to correct indexes in time
        c = np.copy(a[i,:]) # copy current DIV to add new returns to
        c.shape = (1,144+120)
        c = c + b # add returns to temp return time series c
        c = c.flatten()
        a[i,:] = c # add c to master array of return flows
superposed_returns = pd.DataFrame(np.copy(a))
superposed_returns.to_csv("Superposed_return_flows.csv")
50e51c505990ca8f05a03573ffb38d38c07be03c | 3,776 | py | Python | data_toy/bnmtf/generate_bnmtf.py | ThomasBrouwer/BNMTF | 34df0c3cebc5e67a5e39762b9305b75d73a2a0e0 | [
"Apache-2.0"
] | 16 | 2017-04-19T12:04:47.000Z | 2021-12-03T00:50:43.000Z | data_toy/bnmtf/generate_bnmtf.py | ThomasBrouwer/BNMTF | 34df0c3cebc5e67a5e39762b9305b75d73a2a0e0 | [
"Apache-2.0"
] | 1 | 2017-04-20T11:26:16.000Z | 2017-04-20T11:26:16.000Z | data_toy/bnmtf/generate_bnmtf.py | ThomasBrouwer/BNMTF | 34df0c3cebc5e67a5e39762b9305b75d73a2a0e0 | [
"Apache-2.0"
] | 8 | 2015-12-15T05:29:43.000Z | 2019-06-05T03:14:11.000Z | """
Generate a toy dataset for the matrix tri-factorisation case, and store it.
We use dimensions 100 by 50 for the dataset, 10 row clusters, and 5 column clusters.
As the prior for F, G we take value 1 for all entries (so exp 1), and for S value 2 (so exp 1/2).
As a result, each value in R has a value of around 25, and a variance of .
For contrast, the Sanger dataset is as follows:
Shape: (622,139). Fraction observed: 0.811307224317.
Mean: 11.9726909789. Variance: 34.1503768785. Maximum: 23.5959612058.
We add Gaussian noise of precision tau = 1 (prior for gamma: alpha=1,beta=1).
(Simply using the expectation of our Gamma distribution over tau)
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../"
sys.path.append(project_location)
from BNMTF.code.models.distributions.exponential import exponential_draw
from BNMTF.code.models.distributions.normal import normal_draw
from BNMTF.code.cross_validation.mask import generate_M
import numpy, itertools, matplotlib.pyplot as plt
def generate_dataset(I,J,K,L,lambdaF,lambdaS,lambdaG,tau):
# Generate U, V
F = numpy.zeros((I,K))
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
F[i,k] = exponential_draw(lambdaF[i,k])
S = numpy.zeros((K,L))
for k,l in itertools.product(xrange(0,K),xrange(0,L)):
S[k,l] = exponential_draw(lambdaS[k,l])
G = numpy.zeros((J,L))
for j,l in itertools.product(xrange(0,J),xrange(0,L)):
G[j,l] = exponential_draw(lambdaG[j,l])
# Generate R
true_R = numpy.dot(F,numpy.dot(S,G.T))
R = add_noise(true_R,tau)
return (F,S,G,tau,true_R,R)
def add_noise(true_R,tau):
if numpy.isinf(tau):
return numpy.copy(true_R)
(I,J) = true_R.shape
R = numpy.zeros((I,J))
for i,j in itertools.product(xrange(0,I),xrange(0,J)):
R[i,j] = normal_draw(true_R[i,j],tau)
return R
def try_generate_M(I,J,fraction_unknown,attempts):
for attempt in range(1,attempts+1):
try:
M = generate_M(I,J,fraction_unknown)
sums_columns = M.sum(axis=0)
sums_rows = M.sum(axis=1)
for i,c in enumerate(sums_rows):
assert c != 0, "Fully unobserved row in M, row %s. Fraction %s." % (i,fraction_unknown)
for j,c in enumerate(sums_columns):
assert c != 0, "Fully unobserved column in M, column %s. Fraction %s." % (j,fraction_unknown)
print "Took %s attempts to generate M." % attempt
return M
except AssertionError:
pass
raise Exception("Tried to generate M %s times, with I=%s, J=%s, fraction=%s, but failed." % (attempts,I,J,fraction_unknown))
##########
if __name__ == "__main__":
output_folder = project_location+"BNMTF/data_toy/bnmtf/"
I,J,K,L = 100, 80, 5, 5
fraction_unknown = 0.1
alpha, beta = 1., 1.
lambdaF = numpy.ones((I,K))
lambdaS = numpy.ones((K,L))
lambdaG = numpy.ones((J,L))
tau = alpha / beta
(F,S,G,tau,true_R,R) = generate_dataset(I,J,K,L,lambdaF,lambdaS,lambdaG,tau)
# Try to generate M
M = try_generate_M(I,J,fraction_unknown,attempts=1000)
# Store all matrices in text files
numpy.savetxt(open(output_folder+"F.txt",'w'),F)
numpy.savetxt(open(output_folder+"S.txt",'w'),S)
numpy.savetxt(open(output_folder+"G.txt",'w'),G)
numpy.savetxt(open(output_folder+"R_true.txt",'w'),true_R)
numpy.savetxt(open(output_folder+"R.txt",'w'),R)
numpy.savetxt(open(output_folder+"M.txt",'w'),M)
print "Mean R: %s. Variance R: %s. Min R: %s. Max R: %s." % (numpy.mean(R),numpy.var(R),R.min(),R.max())
fig = plt.figure()
plt.hist(R.flatten(),bins=range(0,int(R.max())+1))
plt.show() | 37.019608 | 128 | 0.640625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,131 | 0.299523 |
50e5a28d8d3dff6cd20e630e0c995cafb0c49a98 | 11,544 | py | Python | bot.py | areapergrimm/therejectbot | 308a948d0f7fb38edbec55fe55aac62016c59bb7 | [
"MIT"
] | null | null | null | bot.py | areapergrimm/therejectbot | 308a948d0f7fb38edbec55fe55aac62016c59bb7 | [
"MIT"
] | null | null | null | bot.py | areapergrimm/therejectbot | 308a948d0f7fb38edbec55fe55aac62016c59bb7 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import random
import codenames
import trrutils
import asyncio
description = "The Reject Bot."
PREFIX = "."
# Commands only respond in channels whose name contains this substring.
CHANNELNAME = "spam"
bot = commands.Bot(command_prefix=PREFIX, description=description)
TOKEN = open('TOKEN.txt', 'r').read()
# Kept open for the bot's lifetime; permCheck() re-seeks it on every call.
PERMLIST = open('PERMLIST.txt', 'r')
# --- Mutable Codenames game state (reset by resetGame()) ---
gameBoard = None            # rendered board string shown to players
lobbyDict = {}              # Member -> team preference ('b'/'i'/'r', capitals = codemaster)
mastersLink = False         # whether codemaster DMs are relayed
gameProgress = False        # True while a game is running
teamLists = []              # [bold team members, italic team members]; index 0 is the codemaster
mastersList = []            # the two codemasters: [bold master, italic master]
teamTurn = False            # False = Bold team's turn, True = Italic (see teamDict)
mastersTurn = False         # toggles between codemaster hint phase and team guess phase
hintNumber = 0              # remaining guesses for the current hint
teamDict = {False:"B", True:"I"}
remainingWords = [8,7]      # unrevealed codewords: [Bold, Italic]
def permCheck(authorID):
    """Return True if authorID appears as a line of the PERMLIST file."""
    # The permissions file stays open for the bot's lifetime; rewind and
    # rescan it on every check so edits made mid-session are not missed.
    PERMLIST.seek(0)
    return any(str(authorID) == entry for entry in PERMLIST)
def checkTurn():
    """Return a status string describing whose turn it is and the word counts.

    BUG FIX: the original referenced an undefined name `teamList` (the module
    defines `teamLists`), raising NameError whenever a game was in progress,
    and concatenated Member objects / lists directly onto strings (TypeError).
    Both references now go through str().
    """
    if gameProgress == False:
        return "There is no game in progress!"
    strAnnounce = "Current Turn: "
    if teamTurn == False:
        strAnnounce += "**Bold** "
    else:
        strAnnounce += "__*Italic*__ "
    if mastersTurn == False:
        strAnnounce += "codemaster, " + str(teamLists[int(teamTurn)][0]) + ". Use .hint [hint word] [number of guesses] to register your hint. "
    else:
        strAnnounce += "team, " + str(teamLists[int(teamTurn)][1:]) + ". Use .guess [word] to register your guess, or .passturn to end your turn. "
    strAnnounce += "Words remaining: Bold ({}) - ({}) Italics".format(remainingWords[0],remainingWords[1])
    return strAnnounce
def checkGuess(guess):
    """Classify a guessed word against the current board.

    Returns "Correct" (guesser's team), "Opponent", "Incorrect" (neutral 'O'),
    "Death" (assassin 'X'), or "Invalid" (word not on the board).

    NOTE(review): this reads the module-level `gameDict`, but start() assigns
    gameDict as a local (no `global` declaration), so as written this raises
    NameError — confirm gameDict is made global when a game starts.
    """
    if guess in gameDict:
        # Upper-casing marks the word as revealed (displayBoard presumably
        # renders revealed words differently — confirm).
        gameDict[guess] = gameDict[guess].upper()
        if teamDict[teamTurn] == gameDict[guess]:
            return "Correct"
        elif teamDict[not teamTurn] == gameDict[guess]:
            return "Opponent"
        elif gameDict[guess] == "O":
            return "Incorrect"
        elif gameDict[guess] == "X":
            return "Death"
    return "Invalid"
def resetGame():
    """Restore every module-level game-state variable to its initial value."""
    global gameBoard
    global lobbyDict
    global mastersLink
    global gameProgress
    global teamLists
    global mastersList
    global teamTurn
    global mastersTurn
    global hintNumber
    global teamDict
    global remainingWords
    gameBoard = None
    lobbyDict = {}
    mastersLink = False
    gameProgress = False
    teamLists = []
    mastersList = []
    teamTurn = False
    mastersTurn = False
    hintNumber = 0
    teamDict = {False:"B", True:"I"}
    remainingWords = [8,7]
@bot.event
async def on_ready():
    """Log the bot's identity once the Discord connection is established."""
    print('Logged in as')
    print(bot.user.name)
    print(bot.user.id)
    print('------')
@bot.command(pass_context=True)
async def exit(ctx):
    """Shut the bot down (permitted users only, in the designated channel).

    BUG FIX: the original ended with a bare `exit` expression, which is a
    no-op (it neither calls exit() nor stops the bot); the bot now actually
    disconnects via Client.logout().
    """
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    if permCheck(ctx.message.author.id) == True:
        await bot.say("Bye!")
        await bot.logout()
@bot.command(pass_context=True)
async def forcereset(ctx):
    """Reset all game state (permitted users only)."""
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    if permCheck(ctx.message.author.id) == True:
        resetGame()
        await bot.say("Game reset.")
'''codewords'''
@bot.command(pass_context=True)
async def refresh(ctx):
    """Rebuild the codewords file (applies black/whitelist changes); permitted users only."""
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    if permCheck(ctx.message.author.id) == True:
        tally = codenames.cleanup()
        await bot.say("The codewords file has been refreshed, with " + str(tally) + " words.")
@bot.command(pass_context=True)
async def blacklist(ctx, arg):
    """Add `arg` to the codeword blacklist (takes effect after .refresh); permitted users only."""
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    if permCheck(ctx.message.author.id) == True:
        codenames.blacklist(arg)
        await bot.say(arg + " has been added to the blacklist. Use !refreshcodewords to remove blacklisted words from the game.")
@bot.command(pass_context=True)
async def whitelist(ctx, arg):
    """Add `arg` to the codeword whitelist (takes effect after .refresh); permitted users only."""
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    if permCheck(ctx.message.author.id) == True:
        codenames.whitelist(arg)
        await bot.say(arg + " has been added to the whitelist. Use !refreshcodewords to add whitelisted words into the game.")
@bot.command(pass_context=True)
async def join(ctx, arg=None):
    """Join the Codenames lobby with a team preference.

    arg: 'b'/'i' for a team, 'r' for random; capital letters additionally
    preference being that team's codemaster.

    BUG FIX: when a game was already in progress the original only sent a
    message but then fell through and still recorded/changed the lobby
    entry; it now returns immediately.
    """
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    authorMember = ctx.message.author
    if gameProgress == True:
        await bot.say("There is a game already in progress! Joining in the middle of the game is currently not implemented.")
        return
    if (arg in ["b", "i", "B", "I", "r", "R"]) == False:
        await bot.say(".join [team] requires a team preference: 'b', 'i', 'r'. r is for random assignment. Use capital letters ('B' or 'I') to preference being that team's codemaster.")
    elif authorMember in lobbyDict:
        lobbyDict[authorMember] = arg
        await bot.say(str(authorMember) + " has changed their team preference.")
    else:
        lobbyDict[authorMember] = arg
        await bot.say(str(authorMember) + " has joined the Codenames lobby.")
@bot.command(pass_context=True)
async def leave(ctx):
    """Remove the author from the Codenames lobby.

    NOTE(review): unlike the other commands this one has no CHANNELNAME
    gate, so it responds in any channel — confirm whether intentional.
    """
    authorMember = ctx.message.author
    if authorMember not in lobbyDict:
        await bot.say("You are not in the Codenames lobby.")
        return
    lobbyDict.pop(authorMember, None)
    await bot.say(str(authorMember) + " has left the Codenames lobby.")
@bot.command(pass_context=True)
async def lobby(ctx):
    """List everyone in the Codenames lobby with their team preference."""
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    # One "name (pref) " chunk per lobby member, concatenated in order.
    roster = "".join(
        "{} ({}) ".format(member, pref) for member, pref in lobbyDict.items()
    )
    if roster == "":
        await bot.say("No-one in the lobby yet. Use '.join b' or '.join i' to join a team, or '.join r' to be randomly assigned. Use capital letters to preference being a codemaster.")
    else:
        await bot.say(roster)
@bot.command(pass_context=True)
async def start(ctx):
    """Start a game of Codenames from the current lobby (needs 4+ players,
    at least two per team).

    BUG FIX: teamLists, gameDict, gameBoard and mastersLink are module-level
    game state read by checkTurn()/checkGuess() and the other commands, but
    the original assigned them as locals (only gameProgress was declared
    global), so the shared state was never initialised.
    """
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    global gameProgress, teamLists, gameDict, gameBoard, mastersLink
    if gameProgress == True:
        await bot.say("A game is running already!")
        return
    if ctx.message.author not in lobbyDict:
        await bot.say("You have not joined the Codenames lobby yet! Use '.join b' or '.join i' to join a team, or '.join r' to be randomly assigned. Use capital letters to preference being a codemaster.")
        return
    if len(lobbyDict) < 4:
        await bot.say("At least four players must join the lobby. Currently in the lobby: " + str(len(lobbyDict)) + ".")
        return
    teamLists = codenames.generateTeams(lobbyDict)
    if len(teamLists[0]) < 2:
        await bot.say("There are not enough players in **Bold** team! Use .join b to switch to or join them, or .join r to let the bot assign you.")
        return
    if len(teamLists[1]) < 2:
        await bot.say("There are not enough players in __*Italic*__ team! Use .join i to switch to or join them, or .join r to let the bot assign you.")
        return
    gameProgress = True
    gameDict = codenames.initGame()
    boardDrawn = codenames.drawBoard(gameDict)
    masterBoard = codenames.displayBoard(boardDrawn)
    # First member of each team is its codemaster.
    mastersList.append(teamLists[0][0])
    mastersList.append(teamLists[1][0])
    for masterUser in mastersList:
        await bot.send_message(masterUser, "New game started! Good luck, codemaster. Private messages to this bot will automatically be sent to your opposing codemaster <This functionality still in development>.")
        await bot.send_message(masterUser, masterBoard)
    mastersLink = True
    announceStr = ("A game of Codenames has started. Going first, {} leads the **bold** team: {}. {} leads the __*italic*__ team: {}.").format(mastersList[0],teamLists[0][1:],mastersList[1],teamLists[1][1:])
    await bot.say(announceStr)
    gameBoard = codenames.displayBoard(gameDict)
    await bot.say(gameBoard)
    await bot.say(checkTurn())
@bot.command(pass_context=True)
async def hint(ctx, arg, param):
    """Register the codemaster's hint: .hint [hint word] [number of guesses].

    BUG FIX: hintNumber and mastersTurn are module-level state; without
    `global`, `mastersTurn = not mastersTurn` raised UnboundLocalError.
    `param` arrives from discord as a string and is now cast to int, and the
    codemaster comparison no longer indexes into a Member object
    (mastersList holds one Member per team — see start()).
    """
    global hintNumber, mastersTurn
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    if (mastersTurn == True):
        # NOTE(review): checkTurn() announces the codemaster phase when
        # mastersTurn is False, while this handler requires True — one of the
        # two conventions is inverted; confirm the intended flow.
        if ctx.message.author == mastersList[int(teamTurn)]:
            hintNumber = int(param) + 1  # +1 grants the bonus guess
            mastersTurn = not mastersTurn
            await bot.say("The hint is " + arg + " for " + str(param) + " guesses, and 1 bonus guess. Use .guess to guess, and .passturn to end guessing.")
        else:
            await bot.say(checkTurn())
@bot.command(pass_context=True)
async def guess(ctx, arg):
    """Register a team member's guess and announce the outcome.

    NOTE(review): several latent defects in this handler — hintNumber,
    mastersTurn and teamTurn are assigned without `global` (the augmented /
    read-then-assign uses raise UnboundLocalError); `teamlist`/`teamList`
    are undefined names (module defines `teamLists`); Member objects are
    concatenated onto strings; and `progressBoard` is called bare instead of
    as codenames.progressBoard.  Fixing these requires coordinated changes
    across the game-state commands.
    """
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    if (mastersTurn == False):
        # NOTE(review): mastersList holds only the two codemasters; teammate
        # membership presumably should be checked against teamLists — confirm.
        if ctx.message.author in mastersList[int(teamTurn)][1:]:
            response = checkGuess(arg)
            if response == "Correct":
                hintNumber += -1
                remainingWords[int(teamTurn)] += -1
                # All of the guessing team's words revealed: they win.
                if remainingWords[int(teamTurn)] == 0:
                    await bot.say("Game over! " + ctx.message.author + "has revealed their last codeword. " + teamlist[int(teamTurn)][0] + "'s team has won the game!")
                    resetGame()
                # Out of guesses for this hint: hand the turn over.
                if hintNumber == 0:
                    await bot.say(arg + " is one of your words! You have no more guesses for this turn.")
                    mastersTurn = not mastersTurn
                    teamTurn = not teamTurn
                    newBoard = codenames.progressBoard(gameDict)
                    await bot.say(checkTurn())
                    await bot.say(newBoard)
                else:
                    await bot.say(arg + " is one of your words! Remaining guesses: " + str(hintNumber))
            elif response == "Incorrect":
                # Neutral word: turn passes to the other team.
                await bot.say(arg + " is a neutral word! Your turn has ended.")
                mastersTurn = not mastersTurn
                teamTurn = not teamTurn
                await bot.say(checkTurn())
            elif response == "Opponent":
                # Revealed an opposing word: it counts for them, turn ends.
                remainingWords[int(not teamTurn)] += -1
                if remainingWords[int(not teamTurn)] == 0:
                    await bot.say("Game over! " + ctx.message.author + "has revealed their opponents' last codeword. " + teamList[int(not teamTurn)][0] + "'s team has won the game!")
                    resetGame()
                await bot.say(arg + " is one of your opponents' words! Your turn has ended.")
                mastersTurn = not mastersTurn
                teamTurn = not teamTurn
                await bot.say(checkTurn())
            elif response == "Death":
                # The assassin word: instant loss for the guessing team.
                newBoard = codenames.progressBoard(gameDict)
                await bot.say(progressBoard(newBoard))
                await bot.say("Game over!" + arg + " is the double agent!" + teamList[teamTurn][0] + "'s team has lost!")
                resetGame()
            else:
                await bot.say(arg + " is not a valid guess! Try again.")
        else:
            await bot.say(checkTurn())
@bot.command(pass_context=True)
async def passturn(ctx, arg):
    """Voluntarily end the guessing phase, handing the turn over.

    BUG FIX: mastersTurn/teamTurn are module-level game state reassigned
    here; without ``global`` Python treats them as locals and the read in
    the condition below raises UnboundLocalError.
    """
    global mastersTurn, teamTurn
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    if (mastersTurn == False):
        if ctx.message.author in mastersList[int(teamTurn)][1:]:
            mastersTurn = not mastersTurn
            teamTurn = not teamTurn
            await bot.say(str(ctx.message.author) + " has passed the turn.")
            await bot.say(checkTurn())
@bot.command(pass_context=True)
async def turn(ctx):
    """Report whose turn it currently is.

    BUG FIX: the handler was declared with no parameters, but with
    pass_context=True discord.py invokes it with the context and the body
    reads ``ctx`` -- so .turn always failed. Accept ``ctx`` explicitly.
    """
    if (CHANNELNAME in ctx.message.channel.name) == False:
        return
    await bot.say(checkTurn())
'''end codewords'''
# Hand control to discord.py: runs the bot's event loop until the process is
# stopped. TOKEN must be defined earlier in this file (not visible in this chunk).
bot.run(TOKEN)
| 40.083333 | 214 | 0.610707 | 0 | 0 | 0 | 0 | 8,991 | 0.778846 | 8,550 | 0.740644 | 2,763 | 0.239345 |
50e70782136080b3f977a391e6c43c7f88a423c7 | 13,391 | py | Python | kaggle_ensemble/prob_extend_kaggle.py | shayan223/Reverse_GoL | cd71df7b270eabbd57016f0cc72d19d64a9c60b8 | [
"MIT"
] | null | null | null | kaggle_ensemble/prob_extend_kaggle.py | shayan223/Reverse_GoL | cd71df7b270eabbd57016f0cc72d19d64a9c60b8 | [
"MIT"
] | null | null | null | kaggle_ensemble/prob_extend_kaggle.py | shayan223/Reverse_GoL | cd71df7b270eabbd57016f0cc72d19d64a9c60b8 | [
"MIT"
] | null | null | null | '''Code found on this kaggle submission
https://www.kaggle.com/yakuben/crgl-probability-extension-true-target-problem'''
import pandas as pd
import torch
import numpy as np
from torch import FloatTensor
from torch.utils.data import IterableDataset, DataLoader
import torch.nn as nn
from torch.nn import BCELoss
from torch.optim import Adam
from tqdm.notebook import trange, tqdm
# Release any cached CUDA allocations before building models (no-op on CPU-only).
torch.cuda.empty_cache()
# The 8 (row, col) roll offsets of a cell's Moore neighbourhood on a torus.
neighbors_roll_axes = [(i,j) for i in range(-1,2) for j in range(-1, 2) if not (i==0 and j==0)]
def binary_forward_iteration(grid, delta=1):
    """Advance a (batch, 1, H, W) Game-of-Life board `delta` exact steps.

    The board wraps at the edges (torch.roll is circular); the result is a
    bool tensor of the same shape.
    """
    shifts = [(dy, dx) for dy in (-1, 0, 1) for dx in (-1, 0, 1)
              if (dy, dx) != (0, 0)]
    for _ in range(delta):
        rolled = [torch.roll(torch.roll(grid, dy, 2), dx, 3) for dy, dx in shifts]
        alive_neighbors = torch.stack(rolled).sum(dim=0)
        grid = (alive_neighbors == 3) | ((grid == 1) & (alive_neighbors == 2))
    return grid
# NOTE: duplicate of the identical neighbour-offset definition above.
neighbors_roll_axes = [(i,j) for i in range(-1,2) for j in range(-1, 2) if not (i==0 and j==0)]
# All C(8,2)=28 unordered pairs of neighbour indices (which two are alive).
combination_alive2 = [(i,j) for i in range(8) for j in range(i)]
# For each alive-pair: [alive i, alive j] + [8+k for the six dead neighbours].
# Indices >= 8 address the "dead" half of a torch.cat([p, 1 - p]) tensor.
combination_alive2_dead6 = [([i,j]+[8+k for k in range(8) if (k!=i and k!=j)]) for i,j in combination_alive2]
# All C(8,3)=56 unordered triples of alive neighbour indices.
combination_alive3 = [(i,j,k) for i in range(8) for j in range(i) for k in range(j)]
# For each alive-triple: [3 alive indices] + [8+l for the five dead ones].
combination_alive3_dead5 = [([i,j,k]+[8+l for l in range(8) if (l!=i and l!=j and l!=k)]) for i,j,k in combination_alive3]
def get_neighbors(grid):
    """Stack the 8 circularly-shifted copies of `grid` on a new axis 0, so
    slice k holds, at every cell position, the value of that cell's k-th
    Moore neighbour (torus topology)."""
    shifts = [(dy, dx) for dy in (-1, 0, 1) for dx in (-1, 0, 1)
              if (dy, dx) != (0, 0)]
    rolled = [torch.roll(torch.roll(grid, dy, 2), dx, 3) for dy, dx in shifts]
    return torch.stack(rolled)
def n_neigbors_nearby_prob(neighbors, neighbor_nearby=2):
    """Probability that exactly `neighbor_nearby` (2 or 3) of the eight
    neighbours are alive, given independent per-neighbour alive
    probabilities in `neighbors` (leading axis of size 8)."""
    combos = (combination_alive2_dead6 if neighbor_nearby == 2
              else combination_alive3_dead5)
    # Rows 0-7 hold P(alive); rows 8-15 hold P(dead) of the same neighbour.
    alive_and_dead = torch.cat([neighbors, 1 - neighbors])
    terms = [alive_and_dead[c].prod(dim=0) for c in combos]
    return torch.stack(terms).sum(dim=0)
def probabilistic_forward_iteration_autograd(grid):
    """One probabilistic Life step under independence assumptions:
    P(alive next) = P(3 live neighbours) + P(alive now) * P(2 live neighbours).
    Gradients flow through normal autograd tracing."""
    neighbors = get_neighbors(grid)
    p_two = n_neigbors_nearby_prob(neighbors, 2)
    p_three = n_neigbors_nearby_prob(neighbors, 3)
    return p_three + grid * p_two
# Per-cell lookup tables for the hand-written backward pass: for a fixed
# neighbour index `cell`, list the index combinations (into cat([p, 1-p]),
# dead = 8+idx) of the "exactly 2 (or 3) neighbours alive" event in which
# `cell` participates as alive vs. as dead.  The differentiated factor
# (cell's own probability) is dropped from each combination.
neighbor_alive2_cell_alive = {}
neighbor_alive2_cell_dead = {}
neighbor_alive3_cell_alive = {}
neighbor_alive3_cell_dead = {}
for cell in range(8):
    # Pairs (cell, j): `cell` alive together with one other alive neighbour j.
    neighbor_alive2_cell_alive[cell] = [(cell,j) for j in range(8) if j!=cell]
    # Expand to [alive j] + [8+k for the six dead]; i (== cell) is dropped.
    neighbor_alive2_cell_alive[cell] = [([j]+[8+k for k in range(8) if (k!=i and k!=j)]) for i,j in neighbor_alive2_cell_alive[cell]]
    # Pairs not involving `cell`: `cell` contributes as a dead factor.
    neighbor_alive2_cell_dead[cell] = [(i,j) for i in range(8) for j in range(i) if i!=cell and j!=cell]
    neighbor_alive2_cell_dead[cell] = [([i,j]+[8+k for k in range(8) if (k!=i and k!=j and k!=cell)]) for i,j in neighbor_alive2_cell_dead[cell]]
    # Same construction for the "exactly 3 alive" event.
    neighbor_alive3_cell_alive[cell] = [(i,j,cell) for i in range(8) for j in range(i) if i!=cell and j!=cell]
    neighbor_alive3_cell_alive[cell] = [([i,j]+[8+l for l in range(8) if (l!=i and l!=j and l!=k)]) for i,j,k in neighbor_alive3_cell_alive[cell]]
    neighbor_alive3_cell_dead[cell] = [(i,j,k) for i in range(8) for j in range(i) for k in range(j) if i!=cell and j!=cell and k!=cell]
    neighbor_alive3_cell_dead[cell] = [([i,j,k]+[8+l for l in range(8) if (l!=i and l!=j and l!=k and l!=cell)]) for i,j,k in neighbor_alive3_cell_dead[cell]]
def get_neighbors_backward(grad_output):
    """Adjoint of get_neighbors: un-shift each of the eight gradient slabs
    (leading axis of `grad_output`) and accumulate them back onto the grid."""
    shifts = [(dy, dx) for dy in (-1, 0, 1) for dx in (-1, 0, 1)
              if (dy, dx) != (0, 0)]
    total = torch.zeros_like(grad_output[0])
    for k, (dy, dx) in enumerate(shifts):
        total = total + torch.roll(torch.roll(grad_output[k], -dy, 2), -dx, 3)
    return total
def n_neigbors_nearby_prob_backward(grad_output, neighbors, neighbor_nearby=2):
    # Manual gradient of n_neigbors_nearby_prob w.r.t. each of the eight
    # per-neighbour alive probabilities.  For every neighbour `cell`, the
    # derivative is (sum of products where cell is alive) minus (sum of
    # products where cell is dead), using the lookup tables built above.
    if neighbor_nearby==2:
        combination_cell_alive = neighbor_alive2_cell_alive
        combination_cell_dead = neighbor_alive2_cell_dead
    else:
        combination_cell_alive = neighbor_alive3_cell_alive
        combination_cell_dead = neighbor_alive3_cell_dead
    # Rows 0-7: P(alive); rows 8-15: P(dead) for the matching neighbour.
    neighbors = torch.cat([neighbors, 1 - neighbors])
    coef = []
    for cell in range(8):
        cell_live_coef = torch.stack([neighbors[l].prod(dim=0) for l in combination_cell_alive[cell]]).sum(dim=0)
        cell_dead_coef = torch.stack([neighbors[d].prod(dim=0) for d in combination_cell_dead[cell]]).sum(dim=0)
        coef.append(cell_live_coef-cell_dead_coef)
    # Shape (8, ...): one partial derivative map per neighbour direction.
    coef = torch.stack(coef)
    return coef*grad_output
class ProbabilisticForwardIteration(torch.autograd.Function):
    """Memory-lighter custom-autograd version of one probabilistic Life step.

    Instead of letting autograd record the large combinatorial forward
    graph, only the input grid is stashed on ``ctx`` and the gradient is
    reconstructed analytically in backward().
    """
    @staticmethod
    def forward(ctx, grid, delta=1):
        # `delta` is accepted for signature parity only; one step per apply().
        ctx.grid = grid
        return probabilistic_forward_iteration_autograd(grid)
    @staticmethod
    def backward(ctx, grad_out):
        # out = p3 + grid * p2, so d out/d grid has a direct term
        # p2 * grad_out plus chain-rule terms through each neighbour map.
        grid = ctx.grid
        neighbors = get_neighbors(grid)
        neighbors_p2 = n_neigbors_nearby_prob(neighbors, neighbor_nearby=2)
        grad_n2_out = grad_out*grid
        grad_n3_out = grad_out
        grad_n2_inp = n_neigbors_nearby_prob_backward(grad_n2_out, neighbors, neighbor_nearby=2)
        grad_n3_inp = n_neigbors_nearby_prob_backward(grad_n3_out, neighbors, neighbor_nearby=3)
        grad_neighbors_out = grad_n2_inp + grad_n3_inp
        grad_neighbors_inp = get_neighbors_backward(grad_neighbors_out)
        grad_inp = grad_neighbors_inp + neighbors_p2*grad_out
        # Second slot is the gradient for the non-differentiable `delta` arg.
        return grad_inp, None
def probabilistic_forward_iteration(grid, delta=1, autograd=True):
    """Apply `delta` probabilistic Life steps to `grid`.

    With autograd=False the custom Function (hand-written backward) is
    used: slower, but it keeps less of the graph in memory.
    """
    step = (probabilistic_forward_iteration_autograd if autograd
            else ProbabilisticForwardIteration.apply)
    for _ in range(delta):
        grid = step(grid)
    return grid
# Third identical redefinition of the neighbour roll offsets (harmless).
neighbors_roll_axes = [(i,j) for i in range(-1,2) for j in range(-1, 2) if not (i==0 and j==0)]
def generate_random_start_batch(batch_size):
    """Sample `batch_size` uniformly random boolean boards of shape
    (batch_size, 1, 25, 25)."""
    shape = (batch_size, 1, 25, 25)
    return np.random.randint(low=0, high=2, size=shape, dtype=bool)
def straight_iter_binary_numpy(grid, delta=1):
    """NumPy twin of binary_forward_iteration: advance a boolean
    (batch, 1, H, W) Life board `delta` steps on a torus."""
    shifts = [(dy, dx) for dy in (-1, 0, 1) for dx in (-1, 0, 1)
              if (dy, dx) != (0, 0)]
    for _ in range(delta):
        rolled = [np.roll(np.roll(grid, dy, 2), dx, 3) for dy, dx in shifts]
        alive_neighbors = np.stack(rolled).sum(axis=0)
        grid = (alive_neighbors == 3) | ((grid == 1) & (alive_neighbors == 2))
    return grid
class DataStream():
    """Infinite generator of Game-of-Life training batches.

    Every iteration draws a fresh random board batch, evolves it for
    5 + delta binary steps, and yields (stop_state_as_float, delta).
    """
    def __init__(self, delta=None, batch_size=128, drop_empty=False, drop_ch_dim=False):
        # delta=None means "sample delta uniformly from 1..5 per batch".
        # NOTE(review): the truthiness test below would also treat delta=0
        # as unset -- confirm 0 is never a meaningful delta here.
        self.init_delta = delta
        self.batch_size = batch_size
        self.drop_empty= drop_empty
        self.drop_ch_dim = drop_ch_dim
    def __iter__(self):
        while True:
            x = generate_random_start_batch(self.batch_size)
            delta = self.init_delta if self.init_delta else np.random.randint(1,6)
            # 5 extra burn-in steps before the delta steps of interest.
            x = straight_iter_binary_numpy(x, 5+delta)
            if self.drop_empty:
                # Keep only boards that still contain at least one live cell.
                x = x[x.any(axis=2).any(axis=2).reshape(-1)]
            if self.drop_ch_dim:
                x = x[:,0,:,:]
            yield x.astype(float), delta
class DataStreamTorch(IterableDataset):
    """IterableDataset adapter over DataStream: converts each NumPy batch
    to a FloatTensor while passing the accompanying delta through."""

    def __init__(self, delta=None, batch_size=128, drop_empty=False, drop_ch_dim=False):
        self.ds = DataStream(delta, batch_size, drop_empty, drop_ch_dim)

    def __iter__(self):
        return ((FloatTensor(batch), delta) for batch, delta in self.ds)
def pass_collate(batch):
    """Pass-through collate_fn: with batch_size=1 the DataLoader hands us a
    singleton list wrapping an already-batched sample; return that sample."""
    sample = batch[0]
    return sample
def get_datastream_loader(delta=None, batch_size=128, drop_empty=False, drop_ch_dim=False, num_workers=0):
    """Build a DataLoader over the infinite DataStreamTorch source.

    batch_size=1 plus the pass-through collate keeps the stream's own
    pre-batched samples intact.
    """
    return DataLoader(
        DataStreamTorch(delta, batch_size, drop_empty, drop_ch_dim),
        batch_size=1,
        collate_fn=pass_collate,
        num_workers=num_workers,
    )
class Model(nn.Module):
    """Plain 4-layer circular-padded CNN mapping a (B, 1, H, W) board to a
    per-cell probability map of the same shape.

    NOTE: this class is shadowed by the later ``Model`` definition in this
    file, so it is effectively dead code.
    """

    def __init__(self):
        super().__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(1, 512, 7, padding=3, padding_mode='circular')
        self.conv2 = nn.Conv2d(512, 256, 5, padding=2, padding_mode='circular')
        self.conv3 = nn.Conv2d(256, 256, 3, padding=1, padding_mode='circular')
        self.conv4 = nn.Conv2d(256, 1, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = self.relu(conv(x))
        return self.sigmoid(self.conv4(x))
class FixPredictBlock(nn.Module):
    """Refinement CNN: given the target stop state and the current start-state
    guess, emit an improved start-state probability map.

    The 5 input channels are: target stop state, current soft guess, its
    binarization, the probabilistic roll-forward of the guess, and the
    binary roll-forward of the binarized guess.
    """
    def __init__(self):
        super().__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(5, 256, 5, padding=2, padding_mode='circular')
        self.conv2 = nn.Conv2d(256, 256, 3, padding=1, padding_mode='circular')
        self.conv3 = nn.Conv2d(256, 256, 1)
        self.conv4 = nn.Conv2d(256, 1, 3, padding=1, padding_mode='circular')
        self.sigmoid = nn.Sigmoid()
    def forward(self, x, x_prev_pred):
        # The binary diagnostic channels are feature inputs only -- no grad.
        with torch.no_grad():
            x_prev_pred_bin = x_prev_pred>0.5
            x_pred_bin = binary_forward_iteration(x_prev_pred_bin)
        x_pred = probabilistic_forward_iteration(x_prev_pred)
        x = torch.cat([x, x_prev_pred, x_prev_pred_bin.float(), x_pred, x_pred_bin.float()], dim=1)
        x = self.relu(self.conv1(x))
        x = self.relu(self.conv2(x))
        x = self.relu(self.conv3(x))
        x = self.sigmoid(self.conv4(x))
        return x
class Model(nn.Module):
    """Iterative-refinement net: starts from the stop state itself as the
    initial guess and applies FixPredictBlock `n_it` times."""

    def __init__(self):
        super().__init__()
        self.fix_pred = FixPredictBlock()

    def forward(self, x, n_it=5):
        prediction = x
        for _ in range(n_it):
            prediction = self.fix_pred(x, prediction)
        return prediction
# Smoke-test training of the fix-predict model on the live data stream.
# N_iter is cut down from the original 2000 iterations for a quick run.
N_iter = 2#2000
device = 'cpu' #'cuda'
# Infinite loader of non-empty stop states, all generated with delta=1.
loader = get_datastream_loader(batch_size=128, num_workers=0, drop_empty=True, delta=1)
model = Model().to(device)
criterion = BCELoss()
optimizer = Adam(model.parameters(), lr=1e-3)
tqdm_loader = tqdm(loader)
for i, (stop_state, _) in enumerate(tqdm_loader):
    print(i)
    stop_state = stop_state.to(device)
    optimizer.zero_grad()
    # Predict a start state, roll it forward probabilistically, and train it
    # to reproduce the observed stop state.
    start_state_prediction = model(stop_state)
    stop_state_prediction = probabilistic_forward_iteration(start_state_prediction)
    loss = criterion(stop_state_prediction, stop_state)
    loss.backward()
    optimizer.step()
    with torch.no_grad():
        bce = loss.item()
        start_state_alive = (start_state_prediction>0.5).float().mean().item()
        # accuracy: probabilistic roll-forward vs target, after thresholding;
        # accuracy_true: exact binary roll-forward of the thresholded guess.
        accuracy = ((stop_state_prediction > 0.5) == (stop_state>0.5)).float().mean().item()
        accuracy_true = (binary_forward_iteration(start_state_prediction>0.5)==(stop_state>0.5)).float().mean().item()
        tqdm_loader.postfix = 'bce: {:0.10f} | start_state_alive: {:0.5f} | accuracy: {:0.10f} | accuracy_true: {:0.10f}'\
            .format(bce, start_state_alive, accuracy, accuracy_true)
    if i > N_iter:
        tqdm_loader.close()
        break
# Freeze the trained model for evaluation.
for param in model.parameters():
    param.requires_grad = False
model.eval()
# Grab one fixed batch, then measure exact-inversion accuracy for an
# increasing number of refinement iterations.
for batch in loader:
    stop_state = batch[0]#.cuda()#TODO enable this when using cuda instead of cpu
    break
for n_iter in [1,10,100]:
    acc = (stop_state == binary_forward_iteration(model(stop_state, n_iter) > 0.5)).float().mean().item()
    print(f'model n_iter={n_iter} accuracy: {acc}')
def direct_gradient_optimization(batch, n_iter, lr, device='cuda', reduse_alife=False):
    """Invert one probabilistic Life step for `batch` by gradient descent.

    Optimizes a free start-state logit tensor so that its sigmoid, rolled
    forward probabilistically, matches the given stop states under BCE.
    With reduse_alife=True the mean alive-probability is added to the loss
    to discourage unnecessarily dense start states. Returns the soft
    (sigmoid) start-state probabilities, detached.
    """
    stop_state = batch
    # Logits initialised in (-1, 0): sigmoid starts below 0.5 everywhere.
    start_state = nn.Parameter(torch.rand(stop_state.shape).to(device)-1)
    criterion = BCELoss()
    optimizer = Adam([start_state], lr=lr,)
    tqdm_loader = trange(n_iter)
    for _ in tqdm_loader:
        optimizer.zero_grad()
        start_state_prob = torch.sigmoid(start_state)
        # autograd=False routes through the memory-saving custom backward.
        stop_state_prediction = probabilistic_forward_iteration(start_state_prob, autograd=False)
        bce_loss = criterion(stop_state_prediction, stop_state)
        start_state_alive = start_state_prob.mean()
        if reduse_alife and start_state_alive.item() > 0:
            loss = bce_loss + start_state_alive
        else:
            loss = bce_loss
        loss.backward()
        optimizer.step()
        with torch.no_grad():
            # Progress-bar metrics only; no effect on the optimisation.
            bce = bce_loss.item()
            alive_cells = start_state_alive.item()
            accuracy = ((stop_state_prediction > 0.5) == (stop_state>0.5)).float().mean().item()
            accuracy_true = (binary_forward_iteration(start_state_prob>0.5)==(stop_state>0.5)).float().mean().item()
            tqdm_loader.postfix = 'bce: {:0.10f} | start_state_alive: {:0.5f} | accuracy: {:0.10f} | accuracy_true: {:0.10f}'.format(bce, alive_cells, accuracy, accuracy_true)
    return torch.sigmoid(start_state.detach())#.cpu().reshape(-1,625)
def direct_gradient_optimization_predict(data, delta, n_iter=100, lr=1, device='cuda'):
    """Invert `delta` Life steps for a batch of flattened 25x25 stop states
    and return the predicted binary start states, shape (batch, 625)."""
    grids = FloatTensor(np.array(data)).reshape((-1, 1, 25, 25)).to(device)
    # Step backwards delta-1 times, binarizing between intermediate steps.
    for _ in range(delta - 1):
        soft = direct_gradient_optimization(grids, n_iter, lr, reduse_alife=True, device=device)
        grids = (soft > 0.5).float()
    final = direct_gradient_optimization(grids, n_iter, 1, reduse_alife=False, device=device)
    binary = (final > 0.5).detach().cpu().int()
    return binary.reshape(-1, 625).numpy()
# Kaggle submission: invert every test stop state grouped by its delta and
# write the predicted start states in the sample-submission layout.
test = pd.read_csv('../data/test.csv', index_col='id')
submission = pd.read_csv('./sample_submission.csv', index_col='id')
for delta in range(1,6):
    mask = test['delta']==delta
    # Skip the delta column; the remaining 625 columns are the board cells.
    data = test[mask].iloc[:,1:]
    submission[mask] = direct_gradient_optimization_predict(data, delta, n_iter=4, lr=1,device=device)
submission.to_csv('prob_extend_kaggle.csv')
| 36.191892 | 171 | 0.659697 | 3,845 | 0.287133 | 571 | 0.042641 | 852 | 0.063625 | 0 | 0 | 639 | 0.047719 |
50e86f54b6c39a0053edb593684658b152073127 | 1,704 | py | Python | scripts/file_renamer.py | mmenietti/python_code | 74df6acf8f950b72f65e27b2ac98854483084b6f | [
"MIT"
] | null | null | null | scripts/file_renamer.py | mmenietti/python_code | 74df6acf8f950b72f65e27b2ac98854483084b6f | [
"MIT"
] | null | null | null | scripts/file_renamer.py | mmenietti/python_code | 74df6acf8f950b72f65e27b2ac98854483084b6f | [
"MIT"
] | null | null | null | #------------------------------------------------------------
# Dependencies
#------------------------------------------------------------
import pathlib
import os
import argparse
#------------------------------------------------------------
#
#------------------------------------------------------------
def is_video_file(in_file):
    """Return True when `in_file` (a pathlib.Path) has a known video extension.

    Improvements over the original elif chain: a single set-membership test,
    generalized to be case-insensitive so '.MP4', '.Avi', etc. also match.
    """
    return in_file.suffix.lower() in {'.avi', '.mp4', '.mkv', '.wmv'}
def rename_video(sub_dir):
    """Move every video file in `sub_dir` up one level, renamed after the
    directory (e.g. Show/episode.mkv -> Show.mkv beside Show/).

    NOTE(review): on a name collision, `counter` (index of the last entry
    seen so far) is used as a suffix but is NOT advanced inside the while
    loop -- if "<dir> <counter><ext>" also exists this loops forever.
    Confirm whether double collisions can occur.
    """
    counter = 0
    for x in sub_dir.iterdir():
        counter = counter + 1
        if is_video_file(x):
            print(x.name + ' is video')
            # Target name: "<parent dir name><original suffix>" ...
            new_name_path = x.with_name(sub_dir.name)
            new_name_path = new_name_path.with_suffix(x.suffix)
            # ... placed in sub_dir's parent directory (parents[1] of a file
            # inside sub_dir).
            new_path_path = new_name_path.parents[1].joinpath(new_name_path.name)
            while new_path_path.exists():
                new_path_path = new_path_path.with_name(sub_dir.name + ' ' + str(counter) + new_path_path.suffix)
            print(new_path_path.parts)
            x.rename(new_path_path)
        # else:
            # print(x.name + ' is not video')
if __name__ == "__main__":
    # CLI entry point: for the given directory, pull the videos out of each
    # immediate subdirectory (see rename_video).
    parser = argparse.ArgumentParser(description='Process path string.')
    parser.add_argument('input_directory')
    args = parser.parse_args()
    input_directory_path = pathlib.Path(args.input_directory)
    for x in input_directory_path.iterdir():
        if x.is_dir():
            rename_video(x)
50e8877f0f4dda0b297436942622d7085f968969 | 135 | py | Python | mordae/gpc/__init__.py | mordae/gpc | 1de74b9038c576066eccd9ec4c3d0c1769b1ff47 | [
"MIT"
] | 2 | 2016-11-11T13:45:54.000Z | 2021-12-09T14:35:45.000Z | mordae/gpc/__init__.py | mordae/gpc | 1de74b9038c576066eccd9ec4c3d0c1769b1ff47 | [
"MIT"
] | null | null | null | mordae/gpc/__init__.py | mordae/gpc | 1de74b9038c576066eccd9ec4c3d0c1769b1ff47 | [
"MIT"
] | 3 | 2016-11-11T13:46:11.000Z | 2021-02-10T15:05:13.000Z | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
from mordae.gpc.manager import *
from mordae.gpc.site import *
# vim:set sw=4 ts=4 et:
| 16.875 | 32 | 0.644444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.496296 |
50eb9232360bd6dafdfc0f9b1de58c1d985a20fc | 5,206 | py | Python | eznlp/vectors.py | syuoni/eznlp | c0380c6c30d68b4df1769150424735c04ea9d714 | [
"Apache-2.0"
] | 9 | 2021-08-06T07:12:55.000Z | 2022-03-26T08:20:59.000Z | eznlp/vectors.py | Hhx1999/eznlp | 9d1397d8e9630c099295712cbcffa495353a3268 | [
"Apache-2.0"
] | 1 | 2022-03-11T13:27:29.000Z | 2022-03-16T11:52:14.000Z | eznlp/vectors.py | Hhx1999/eznlp | 9d1397d8e9630c099295712cbcffa495353a3268 | [
"Apache-2.0"
] | 3 | 2021-11-15T03:24:24.000Z | 2022-03-09T09:36:05.000Z | # -*- coding: utf-8 -*-
from typing import Union, List
import os
import tqdm
import logging
import torch
logger = logging.getLogger(__name__)
def _parse_line(line: bytes):
w, *vector = line.rstrip().split(b" ")
return w, [float(v) for v in vector]
def _infer_shape(path: str, skiprows: List[int]):
vec_dim = None
with open(path, 'rb') as f:
for i, line in enumerate(f):
if i in skiprows:
continue
if vec_dim is None:
w, vector = _parse_line(line)
vec_dim = len(vector)
return i+1, vec_dim
def _load_from_file(path: str, encoding=None, skiprows: Union[int, List[int]]=None, verbose=False):
    """Parse a text embedding file into (words, FloatTensor of vectors).

    Lines whose index appears in `skiprows` (e.g. a fastText-style header)
    are ignored; malformed lines are counted, logged and skipped.
    """
    logger.info(f"Loading vectors from {path}")
    if skiprows is None:
        skiprows = []
    elif isinstance(skiprows, int):
        skiprows = [skiprows]
    assert all(isinstance(row, int) for row in skiprows)
    words, vectors = [], []
    # Pre-scan for the total line count (progress bar) and expected dim.
    num_lines, vec_dim = _infer_shape(path, skiprows)
    with open(path, 'rb') as f:
        num_bad_lines = 0
        for i, line in tqdm.tqdm(enumerate(f), total=num_lines, disable=not verbose, ncols=100, desc="Loading vectors"):
            if i in skiprows:
                continue
            try:
                w, vector = _parse_line(line)
                words.append(w.decode(encoding))
                # A dimension mismatch is treated like any other bad line.
                assert len(vector) == vec_dim
                vectors.append(vector)
            except KeyboardInterrupt as e:
                raise e
            except:
                # Deliberately broad: any decode/parse failure skips the line.
                # NOTE(review): the word may already have been appended before
                # its vector fails validation, leaving words and vectors
                # misaligned -- worth confirming and fixing upstream.
                num_bad_lines += 1
                logger.warning(f"Bad line detected: {line.rstrip()}")
    if num_bad_lines > 0:
        logger.warning(f"Totally {num_bad_lines} bad lines exist and were skipped")
    vectors = torch.tensor(vectors)
    return words, vectors
class Vectors(object):
    """In-memory word-vector store with paired vocabulary views.

    `itos` (index-to-string) is kept in sync with the derived `stoi`
    mapping via the property setter below.
    """
    def __init__(self, itos: List[str], vectors: torch.FloatTensor, unk_init=None):
        # unk_init(emb_dim) builds the vector returned for OOV tokens;
        # defaults to an all-zero vector (torch.zeros).
        if len(itos) != vectors.size(0):
            raise ValueError(f"Vocaburaly size {len(itos)} does not match vector size {vectors.size(0)}")
        self.itos = itos
        self.vectors = vectors
        self.unk_init = torch.zeros if unk_init is None else unk_init
    @property
    def itos(self):
        # Index-to-string vocabulary list.
        return self._itos
    @itos.setter
    def itos(self, itos: List[str]):
        # Rebuild the string-to-index map whenever the list is replaced.
        self._itos = itos
        self.stoi = {w: i for i, w in enumerate(itos)}
    def __getitem__(self, token: str):
        """Vector for `token`; falls back to unk_init(emb_dim) when OOV."""
        if token in self.stoi:
            return self.vectors[self.stoi[token]]
        else:
            return self.unk_init(self.emb_dim)
    def lookup(self, token: str):
        """Try the token plus lower/title/upper-cased variants, in order;
        return the first in-vocabulary vector, or None."""
        tried_set = set()
        # Backup tokens
        for possible_token in [token, token.lower(), token.title(), token.upper()]:
            if possible_token in tried_set:
                continue
            if possible_token in self.stoi:
                return self.vectors[self.stoi[possible_token]]
            else:
                tried_set.add(possible_token)
        return None
    def __repr__(self):
        return f"{self.__class__.__name__}({self.voc_dim}, {self.emb_dim})"
    def __len__(self):
        return self.vectors.size(0)
    @property
    def voc_dim(self):
        # Vocabulary size (number of rows).
        return self.vectors.size(0)
    @property
    def emb_dim(self):
        # Embedding dimensionality (number of columns).
        return self.vectors.size(1)
    @staticmethod
    def save_to_cache(path: str, itos: List[str], vectors: torch.FloatTensor):
        """Serialize the (itos, vectors) pair next to `path` as `path`.pt."""
        logger.info(f"Saving vectors to {path}.pt")
        torch.save((itos, vectors), f"{path}.pt")
    @staticmethod
    def load_from_cache(path: str):
        """Load a previously cached (itos, vectors) pair from `path`.pt."""
        logger.info(f"Loading vectors from {path}.pt")
        itos, vectors = torch.load(f"{path}.pt")
        return itos, vectors
    @classmethod
    def load(cls, path: str, encoding=None, **kwargs):
        """Build a Vectors object from `path`, preferring the .pt cache and
        creating the cache after the first plain-text parse.

        NOTE(review): kwargs are forwarded to the file parser only, not to
        cls(...), so e.g. unk_init cannot be set through load() -- confirm
        whether that is intentional.
        """
        if os.path.exists(f"{path}.pt"):
            itos, vectors = cls.load_from_cache(path)
        else:
            itos, vectors = _load_from_file(path, encoding, **kwargs)
            cls.save_to_cache(path, itos, vectors)
        return cls(itos, vectors)
class GloVe(Vectors):
    """Pre-trained GloVe word vectors.

    https://nlp.stanford.edu/projects/glove/
    Parses `path` as a whitespace-separated text embedding file on first
    use and caches the (itos, vectors) pair to `path`.pt for fast reloads.
    """
    def __init__(self, path: str, encoding=None, **kwargs):
        if os.path.exists(f"{path}.pt"):
            itos, vectors = self.load_from_cache(path)
        else:
            itos, vectors = _load_from_file(path, encoding)
            self.save_to_cache(path, itos, vectors)
        super().__init__(itos, vectors, **kwargs)
class Senna(Vectors):
    """SENNA embeddings, loaded from the distribution's directory layout:
    `path`/hash/words.lst (one word per line) paired row-by-row with
    `path`/embeddings/embeddings.txt. Cached to `path`.pt after first load."""
    def __init__(self, path: str, **kwargs):
        if os.path.exists(f"{path}.pt"):
            itos, vectors = self.load_from_cache(path)
        else:
            with open(f"{path}/hash/words.lst", 'r') as f:
                itos = [w.strip() for w in f.readlines()]
            with open(f"{path}/embeddings/embeddings.txt", 'r') as f:
                vectors = [[float(v) for v in vector.strip().split()] for vector in f.readlines()]
            vectors = torch.tensor(vectors)
            self.save_to_cache(path, itos, vectors)
        super().__init__(itos, vectors, **kwargs)
| 31.743902 | 120 | 0.573569 | 3,384 | 0.650019 | 0 | 0 | 1,028 | 0.197464 | 0 | 0 | 572 | 0.109873 |
50ebe457a1b78802dec0ab678d488621c9afcd56 | 2,389 | py | Python | tools/Kline2HDF5.py | wzf92/rqalpha | 63a328f10d0488a4b06cd13773e506d4ee96628b | [
"Apache-2.0"
] | null | null | null | tools/Kline2HDF5.py | wzf92/rqalpha | 63a328f10d0488a4b06cd13773e506d4ee96628b | [
"Apache-2.0"
] | null | null | null | tools/Kline2HDF5.py | wzf92/rqalpha | 63a328f10d0488a4b06cd13773e506d4ee96628b | [
"Apache-2.0"
] | null | null | null | import os
import datetime
import h5py
import numpy as np
# Structured dtype for one bar (candlestick) row in the output HDF5 datasets.
# BUG FIX: `np.float` was a deprecated alias of the builtin `float` (i.e.
# float64) and was removed entirely in NumPy 1.24, so it is now spelled
# np.float64 explicitly -- numerically identical, future-proof.
DEFAULT_DTYPE = np.dtype([
    ('datetime', np.int64),
    ('open', np.float64),
    ('close', np.float64),
    ('high', np.float64),
    ('low', np.float64),
    ('limit_up', np.float64),
    ('limit_down', np.float64),
    ('volume', np.float64),
    ('total_turnover', np.float64),
    ('settlement', np.float64),
    ('prev_settlement', np.float64),
])
class Kline2HDF5:
    """Converts CSV k-line (candlestick) files into datasets of one HDF5 file.

    Usage: construct with the output .h5 path, call translate() once per
    input CSV/symbol, then finished() to close the file.
    """
    def __init__(self, fo_name):
        # NOTE(review): _timeformat is never used below -- looks like a leftover.
        self._timeformat = "%Y-%m-%d %H:%M:%S"
        self._create_output_file(fo_name)
    def _create_output_file(self, fo_name):
        # "w" mode truncates any existing file at fo_name.
        self._fo = h5py.File(fo_name, "w")
    def finished(self):
        """Close the underlying HDF5 file; call once after all translate()s."""
        self._fo.close()
    def translate(self, fi_name, symbol=None):
        """Parse one CSV of bars into an HDF5 dataset named `symbol`
        (defaults to the file's basename). Returns True on completion."""
        print(fi_name, symbol)
        fi = open(fi_name, 'r')
        if not symbol:
            symbol = os.path.basename(fi_name).split('.')[0]
        res = []
        lines = fi.readlines()
        last_dt = None
        for line in lines:
            # Column layout appears to be: unix_ts, open, high, low, close,
            # volume (inferred from the variable names below -- TODO confirm
            # against the actual data files).
            vars = line.strip('\n').split(',')
            datetime_array = datetime.datetime.fromtimestamp(int(vars[0]))
            if last_dt:
                delta = datetime_array - last_dt
                # Late-evening bars (hour >= 20) that jumped to a later
                # calendar day are folded back onto the previous bar's date;
                # presumably this aligns night-session trades with their
                # trading day -- confirm with the data source.
                if delta.days >= 1 and 20 <= datetime_array.hour <= 24:
                    # datetime_array = datetime_array - datetime.timedelta(days=1)
                    datetime_array = datetime_array.replace(day=last_dt.day, month=last_dt.month)
                # Timestamps must stay strictly increasing after the fix-up;
                # dump diagnostics and abort otherwise.
                if datetime_array <= last_dt:
                    print(line)
                    print(datetime_array)
                    print(datetime_array.second)
                    print(last_dt)
                    print(last_dt.hour)
                    assert False
            datetime_str = datetime_array.strftime("%Y%m%d%H%M%S")
            # t = int(vars[0])
            o = float(vars[1])
            h = float(vars[2])
            l = float(vars[3])
            c = float(vars[4])
            v = float(vars[5])
            # Tuple order must match DEFAULT_DTYPE; limit_up/limit_down are
            # approximated as +/-10% of the open, settlement fields as -1.
            res.append((datetime_str, o, c, h, l, o * 1.1, o * 0.9, v, -1, -1, -1))
            last_dt = datetime_array
        fi.close()
        res_array = np.asarray(res, dtype=DEFAULT_DTYPE)
        self._fo.create_dataset(symbol, data=res_array)
        return True
if __name__ == '__main__':
    # Ad-hoc manual run: convert a single local CSV into futures_min_test.h5.
    rq2h5 = Kline2HDF5("futures_min_test.h5")
    rq2h5.translate("/Users/zhifeng/rqalpha/data/rqdata/I88-4.csv", "I88")
    rq2h5.finished()
| 32.726027 | 97 | 0.544161 | 1,821 | 0.762244 | 0 | 0 | 0 | 0 | 0 | 0 | 320 | 0.133947 |
50ec400b28d273845661df97a4f47b5842de0d09 | 496 | py | Python | src/types.py | fabianobizarro/lispy | 61f092fc9bebf16ec1963df33388e57af2df5e76 | [
"MIT"
] | null | null | null | src/types.py | fabianobizarro/lispy | 61f092fc9bebf16ec1963df33388e57af2df5e76 | [
"MIT"
] | null | null | null | src/types.py | fabianobizarro/lispy | 61f092fc9bebf16ec1963df33388e57af2df5e76 | [
"MIT"
] | null | null | null |
# Types -- aliases used by the Lisp interpreter's reader/evaluator.
Symbol = str # A Lisp Symbol is implemented as a Python str
List = list # A Lisp List is implemented as a Python list
Number = (int, float) # A Lisp Number is implemented as a Python int or float
# Exp = Union[Symbol, Exp]
def atom(token):
    "Numbers become numbers; every other token is a symbol."
    for parse in (int, float):
        try:
            return parse(token)
        except ValueError:
            continue
    return Symbol(token)
| 23.619048 | 78 | 0.620968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.47379 |
50ec7093b69902578058e5325a13292f0fd0aef1 | 2,086 | py | Python | app/logic/mailing/tests/tests_management_SendMail.py | imvu/bluesteel | ab52133249a693b3cd2d8593c5d47408a3b0fce6 | [
"MIT"
] | 10 | 2017-01-13T06:28:04.000Z | 2020-11-18T13:00:26.000Z | app/logic/mailing/tests/tests_management_SendMail.py | imvu/bluesteel | ab52133249a693b3cd2d8593c5d47408a3b0fce6 | [
"MIT"
] | null | null | null | app/logic/mailing/tests/tests_management_SendMail.py | imvu/bluesteel | ab52133249a693b3cd2d8593c5d47408a3b0fce6 | [
"MIT"
] | 2 | 2018-03-29T14:10:53.000Z | 2019-11-20T08:21:57.000Z | """ StackedMail management command tests """
from django.test import TestCase
from django.test import Client
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.utils.six import StringIO
from app.logic.mailing.models.StackedMailModel import StackedMailEntry
from app.logic.httpcommon import res
import os
import shutil
class ManagementStackedMailTestCase(TestCase):
    """Tests for the `sendmail` management command: only queued
    (is_sent=False) StackedMailEntry rows should be delivered, and every
    entry should be flagged as sent afterwards."""
    def setUp(self):
        self.client = Client()
        self.tmp_folder = os.path.join(settings.TMP_ROOT)
        if not os.path.exists(self.tmp_folder):
            os.makedirs(self.tmp_folder)
    def tearDown(self):
        # Remove everything the test may have written under TMP_ROOT.
        if os.path.exists(self.tmp_folder):
            shutil.rmtree(self.tmp_folder)
    def create_stacked_email(self, sender, receiver, title, msg, sent):
        """Persist one StackedMailEntry; `sent` marks it already delivered."""
        StackedMailEntry.objects.create(
            receiver=receiver,
            sender=sender,
            title=title,
            content=msg,
            is_sent=sent
        )
    def test_command_output(self):
        # One already-sent entry plus three pending ones.
        self.create_stacked_email('s1@test.com', 'r1@test.com', 'Title1', 'Body1', True)
        self.create_stacked_email('s2@test.com', 'r2@test.com', 'Title2', 'Body2', False)
        self.create_stacked_email('s3@test.com', 'r3@test.com', 'Title3', 'Body3', False)
        self.create_stacked_email('s4@test.com', 'r4@test.com', 'Title4', 'Body4', False)
        out = StringIO()
        call_command('sendmail', stdout=out)
        self.assertIn('', out.getvalue())
        # Django's in-memory test outbox; sort by recipient for stable order.
        mail.outbox.sort(key=lambda x: x.to[0])
        self.assertEqual('r2@test.com', mail.outbox[0].to[0])
        self.assertEqual('s2@test.com', mail.outbox[0].from_email)
        self.assertEqual('r3@test.com', mail.outbox[1].to[0])
        self.assertEqual('s3@test.com', mail.outbox[1].from_email)
        self.assertEqual('r4@test.com', mail.outbox[2].to[0])
        self.assertEqual('s4@test.com', mail.outbox[2].from_email)
        # Afterwards every entry (old and new) must be flagged as sent.
        emails = StackedMailEntry.objects.all()
        for email in emails:
            self.assertEqual(True, email.is_sent)
| 33.111111 | 89 | 0.660594 | 1,692 | 0.811122 | 0 | 0 | 0 | 0 | 0 | 0 | 298 | 0.142857 |
50ed09f4520aa4f2cf7555a9b4e50a474d33e658 | 26,637 | py | Python | source/deepsecurity/api/policy_firewall_rule_details_api.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-10-30T16:40:09.000Z | 2021-10-30T16:40:09.000Z | source/deepsecurity/api/policy_firewall_rule_details_api.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-07-28T20:19:03.000Z | 2021-07-28T20:19:03.000Z | source/deepsecurity/api/policy_firewall_rule_details_api.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-10-30T16:40:02.000Z | 2021-10-30T16:40:02.000Z | # coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from deepsecurity.api_client import ApiClient
class PolicyFirewallRuleDetailsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def describe_firewall_rule_on_policy(self, policy_id, firewall_rule_id, api_version, **kwargs): # noqa: E501
"""Describe a firewall rule # noqa: E501
Describe a firewall rule including policy-level overrides. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_firewall_rule_on_policy(policy_id, firewall_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.describe_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.describe_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
return data
    def describe_firewall_rule_on_policy_with_http_info(self, policy_id, firewall_rule_id, api_version, **kwargs):  # noqa: E501
        """Describe a firewall rule  # noqa: E501

        Describe a firewall rule including policy-level overrides.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.describe_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param int firewall_rule_id: The ID number of the firewall rule. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only overrides defined for the current policy.
        :return: FirewallRule
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Reject any keyword argument that is not a declared parameter or
        # one of the framework-internal request options.
        all_params = ['policy_id', 'firewall_rule_id', 'api_version', 'overrides']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method describe_firewall_rule_on_policy" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'policy_id' is set
        if ('policy_id' not in params or
                params['policy_id'] is None):
            raise ValueError("Missing the required parameter `policy_id` when calling `describe_firewall_rule_on_policy`")  # noqa: E501
        # verify the required parameter 'firewall_rule_id' is set
        if ('firewall_rule_id' not in params or
                params['firewall_rule_id'] is None):
            raise ValueError("Missing the required parameter `firewall_rule_id` when calling `describe_firewall_rule_on_policy`")  # noqa: E501
        # verify the required parameter 'api_version' is set
        if ('api_version' not in params or
                params['api_version'] is None):
            raise ValueError("Missing the required parameter `api_version` when calling `describe_firewall_rule_on_policy`")  # noqa: E501

        # Both IDs must contain digits (swagger-generated pattern check).
        if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `policy_id` when calling `describe_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`")  # noqa: E501
        if 'firewall_rule_id' in params and not re.search('\\d+', str(params['firewall_rule_id'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `firewall_rule_id` when calling `describe_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`")  # noqa: E501
        collection_formats = {}

        # Assemble path/query/header parameters for the REST call.
        path_params = {}
        if 'policy_id' in params:
            path_params['policyID'] = params['policy_id']  # noqa: E501
        if 'firewall_rule_id' in params:
            path_params['firewallRuleID'] = params['firewall_rule_id']  # noqa: E501

        query_params = []
        if 'overrides' in params:
            query_params.append(('overrides', params['overrides']))  # noqa: E501

        header_params = {}
        if 'api_version' in params:
            header_params['api-version'] = params['api_version']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['DefaultAuthentication']  # noqa: E501

        # Delegate the actual HTTP exchange (and optional async execution)
        # to the shared ApiClient.
        return self.api_client.call_api(
            '/policies/{policyID}/firewall/rules/{firewallRuleID}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FirewallRule',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_firewall_rules_on_policy(self, policy_id, api_version, **kwargs): # noqa: E501
"""List firewall rules # noqa: E501
Lists all firewall rules assigned to a policy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_firewall_rules_on_policy(policy_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only rules assigned to the current policy.
:return: FirewallRules
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_firewall_rules_on_policy_with_http_info(policy_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.list_firewall_rules_on_policy_with_http_info(policy_id, api_version, **kwargs) # noqa: E501
return data
    def list_firewall_rules_on_policy_with_http_info(self, policy_id, api_version, **kwargs): # noqa: E501
        """List firewall rules # noqa: E501
        Lists all firewall rules assigned to a policy. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_firewall_rules_on_policy_with_http_info(policy_id, api_version, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only rules assigned to the current policy.
        :return: FirewallRules
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['policy_id', 'api_version', 'overrides'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshot: the named parameters above become the keys of
        # `params`, so the local variable names in this method are load-bearing
        # (generator-produced pattern; do not rename locals here).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_firewall_rules_on_policy" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'policy_id' is set
        if ('policy_id' not in params or
                params['policy_id'] is None):
            raise ValueError("Missing the required parameter `policy_id` when calling `list_firewall_rules_on_policy`") # noqa: E501
        # verify the required parameter 'api_version' is set
        if ('api_version' not in params or
                params['api_version'] is None):
            raise ValueError("Missing the required parameter `api_version` when calling `list_firewall_rules_on_policy`") # noqa: E501
        # NOTE(review): re.search only requires the pattern to occur somewhere
        # in the value, so any string containing a digit passes; strict
        # conformance to /\d+/ would need re.fullmatch — confirm before
        # tightening (this is generator-produced code).
        if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
            raise ValueError("Invalid value for parameter `policy_id` when calling `list_firewall_rules_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'policy_id' in params:
            path_params['policyID'] = params['policy_id'] # noqa: E501
        query_params = []
        if 'overrides' in params:
            query_params.append(('overrides', params['overrides'])) # noqa: E501
        header_params = {}
        if 'api_version' in params:
            header_params['api-version'] = params['api_version'] # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['DefaultAuthentication'] # noqa: E501
        return self.api_client.call_api(
            '/policies/{policyID}/firewall/rules', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FirewallRules', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def modify_firewall_rule_on_policy(self, policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs): # noqa: E501
"""Modify a firewall rule # noqa: E501
Modify a firewall rule assigned to a policy. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_firewall_rule_on_policy(policy_id, firewall_rule_id, firewall_rule, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule to modify. (required)
:param FirewallRule firewall_rule: The settings of the firewall rule to modify. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.modify_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs) # noqa: E501
else:
(data) = self.modify_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs) # noqa: E501
return data
    def modify_firewall_rule_on_policy_with_http_info(self, policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs): # noqa: E501
        """Modify a firewall rule # noqa: E501
        Modify a firewall rule assigned to a policy. Any unset elements will be left unchanged. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.modify_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, firewall_rule, api_version, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param int firewall_rule_id: The ID number of the firewall rule to modify. (required)
        :param FirewallRule firewall_rule: The settings of the firewall rule to modify. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only overrides defined for the current policy.
        :return: FirewallRule
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['policy_id', 'firewall_rule_id', 'firewall_rule', 'api_version', 'overrides'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshot: the named parameters above become the keys of
        # `params`, so the local variable names in this method are load-bearing
        # (generator-produced pattern; do not rename locals here).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method modify_firewall_rule_on_policy" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'policy_id' is set
        if ('policy_id' not in params or
                params['policy_id'] is None):
            raise ValueError("Missing the required parameter `policy_id` when calling `modify_firewall_rule_on_policy`") # noqa: E501
        # verify the required parameter 'firewall_rule_id' is set
        if ('firewall_rule_id' not in params or
                params['firewall_rule_id'] is None):
            raise ValueError("Missing the required parameter `firewall_rule_id` when calling `modify_firewall_rule_on_policy`") # noqa: E501
        # verify the required parameter 'firewall_rule' is set
        if ('firewall_rule' not in params or
                params['firewall_rule'] is None):
            raise ValueError("Missing the required parameter `firewall_rule` when calling `modify_firewall_rule_on_policy`") # noqa: E501
        # verify the required parameter 'api_version' is set
        if ('api_version' not in params or
                params['api_version'] is None):
            raise ValueError("Missing the required parameter `api_version` when calling `modify_firewall_rule_on_policy`") # noqa: E501
        # NOTE(review): re.search only requires the pattern to occur somewhere
        # in the value; strict conformance to /\d+/ would need re.fullmatch —
        # confirm before tightening (this is generator-produced code).
        if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
            raise ValueError("Invalid value for parameter `policy_id` when calling `modify_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
        if 'firewall_rule_id' in params and not re.search('\\d+', str(params['firewall_rule_id'])): # noqa: E501
            raise ValueError("Invalid value for parameter `firewall_rule_id` when calling `modify_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'policy_id' in params:
            path_params['policyID'] = params['policy_id'] # noqa: E501
        if 'firewall_rule_id' in params:
            path_params['firewallRuleID'] = params['firewall_rule_id'] # noqa: E501
        query_params = []
        if 'overrides' in params:
            query_params.append(('overrides', params['overrides'])) # noqa: E501
        header_params = {}
        if 'api_version' in params:
            header_params['api-version'] = params['api_version'] # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        if 'firewall_rule' in params:
            body_params = params['firewall_rule']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['DefaultAuthentication'] # noqa: E501
        # Note: this API uses POST (not PUT/PATCH) for modification.
        return self.api_client.call_api(
            '/policies/{policyID}/firewall/rules/{firewallRuleID}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FirewallRule', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def reset_firewall_rule_on_policy(self, policy_id, firewall_rule_id, api_version, **kwargs): # noqa: E501
"""Reset firewall rule overrides # noqa: E501
Remove all overrides for a firewall rule from a policy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_firewall_rule_on_policy(policy_id, firewall_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule to reset. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.reset_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.reset_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
return data
    def reset_firewall_rule_on_policy_with_http_info(self, policy_id, firewall_rule_id, api_version, **kwargs): # noqa: E501
        """Reset firewall rule overrides # noqa: E501
        Remove all overrides for a firewall rule from a policy. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.reset_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int policy_id: The ID number of the policy. (required)
        :param int firewall_rule_id: The ID number of the firewall rule to reset. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only overrides defined for the current policy.
        :return: FirewallRule
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['policy_id', 'firewall_rule_id', 'api_version', 'overrides'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshot: the named parameters above become the keys of
        # `params`, so the local variable names in this method are load-bearing
        # (generator-produced pattern; do not rename locals here).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method reset_firewall_rule_on_policy" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'policy_id' is set
        if ('policy_id' not in params or
                params['policy_id'] is None):
            raise ValueError("Missing the required parameter `policy_id` when calling `reset_firewall_rule_on_policy`") # noqa: E501
        # verify the required parameter 'firewall_rule_id' is set
        if ('firewall_rule_id' not in params or
                params['firewall_rule_id'] is None):
            raise ValueError("Missing the required parameter `firewall_rule_id` when calling `reset_firewall_rule_on_policy`") # noqa: E501
        # verify the required parameter 'api_version' is set
        if ('api_version' not in params or
                params['api_version'] is None):
            raise ValueError("Missing the required parameter `api_version` when calling `reset_firewall_rule_on_policy`") # noqa: E501
        # NOTE(review): re.search only requires the pattern to occur somewhere
        # in the value; strict conformance to /\d+/ would need re.fullmatch —
        # confirm before tightening (this is generator-produced code).
        if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
            raise ValueError("Invalid value for parameter `policy_id` when calling `reset_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
        if 'firewall_rule_id' in params and not re.search('\\d+', str(params['firewall_rule_id'])): # noqa: E501
            raise ValueError("Invalid value for parameter `firewall_rule_id` when calling `reset_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'policy_id' in params:
            path_params['policyID'] = params['policy_id'] # noqa: E501
        if 'firewall_rule_id' in params:
            path_params['firewallRuleID'] = params['firewall_rule_id'] # noqa: E501
        query_params = []
        if 'overrides' in params:
            query_params.append(('overrides', params['overrides'])) # noqa: E501
        header_params = {}
        if 'api_version' in params:
            header_params['api-version'] = params['api_version'] # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['DefaultAuthentication'] # noqa: E501
        # Resetting overrides maps to an HTTP DELETE on the rule resource.
        return self.api_client.call_api(
            '/policies/{policyID}/firewall/rules/{firewallRuleID}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FirewallRule', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 50.737143 | 311 | 0.637497 | 25,958 | 0.974509 | 0 | 0 | 0 | 0 | 0 | 0 | 15,311 | 0.574802 |
50ed2f5909b9404a25f18a778eda2dc622034b2e | 1,957 | py | Python | server/middleware/log_requests.py | marirs/fastApi-simpleApiKey-boilerplate | 2989e6ec3865d3719df059369adf36c31c014ab3 | [
"MIT"
] | 3 | 2020-06-15T21:11:02.000Z | 2022-01-19T07:52:44.000Z | server/middleware/log_requests.py | marirs/fastApi-simpleApiKey-boilerplate | 2989e6ec3865d3719df059369adf36c31c014ab3 | [
"MIT"
] | 1 | 2020-08-24T07:49:55.000Z | 2020-08-24T07:49:55.000Z | server/middleware/log_requests.py | marirs/fastApi-simpleApiKey-boilerplate | 2989e6ec3865d3719df059369adf36c31c014ab3 | [
"MIT"
] | 2 | 2020-07-07T14:24:44.000Z | 2020-08-24T07:40:04.000Z | """
Log requests middleware
"""
import time
from fastapi import Request
import geoip2.database
from geoip2.errors import AddressNotFoundError
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from server.config import app_logger
from server.config import CITY_DB
logger = app_logger(__name__, 'requests.log')  # request-log sink for this middleware
geo_location = geoip2.database.Reader(CITY_DB)  # shared GeoIP2 city reader, opened once at import time
class LogRequests(BaseHTTPMiddleware):
    """Starlette middleware that logs every request with client geo info.

    For each request it records the remote IP (honouring proxy headers),
    a GeoIP2 country lookup, the user agent, method, path, response status
    and the processing time, and adds an ``X-Process-Time`` response header.
    """

    async def dispatch(self,
                       request: Request,
                       call_next: RequestResponseEndpoint,
                       ):
        """Time the downstream handler, log the request, and return the response."""
        start_time = time.time()
        response = await call_next(request)
        # Elapsed wall-clock time in milliseconds, rendered with 2 decimals.
        process_time = (time.time() - start_time) * 1000
        formatted_process_time = '{0:.2f}'.format(process_time)

        # request.client is a (host, port) pair; joining yields "host:port"
        # (or a string containing "None" when client info is unavailable).
        remote = ":".join(map(str, request.client))
        x_forwarded_for = request.headers.get('x-forwarded-for', None)
        x_real_ip = request.headers.get('x-real-ip', None)
        if x_forwarded_for:
            # Keep only the originating client (first hop in the proxy chain).
            x_forwarded_for = x_forwarded_for.split(',')[0]
        if "None" in remote:
            # Direct client info missing: fall back to proxy headers.
            if x_forwarded_for:
                remote = x_forwarded_for
            elif x_real_ip:
                remote = x_real_ip
        try:
            # Strip a trailing ":port" (if any) before the GeoIP lookup.
            remote_country = geo_location.city(remote.rsplit(':', 1)[0])
            remote_country = remote_country.country.name
        except AddressNotFoundError:
            remote_country = 'Unknown'
        logger.info({
            "remote_ip": remote,
            "remote_country": remote_country,
            "user_agent": request.headers.get('user-agent', None),
            'method': request.method,
            "path": request.url.path,
            "completed in (ms)": formatted_process_time,
            "response code": response.status_code
        })
        # formatted_process_time is already a str; no conversion needed.
        response.headers["X-Process-Time"] = formatted_process_time
        return response
| 33.741379 | 81 | 0.62698 | 1,564 | 0.799182 | 0 | 0 | 0 | 0 | 1,521 | 0.77721 | 235 | 0.120082 |
50ee3e8e00f618a16eddadbc990257d3c3594577 | 244 | py | Python | example/src/pythonSnippet.py | hansehe/DockerBuildManagement | 774ddfb5184dc9c9ae0c307c7d5963a4ccb104f8 | [
"MIT"
] | 8 | 2019-04-03T13:40:30.000Z | 2020-11-29T09:20:13.000Z | example/src/pythonSnippet.py | hansehe/DockerBuildManagement | 774ddfb5184dc9c9ae0c307c7d5963a4ccb104f8 | [
"MIT"
] | 1 | 2019-02-06T16:05:06.000Z | 2019-02-24T22:59:43.000Z | example/src/pythonSnippet.py | hansehe/DockerBuildManagement | 774ddfb5184dc9c9ae0c307c7d5963a4ccb104f8 | [
"MIT"
] | 5 | 2018-12-15T19:03:25.000Z | 2021-09-22T23:42:33.000Z | def GetInfoMsg():
infoMsg = "This python snippet is triggered by the 'cmd' property.\r\n"
infoMsg += "Any command line may be triggered with the 'cmd' property.\r\n"
return infoMsg
if __name__ == "__main__":
print(GetInfoMsg()) | 34.857143 | 79 | 0.684426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.553279 |
50f08891336f7e5545f07eea041b3eca967010af | 42 | py | Python | python/testData/requirement/generation/keepMatchingVersion/main.py | Sajaki/intellij-community | 6748af2c40567839d11fd652ec77ba263c074aad | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/requirement/generation/keepMatchingVersion/main.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2022-02-19T09:45:05.000Z | 2022-02-27T20:32:55.000Z | python/testData/requirement/generation/keepMatchingVersion/main.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 1 | 2020-03-10T02:53:51.000Z | 2020-03-10T02:53:51.000Z | import django
import numpy
import requests | 14 | 15 | 0.880952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
50f24aafc2c271250aa2b186af0caf47f126a2f7 | 735 | py | Python | tools/search_product.py | shuangliu1993/PyScraper | c59f17ddf4bb7ddde8a8f08875b8743b35ab894a | [
"MIT"
] | null | null | null | tools/search_product.py | shuangliu1993/PyScraper | c59f17ddf4bb7ddde8a8f08875b8743b35ab894a | [
"MIT"
] | null | null | null | tools/search_product.py | shuangliu1993/PyScraper | c59f17ddf4bb7ddde8a8f08875b8743b35ab894a | [
"MIT"
] | null | null | null | import argparse
def parse_args():
    """Parse the command line and return the populated argparse namespace."""
    parser = argparse.ArgumentParser(description="Search Products on Supported Platforms")
    parser.add_argument("--platform", "-p", required=True, help="Online Retail Platform", type=str)
    parser.add_argument("--keyword", "-k", required=True, help="Product Keyword", type=str)
    return parser.parse_args()
def main():
    """CLI entry point: run a product search on the selected platform."""
    cli_args = parse_args()
    if cli_args.platform != "amazon":
        # Only the Amazon scraper exists today.
        raise NotImplementedError
    # Import lazily so unsupported platforms never touch the scraper package.
    from product_scrapper.amazon.amazon_search import AmazonSearch as Search
    searcher = Search(keyword=cli_args.keyword)
    searcher.scrape()
    print(searcher)


if __name__ == "__main__":
    main()
50f3354c783dfac6ddb9456c55399677fd92b771 | 4,423 | py | Python | mros1_reasoner/test/test_level_1_functional_arch.py | JWijkhuizen/mc_mros_reasoner | 10f9ded02bf4f1c24ab57be83ed75e6e2a3815a8 | [
"Apache-2.0"
] | null | null | null | mros1_reasoner/test/test_level_1_functional_arch.py | JWijkhuizen/mc_mros_reasoner | 10f9ded02bf4f1c24ab57be83ed75e6e2a3815a8 | [
"Apache-2.0"
] | null | null | null | mros1_reasoner/test/test_level_1_functional_arch.py | JWijkhuizen/mc_mros_reasoner | 10f9ded02bf4f1c24ab57be83ed75e6e2a3815a8 | [
"Apache-2.0"
] | 1 | 2020-11-19T16:26:33.000Z | 2020-11-19T16:26:33.000Z |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id: gossipbot.py 1013 2008-05-21 01:08:56Z sfkwc $
## Talker/listener demo validation
PKG = 'mros1_reasoner'            # ROS package under test (passed to rostest.rosrun)
NAME = 'test_level_1_functions'   # node name used when initializing the test node
MSG_DELAY = 0.2                   # seconds to sleep between /rosout polls
TIMEOUT = 5.0                     # default seconds to wait for the expected log messages
import sys
import unittest
import rostest
import rospy
import time
from rosgraph_msgs.msg import Log
class TestLevel1Functions(unittest.TestCase):
    """Level-1 functional-architecture checks for the mros1 reasoner.

    Subscribes to /rosout and verifies that the reasoner node logged the two
    messages indicating its knowledge base was loaded and initialized.
    """

    def __init__(self, *args):
        super(TestLevel1Functions, self).__init__(*args)
        self.success = False
        # Number of expected reasoner log messages observed so far (max 2).
        self.success_level_1 = 0

    ##############################################################################
    def test_one_equals_one(self):
    ##############################################################################
        """Sanity check that the test harness itself runs."""
        rospy.loginfo("-D- test_one_equals_one")
        # assertEqual (not the deprecated assertEquals alias, removed in 3.12)
        self.assertEqual(1, 1, "1!=1")

    ##############################################################################
    def test_1_level_functional_architecture(self):
    ##############################################################################
        """Listen on /rosout until both expected reasoner messages arrive."""
        self.success = False
        self.susbcriber_functional_architecture(TIMEOUT)
        rospy.sleep(MSG_DELAY)
        self.assertTrue(self.success)

    ##############################################################################
    def log_callback_level_1(self, log_data):
    ##############################################################################
        """/rosout callback: count the expected reasoner log messages."""
        if log_data.name == '/reasoner':
            if log_data.level == Log.INFO:
                if (log_data.msg.startswith("Loaded ontology:")):
                    rospy.loginfo("OWL file loaded into KB: %s"%log_data.msg)
                    self.success_level_1 = self.success_level_1 + 1
            if log_data.level == Log.WARN:
                if (log_data.msg.startswith("Objective, NFRs and initial FG are generated from the OWL file")):
                    rospy.loginfo("KB initialized: %s"%log_data.msg)
                    self.success_level_1 = self.success_level_1 + 1
        if (self.success_level_1 == 2):
            # Message corrected: the condition above checks for 2 messages, not 3.
            rospy.loginfo("Got the 2 correct messages! - level_functional test OK!")
            self.success = True

    ##############################################################################
    def susbcriber_functional_architecture(self, timeout=5.0):
    ##############################################################################
        """Subscribe to /rosout and block until success or *timeout* seconds."""
        rospy.init_node(NAME, anonymous=True)
        rospy.Subscriber("/rosout", Log, self.log_callback_level_1)
        timeout_t = time.time() + timeout # Default timeout 5 sec.
        while not rospy.is_shutdown() and not self.success and time.time() < timeout_t:
            time.sleep(MSG_DELAY)
if __name__ == '__main__':
    # Run via rostest so the result is collected by the ROS test framework.
    rostest.rosrun(PKG, NAME, TestLevel1Functions, sys.argv)
50f36717a52d72766e958dea02b6d3aadf86dd90 | 11,278 | py | Python | psana/psana/dgrammanager.py | valmar/lcls2 | 1c24da076a8cd252cf6601e125dd721fd2004f2a | [
"BSD-3-Clause-LBNL"
] | null | null | null | psana/psana/dgrammanager.py | valmar/lcls2 | 1c24da076a8cd252cf6601e125dd721fd2004f2a | [
"BSD-3-Clause-LBNL"
] | null | null | null | psana/psana/dgrammanager.py | valmar/lcls2 | 1c24da076a8cd252cf6601e125dd721fd2004f2a | [
"BSD-3-Clause-LBNL"
] | null | null | null | import sys, os
import time
import getopt
import pprint
try:
# doesn't exist on macos
from shmem import PyShmemClient
except:
pass
from psana import dgram
from psana.event import Event
from psana.detector import detectors
from psana.psexp.event_manager import TransitionId
import numpy as np
def dumpDict(dict, indent):
    """Recursively pretty-print a dict, descending into values with __dict__.

    Keys are printed in sorted order, indented by *indent* spaces; nested
    objects are printed with the indent increased by 2.
    """
    for key in sorted(dict.keys()):
        value = dict[key]
        if hasattr(value, '__dict__'):
            print(' ' * indent, key)
            dumpDict(value.__dict__, indent + 2)
        else:
            print(' ' * indent, key, value)
# method to dump dgrams to stdout. ideally this would move into dgram.cc
def dumpDgram(d):
    """Dump a dgram's attribute tree to stdout via dumpDict."""
    dumpDict(vars(d), 0)
FN_L = 200  # maximum xtc file-name length; used as the numpy unicode dtype width ('U200')
# Warning: If XtcData::Dgram ever changes, this function will likely need to change
def _service(view):
iSvc = 2 # Index of service field, in units of uint32_t
return (np.array(view, copy=False).view(dtype=np.uint32)[iSvc] >> 24) & 0x0f
# Warning: If XtcData::Dgram ever changes, this function will likely need to change
def _dgSize(view):
iExt = 5 # Index of extent field, in units of uint32_t
txSize = 3 * 4 # sizeof(XtcData::TransitionBase)
return txSize + np.array(view, copy=False).view(dtype=np.uint32)[iExt]
class DgramManager(object):
def __init__(self, xtc_files, configs=[], fds=[],
tag=None, run=None, max_retries=0,
found_xtc2_callback=None):
""" Opens xtc_files and stores configs.
If file descriptors (fds) is given, reuse the given file descriptors.
"""
self.xtc_files = []
self.shmem_cli = None
self.shmem_kwargs = {'index':-1,'size':0,'cli_cptr':None}
self.configs = []
self._timestamps = [] # built when iterating
self._run = run
self.found_endrun = True
self.buffered_beginruns = []
self.max_retries = max_retries
self.chunk_ids = []
# Add ability for dgrammanager to check if xtc2 files exist (in case
# .inprogress file is use).
if found_xtc2_callback:
setattr(self, 'found_xtc2', found_xtc2_callback)
if isinstance(xtc_files, (str)):
self.xtc_files = np.array([xtc_files], dtype='U%s'%FN_L)
elif isinstance(xtc_files, (list, np.ndarray)):
if len(xtc_files) > 0: # handles smalldata-only case
if xtc_files[0] == 'shmem':
self.shmem_cli = PyShmemClient()
#establish connection to available server - blocking
status = int(self.shmem_cli.connect(tag,0))
assert not status,'shmem connect failure %d' % status
#wait for first configure datagram - blocking
view = self.shmem_cli.get(self.shmem_kwargs)
assert view
# Release shmem buffer after copying Transition data
# cpo: copy L1Accepts too because some shmem
# applications like AMI's pickN can hold references
# to dgrams for a long time, consuming the shmem buffers
# and creating a deadlock situation. could revisit this
# later and only deep-copy arrays inside pickN, for example
# but would be more fragile.
barray = bytes(view[:_dgSize(view)])
self.shmem_cli.freeByIndex(self.shmem_kwargs['index'], self.shmem_kwargs['size'])
view = memoryview(barray)
d = dgram.Dgram(view=view)
self.configs += [d]
else:
self.xtc_files = np.asarray(xtc_files, dtype='U%s'%FN_L)
self.given_fds = True if len(fds) > 0 else False
if self.given_fds:
self.fds = np.asarray(fds, dtype=np.int32)
else:
self.fds = np.array([os.open(xtc_file, os.O_RDONLY) for xtc_file in self.xtc_files], dtype=np.int32)
self.fds_map = {}
for fd, xtc_file in zip(self.fds, self.xtc_files):
self.fds_map[fd] = xtc_file
given_configs = True if len(configs) > 0 else False
if given_configs:
self.configs = configs
elif xtc_files[0] != 'shmem':
self.configs = [dgram.Dgram(file_descriptor=fd, max_retries=self.max_retries) for fd in self.fds]
self.calibconst = {} # initialize to empty dict - will be populated by run class
self.n_files = len(self.xtc_files)
self.set_chunk_ids()
def set_chunk_ids(self):
if len(self.xtc_files) == 0: return
if self.xtc_files[0] == 'shmem': return
for xtc_file in self.xtc_files:
filename = os.path.basename(xtc_file)
found = filename.find('-c')
if found >= 0:
found_e = filename.find('.xtc2')
self.chunk_ids.append(int(filename[found+2:found_e]))
def get_chunk_id(self, ind):
if not self.chunk_ids: return None
return self.chunk_ids[ind]
def set_chunk_id(self, ind, new_chunk_id):
self.chunk_ids[ind] = new_chunk_id
def close(self):
if not self.given_fds:
for fd in self.fds:
os.close(fd)
def __iter__(self):
return self
def _check_missing_endrun(self, beginruns=None):
fake_endruns = None
if not self.found_endrun: # there's no previous EndRun
sec = (self._timestamps[-1] >> 32) & 0xffffffff
usec = int((self._timestamps[-1] & 0xffffffff) * 1e3 + 1)
if beginruns:
self.buffered_beginruns = [dgram.Dgram(config=config,
view=d, offset=0, size=d._size)
for d, config in zip(beginruns, self.configs)]
fake_endruns = [dgram.Dgram(config=config, fake_endrun=1, \
fake_endrun_sec=sec, fake_endrun_usec=usec) \
for config in self.configs]
self.found_endrun = True
else:
self.found_endrun = False
return fake_endruns
def __next__(self):
""" only support sequential read - no event building"""
if self.buffered_beginruns:
self.found_endrun = False
evt = Event(self.buffered_beginruns, run=self.run())
self._timestamps += [evt.timestamp]
self.buffered_beginruns = []
return evt
if self.shmem_cli:
view = self.shmem_cli.get(self.shmem_kwargs)
if view:
# Release shmem buffer after copying Transition data
# cpo: copy L1Accepts too because some shmem
# applications like AMI's pickN can hold references
# to dgrams for a long time, consuming the shmem buffers
# and creating a deadlock situation. could revisit this
# later and only deep-copy arrays inside pickN, for example
# but would be more fragile.
barray = bytes(view[:_dgSize(view)])
self.shmem_cli.freeByIndex(self.shmem_kwargs['index'], self.shmem_kwargs['size'])
view = memoryview(barray)
# use the most recent configure datagram
config = self.configs[len(self.configs)-1]
d = dgram.Dgram(config=config,view=view)
dgrams = [d]
else:
raise StopIteration
else:
try:
dgrams = [dgram.Dgram(config=config, max_retries=self.max_retries) for config in self.configs]
except StopIteration as err:
fake_endruns = self._check_missing_endrun()
if fake_endruns:
dgrams = fake_endruns
else:
print(err)
raise StopIteration
# Check BeginRun - EndRun pairing
service = dgrams[0].service()
if service == TransitionId.BeginRun:
fake_endruns = self._check_missing_endrun(beginruns=dgrams)
if fake_endruns:
dgrams = fake_endruns
if service == TransitionId.EndRun:
self.found_endrun = True
evt = Event(dgrams, run=self.get_run())
self._timestamps += [evt.timestamp]
return evt
    def jumps(self, dgram_i, offset, size):
        """Read a single dgram from the xtc file at stream index ``dgram_i``.

        Returns None when (offset, size) is (0, 0) -- i.e. this stream has
        no dgram at this position -- or when the read raises StopIteration.
        """
        if offset == 0 and size == 0:
            d = None
        else:
            try:
                d = dgram.Dgram(file_descriptor=self.fds[dgram_i],
                        config=self.configs[dgram_i],
                        offset=offset,
                        size=size,
                        max_retries=self.max_retries)
            except StopIteration:
                # Treat a failed read as "no dgram for this stream".
                d = None
        return d
    def jump(self, offsets, sizes):
        """ Jumps to the offset and reads out dgram on each xtc file.
        This is used in normal mode (multiple detectors with MPI).

        ``offsets``/``sizes`` are per-stream lists; a (0, 0) pair yields
        None for that stream (see ``jumps``). Returns an Event built from
        the per-stream dgrams.
        """
        assert len(offsets) > 0 and len(sizes) > 0
        dgrams = [self.jumps(dgram_i, offset, size) for dgram_i, (offset, size)
                in enumerate(zip(offsets, sizes))]
        evt = Event(dgrams, run=self._run)
        return evt
    def get_timestamps(self):
        """Return timestamps of all events served so far as a uint64 array."""
        return np.asarray(self._timestamps, dtype=np.uint64) # return numpy array for easy search later

    def set_run(self, run):
        # Setter for the current run object.
        self._run = run

    def get_run(self):
        # Getter for the current run object.
        return self._run
def parse_command_line():
    """Parse command-line options for the xtc dump script.

    Supported options:
        -h  print usage and exit (via usage_error)
        -f  xtc data filename (defaults to "data.xtc")
    (-v and -d: are accepted by getopt but currently unused.)

    Returns:
        tuple: (args_proper, xtcdata_filename) -- remaining positional
        arguments and the xtc filename to read.
    """
    opts, args_proper = getopt.getopt(sys.argv[1:], 'hvd:f:')
    xtcdata_filename = "data.xtc"  # default when -f is not given
    for option, parameter in opts:
        if option == '-h':
            usage_error()
        elif option == '-f':
            xtcdata_filename = parameter
    # Removed dead code: xtcdata_filename is initialized above and can
    # never be None at this point.
    return (args_proper, xtcdata_filename)
def getMemUsage():
    """Return this process's memory size (in KB) as reported by ps.

    Shells out to ``/usr/bin/ps -q <pid> --no-headers -eo size``;
    Linux/procps only.
    """
    pid = os.getpid()
    cmd = "/usr/bin/ps -q %d --no-headers -eo size" % pid
    # Close the pipe deterministically (the original leaked the popen
    # handle); also dropped the unused ppid lookup.
    with os.popen(cmd) as p:
        size = int(p.read())
    return size
def main():
    """Open the xtc file, dump DgramManager state, then iterate events.

    For each event, prints the type of every dgram attribute and checks
    that the raw data array is exposed read-only.
    """
    args_proper, xtcdata_filename = parse_command_line()
    ds=DgramManager(xtcdata_filename)
    print("vars(ds):")
    for var_name in sorted(vars(ds)):
        print(" %s:" % var_name)
        e=getattr(ds, var_name)
        # Only recurse into non-primitive attributes.
        if not isinstance(e, (tuple, list, int, float, str)):
            for key in sorted(e.__dict__.keys()):
                print("%s: %s" % (key, e.__dict__[key]))
    print()
    count=0
    for evt in ds:
        print("evt:", count)
        for dgram in evt:
            for var_name in sorted(vars(dgram)):
                val=getattr(dgram, var_name)
                print(" %s: %s" % (var_name, type(val)))
            # Verify the raw array is read-only: writes must raise.
            a=dgram.xpphsd.raw.array0Pgp
            try:
                a[0][0]=999
            except ValueError:
                print("The dgram.xpphsd.raw.array0Pgp is read-only, as it should be.")
            else:
                print("Warning: the evt.array0_pgp array is writable")
        print()
        count+=1
    return
def usage_error():
s="usage: python %s" % os.path.basename(sys.argv[0])
sys.stdout.write("%s [-h]\n" % s)
sys.stdout.write("%s [-f xtcdata_filename]\n" % (" "*len(s)))
sys.exit(1)
# Script entry point.
if __name__=='__main__':
    main()
| 37.719064 | 112 | 0.572708 | 8,247 | 0.731247 | 0 | 0 | 0 | 0 | 0 | 0 | 2,291 | 0.203139 |
50f3732d55416656d33c30e2933737275d61bab9 | 297 | py | Python | pygamerogue/utils.py | mikolasan/pyroguelike | d51b01a566b5edb39792b59d683b4bf827399ba4 | [
"BSD-3-Clause"
] | null | null | null | pygamerogue/utils.py | mikolasan/pyroguelike | d51b01a566b5edb39792b59d683b4bf827399ba4 | [
"BSD-3-Clause"
] | 2 | 2020-06-17T05:23:02.000Z | 2020-06-17T05:29:41.000Z | pygamerogue/utils.py | mikolasan/pyroguelike | d51b01a566b5edb39792b59d683b4bf827399ba4 | [
"BSD-3-Clause"
] | 1 | 2020-09-26T17:16:59.000Z | 2020-09-26T17:16:59.000Z | def shift_rect(rect, direction, distance=48):
if direction == 'left':
rect.left -= distance
elif direction == 'right':
rect.left += distance
elif direction == 'up':
rect.top -= distance
elif direction == 'down':
rect.top += distance
return rect
| 27 | 45 | 0.582492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.077441 |
50f3ae5f1f858f1abc588b7f7e7a6693cdb88057 | 944 | py | Python | examples/flair_example/example_3.py | aethersoft/textkit-learn | 8b25b19d394fb361dde4427ed3b84d63552b7cc8 | [
"MIT"
] | null | null | null | examples/flair_example/example_3.py | aethersoft/textkit-learn | 8b25b19d394fb361dde4427ed3b84d63552b7cc8 | [
"MIT"
] | null | null | null | examples/flair_example/example_3.py | aethersoft/textkit-learn | 8b25b19d394fb361dde4427ed3b84d63552b7cc8 | [
"MIT"
] | null | null | null | from flair.models import TARSTagger
from flair.data import Sentence
# 1. Load zero-shot NER tagger
# TARS supports zero-shot tagging: it can predict label sets it was
# never explicitly trained on.
tars = TARSTagger.load('tars-ner')
# 2. Prepare some test sentences
sentences = [
    Sentence("kill la kill is an anime"),
    Sentence("The Humboldt University of Berlin is situated near the Spree in Berlin, Germany"),
    Sentence("Bayern Munich played against Real Madrid"),
    Sentence("I flew with an Airbus A380 to Peru to pick up my Porsche Cayenne"),
    Sentence("Game of Thrones is my favorite series"),
]
# 3. Define some classes of named entities such as "soccer teams", "TV shows" and "rivers"
labels = ["Soccer Team", "University", "Vehicle", "River", "City", "Country", "Person", 'Anime', "Movie", "TV Show"]
# Switch the tagger to an ad-hoc task built from these labels.
tars.add_and_switch_to_new_task('task 1', labels, label_type='ner')
# 4. Predict for these classes and print results
for sentence in sentences:
    tars.predict(sentence)  # annotates the sentence in place
    print(sentence.to_tagged_string("ner"))
| 39.333333 | 116 | 0.723517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 569 | 0.602754 |
50f3b4598c7630a75082dc263f080621b3c51a76 | 1,061 | py | Python | techmanpy/clients/tmsta_client.py | julespalles/techman-python | 384ef92dc0601f93259e4a6e5a7e8b1c96876902 | [
"MIT"
] | 3 | 2020-12-28T08:28:23.000Z | 2022-03-27T18:52:02.000Z | techmanpy/clients/tmsta_client.py | julespalles/techman-python | 384ef92dc0601f93259e4a6e5a7e8b1c96876902 | [
"MIT"
] | 2 | 2021-09-06T12:56:08.000Z | 2021-11-02T12:52:02.000Z | techmanpy/clients/tmsta_client.py | jvdtoorn/techmanpy | 384ef92dc0601f93259e4a6e5a7e8b1c96876902 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import asyncio
from .stateless_client import *
from ..packets import *
from ..exceptions import *
class TMSTA_client(StatelessClient):
   """Stateless client for the Techman TMSTA protocol on port 5890."""

   PORT=5890

   def __init__(self, *, robot_ip, conn_timeout=3):
      # Delegate connection handling to StatelessClient on the fixed port.
      super().__init__(robot_ip=robot_ip, robot_port=self.PORT, conn_timeout=conn_timeout)

   def _on_connection(self, reader, writer):
      # Factory hook: wrap the raw streams in a TMSTA-specific connection.
      return TMSTA_connection(reader, writer, self._conn_timeout)
class TMSTA_connection(StatelessConnection):
   """Issues TMSTA requests over a stateless connection and parses replies."""

   async def is_listen_node_active(self):
      """Return whether the robot's listen node is currently active."""
      # Build TMSTA packet
      req = TMSTA_packet(TMSTA_type.IN_LISTEN_MODE, None)
      # Submit
      res = TMSTA_packet(await self.send(req))
      # Parse response (reply type must echo the request type)
      assert res.ptype == TMSTA_type.IN_LISTEN_MODE
      return res.params[0]

   async def get_queue_tag_status(self, tag_id):
      """Return the status of queue tag ``tag_id``."""
      # Build TMSTA packet
      req = TMSTA_packet(TMSTA_type.QUEUE_TAG, [tag_id])
      # Submit
      res = TMSTA_packet(await self.send(req))
      # Parse response
      assert res.ptype == TMSTA_type.QUEUE_TAG
      return res.params[1]
| 27.921053 | 90 | 0.707823 | 935 | 0.881244 | 0 | 0 | 0 | 0 | 575 | 0.541942 | 109 | 0.102733 |
50f5d881c3d0d561188e06fdce8329c0abb29d96 | 1,537 | py | Python | rampwf/score_types/__init__.py | rth/ramp-workflow | e97a27235a8dbd68111ca6b0c9136ff35cab81f8 | [
"BSD-3-Clause"
] | null | null | null | rampwf/score_types/__init__.py | rth/ramp-workflow | e97a27235a8dbd68111ca6b0c9136ff35cab81f8 | [
"BSD-3-Clause"
] | 1 | 2020-01-18T09:47:03.000Z | 2020-01-20T15:33:11.000Z | rampwf/score_types/__init__.py | rth/ramp-workflow | e97a27235a8dbd68111ca6b0c9136ff35cab81f8 | [
"BSD-3-Clause"
] | null | null | null | from .accuracy import Accuracy
from .balanced_accuracy import BalancedAccuracy
from .base import BaseScoreType
from .brier_score import (
BrierScore, BrierSkillScore, BrierScoreReliability, BrierScoreResolution)
from .clustering_efficiency import ClusteringEfficiency
from .classification_error import ClassificationError
from .combined import Combined
from .detection import (
OSPA, SCP, DetectionPrecision, DetectionRecall, MADCenter, MADRadius,
AverageDetectionPrecision, DetectionAveragePrecision)
from .f1_above import F1Above
from .macro_averaged_recall import MacroAveragedRecall
from .make_combined import MakeCombined
from .mare import MARE
from .negative_log_likelihood import NegativeLogLikelihood
from .normalized_gini import NormalizedGini
from .normalized_rmse import NormalizedRMSE
from .relative_rmse import RelativeRMSE
from .rmse import RMSE
from .roc_auc import ROCAUC
from .soft_accuracy import SoftAccuracy
__all__ = [
    'Accuracy',
    # 'AverageDetectionPrecision' is imported above but was missing from
    # this public-API list; added for consistency with the imports.
    'AverageDetectionPrecision',
    'BalancedAccuracy',
    'BaseScoreType',
    'BrierScore',
    'BrierScoreReliability',
    'BrierScoreResolution',
    'BrierSkillScore',
    'ClassificationError',
    'ClusteringEfficiency',
    'Combined',
    'DetectionPrecision',
    'DetectionRecall',
    'DetectionAveragePrecision',
    'F1Above',
    'MacroAveragedRecall',
    'MakeCombined',
    'MADCenter',
    'MADRadius',
    'MARE',
    'NegativeLogLikelihood',
    'NormalizedGini',
    'NormalizedRMSE',
    'OSPA',
    'RelativeRMSE',
    'RMSE',
    'ROCAUC',
    'SCP',
    'SoftAccuracy',
]
| 28.462963 | 77 | 0.762524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 414 | 0.269356 |
50f6582522a07cfc18e387853c08f5bffb83bbd5 | 120 | py | Python | src/som/vm/current.py | SOM-st/PySOM | 65ef72f44252439b724a7429408dac7f8d1b1d98 | [
"MIT"
] | 22 | 2015-10-29T05:11:06.000Z | 2022-03-01T11:18:45.000Z | src/som/vm/current.py | smarr/PySOM | 65ef72f44252439b724a7429408dac7f8d1b1d98 | [
"MIT"
] | 16 | 2021-03-07T22:09:33.000Z | 2021-08-24T12:36:15.000Z | src/som/vm/current.py | SOM-st/PySOM | 65ef72f44252439b724a7429408dac7f8d1b1d98 | [
"MIT"
] | 5 | 2015-01-02T03:51:29.000Z | 2020-10-02T07:05:46.000Z | def _init():
from som.vm.universe import create_universe
return create_universe()
current_universe = _init()
| 15 | 47 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
50f65fae1a46d1d8123517234371efe7221d2e7d | 6,272 | py | Python | detectron2_backbone/backbone/shufflenetv2.py | cenchaojun/detectron2 | 03ca41a6873bb641764c4762d40d355f215e7ad9 | [
"Apache-2.0"
] | null | null | null | detectron2_backbone/backbone/shufflenetv2.py | cenchaojun/detectron2 | 03ca41a6873bb641764c4762d40d355f215e7ad9 | [
"Apache-2.0"
] | null | null | null | detectron2_backbone/backbone/shufflenetv2.py | cenchaojun/detectron2 | 03ca41a6873bb641764c4762d40d355f215e7ad9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Descripttion: https://github.com/sxhxliang/detectron2_backbone
# version: 0.0.1
# Author: Shihua Liang (sxhx.liang@gmail.com)
# FilePath: /detectron2_backbone/backbone/shufflenetv2.py
# Create: 2020-04-19 11:50:19
# LastAuthor: Shihua Liang
# lastTime: 2020-04-30 15:06:15
# --------------------------------------------------------
import torch
from torch import nn
from torch.nn import BatchNorm2d
import fvcore.nn.weight_init as weight_init
from detectron2.layers import Conv2d, FrozenBatchNorm2d, ShapeSpec
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.fpn import FPN, LastLevelMaxPool
from torchvision import models
from .fpn import LastLevelP6, LastLevelP6P7
__all__ = [
"ShuffleNetV2",
"build_shufflenet_v2_backbone",
"build_shufflenet_v2_fpn_backbone",
"build_fcos_shufflenet_v2_fpn_backbone"
]
ShuffleNetV2_cfg = {
'shufflenet_v2_x0_5': {'stages_repeats': [4, 8, 4],'stages_out_channels': [24, 48, 96, 192, 1024]},
'shufflenet_v2_x1_0': {'stages_repeats': [4, 8, 4],'stages_out_channels': [24, 116, 232, 464, 1024]},
'shufflenet_v2_x1_5': {'stages_repeats': [4, 8, 4],'stages_out_channels': [24, 176, 352, 704, 1024]},
'shufflenet_v2_x2_0': {'stages_repeats': [4, 8, 4],'stages_out_channels': [24, 244, 488, 976, 2048]}
}
class ShuffleNetV2(Backbone):
    """
    Should freeze bn

    Detectron2 Backbone wrapper around torchvision's pretrained
    shufflenet_v2_x1_0. Outputs of maxpool, stage2, stage3 and conv5
    (indices 0, 1, 2, 4 of self.features; stage4 is skipped) are
    returned as feature maps "res2".."res5".

    NOTE(review): n_class, input_size and width_mult are accepted but,
    apart from the input_size % 32 assertion, unused -- the torchvision
    x1_0 variant is always loaded. Confirm this is intentional.
    """
    def __init__(self, cfg, n_class=1000, input_size=224, width_mult=1.):
        super(ShuffleNetV2, self).__init__()
        # Load the pretrained torchvision model and reuse its submodules.
        _model = models.shufflenet_v2_x1_0(True)
        self.conv1 = _model.conv1
        self.maxpool = _model.maxpool
        self.stage2 = _model.stage2
        self.stage3 = _model.stage3
        self.stage4 = _model.stage4
        self.conv5 = _model.conv5
        # building first layer
        assert input_size % 32 == 0

        # Indices into self.features whose outputs become res2..res5.
        self.return_features_indices = [0, 1, 2, 4]
        self.features = [self.maxpool, self.stage2, self.stage3, self.stage4, self.conv5]
        # stages_out_channels = ShuffleNetV2_cfg['shufflenet_v2_x1_0']['stages_out_channels']
        # self._out_feature_channels = { "res{}".format(i+2): stages_out_channels[indice] for (i, indice) in enumerate(self.return_features_indices)}
        # self._out_feature_strides = {"res2": 4, "res3": 8, "res4": 16, "res5": 32}
        self._initialize_weights()
        self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_AT)

    def _freeze_backbone(self, freeze_at):
        # Disable gradients for the first `freeze_at` feature stages.
        for layer_index in range(freeze_at):
            for p in self.features[layer_index].parameters():
                p.requires_grad = False

    def forward(self, x):
        """Run the stem then each stage, collecting the selected outputs."""
        res = []
        x = self.conv1(x)
        for i, m in enumerate(self.features):
            x = m(x)
            if i in self.return_features_indices:
                res.append(x)
        # Map collected features to names "res2".."res5" in order.
        return {'res{}'.format(i + 2): r for i, r in enumerate(res)}

    def _initialize_weights(self):
        # Kaiming-style init for convs, constant init for BN, small normal
        # init for linear layers. (Pretrained weights are loaded in
        # __init__, so this re-initialization overwrites them -- confirm.)
        for m in self.modules():
            if isinstance(m, Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, (2. / n) ** 0.5)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
@BACKBONE_REGISTRY.register()
def build_shufflenet_v2_backbone(cfg, input_shape):
    """
    Create a ShuffleNetV2 instance from config.

    Returns:
        ShuffleNetV2: a :class:`ShuffleNetV2` instance.
    """
    model = ShuffleNetV2(cfg)
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    # Channel counts for the x1_0 variant, mapped to the returned features.
    stages_out_channels = ShuffleNetV2_cfg['shufflenet_v2_x1_0']['stages_out_channels']
    out_feature_channels = { "res{}".format(i+2): stages_out_channels[indice] for (i, indice) in enumerate(model.return_features_indices)}
    out_feature_strides = {"res2": 4, "res3": 8, "res4": 16, "res5": 32}
    # Attach the metadata detectron2 expects on a Backbone.
    model._out_features = out_features
    model._out_feature_channels = out_feature_channels
    model._out_feature_strides = out_feature_strides
    return model
@BACKBONE_REGISTRY.register()
def build_shufflenet_v2_fpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode

    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_shufflenet_v2_backbone(cfg, input_shape)
    in_features = cfg.MODEL.FPN.IN_FEATURES
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    # Wrap the bottom-up net in an FPN; an extra max-pool level is
    # appended on top of the highest FPN output.
    backbone = FPN(
        bottom_up=bottom_up,
        in_features=in_features,
        out_channels=out_channels,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelMaxPool(),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
    return backbone
@BACKBONE_REGISTRY.register()
def build_fcos_shufflenet_v2_fpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode

    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.

    Raises:
        ValueError: if cfg.MODEL.FCOS.TOP_LEVELS is not 0, 1 or 2.
    """
    bottom_up = build_shufflenet_v2_backbone(cfg, input_shape)
    in_features = cfg.MODEL.FPN.IN_FEATURES
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    top_levels = cfg.MODEL.FCOS.TOP_LEVELS
    in_channels_top = out_channels
    # Choose the extra top block from the number of additional pyramid
    # levels FCOS requests. The original code left `top_block` undefined
    # for unexpected values (raising a confusing NameError later);
    # fail fast with a clear error instead.
    if top_levels == 2:
        top_block = LastLevelP6P7(in_channels_top, out_channels, "p5")
    elif top_levels == 1:
        top_block = LastLevelP6(in_channels_top, out_channels, "p5")
    elif top_levels == 0:
        top_block = None
    else:
        raise ValueError(
            "Unsupported MODEL.FCOS.TOP_LEVELS: {} (expected 0, 1 or 2)".format(top_levels)
        )
    backbone = FPN(
        bottom_up=bottom_up,
        in_features=in_features,
        out_channels=out_channels,
        norm=cfg.MODEL.FPN.NORM,
        top_block=top_block,
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
    return backbone
if __name__ == "__main__":
    # Smoke test: instantiate the backbone and run a dummy input.
    # NOTE(review): ShuffleNetV2.__init__ dereferences
    # cfg.MODEL.BACKBONE.FREEZE_AT, so passing None would raise an
    # AttributeError -- confirm this block still runs as intended.
    x = torch.ones(1, 3, 512, 512)
    model = ShuffleNetV2(None)
    print(model._out_feature_channels)
    outs = model(x)
    for o in outs:
        print(o, outs[o].shape)
50f7c29fc8ab893b2f181271b7547aefe74fc256 | 2,720 | py | Python | client.py | iamdonmathew/CLI-Rest-API | 0df6463b9a4d5d22ec3acb6fffde47c6fa4bf8c0 | [
"MIT"
] | 1 | 2020-07-25T04:17:25.000Z | 2020-07-25T04:17:25.000Z | client.py | iamdonmathew/CLI-Rest-API | 0df6463b9a4d5d22ec3acb6fffde47c6fa4bf8c0 | [
"MIT"
] | null | null | null | client.py | iamdonmathew/CLI-Rest-API | 0df6463b9a4d5d22ec3acb6fffde47c6fa4bf8c0 | [
"MIT"
] | null | null | null | # author: <Don Mathew>
# Client side CLI
# Necessary imports.
import os
import json
from pathlib import Path
import requests
import click
from server import run
# Click package is used to turn the python script into CLI.
@click.command()
@click.option('--path',prompt="Enter the path", help='Specify the path, that you want to display', required=True)
@click.option('--port',prompt="Enter the PORT number that is used to start the server", help='Specify the PORT number, that you already used to start server', required=True)
# The value of @click.option (path and port) is passed to the function getfiles.
def getfiles(path, port):
    """Simple program that displays a list of files in the directory."""
    # Metadata (name/path/size/extension) for every non-hidden file found.
    store_list = []
    if os.path.exists(path):
        # os.walk yields every directory under `path`.
        for root, directories, files in os.walk(path):
            for take_file in files:
                # Skip hidden files and files whose immediate parent
                # directory is hidden.
                if not take_file.startswith('.') and not os.path.basename(root).startswith('.'):
                    pathname = os.path.join(root, take_file)
                    stat = os.stat(pathname)
                    store_list.append({
                        'name': take_file,
                        'path': pathname,
                        'size': str(round(stat.st_size / (1024 * 1024), 2)) + 'MB',
                        'extension': Path(take_file).suffix,
                    })
        if not store_list:
            print('\nThere is no files inside the directory')
        else:
            # POST the collected metadata to the local server on `port`.
            try:
                datas = json.dumps(store_list)
                url = 'http://localhost:{}/'.format(port)
                print("Please wait...")
                print('Sending data to server...')
                response = requests.post(url, data=datas)
                # Fail loudly on HTTP error codes instead of claiming success.
                response.raise_for_status()
                print("Success!")
            except requests.exceptions.RequestException as exc:
                # Narrower than the original bare `except`, which reported
                # every failure (connection refused, HTTP error, bad JSON)
                # as "Invalid PORT number!".
                print("Failed to send data to the server: {}".format(exc))
    else:
        print('\nInvalid path!')
| 44.590164 | 173 | 0.584559 | 0 | 0 | 0 | 0 | 2,493 | 0.916544 | 0 | 0 | 1,310 | 0.481618 |
50fa748f4b12b2cb785fef94eea4592ec4640a99 | 1,921 | py | Python | python_code/I_K_ATP_RMP_mg.py | mmaleck/chondrocyte | 5109102b8f476318dd03f39521cfbfd5b2edf719 | [
"MIT"
] | 1 | 2021-07-01T08:36:21.000Z | 2021-07-01T08:36:21.000Z | python_code/I_K_ATP_RMP_mg.py | mmaleck/chondrocyte | 5109102b8f476318dd03f39521cfbfd5b2edf719 | [
"MIT"
] | 2 | 2021-07-09T11:59:58.000Z | 2021-08-18T16:05:13.000Z | python_code/I_K_ATP_RMP_mg.py | mmaleck/chondrocyte | 5109102b8f476318dd03f39521cfbfd5b2edf719 | [
"MIT"
] | 1 | 2021-07-02T10:01:20.000Z | 2021-07-02T10:01:20.000Z | import numpy as np
from scipy.integrate.odepack import odeint
import matplotlib.pyplot as plt
import functions
from chondrocyte import Voltage_clamp
from params import params_dict
import matplotlib as mpl
"""
The code is used to create Figure 4B for submitted paper
"Probing the putative role of KATP channels and biological variability in a mathematical model of chondrocyte electrophysiology”
"""
# Global plot styling.
mpl.rcParams['font.family'] = 'Avenir'
plt.rcParams['font.size'] = 18
plt.rcParams['axes.linewidth'] = 2

# define time span
params_dict.update(t_final=180)
t_final = params_dict["t_final"]
dt = params_dict["dt"]
t = np.linspace(0, t_final, int(t_final/dt))

params_dict.update(Mg_i=1)

# Define initial condition vector
y0 = (params_dict["V_0"], params_dict["Na_i_0"], params_dict["K_i_0"], params_dict["Ca_i_0"], params_dict["H_i_0"],
      params_dict["Cl_i_0"], params_dict["a_ur_0"], params_dict["i_ur_0"], params_dict["vol_i_0"],
      params_dict["cal_0"])

fig, ax = plt.subplots()

# Sweep intracellular Mg2+ (0.1, 1.0, 10 mM) at fixed K_o and plot the
# membrane-potential trace (state index 0) for each solution.
params_dict.update(K_o_0=7, Mg_i=0.1)
solution1 = odeint(functions.rhs, y0, t, args=(params_dict,))
ax.plot(t, solution1[:,0], label="$\mathrm{[Mg^{2+}]_i}$=0.1 mM", color="k")

params_dict.update(Mg_i=1.0)
solution2 = odeint(functions.rhs, y0, t, args=(params_dict,))
ax.plot(t, solution2[:,0], label="$\mathrm{[Mg^{2+}]_i}$=1.0 mM", color="b")

params_dict.update(Mg_i=10)
solution3 = odeint(functions.rhs, y0, t, args=(params_dict,))
ax.plot(t, solution3[:,0], label="$\mathrm{[Mg^{2+}]_i}$=10 mM", color="r")

# Axis labels and styling.
ax.set_xlabel("Time [s]", fontsize=16)
ax.set_ylabel("Membrane Potential [mV]", fontsize=16)
ax.xaxis.set_tick_params(which='major', size=14, width=2, direction='out')
ax.yaxis.set_tick_params(which='major', size=14, width=2, direction='out')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)

plt.legend(loc='upper right')
# plt.savefig("Fig4_B.png", bbox_inches='tight')
plt.show()
| 33.12069 | 128 | 0.723061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 616 | 0.320333 |
50fb774ef289656b71e1121738fca6d6a94e36e7 | 10,299 | py | Python | Dataset4EO/datasets/_builtin/voc.py | GeoAI4EO/GeoData4EO | 8da7df7fd8375efc3c584c6603622060384fe906 | [
"Apache-2.0"
] | null | null | null | Dataset4EO/datasets/_builtin/voc.py | GeoAI4EO/GeoData4EO | 8da7df7fd8375efc3c584c6603622060384fe906 | [
"Apache-2.0"
] | null | null | null | Dataset4EO/datasets/_builtin/voc.py | GeoAI4EO/GeoData4EO | 8da7df7fd8375efc3c584c6603622060384fe906 | [
"Apache-2.0"
] | null | null | null | import enum
import pdb
import functools
import pathlib
from typing import Any, Dict, List, Optional, Tuple, BinaryIO, cast, Union
from xml.etree import ElementTree
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Filter,
Demultiplexer,
IterKeyZipper,
LineReader,
)
from torchvision.datasets import VOCDetection
from Dataset4EO.datasets.utils import OnlineResource, HttpResource, Dataset
from Dataset4EO.datasets.utils._internal import (
path_accessor,
getitem,
INFINITE_BUFFER_SIZE,
path_comparator,
hint_sharding,
hint_shuffling,
read_categories_file,
)
from Dataset4EO.features import BoundingBox, Label, EncodedImage
from .._api import register_dataset, register_info
NAME = "voc"
CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
'train', 'tvmonitor')
PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
[128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
[192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
[192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
[128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
@register_info(NAME)
def _info() -> Dict[str, Any]:
    # Static dataset info: category names read from the bundled
    # categories file for this dataset.
    return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class VOC(Dataset):
    """
    PASCAL VOC detection/segmentation dataset as a torchdata datapipe.

    - **homepage**: http://host.robots.ox.ac.uk/pascal/VOC/
    """

    def __init__(
        self,
        root: Union[str, pathlib.Path],
        *,
        split: str = "train",
        year: str = "2012",
        task: str = "segmentation",
        skip_integrity_check: bool = False,
    ) -> None:
        self._year = self._verify_str_arg(year, "year", ("2007", "2008", "2009", "2010", "2011", "2012"))
        # The official test split was only ever released for VOC2007.
        if split == "test" and year != "2007":
            raise ValueError("`split='test'` is only available for `year='2007'`")
        else:
            self._split = self._verify_str_arg(split, "split", ("train", "val", "trainval", "test"))
        self._task = self._verify_str_arg(task, "task", ("detection", "segmentation"))
        # Folder layout inside the VOC archive differs per task.
        self._anns_folder = "Annotations" if task == "detection" else "SegmentationClass"
        self._split_folder = "Main" if task == "detection" else "Segmentation"
        self._categories = _info()["categories"]
        self.CLASSES = CLASSES
        self.PALETTE = PALETTE

        super().__init__(root, skip_integrity_check=skip_integrity_check)

    # (archive file name, sha256) per year, used for download + integrity check.
    _TRAIN_VAL_ARCHIVES = {
        "2007": ("VOCtrainval_06-Nov-2007.tar", "7d8cd951101b0957ddfd7a530bdc8a94f06121cfc1e511bb5937e973020c7508"),
        "2008": ("VOCtrainval_14-Jul-2008.tar", "7f0ca53c1b5a838fbe946965fc106c6e86832183240af5c88e3f6c306318d42e"),
        "2009": ("VOCtrainval_11-May-2009.tar", "11cbe1741fb5bdadbbca3c08e9ec62cd95c14884845527d50847bc2cf57e7fd6"),
        "2010": ("VOCtrainval_03-May-2010.tar", "1af4189cbe44323ab212bff7afbc7d0f55a267cc191eb3aac911037887e5c7d4"),
        "2011": ("VOCtrainval_25-May-2011.tar", "0a7f5f5d154f7290ec65ec3f78b72ef72c6d93ff6d79acd40dc222a9ee5248ba"),
        "2012": ("VOCtrainval_11-May-2012.tar", "e14f763270cf193d0b5f74b169f44157a4b0c6efa708f4dd0ff78ee691763bcb"),
    }
    _TEST_ARCHIVES = {
        "2007": ("VOCtest_06-Nov-2007.tar", "6836888e2e01dca84577a849d339fa4f73e1e4f135d312430c4856b5609b4892")
    }

    def _resources(self) -> List[OnlineResource]:
        """Return the single archive resource for the configured split/year."""
        file_name, sha256 = (self._TEST_ARCHIVES if self._split == "test" else self._TRAIN_VAL_ARCHIVES)[self._year]
        archive = HttpResource(f"http://host.robots.ox.ac.uk/pascal/VOC/voc{self._year}/{file_name}", sha256=sha256)
        return [archive]

    def _is_in_folder(self, data: Tuple[str, Any], *, name: str, depth: int = 1) -> bool:
        # True if `name` appears among the last `depth` parent folders
        # of the archive member's path.
        path = pathlib.Path(data[0])
        return name in path.parent.parts[-depth:]

    class _Demux(enum.IntEnum):
        # Branch indices for the Demultiplexer over archive members.
        SPLIT = 0
        IMAGES = 1
        ANNS = 2

    def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
        """Route each archive member to split list, image, or annotation."""
        if self._is_in_folder(data, name="ImageSets", depth=2):
            return self._Demux.SPLIT
        elif self._is_in_folder(data, name="JPEGImages"):
            return self._Demux.IMAGES
        elif self._is_in_folder(data, name=self._anns_folder):
            return self._Demux.ANNS
        else:
            # Unrecognized members are dropped (drop_none=True downstream).
            return None

    def _parse_detection_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
        # Parse the VOC XML annotation via torchvision's helper.
        return cast(Dict[str, Any], VOCDetection.parse_voc_xml(ElementTree.parse(buffer).getroot())["annotation"])

    def _prepare_detection_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
        """Convert a detection XML annotation into bounding boxes + labels."""
        anns = self._parse_detection_ann(buffer)
        instances = anns["object"]
        return dict(
            bounding_boxes=BoundingBox(
                [
                    [int(instance["bndbox"][part]) for part in ("xmin", "ymin", "xmax", "ymax")]
                    for instance in instances
                ],
                format="xyxy",
                image_size=cast(Tuple[int, int], tuple(int(anns["size"][dim]) for dim in ("height", "width"))),
            ),
            labels=Label(
                [self._categories.index(instance["name"]) for instance in instances], categories=self._categories
            ),
        )

    def _prepare_segmentation_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
        # Segmentation annotations are just the class-index PNG masks.
        return dict(segmentation=EncodedImage.from_file(buffer))

    def _prepare_sample(
        self,
        data: Tuple[Tuple[Tuple[str, str], Tuple[str, BinaryIO]], Tuple[str, BinaryIO]],
    ) -> Dict[str, Any]:
        """Build the mmseg-style img_info dict for one (image, annotation) pair."""
        split_and_image_data, ann_data = data
        _, image_data = split_and_image_data
        image_path, image_buffer = image_data
        ann_path, ann_buffer = ann_data

        image_path = pathlib.PosixPath(image_path).name
        ann_path = pathlib.PosixPath(ann_path).name
        #{'img_info': {'filename': '2009_000801.jpg', 'ann': {'seg_map': '2009_000801.png'}}, 'ann_info': {'seg_map': '2009_000801.png'}}
        img_info = dict({'filename':image_path, 'ann':dict({'seg_map':ann_path})})
        return img_info
        # NOTE(review): everything below is unreachable dead code left
        # over from the upstream torchvision implementation.
        return dict(
            (self._prepare_detection_ann if self._task == "detection" else self._prepare_segmentation_ann)(ann_buffer),
            image_path=image_path,
            image=EncodedImage.from_file(image_buffer),
            ann_path=ann_path,
        )

    def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
        """Assemble the pipeline: demux archive, read the split list, then
        zip each split entry with its image and annotation by file stem."""
        archive_dp = resource_dps[0]
        split_dp, images_dp, anns_dp = Demultiplexer(
            archive_dp,
            3,
            self._classify_archive,
            drop_none=True,
            buffer_size=INFINITE_BUFFER_SIZE,
        )

        # Keep only the split file for this task/split, one stem per line.
        split_dp = Filter(split_dp, functools.partial(self._is_in_folder, name=self._split_folder))
        split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
        split_dp = LineReader(split_dp, decode=True)
        split_dp = hint_shuffling(split_dp)
        split_dp = hint_sharding(split_dp)

        # Zip split entries with images, then with annotations, keyed on
        # the file stem (nested one level deeper per iteration).
        dp = split_dp
        for level, data_dp in enumerate((images_dp, anns_dp)):
            dp = IterKeyZipper(
                dp,
                data_dp,
                key_fn=getitem(*[0] * level, 1),
                ref_key_fn=path_accessor("stem"),
                buffer_size=INFINITE_BUFFER_SIZE,
            )
        return Mapper(dp, self._prepare_sample)

    def __len__(self) -> int:
        # Hard-coded official sample counts per (split, year, task).
        return {
            ("train", "2007", "detection"): 2_501,
            ("train", "2007", "segmentation"): 209,
            ("train", "2008", "detection"): 2_111,
            ("train", "2008", "segmentation"): 511,
            ("train", "2009", "detection"): 3_473,
            ("train", "2009", "segmentation"): 749,
            ("train", "2010", "detection"): 4_998,
            ("train", "2010", "segmentation"): 964,
            ("train", "2011", "detection"): 5_717,
            ("train", "2011", "segmentation"): 1_112,
            ("train", "2012", "detection"): 5_717,
            ("train", "2012", "segmentation"): 1_464,
            ("val", "2007", "detection"): 2_510,
            ("val", "2007", "segmentation"): 213,
            ("val", "2008", "detection"): 2_221,
            ("val", "2008", "segmentation"): 512,
            ("val", "2009", "detection"): 3_581,
            ("val", "2009", "segmentation"): 750,
            ("val", "2010", "detection"): 5_105,
            ("val", "2010", "segmentation"): 964,
            ("val", "2011", "detection"): 5_823,
            ("val", "2011", "segmentation"): 1_111,
            ("val", "2012", "detection"): 5_823,
            ("val", "2012", "segmentation"): 1_449,
            ("trainval", "2007", "detection"): 5_011,
            ("trainval", "2007", "segmentation"): 422,
            ("trainval", "2008", "detection"): 4_332,
            ("trainval", "2008", "segmentation"): 1_023,
            ("trainval", "2009", "detection"): 7_054,
            ("trainval", "2009", "segmentation"): 1_499,
            ("trainval", "2010", "detection"): 10_103,
            ("trainval", "2010", "segmentation"): 1_928,
            ("trainval", "2011", "detection"): 11_540,
            ("trainval", "2011", "segmentation"): 2_223,
            ("trainval", "2012", "detection"): 11_540,
            ("trainval", "2012", "segmentation"): 2_913,
            ("test", "2007", "detection"): 4_952,
            ("test", "2007", "segmentation"): 210,
        }[(self._split, self._year, self._task)]

    def _filter_anns(self, data: Tuple[str, Any]) -> bool:
        # True only for archive members classified as annotations.
        return self._classify_archive(data) == self._Demux.ANNS

    def _generate_categories(self) -> List[str]:
        """Derive the category list from all detection annotations."""
        self._task = "detection"
        resources = self._resources()

        archive_dp = resources[0].load(self._root)
        dp = Filter(archive_dp, self._filter_anns)
        dp = Mapper(dp, self._parse_detection_ann, input_col=1)

        categories = sorted({instance["name"] for _, anns in dp for instance in anns["object"]})
        # We add a background category to be used during segmentation
        categories.insert(0, "__background__")

        return categories
| 42.036735 | 137 | 0.595301 | 8,779 | 0.852413 | 0 | 0 | 8,909 | 0.865035 | 0 | 0 | 2,691 | 0.261288 |
50fc5d678ce5f53a6ac5266cf971694049a80f34 | 6,799 | py | Python | server/roster_updater/lib/course.py | HeavenFox/coursepad | cd7b1224f563271a2b89817ae4f2eccdc164d46b | [
"Apache-2.0"
] | 33 | 2016-09-30T03:58:01.000Z | 2021-01-09T16:12:25.000Z | server/roster_updater/lib/course.py | HeavenFox/coursepad | cd7b1224f563271a2b89817ae4f2eccdc164d46b | [
"Apache-2.0"
] | 8 | 2017-01-30T23:27:45.000Z | 2022-02-18T05:15:59.000Z | server/roster_updater/lib/course.py | HeavenFox/coursepad | cd7b1224f563271a2b89817ae4f2eccdc164d46b | [
"Apache-2.0"
] | 7 | 2016-09-30T03:59:50.000Z | 2018-10-21T02:17:12.000Z | import re
# crawl roster
faulty_prof = {
'Francis,J)' : 'Francis,J (jdf2)',
'Glathar,E)' : 'Glathar,E',
'Cady,B)' : 'Cady,B'
}
section_types = set()
day_pattern = {
'M': 1,
'T': 1<<1,
'W': 1<<2,
'R': 1<<3,
'F': 1<<4,
'S': 1<<5,
'U': 1<<6
}
def to_bool(s):
    """Map the roster's 'Y'/'N' flag string to a boolean ('Y' -> True)."""
    return s == 'Y'
def to_list(node):
    """Return the whitespace-stripped text of every child element of ``node``."""
    texts = []
    for child in node:
        texts.append(child.text.strip())
    return texts
def set_if_truthy(obj, idx, value):
    """Assign ``obj[idx] = value`` only when ``value`` is truthy."""
    if not value:
        return
    obj[idx] = value
def convert_crosslist(c):
    """Convert a crosslisting node to ``[subject, catalog_nbr]``.

    Returns None when the node is missing or has no children.
    """
    if c is None or len(c) == 0:
        return None
    return [c.find('subject').text, int(c.find('catalog_nbr').text)]
def get_s(node):
    """Return ``node.text``, or None when the node itself is None."""
    return None if node is None else node.text
def maybe_float(s):
    """Parse a numeric string: float when it contains '.', otherwise int."""
    return float(s) if '.' in s else int(s)
def convert_units(s):
    """Parse a credit-unit string like '1-4' into a numeric list [1, 4]."""
    units = []
    for part in s.split('-'):
        # Same conversion rule as maybe_float (inlined): float iff '.' present.
        units.append(float(part) if '.' in part else int(part))
    return units
class CourseParser(object):
    """Abstract base for roster parsers.

    Accumulates parsed course dicts in ``courses`` and instructor netids
    in ``profs``; subclasses implement ``parse``.
    """

    def __init__(self):
        self.courses = []   # parsed course dicts, in input order
        self.profs = set()  # netids of instructors seen so far

    def parse(self, node):
        """Parse one course node; must be overridden by subclasses."""
        raise NotImplementedError()
class CourseParserJson(CourseParser):
    """Parses courses from the Cornell class-roster JSON API format.

    Besides the inherited ``courses``/``profs``, also collects the
    distinct sessions and (location, campus) tuples encountered.
    """

    def __init__(self):
        super(CourseParserJson, self).__init__()
        self.sessions = set()   # (code, begin, end, long-name) tuples
        self.locations = set()  # (loc, locDescr, campus, campusDescr) tuples
        self.facility = set()   # populated nowhere in this class -- confirm

    @staticmethod
    def crosslist(d):
        # Convert a crosslisting dict to [subject, catalog number, type],
        # or None for a falsy input.
        if d:
            return [d['subject'], int(d['catalogNbr']), d['type']]
        return None

    def convert_meeting(self, node, parent=None):
        """Convert one meeting dict to the compact meeting representation."""
        obj = {}
        # Encode the day pattern (e.g. 'MWF') as a bitmask; 'Su' is first
        # normalized to 'U' so each day maps to a single character.
        pattern = 0
        pattern_desc = node.get('pattern', '').replace('Su', 'U')
        if pattern_desc != 'TBA':
            for c in pattern_desc:
                pattern |= day_pattern[c]
        set_if_truthy(obj, 'ptn', pattern)
        # Facility shorthand is '<3-letter building><room>'.
        facility = node.get('facilityDescrshort')
        if facility and facility != 'TBA':
            set_if_truthy(obj, 'bldg', facility[:3])
            set_if_truthy(obj, 'rm', facility[3:])
        set_if_truthy(obj, 'loc', node.get('facilityDescr'))
        set_if_truthy(obj, 'st', node.get('timeStart'))
        set_if_truthy(obj, 'et', node.get('timeEnd'))
        set_if_truthy(obj, 'sd', node.get('startDt'))
        set_if_truthy(obj, 'ed', node.get('endDt'))
        set_if_truthy(obj, 'profs', [s['netid'] for s in node.get('instructors', [])])
        set_if_truthy(obj, 'topic', node.get('meetingTopicDescription'))
        return obj

    def convert_section(self, node, parent=None):
        """Convert one class section; returns (component, section dict)."""
        comp = node.get('ssrComponent')
        obj = {}
        obj['nbr'] = int(node.get('classNbr'))
        obj['sec'] = node.get('section')
        # obj['loc'] = node.get('location')
        # obj['campus'] = node.get('campus')
        set_if_truthy(obj, 'topic', node.get('topicDescription'))
        # Track distinct locations/campuses as a side effect.
        self.locations.add((node.get('location'), node.get('locationDescr'), node.get('campus'), node.get('campusDescr')))
        set_if_truthy(obj, 'mt', [self.convert_meeting(s, node) for s in node.get('meetings', [])])
        return comp, obj

    def parse(self, node):
        """Parse one course: emits one course dict per enroll group."""
        obj = {}
        obj['sub'] = node.get('subject')
        obj['nbr'] = int(node.get('catalogNbr'))
        obj['title'] = node.get('titleLong')

        for group in node.get('enrollGroups', []):
            course = obj.copy()
            # Unit range collapses to a single value when min == max.
            if group['unitsMinimum'] == group['unitsMaximum']:
                course['unit'] = [group['unitsMaximum']]
            else:
                course['unit'] = [group['unitsMinimum'], group['unitsMaximum']]
            set_if_truthy(course, 'optcomp', group['componentsOptional'])
            set_if_truthy(course, 'session', group['sessionCode'])
            set_if_truthy(course, 'crosslists', [self.crosslist(d) for d in group.get('simpleCombinations', [])])

            # Group sections by their component type (LEC, DIS, LAB, ...).
            secs = {}
            for sec in group['classSections']:
                comp, sec = self.convert_section(sec, group)
                if comp not in secs:
                    secs[comp] = []
                secs[comp].append(sec)

            course['secs'] = secs
            self.courses.append(course)

            self.sessions.add((group['sessionCode'], group['sessionBeginDt'], group['sessionEndDt'], group['sessionLong']))
class CourseParserXML(CourseParser):
    """Parse course data from the legacy XML class-roster feed."""
    def __init__(self):
        # Reuse the base-class initialisation of `courses`/`profs` rather than
        # duplicating it (now consistent with CourseParserJson).
        super(CourseParserXML, self).__init__()
    def parse(self, node):
        """Convert one <course> element and collect it."""
        self.courses.append(self.convert_course(node))
    def parse_prof(self, name):
        """Extract an instructor netid from a 'Full Name (netid)' string.

        Known-bad names are corrected via the module-level ``faulty_prof``
        mapping; when no parenthesised netid is present, the raw name is
        returned after a warning.
        """
        if name in faulty_prof:
            name = faulty_prof[name]
        result = re.search(r'\((.+)\)', name)
        if result is None:
            # Fixed: this was a Python 2 print *statement*, a syntax error on
            # Python 3. The parenthesised call works on both 2 and 3.
            print("warning: %s dont have netid" % name)
            return name
        else:
            netid = result.group(1)
            self.profs.add(netid)
            return netid
    def convert_meeting(self, node):
        """Convert one <meeting> element into its compact dict form."""
        obj = {}
        pattern = 0
        pattern_desc = node.find('meeting_pattern_sdescr').text
        if pattern_desc != 'TBA':
            for c in pattern_desc:
                pattern |= day_pattern[c]
        set_if_truthy(obj, 'ptn', pattern)
        set_if_truthy(obj, 'bldg', node.find('building_code').text)
        set_if_truthy(obj, 'rm', node.find('room').text)
        set_if_truthy(obj, 'st', node.find('start_time').text)
        set_if_truthy(obj, 'et', node.find('end_time').text)
        set_if_truthy(obj, 'sd', node.find('start_date').text)
        set_if_truthy(obj, 'ed', node.find('end_date').text)
        set_if_truthy(obj, 'profs', [self.parse_prof(s) for s in to_list(node.find('instructors') or [])])
        return obj
    def convert_section(self, node):
        """Convert one section element; returns ``(component, section_dict)``."""
        comp = node.get('ssr_component')
        obj = {}
        obj['nbr'] = int(node.get('class_number'))
        obj['sec'] = node.get('class_section')
        # Track the distinct component types seen across the whole feed.
        section_types.add(comp)
        set_if_truthy(obj, 'consent', get_s(node.find('consent_ldescr')))
        set_if_truthy(obj, 'note', get_s(node.find('notes')))
        set_if_truthy(obj, 'mt', [self.convert_meeting(s) for s in node.findall('meeting')])
        return comp, obj
    def convert_course(self, node):
        """Convert one <course> element, grouping its sections by component."""
        obj = {}
        obj['sub'] = node.get('subject')
        obj['nbr'] = int(node.get('catalog_nbr'))
        obj['unit'] = convert_units(node.find('units').text)
        obj['title'] = node.find('course_title').text
        set_if_truthy(obj, 'topics', to_list(node.find('topics')))
        set_if_truthy(obj, 'crosslists', [convert_crosslist(a) for a in node.find('crosslists') or []])
        set_if_truthy(obj, 'comeetings', [convert_crosslist(a) for a in node.find('comeetings') or []])
        secs = {}
        for sec in node.find('sections'):
            comp, sec = self.convert_section(sec)
            if comp not in secs:
                secs[comp] = []
            secs[comp].append(sec)
        obj['secs'] = secs
        return obj
| 29.820175 | 123 | 0.564348 | 5,895 | 0.867039 | 0 | 0 | 136 | 0.020003 | 0 | 0 | 1,297 | 0.190763 |
50fcd7ae55a8d5d729cd12147281f7bebe4d3311 | 566 | py | Python | src/colusa/plugins/etr_infoq.py | huuhoa/symphony | f8a364649634b4d864771b2c8a3103b714b6b9e2 | [
"MIT"
] | 6 | 2020-08-29T04:14:15.000Z | 2020-09-18T10:53:59.000Z | src/colusa/plugins/etr_infoq.py | huuhoa/colusa | 07a0a60680c8085c5dca522e0237f7b5a5181dcb | [
"MIT"
] | 34 | 2021-09-07T15:17:38.000Z | 2022-03-25T15:16:40.000Z | src/colusa/plugins/etr_infoq.py | huuhoa/colusa | 07a0a60680c8085c5dca522e0237f7b5a5181dcb | [
"MIT"
] | 2 | 2020-08-29T04:21:35.000Z | 2020-09-13T17:36:06.000Z | from colusa.etr import Extractor, register_extractor
@register_extractor('//www.infoq.com')
class InfoQExtractor(Extractor):
    """Extractor for InfoQ article pages."""
    def _find_main_content(self):
        # InfoQ wraps the article body in this div.
        return self.bs.find('div', class_='article__content')
    def cleanup(self):
        # Strip InfoQ page chrome that is not part of the article text.
        for css_class in ('contentRatingWidget',
                          'widget article__fromTopic topics',
                          'nocontent'):
            self.remove_tag(self.main_content, 'div', attrs={'class': css_class})
        super(InfoQExtractor, self).cleanup()
50fef9dc7c37378c2b89f5d5abd92f9996a28c56 | 162 | py | Python | covidapp/urls.py | babbarutkarsh/CovidTracker | 6bb1bcdf5b0e11208e8b0494028082ed32ff4573 | [
"MIT"
] | 1 | 2021-04-23T05:11:07.000Z | 2021-04-23T05:11:07.000Z | covidapp/urls.py | babbarutkarsh/CovidTracker | 6bb1bcdf5b0e11208e8b0494028082ed32ff4573 | [
"MIT"
] | null | null | null | covidapp/urls.py | babbarutkarsh/CovidTracker | 6bb1bcdf5b0e11208e8b0494028082ed32ff4573 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path,include
from .views import helloworldview
urlpatterns = [
path('helloworld/',helloworldview)
]
| 20.25 | 38 | 0.783951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.080247 |
50ff2d5fa93361a61b58feca15f43ba6967b76f5 | 320 | py | Python | src/cart/migrations/0009_remove_cartitem_user.py | phisyche/rydhubsolutions | 50cd35c9f2f6530bcc19358beb6c91cda5287f4f | [
"MIT"
] | 5 | 2020-09-07T15:30:10.000Z | 2021-01-21T19:25:22.000Z | src/cart/migrations/0009_remove_cartitem_user.py | phisyche/rydhubsolutions | 50cd35c9f2f6530bcc19358beb6c91cda5287f4f | [
"MIT"
] | 21 | 2019-12-04T22:49:42.000Z | 2022-02-12T09:17:42.000Z | src/cart/migrations/0009_remove_cartitem_user.py | phisyche/rydhubsolutions | 50cd35c9f2f6530bcc19358beb6c91cda5287f4f | [
"MIT"
] | 4 | 2020-03-25T05:50:39.000Z | 2021-08-08T20:59:20.000Z | # Generated by Django 2.2.9 on 2020-01-29 16:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cart', '0008_cartitem_user'),
]
operations = [
migrations.RemoveField(
model_name='cartitem',
name='user',
),
]
| 17.777778 | 47 | 0.58125 | 235 | 0.734375 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.278125 |
50ff56459c08497afa6fce21e2286f5a45fd1a2b | 10,344 | py | Python | tests/test_pendant_aws.py | clintval/reticle | 9be96e270970573ca981aa95ba0db1a1f7f46e39 | [
"MIT"
] | 1 | 2020-07-04T17:38:36.000Z | 2020-07-04T17:38:36.000Z | tests/test_pendant_aws.py | clintval/pendant | 9be96e270970573ca981aa95ba0db1a1f7f46e39 | [
"MIT"
] | 8 | 2018-12-03T18:59:14.000Z | 2018-12-10T19:33:53.000Z | tests/test_pendant_aws.py | clintval/reticle | 9be96e270970573ca981aa95ba0db1a1f7f46e39 | [
"MIT"
] | null | null | null | import os
from datetime import datetime
import botocore
import boto3
import moto
import pytest
from hypothesis import example, given
from hypothesis.strategies import integers, datetimes
from pendant.aws.batch import BatchJob, JobDefinition
from pendant.aws.exception import BatchJobSubmissionError, S3ObjectNotFoundError
from pendant.aws.logs import AwsLogUtil, LogEvent
from pendant.aws.response import SubmitJobResponse
from pendant.aws.s3 import S3Uri
from pendant.aws.s3 import s3api_head_object, s3api_object_exists, s3_object_exists
from pendant.util import format_ISO8601
# True when running under continuous integration (CI sets CI=true); several
# tests below are marked xfail in that environment.
RUNNING_IN_CI = True if os.environ.get('CI') == 'true' else False
# Names used by the mocked S3 bucket/object fixtures below.
TEST_BUCKET_NAME = 'TEST_BUCKET'
TEST_KEY_NAME = 'TEST_KEY'
TEST_BODY = 'TEST_BODY'
TEST_JOB_NAME = 'TEST_JOB_NAME'
# Canned AWS Batch SubmitJob API response used to exercise SubmitJobResponse.
TEST_SUBMIT_JOB_RESPONSE_JSON = {
    'ResponseMetadata': {
        'RequestId': '3dd6b227-623f-4749-87cv-c3674d7asdf18',
        'HTTPStatusCode': 200,
        'HTTPHeaders': {
            'date': 'Fri, 30 Nov 2018 01:54:30 GMT',
            'content-type': 'application/json',
            'content-length': '95',
            'connection': 'keep-alive',
            'x-amzn-requestid': '3dd6b227-623f-4749-87cv-c3674d7asdf18',
            'x-amz-apigw-id': 'asdfsdaffsdfsdfsd',
            'x-amzn-trace-id': 'Root=1-asdfasdfasdfsdfsdcsdcsdcsdc;Sampled=0',
        },
        'RetryAttempts': 0,
    },
    'jobName': '2018-11-29T17-54-28_job-name',
    'jobId': '3dd6b227-623f-4749-87cv-c3674d7asdf18',
}
# Canned CloudWatch Logs event records used to exercise LogEvent.
TEST_LOG_EVENT_RESPONSES = [
    dict(
        timestamp=1_543_809_952_329,
        message="You have started up this demo job",
        ingestionTime=1_543_809_957_080,
    ),
    dict(
        timestamp=1_543_809_955_437,
        message="Configuration, we are loading from...",
        ingestionTime=1_543_809_957_080,
    ),
    dict(
        timestamp=1_543_809_955_437,
        message="Defaulting to approximate values",
        ingestionTime=1_543_809_957_080,
    ),
    dict(
        timestamp=1_543_809_955_437,
        message="Setting up logger, nothing to see here",
        ingestionTime=1_543_809_957_080,
    ),
]
@pytest.fixture
def test_bucket():
    """Yield a moto-mocked S3 bucket named TEST_BUCKET_NAME."""
    with moto.mock_s3():
        boto3.client('s3').create_bucket(Bucket=TEST_BUCKET_NAME)
        yield boto3.resource('s3').Bucket(TEST_BUCKET_NAME)
@pytest.fixture
def test_s3_uri():
    """S3Uri pointing at the test bucket/key (object may not exist yet)."""
    return S3Uri(f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}')
@pytest.fixture
def test_job_definition(test_s3_uri, test_bucket):
    """Minimal concrete JobDefinition that validates its S3 object exists."""
    class DemoJobDefinition(JobDefinition):
        def __init__(self, s3_uri: S3Uri):
            self.s3_uri = s3_uri
        @property
        def name(self) -> str:
            return TEST_JOB_NAME
        def validate(self) -> None:
            # Raise if the backing S3 object is missing.
            if not self.s3_uri.object_exists():
                raise S3ObjectNotFoundError(f'S3 object does not exist: {self.s3_uri}')
    return DemoJobDefinition(test_s3_uri)
@pytest.mark.xfail(RUNNING_IN_CI, reason='Running on TravisCI')
def test_aws_batch_batch_job(test_bucket, test_job_definition, test_s3_uri):
    """Exercise BatchJob construction, defaults, and pre-submission guards."""
    # Construction validates the definition; the S3 object is missing at first.
    with pytest.raises(S3ObjectNotFoundError):
        BatchJob(test_job_definition)
    test_bucket.put_object(Key=TEST_KEY_NAME, Body=TEST_BODY)
    job = BatchJob(test_job_definition)
    assert job.definition.s3_uri.object_exists()
    # A freshly constructed job carries no submission state.
    assert job.job_id is None
    assert job.queue is None
    assert job.container_overrides == {}
    assert not job.is_submitted()
    # All status/log accessors must refuse to run before submission.
    with pytest.raises(BatchJobSubmissionError):
        job.status()
    with pytest.raises(BatchJobSubmissionError):
        job.log_stream_name()
    with pytest.raises(BatchJobSubmissionError):
        job.log_stream_events()
    with pytest.raises(BatchJobSubmissionError):
        assert not job.is_running()
    with pytest.raises(BatchJobSubmissionError):
        assert not job.is_runnable()
    assert repr(job)
@pytest.mark.xfail(RUNNING_IN_CI, reason='Running on TravisCI')
def test_aws_batch_job_definition_validate(test_bucket, test_job_definition, test_s3_uri):
    """validate() raises until the backing S3 object exists, then returns None."""
    with pytest.raises(S3ObjectNotFoundError):
        test_job_definition.validate()
    test_bucket.put_object(Key=TEST_KEY_NAME, Body=TEST_BODY)
    assert test_job_definition.validate() is None
def test_aws_batch_job_definition_default_values(test_job_definition):
    """A new definition exposes its name, parameter tuple, and revision '0'."""
    assert test_job_definition.name == TEST_JOB_NAME
    assert test_job_definition.parameters == ('s3_uri',)
    assert test_job_definition.revision == '0'
    assert str(test_job_definition) == f'{TEST_JOB_NAME}:0'
    assert repr(test_job_definition)  # TODO: Add test here
def test_aws_batch_job_definition_at_revision(test_job_definition):
    """at_revision() updates the revision reflected in the string form."""
    assert test_job_definition.revision == '0'
    assert str(test_job_definition) == f'{TEST_JOB_NAME}:0'
    test_job_definition.at_revision('6')
    assert test_job_definition.revision == '6'
    assert str(test_job_definition) == f'{TEST_JOB_NAME}:6'
def test_aws_batch_job_definition_make_name(test_job_definition):
    """Job names are the ISO8601 timestamp joined to the definition name."""
    moment = datetime.now()
    formatted_date = format_ISO8601(moment)
    assert test_job_definition.make_job_name() == formatted_date + '_' + test_job_definition.name
def test_aws_batch_job_definition_to_dict(test_job_definition, test_s3_uri):
    """to_dict() maps each declared parameter name to its stringified value."""
    actual = test_job_definition.to_dict()
    expected = dict(s3_uri=str(test_s3_uri))
    assert actual == expected
@moto.mock_logs
@pytest.mark.xfail(
    RUNNING_IN_CI, raises=botocore.exceptions.NoRegionError, reason='Running on TravisCI'
)
def test_aws_logs_log_util():
    """AwsLogUtil can be constructed against the mocked CloudWatch Logs API."""
    AwsLogUtil()
def test_aws_logs_event_log():
    """LogEvent exposes the raw record's fields and a stable repr."""
    record = TEST_LOG_EVENT_RESPONSES[0]
    log = LogEvent(record)
    assert log.ingestion_time == record['ingestionTime']
    assert log.message == record['message']
    assert log.timestamp == record['timestamp']
    assert (
        repr(log)
        == 'LogEvent(timestamp=1543809952329, message=\'You have started up this demo job\', ingestion_time=1543809957080)'
    )
def test_aws_response_submit_job_response():
    """A well-formed SubmitJob response parses as OK with name/id populated."""
    response = SubmitJobResponse(TEST_SUBMIT_JOB_RESPONSE_JSON)
    assert response.http_code() == 200
    assert response.is_ok()
    assert response.job_name == '2018-11-29T17-54-28_job-name'
    assert response.job_id == '3dd6b227-623f-4749-87cv-c3674d7asdf18'
def test_aws_response_submit_job_empty_response():
    """An empty response defaults to HTTP 500 with no job name/id."""
    response = SubmitJobResponse({})
    assert response.http_code() == 500
    assert not response.is_ok()
    assert response.job_name is None
    assert response.job_id is None
@pytest.mark.xfail(RUNNING_IN_CI, reason='Running on TravisCI')
def test_aws_s3_s3uri_object_exists(test_bucket):
    """object_exists() flips to True once the object is uploaded."""
    assert not S3Uri(f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}').object_exists()
    test_bucket.put_object(Key=TEST_KEY_NAME, Body=TEST_BODY)
    assert S3Uri(f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}').object_exists()
def test_aws_s3_s3uri_bad_pattern():
    """Malformed URIs (leading space, wrong scheme, missing slash) are rejected."""
    with pytest.raises(AssertionError):
        S3Uri(f' s3://{TEST_BUCKET_NAME}/')
    with pytest.raises(AssertionError):
        S3Uri(f'h3://{TEST_BUCKET_NAME}/')
    with pytest.raises(AssertionError):
        S3Uri(f's3:/{TEST_BUCKET_NAME}/')
def test_aws_s3_s3uri_add_to_path():
    """Both `+` and add_suffix() append a key; non-str operands raise."""
    base = S3Uri(f's3://{TEST_BUCKET_NAME}/')
    assert f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}' == str(base + TEST_KEY_NAME)
    assert f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}' == str(base.add_suffix(TEST_KEY_NAME))
    with pytest.raises(TypeError):
        S3Uri(f's3://{TEST_BUCKET_NAME}') + 2
    with pytest.raises(TypeError):
        S3Uri(f's3://{TEST_BUCKET_NAME}').add_suffix(2)
def test_aws_s3_s3uri_fancy_division():
    """`/` and `//` join keys (with or without trailing slash); numbers raise."""
    assert f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}' == str(
        S3Uri(f's3://{TEST_BUCKET_NAME}/') / TEST_KEY_NAME
    )
    assert f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}' == str(
        S3Uri(f's3://{TEST_BUCKET_NAME}') / TEST_KEY_NAME
    )
    assert f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}' == str(
        S3Uri(f's3://{TEST_BUCKET_NAME}/') // TEST_KEY_NAME
    )
    assert f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}' == str(
        S3Uri(f's3://{TEST_BUCKET_NAME}') // TEST_KEY_NAME
    )
    with pytest.raises(TypeError):
        S3Uri(f's3://{TEST_BUCKET_NAME}') / 2
    with pytest.raises(TypeError):
        S3Uri(f's3://{TEST_BUCKET_NAME}') // 2
    with pytest.raises(TypeError):
        S3Uri(f's3://{TEST_BUCKET_NAME}') / 2.0
    with pytest.raises(TypeError):
        S3Uri(f's3://{TEST_BUCKET_NAME}') // 2.0
def test_aws_s3_s3uri_scheme():
    """The scheme property includes the trailing separator."""
    assert 's3://' == S3Uri(f's3://{TEST_BUCKET_NAME}/').scheme
def test_aws_s3_s3uri_bucket():
    """The bucket property is the bare bucket name."""
    assert TEST_BUCKET_NAME == S3Uri(f's3://{TEST_BUCKET_NAME}/').bucket
def test_aws_s3_s3uri_key():
    """The key property preserves delimiters/trailing slash; no key -> ''."""
    assert TEST_KEY_NAME == S3Uri(f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}').key
    assert f'{TEST_KEY_NAME}/' == S3Uri(f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}/').key
    assert '' == S3Uri(f's3://{TEST_BUCKET_NAME}').key
    assert '' == S3Uri(f's3://{TEST_BUCKET_NAME}/').key
    assert '' == S3Uri(f's3://').key
    key = 'mykey/with/many/delimiters'
    assert key == S3Uri(f's3://{TEST_BUCKET_NAME}/{key}').key
    key = 'mykey/with/many/delimiters/'
    assert key == S3Uri(f's3://{TEST_BUCKET_NAME}/{key}').key
def test_aws_s3_s3uri_str():
    """str() is the raw URI; repr() wraps it in the class constructor form."""
    base = S3Uri(f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}')
    assert f's3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}' == str(base)
    assert f'S3Uri(\'s3://{TEST_BUCKET_NAME}/{TEST_KEY_NAME}\')' == repr(base)
@pytest.mark.xfail(RUNNING_IN_CI, reason='Running on TravisCI')
def test_aws_s3_s3api_head_object(test_bucket):
    """Head on a missing object raises; after upload it reports the size."""
    with pytest.raises(RuntimeError):
        s3api_head_object(TEST_BUCKET_NAME, TEST_KEY_NAME)
    test_bucket.put_object(Key=TEST_KEY_NAME, Body=TEST_BODY)
    metadata = s3api_head_object(TEST_BUCKET_NAME, TEST_KEY_NAME)
    # 9 == len(TEST_BODY)
    assert metadata['ContentLength'] == 9
@pytest.mark.xfail(RUNNING_IN_CI, reason='Running on TravisCI')
def test_aws_s3_s3api_object_exists(test_bucket):
    """s3api_object_exists() flips to True once the object is uploaded."""
    assert not s3api_object_exists(TEST_BUCKET_NAME, TEST_KEY_NAME)
    test_bucket.put_object(Key=TEST_KEY_NAME, Body=TEST_BODY)
    assert s3api_object_exists(TEST_BUCKET_NAME, TEST_KEY_NAME)
@pytest.mark.xfail(RUNNING_IN_CI, reason='Running on TravisCI')
def test_aws_s3_s3_object_exists(test_bucket):
    """s3_object_exists() flips to True once the object is uploaded."""
    assert not s3_object_exists(TEST_BUCKET_NAME, TEST_KEY_NAME)
    test_bucket.put_object(Key=TEST_KEY_NAME, Body=TEST_BODY)
    assert s3_object_exists(TEST_BUCKET_NAME, TEST_KEY_NAME)
0f95f5577340fe7d3a0b009ba5b2db437206b317 | 5,976 | py | Python | plot_progs/barsum_plot.py | bradlyke/dr16q | 5645491bc05806c5b956e76c5bcec939722c065f | [
"BSD-3-Clause"
] | 2 | 2020-09-21T23:12:09.000Z | 2020-09-23T03:54:06.000Z | plot_progs/barsum_plot.py | bradlyke/dr16q | 5645491bc05806c5b956e76c5bcec939722c065f | [
"BSD-3-Clause"
] | null | null | null | plot_progs/barsum_plot.py | bradlyke/dr16q | 5645491bc05806c5b956e76c5bcec939722c065f | [
"BSD-3-Clause"
] | null | null | null | """
This creates the plot that is used as Fig. 2 in Lyke et al. 2020.
Dependencies
----------
github repository : https://github.com/bradlyke/utilities
Note: To get these plots, LaTeX must be installed and
matplotlib must be able to compile LaTeX commands
Input file
----------
The most recent version of the DR16Q quasar-only catalog.
Parameters
----------
input_file : the file name for the DR16Q FITS file in the
../data folder. No path needed.
output_plot_name : the name of the plot written out
plot_fontsize : The fontsize for everything in the plot
For a 5x4 plot in twocolumn, 11 works best.
save_check : 0 - don't save, just plot
1 - don't plot, just save into ../plots/
Output
----------
If selected, an EPS file of the plot.
"""
from astropy.io import fits
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
matplotlib.rc('text',usetex=True)
import progressBar_fancy as pbf
import tmark
import sys
#This function creates a table of MJDs and the cumulative number of quasars
#observed as of that date. This overcomes the errors that might occur
#when some dates have 0 observations, or aren't represented in DR16Q.
def mjd_maker(infile):
    """Build and save a table of cumulative unique quasar counts per MJD.

    Args:
        infile: Path to the DR16Q quasar-only FITS catalog.

    Returns:
        Path of the FITS table written ('../data/unique_mjd.fits').
    """
    dr = fits.open(infile)[1].data
    tmark.tm('Making MJD Arrays')
    mjd_min = np.amin(dr['MJD']) #Starting MJD
    mjd_mpo = np.amax(dr['MJD'])+1 #Non-inclusive maximum MJD
    mjd_arr = np.arange(mjd_min,mjd_mpo,1)
    num_mjd = len(mjd_arr)
    #This will hold the table of date vs. the cumulative number by that date.
    data = np.zeros(num_mjd,dtype=[('MJD','int32'),('NUM_UNIQUE','int64')])
    data['MJD'] = mjd_arr
    #Earliest MJD each quasar in DR16Q was observed on.
    #Bug fixes: the original referenced an undefined name 'sd' (NameError at
    #runtime) and aliased the catalog's MJD column, so the update loop below
    #would have mutated dr['MJD'] in place. Copy the column instead.
    first_mjd = np.array(dr['MJD'], dtype='int64')
    #MJD can be stored either in the MJD column OR in the MJD_DUPLICATE column
    #as a more recent observation may have been selected as the primary for
    #a given quasar.
    wdupe = np.where(dr['NSPEC']>0)[0] #Find quasars with more than one obs
    tmark.tm('Finding Earlier MJDs')
    #For all of the multiply-observed quasars, overwrite the primary obs MJD
    #with the earliest.
    for i in range(len(wdupe)):
        mjd_arr_temp = dr['MJD_DUPLICATE'][wdupe[i]]
        wn1 = np.where(mjd_arr_temp>-1)[0] #-1 marks unused duplicate slots
        mjd_min_temp = np.amin(mjd_arr_temp[wn1])
        if mjd_min_temp < first_mjd[wdupe[i]]:
            first_mjd[wdupe[i]] = mjd_min_temp
        pbf.pbar(i,len(wdupe))
    #Count how many quasars were first observed on or before each MJD.
    tmark.tm('Making Data Array')
    for i in range(num_mjd):
        w = np.where(first_mjd<=data['MJD'][i])[0]
        data['NUM_UNIQUE'][i] = len(w) #Write the count to the output table
        pbf.pbar(i,num_mjd)
    #Write the output table of cumulative number of quasars per MJD.
    data_hdu = fits.BinTableHDU.from_columns(data,name='TABLE')
    outname = '../data/unique_mjd.fits'
    data_hdu.writeto(outname)
    return outname
#This function makes the plot. The user can choose to save it as an eps
#or have it plot on screen.
def sum_plot(infile,fname,fntsize,write_check):
    """Plot cumulative quasar counts vs MJD, shaded by SDSS campaign.

    Args:
        infile: FITS table produced by mjd_maker (MJD, NUM_UNIQUE columns).
        fname: Output path for the EPS file (used when write_check==1).
        fntsize: Font size applied to all plot text.
        write_check: 1 saves to fname; anything else shows interactively.
    """
    data = fits.open(infile)[1].data
    #Set up the plot parameters to be pretty
    matplotlib.rc('font',size=fntsize)
    matplotlib.rcParams['text.latex.preamble'] = [r'\boldmath']
    #Set up the X and Y axis ticks.
    max_mjd = np.amax(data['MJD'])
    mjd_ticks = np.array([52000,53000,54000,55000,56000,57000,58000])
    sum_ticks = np.array([0,1,2,3,4,5,6,7])
    min_mjd = np.amin(data['MJD'])
    ydata = data['NUM_UNIQUE']*1e-5  # counts in units of 10^5 quasars
    max_num = np.amax(ydata)
    #Color code the plot. Using standard colors means other plots that
    #use the same groups can be standardized across the paper.
    sd12color = 'blue'
    sd3color='pink'
    sd4color='red'
    #Making the plot
    fig,ax = plt.subplots(figsize=(5,4))
    ax.plot(data['MJD'],ydata,color='black',linewidth=0.8) #Plot the top line
    #These fill in the area below the plot line for each SDSS observation campaign
    #(campaign boundaries at MJD 54663 and 56898).
    ax.fill_between(data['MJD'],0,ydata,where=data['MJD']<54663,facecolor=sd12color)
    ax.fill_between(data['MJD'],0,ydata,where=data['MJD']>=54663,facecolor=sd3color)
    ax.fill_between(data['MJD'],0,ydata,where=data['MJD']>=56898,facecolor=sd4color)
    #These set up the legend color blocks for labels later.
    p12 = ax.fill(np.NaN,np.NaN,sd12color)
    p3 = ax.fill(np.NaN,np.NaN,sd3color)
    p4 = ax.fill(np.NaN,np.NaN,sd4color)
    #Axes labels, ticks, and tick limits.
    ax.set_xlabel(r'\textbf{Modified Julian Date}')
    ax.set_ylabel(r'\textbf{Cumulative number of quasars (}$10^{5}$\textbf{)}')
    ax.set_xlim([min_mjd,max_mjd])
    ax.set_xticks(mjd_ticks)
    ax.set_ylim([0,max_num])
    ax.set_yticks(sum_ticks)
    ax.tick_params(axis='both',direction='in')
    ax.tick_params(top=True,right=True)
    #This sets up the a legend box with no border or background color with the
    #fill colors attached to the proper labels.
    ax.legend([p12[0],p3[0],p4[0]],[r'\textbf{SDSS-I/II}',r'\textbf{SDSS-III}',r'\textbf{SDSS-IV}'],loc='upper left',facecolor='white',edgecolor='white')
    #Write or view?
    if write_check==1:
        fig.savefig(fname,bbox_inches='tight',pad_inches=0.03,format='eps')
        plt.close()
    else:
        plt.tight_layout()
        plt.show()
#Call from the command line with:
# python barsum_plot.py <DR16Q file name> <plot name> <font size> <save flag>
# Example:
# python barsum_plot.py DR16Q_v3.fits barsum.eps 11 1
if __name__=='__main__':
input_file = '../data/{}'.format(sys.argv[1])
output_plot_name = '../plots/{}'.format(sys.argv[2])
plot_fontsize = int(sys.argv[3])
save_check = int(sys.argv[4])
unique_mjd_filename = mjd_maker(input_file)
sum_plot(unique_mjd_filename,output_plot_name,plot_fontsize,save_check)
| 37.35 | 153 | 0.684572 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,072 | 0.514056 |
0f9934026df2e6f4b91b0975aecdc12c43dbf68d | 6,417 | py | Python | health/models.py | atiro/obesitydata | b0cc0759db03ee07c63b3b254bf04f59aeef965d | [
"BSD-3-Clause"
] | null | null | null | health/models.py | atiro/obesitydata | b0cc0759db03ee07c63b3b254bf04f59aeef965d | [
"BSD-3-Clause"
] | 15 | 2015-07-27T21:43:25.000Z | 2015-07-29T21:34:48.000Z | health/models.py | atiro/obesitydata | b0cc0759db03ee07c63b3b254bf04f59aeef965d | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
class Health(models.Model):
MALE = 'M'
FEMALE = 'F'
ALL = 'A'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
(ALL, 'All')
)
class Meta:
abstract = True
class HealthActivity(models.Model):
MALE = 'M'
FEMALE = 'F'
ALL = 'A'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
(ALL, 'All')
)
AGE_16_TO_24 = '16-24'
AGE_25_TO_34 = '25-34'
AGE_35_TO_44 = '35-44'
AGE_45_TO_54 = '45-54'
AGE_55_TO_64 = '55-64'
AGE_65_TO_74 = '65-74'
AGE_75_PLUS = '75+'
AGE_ALL = 'ALL'
AGE_CHOICES = (
(AGE_16_TO_24, '16-24'),
(AGE_25_TO_34, '25-34'),
(AGE_35_TO_44, '35-44'),
(AGE_45_TO_54, '45-54'),
(AGE_55_TO_64, '55-64'),
(AGE_65_TO_74, '65-74'),
(AGE_75_PLUS, '75+'),
(AGE_ALL, 'All Ages'),
)
ACTIVITY_MEETS = 'Meets'
ACTIVITY_SOME = 'Some'
ACTIVITY_LOW = 'Low'
ACTIVITY_BASES = 'Bases'
ACTIVITY_CHOICES = (
(ACTIVITY_MEETS, 'Meets Activity'),
(ACTIVITY_SOME, 'Some Activity'),
(ACTIVITY_LOW, 'Low Activity'),
(ACTIVITY_BASES, 'Bases'),
)
year = models.IntegerField()
gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE)
age = models.CharField(max_length=8, choices=AGE_CHOICES, default=AGE_16_TO_24)
activity = models.CharField(max_length=5, choices=ACTIVITY_CHOICES, default=ACTIVITY_MEETS)
percentage = models.FloatField(default=0.0)
class HealthWeight(models.Model):
    """Yearly mean weight (with standard error and base count) by gender."""
    MALE = 'M'
    FEMALE = 'F'
    ALL = 'A'
    GENDER_CHOICES = (
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        (ALL, 'All')
    )
    year = models.IntegerField()
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE)
    weight_mean = models.FloatField()
    weight_stderr = models.FloatField()
    base = models.IntegerField()  # sample size behind the mean
class HealthBMI(models.Model):
    """Yearly BMI-category percentages by gender and age band."""
    MALE = 'M'
    FEMALE = 'F'
    ALL = 'A'
    GENDER_CHOICES = (
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        (ALL, 'All')
    )
    AGE_16_TO_24 = '16-24'
    AGE_25_TO_34 = '25-34'
    AGE_35_TO_44 = '35-44'
    AGE_45_TO_54 = '45-54'
    AGE_55_TO_64 = '55-64'
    AGE_65_TO_74 = '65-74'
    AGE_75_PLUS = '75+'
    AGE_ALL = 'ALL'
    AGE_CHOICES = (
        (AGE_16_TO_24, '16-24'),
        (AGE_25_TO_34, '25-34'),
        (AGE_35_TO_44, '35-44'),
        (AGE_45_TO_54, '45-54'),
        (AGE_55_TO_64, '55-64'),
        (AGE_65_TO_74, '65-74'),
        (AGE_75_PLUS, '75+'),
        (AGE_ALL, 'All Ages'),
    )
    # BMI categories plus statistical rows (mean, std error, base, all).
    BMI_UNDERWEIGHT = 'U'
    BMI_NORMAL = 'N'
    BMI_OVERWEIGHT = 'O'
    BMI_OBESE = 'B'
    BMI_MORBIDLY_OBESE = 'M'
    BMI_OVERWEIGHT_OBESE = 'W'
    BMI_MEAN = 'E'
    BMI_STDERR = 'S'
    BMI_BASE = 'A'
    BMI_ALL = 'L'
    BMI_CHOICES = (
        (BMI_UNDERWEIGHT, 'Underweight'),
        (BMI_NORMAL, 'Normal'),
        (BMI_OVERWEIGHT, 'Overweight'),
        (BMI_OBESE, 'Obese'),
        (BMI_MORBIDLY_OBESE, 'Morbidly Obese'),
        (BMI_OVERWEIGHT_OBESE, 'Overweight including obese'),
        (BMI_MEAN, 'Mean'),
        (BMI_STDERR, 'Std error of the mean'),
        (BMI_BASE, 'Base'),
        (BMI_ALL, 'All'),
    )
    year = models.IntegerField()
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE)
    age = models.CharField(max_length=8, choices=AGE_CHOICES, default=AGE_16_TO_24)
    bmi = models.CharField(max_length=1, choices=BMI_CHOICES, default=BMI_NORMAL)
    percentage = models.FloatField(default=0.0)
# Create your models here.
class HealthFruitVeg(models.Model):
    """Yearly fruit & veg portion percentages by gender and age band."""
    MALE = 'M'
    FEMALE = 'F'
    ALL = 'A'
    GENDER_CHOICES = (
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        (ALL, 'All')
    )
    AGE_16_TO_24 = '16-24'
    AGE_25_TO_34 = '25-34'
    AGE_35_TO_44 = '35-44'
    AGE_45_TO_54 = '45-54'
    AGE_55_TO_64 = '55-64'
    AGE_65_TO_74 = '65-74'
    AGE_75_PLUS = '75+'
    AGE_ALL = 'ALL'
    AGE_CHOICES = (
        (AGE_16_TO_24, '16-24'),
        (AGE_25_TO_34, '25-34'),
        (AGE_35_TO_44, '35-44'),
        (AGE_45_TO_54, '45-54'),
        (AGE_55_TO_64, '55-64'),
        (AGE_65_TO_74, '65-74'),
        (AGE_75_PLUS, '75+'),
        (AGE_ALL, 'All Ages'),
    )
    # Portion-count categories plus statistical rows (mean/stderr/median/base).
    FRUITVEG_NONE = 'N'
    FRUITVEG_LESS_1 = '1'
    FRUITVEG_LESS_2 = '2'
    FRUITVEG_LESS_3 = '3'
    FRUITVEG_LESS_4 = '4'
    FRUITVEG_LESS_5 = '5'
    FRUITVEG_MORE_5 = '6'
    FRUITVEG_MEAN = 'M'
    FRUITVEG_STDERR = 'S'
    FRUITVEG_MEDIAN = 'D'
    FRUITVEG_BASE = 'B'
    FRUITVEG_CHOICES = (
        (FRUITVEG_NONE, 'No Fruit & Veg'),
        (FRUITVEG_LESS_1, 'Under 1 portion'),
        (FRUITVEG_LESS_2, '1-2 Portions'),
        (FRUITVEG_LESS_3, '2-3 Portions'),
        (FRUITVEG_LESS_4, '3-4 Portions'),
        (FRUITVEG_LESS_5, '4-5 Portions'),
        (FRUITVEG_MORE_5, '5+ Portions'),
        (FRUITVEG_MEAN, 'Mean Portions'),
        (FRUITVEG_STDERR, 'Standard error of the mean'),
        (FRUITVEG_MEDIAN, 'Median Portions'),
        # Fixed: this label was a copy-paste duplicate of the std-error label;
        # 'Bases' matches the base/sample-size label used by sibling models.
        (FRUITVEG_BASE, 'Bases')
    )
    year = models.IntegerField()
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE)
    age = models.CharField(max_length=8, choices=AGE_CHOICES, default=AGE_16_TO_24)
    fruitveg = models.CharField(max_length=1, choices=FRUITVEG_CHOICES, default=FRUITVEG_NONE)
    percentage = models.FloatField(default=0.0)
# Create your models here.
class HealthHealth(models.Model):
    """Yearly self-reported health-status percentages by gender."""
    MALE = 'M'
    FEMALE = 'F'
    ALL = 'A'
    GENDER_CHOICES = (
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        (ALL, 'All')
    )
    # Health-status categories ('Bases' holds the sample-size rows).
    HEALTH_VG = 'VG'
    HEALTH_VB = 'VB'
    HEALTH_ILL = 'ILL'
    HEALTH_SICK = 'SICK'
    HEALTH_ALL = 'ALL'
    HEALTH_BASE = 'BASE'
    HEALTH_CHOICES = (
        (HEALTH_VG, 'Very good/good health'),
        (HEALTH_VB, 'Very bad/bad health'),
        (HEALTH_ILL, 'At least one longstanding illness'),
        (HEALTH_SICK, 'Acute sickness'),
        (HEALTH_ALL, 'All'),
        (HEALTH_BASE, 'Bases'),
    )
    year = models.IntegerField()
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE)
    health = models.CharField(max_length=4, choices=HEALTH_CHOICES, default=HEALTH_VG)
    percentage = models.FloatField(default=0.0)
# Create your models here.
| 25.164706 | 95 | 0.58532 | 6,286 | 0.979585 | 0 | 0 | 0 | 0 | 0 | 0 | 1,164 | 0.181393 |
0f9a233de5e4a797f10da680378306ea2d873d20 | 5,112 | py | Python | src/rcwa_utils.py | scolburn54/rcwa_tf | 50c7f6e477b2784c32586d6eddb13ef9e4adec02 | [
"BSD-3-Clause"
] | 9 | 2021-04-06T11:58:14.000Z | 2021-12-28T18:37:28.000Z | src/rcwa_utils.py | scolburn54/rcwa_tf | 50c7f6e477b2784c32586d6eddb13ef9e4adec02 | [
"BSD-3-Clause"
] | null | null | null | src/rcwa_utils.py | scolburn54/rcwa_tf | 50c7f6e477b2784c32586d6eddb13ef9e4adec02 | [
"BSD-3-Clause"
] | 2 | 2021-04-08T13:38:23.000Z | 2021-05-03T17:34:27.000Z | # Copyright (c) 2020, Shane Colburn, University of Washington
# This file is part of rcwa_tf
# Written by Shane Colburn (Email: scolbur2@uw.edu)
import tensorflow as tf
import numpy as np
def convmat(A, P, Q):
    '''
    This function computes a convolution matrix for a real space matrix `A` that
    represents either a relative permittivity or permeability distribution for a
    set of pixels, layers, and batch.
    Args:
        A: A `tf.Tensor` of dtype `complex` and shape `(batchSize, pixelsX,
        pixelsY, Nlayers, Nx, Ny)` specifying real space values on a Cartesian
        grid.

        P: A positive and odd `int` specifying the number of spatial harmonics
        along `T1`.

        Q: A positive and odd `int` specifying the number of spatial harmonics
        along `T2`.
    Returns:
        A `tf.Tensor` of dtype `complex` and shape `(batchSize, pixelsX,
        pixelsY, Nlayers, P * Q, P * Q)` representing a stack of convolution
        matrices based on `A`.
    '''

    # Determine the shape of A.
    batchSize, pixelsX, pixelsY, Nlayers, Nx, Ny = A.shape

    # Compute indices of spatial harmonics.
    NH = P * Q # total number of harmonics.
    p_max = np.floor(P / 2.0)
    # Bug fix: the maximum harmonic index along T2 must come from Q, not P;
    # the original used P here, which was wrong whenever P != Q.
    q_max = np.floor(Q / 2.0)

    # Indices along T1 and T2.
    p = np.linspace(-p_max, p_max, P)
    q = np.linspace(-q_max, q_max, Q)

    # Compute array indices of the center harmonic.
    p0 = int(np.floor(Nx / 2))
    q0 = int(np.floor(Ny / 2))

    # Fourier transform the real space distributions.
    A = tf.signal.fftshift(tf.signal.fft2d(A), axes = (4, 5)) / (Nx * Ny)

    # Build the matrix one Fourier coefficient at a time, concatenating along
    # the last axis and reshaping into the (P*Q, P*Q) convolution matrices.
    firstCoeff = True
    for qrow in range(Q):
        for prow in range(P):
            for qcol in range(Q):
                for pcol in range(P):
                    pfft = int(p[prow] - p[pcol])
                    qfft = int(q[qrow] - q[qcol])

                    # Sequentially concatenate Fourier coefficients.
                    value = A[:, :, :, :, p0 + pfft, q0 + qfft]
                    value = value[:, :, :, :, tf.newaxis, tf.newaxis]
                    if firstCoeff:
                        firstCoeff = False
                        C = value
                    else:
                        C = tf.concat([C, value], axis = 5)

    # Reshape the coefficients tensor into a stack of convolution matrices.
    convMatrixShape = (batchSize, pixelsX, pixelsY, Nlayers, P * Q, P * Q)
    matrixStack = tf.reshape(C, shape = convMatrixShape)

    return matrixStack
def redheffer_star_product(SA, SB):
    '''
    Combine the scattering matrices of two systems with the Redheffer star
    product.
    Args:
        SA: A `dict` with keys ('S11', 'S12', 'S21', 'S22'), each mapping to
        a `tf.Tensor` of shape `(batchSize, pixelsX, pixelsY, 2*NH, 2*NH)`,
        holding the S-parameters of the first system (NH is the total number
        of spatial harmonics).

        SB: Same structure as `SA`, holding the S-parameters of the second
        system.
    Returns:
        A `dict` with the same keys and tensor shapes holding the
        S-parameters of the combined system.
    '''
    # Identity matrix broadcast across the batch and pixel dimensions.
    batchSize, pixelsX, pixelsY, dim, _ = SA['S11'].shape
    eye = tf.eye(num_rows = dim, dtype = tf.complex64)
    eye = tf.tile(eye[tf.newaxis, tf.newaxis, tf.newaxis, :, :],
                  multiples = (batchSize, pixelsX, pixelsY, 1, 1))

    # Interaction inverses shared by the block formulas:
    # (I - SB11 @ SA22)^-1 and (I - SA22 @ SB11)^-1.
    inv_A = tf.linalg.inv(eye - tf.linalg.matmul(SB['S11'], SA['S22']))
    inv_B = tf.linalg.inv(eye - tf.linalg.matmul(SA['S22'], SB['S11']))

    s11 = SA['S11'] + tf.linalg.matmul(
        tf.linalg.matmul(SA['S12'], tf.linalg.matmul(inv_A, SB['S11'])),
        SA['S21'])
    s12 = tf.linalg.matmul(SA['S12'], tf.linalg.matmul(inv_A, SB['S12']))
    s21 = tf.linalg.matmul(SB['S21'], tf.linalg.matmul(inv_B, SA['S21']))
    s22 = SB['S22'] + tf.linalg.matmul(
        tf.linalg.matmul(SB['S21'], tf.linalg.matmul(inv_B, SA['S22'])),
        SB['S12'])

    return {'S11': s11, 'S12': s12, 'S21': s21, 'S22': s22}
| 38.727273 | 80 | 0.586072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,805 | 0.548709 |
0f9b04b14d4ee24b9d02d994d9ee17da1f43cabe | 925 | py | Python | src/ui/setup_custom_dialog.py | AndreWohnsland/CocktailBerry | 60b2dfc3a4a6f3ef9ab2d946a97d14829e575a9d | [
"MIT"
] | 1 | 2022-03-06T23:50:34.000Z | 2022-03-06T23:50:34.000Z | src/ui/setup_custom_dialog.py | AndreWohnsland/CocktailBerry | 60b2dfc3a4a6f3ef9ab2d946a97d14829e575a9d | [
"MIT"
] | 4 | 2022-03-03T11:16:17.000Z | 2022-03-20T15:53:37.000Z | src/ui/setup_custom_dialog.py | AndreWohnsland/CocktailBerry | 60b2dfc3a4a6f3ef9ab2d946a97d14829e575a9d | [
"MIT"
] | null | null | null | from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QDialog
from src.ui_elements.customdialog import Ui_CustomDialog
from src.display_controller import DP_CONTROLLER
class CustomDialog(QDialog, Ui_CustomDialog):
    """Frameless, always-on-top information dialog with a single close button.

    Shows the given message text with the given window title/icon; uses the
    generated Ui_CustomDialog layout and the app-wide DP_CONTROLLER styling.
    """

    def __init__(self, message: str, title: str = "Information", icon_path: str = None):
        """Build and configure the dialog.

        Args:
            message: Text displayed in the dialog's information label.
            title: Window title for the dialog.
            icon_path: Path to the window icon; may be None (no icon set).
        """
        super().__init__()
        self.setupUi(self)
        # Apply the application-wide stylesheet to this dialog.
        DP_CONTROLLER.inject_stylesheet(self)
        # Frameless window that stays above all other windows.
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
        # Release the widget's resources as soon as it is closed.
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.informationLabel.setText(message)
        self.setWindowTitle(title)
        self.closeButton.clicked.connect(self.close_clicked)
        self.setWindowIcon(QIcon(icon_path))
        # Anchor the dialog to the top-left corner of the screen.
        self.move(0, 0)
        DP_CONTROLLER.set_display_settings(self)

    def close_clicked(self):
        # Slot connected to the close button; simply closes the dialog.
        self.close()
| 34.259259 | 88 | 0.724324 | 721 | 0.779459 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.061622 |
0f9b0b94a9c2c162c1af702148c3ee04656d8ab2 | 1,542 | py | Python | archived/twitter_filtering_deprecated_code/tw_keyword_search.py | mcpeixoto/Sentrade | 55f65508d6b565b99840c9ce5d757185f5027164 | [
"MIT"
] | 4 | 2020-09-28T18:40:47.000Z | 2021-12-01T08:29:29.000Z | archived/twitter_filtering_deprecated_code/tw_keyword_search.py | ZiyouZhang/Sentrade | c88d20a858de6d05649f99230ca2b44f4c76cd3c | [
"MIT"
] | null | null | null | archived/twitter_filtering_deprecated_code/tw_keyword_search.py | ZiyouZhang/Sentrade | c88d20a858de6d05649f99230ca2b44f4c76cd3c | [
"MIT"
] | 2 | 2021-08-10T22:32:52.000Z | 2022-02-03T21:28:47.000Z | __author__ = "Fengming Liu"
__status__ = "prototype"
import tarfile
import json
import re
import subprocess
import os
import nltk
import time
# Date to search, assembled below as "YYYY-MM-DD" (e.g. "2019-09-16").
year = 2019
month = 9
day = 16
# Company keywords whose tweets should be collected.
keyword_list = ["netflix", "amazon", "apple", "microsoft", "google", "tesla", "facebook"]
date = "{0}-{1}-{2}".format(year, str(month).zfill(2), str(day).zfill(2))
# Start a fresh timing log for this run (delete any previous one first).
subprocess.run("rm ./keyword_search_time.log", shell=True)
time_log = open("./keyword_search_time.log", 'a')
time_log.write("{0:8s} {1}\n".format("keyword", "searching time(s)"))
for keyword in keyword_list:
    start_tick = time.time()
    # subprocess.run("rm " + "./subject_tw/raw/{0}_{1}.json".format(keyword, date), shell=True)
    # Output file for tweets matching this keyword (append mode).
    tw_text_file_keyword = open("./subject_tw/raw/{0}_{1}.json".format(keyword, date), 'a')
    # NOTE(review): the actual search below is commented out, so this run only
    # creates empty output files and logs near-zero timings per keyword.
    # for hour in range(6, 24):
    #     tw_folder = "./{0}/{1}/".format(str(day).zfill(2), str(hour).zfill(2))
    #     for filename in os.listdir(tw_folder):
    #         # load the json file into the memory
    #         with open(tw_folder + filename, 'r') as f:
    #             data = [json.loads(line) for line in f]
    #         # write the selected items into a new json file
    #         for item in data:
    #             keys = item.keys()
    #             if "text" in keys and "lang" in keys and item["lang"] == "en":
    #                 if re.search(re.compile(keyword), item["text"].lower()): # search based on the lower case
    #                     json.dump(item, tw_text_file_keyword)
    #                     tw_text_file_keyword.write('\n')
    tw_text_file_keyword.close()
    end_tick = time.time()
    # Record how long this keyword's pass took.
    time_log.write("{0:8s} {1:.3f}\n".format(keyword, end_tick - start_tick))
time_log.close() | 34.266667 | 96 | 0.666018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 954 | 0.618677 |
0f9b117a6372cf0f258615a3e1de5ab261a64bf4 | 2,480 | py | Python | tests/unit/callbacks/test_sqlalchemy.py | dunnkers/fseval | 49a11a63e09e65b1f14389b6ba3a9ae3aeae086d | [
"MIT"
] | 5 | 2020-07-08T11:58:46.000Z | 2022-01-26T13:58:00.000Z | tests/unit/callbacks/test_sqlalchemy.py | dunnkers/fseval | 49a11a63e09e65b1f14389b6ba3a9ae3aeae086d | [
"MIT"
] | 63 | 2021-05-09T06:18:24.000Z | 2022-03-27T18:05:58.000Z | tests/unit/callbacks/test_sqlalchemy.py | dunnkers/fseval | 49a11a63e09e65b1f14389b6ba3a9ae3aeae086d | [
"MIT"
] | 1 | 2022-02-11T03:24:14.000Z | 2022-02-11T03:24:14.000Z | import numpy as np
import pandas as pd
import pytest
from fseval.callbacks.sql_alchemy import SQLAlchemyCallback
from fseval.types import Task
from omegaconf import DictConfig, OmegaConf
def test_init_no_params():
    """Initialization should fail when no `engine` param was supplied,
    or when the engine config has no `url` key."""

    # no `engine`
    with pytest.raises(AssertionError):
        SQLAlchemyCallback()

    # no `engine.url`
    with pytest.raises(AssertionError):
        SQLAlchemyCallback(engine={})
@pytest.fixture
def config() -> DictConfig:
    """Minimal pipeline config with dataset/ranker/validator sections."""
    config: DictConfig = OmegaConf.create(
        {
            "dataset": {
                "name": "some_dataset",
                "n": 10000,
                "p": 5,
                "task": Task.classification,
                "group": "some_group",
                "domain": "some_domain",
            },
            "ranker": {
                "name": "some_ranker",
            },
            "validator": {"name": "some_validator"},
        }
    )

    return config
@pytest.fixture
def sql_alch() -> SQLAlchemyCallback:
    """Callback wired to an in-memory SQLite engine ("sqlite://" URL)."""
    sql_alch = SQLAlchemyCallback(engine={"url": "sqlite://"})

    return sql_alch
def test_on_begin(sql_alch: SQLAlchemyCallback, config: DictConfig):
    """Test whether `on_begin` pipeline callback correctly stores experiment information
    in the `experiments` table."""

    # pretend `starting` pipeline
    sql_alch.on_begin(config)

    # retrieve table using Pandas
    df: pd.DataFrame = pd.read_sql("experiments", con=sql_alch.engine, index_col="id")

    # assert columns: nested config keys appear flattened with '/' separators
    assert "dataset" in df.columns
    assert "dataset/n" in df.columns
    assert "dataset/p" in df.columns

    # assert types: numeric config values stored as scalars, creation date as datetime
    assert np.isscalar(df["dataset/n"][0])
    assert np.isscalar(df["dataset/p"][0])
    assert "datetime" in str(df.dtypes["date_created"])
def test_on_table(sql_alch: SQLAlchemyCallback, config: DictConfig):
    """Tests whether the callback successfully stores data in a table when calling the
    `on_table` callback."""

    # store dataframe in table
    df_to_store = pd.DataFrame([{"some_metric": 343}])
    name = "some_table"
    sql_alch.on_begin(config)  # necessary to set `self.id`
    sql_alch.on_table(df_to_store, name)

    # retrieve using Pandas
    df: pd.DataFrame = pd.read_sql("some_table", con=sql_alch.engine, index_col="id")

    # assert table columns and record types: exactly one row with our metric
    assert "some_metric" in df.columns
    assert np.isscalar(df["some_metric"][0])
    assert len(df) == 1
| 28.837209 | 88 | 0.644758 | 0 | 0 | 0 | 0 | 655 | 0.264113 | 0 | 0 | 850 | 0.342742 |
0f9b32f06196acf472b687c9df115a7c46e39e8e | 8,170 | py | Python | pytesla/vehicle.py | jstenback/pytesla | 25707fd59bc3ba67f6db8d2a43fbaa7cb238f4e7 | [
"BSD-3-Clause"
] | null | null | null | pytesla/vehicle.py | jstenback/pytesla | 25707fd59bc3ba67f6db8d2a43fbaa7cb238f4e7 | [
"BSD-3-Clause"
] | null | null | null | pytesla/vehicle.py | jstenback/pytesla | 25707fd59bc3ba67f6db8d2a43fbaa7cb238f4e7 | [
"BSD-3-Clause"
] | 3 | 2016-12-06T14:04:34.000Z | 2020-01-10T22:03:03.000Z | from . import stream
class CommandError(Exception):
    """Raised when a Tesla Model S vehicle command reports failure."""
class Vehicle:
    """
    One Tesla vehicle on an owner account.

    Wraps the per-vehicle REST endpoints: state queries (``data_request``),
    commands (``command``), ``wake_up`` and the telemetry stream.  ``conn``
    performs the actual HTTP requests, ``payload`` is this vehicle's entry
    from the account's vehicle list, and ``log`` receives one line per
    issued command.
    """

    def __init__(self, vin, conn, payload, log):
        # The cached payload must belong to the VIN we were constructed for.
        assert payload['vin'] == vin

        self._conn = conn
        self._data = payload
        self._log = log

    def __repr__(self):
        return "<Vehicle {}>".format(self.vin)

    # Helpers

    @property
    def vin(self):
        return self._data['vin']

    @property
    def id(self):
        # API id used in request URLs (distinct from vehicle_id).
        return self._data['id']

    @property
    def vehicle_id(self):
        return self._data['vehicle_id']

    @property
    def state(self):
        return self._data['state']

    @property
    def email(self):
        return self._conn._email

    @property
    def auth_token(self):
        return self._conn._auth_token

    @property
    def stream_auth_token(self):
        return self._data['tokens'][0]

    # Stream entry generator for events defined in StreamEvents
    # (events should be an array of StreamEvents). This generator
    # generates tuples of an array of the requested events (preceded
    # by a timestamp) and a reference to the stream itself (which can
    # be closed to stop receiving events). This generator will
    # generate count number of events, or as many as it gets if count
    # is 0.
    def stream(self, events, count = 0):
        return stream.Stream(self).read_stream(events, count)

    def refresh(self):
        """Force the connection to re-fetch the account's vehicle list."""
        self._conn.vehicles(True)

    def request(self, verb):
        """GET a data_request endpoint and return its 'response' payload."""
        return self._conn.request('/api/1/vehicles/{}/data_request/{}' \
                                  .format(self.id, verb)).json()['response']

    def command(self, verb, **kwargs):
        """POST a command endpoint, log the call, raise CommandError on failure."""
        p = self._conn.request('/api/1/vehicles/{}/command/{}' \
                               .format(self.id, verb), kwargs).json()

        args = ["{} = {}".format(a, kwargs[a]) for a in kwargs]
        self._log.write("{}({}) called. Result was {}" \
                        .format(verb, ", ".join(args), p))

        if 'response' not in p or not p['response']:
            # Command returned failure, raise exception
            raise CommandError(p['error'])

        return p['response']

    # API getter properties

    @property
    def mobile_enabled(self):
        return self._conn.request('/api/1/vehicles/{}/mobile_enabled' \
                                  .format(self.id)).json()['response']

    @property
    def data(self):
        return self._conn.request('/api/1/vehicles/{}/data' \
                                  .format(self.id)).json()  # ['response']

    @property
    def charge_state(self):
        return self.request('charge_state')

    @property
    def climate_state(self):
        return self.request('climate_state')

    @property
    def drive_state(self):
        return self.request('drive_state')

    @property
    def gui_settings(self):
        return self.request('gui_settings')

    @property
    def vehicle_state(self):
        return self.request('vehicle_state')

    # API commands

    def charge_port_door_open(self):
        return self.command('charge_port_door_open')

    def charge_port_door_close(self):
        return self.command('charge_port_door_close')

    def charge_standard(self):
        return self.command('charge_standard')

    def charge_max_range(self):
        return self.command('charge_max_range')

    def charge_start(self):
        return self.command('charge_start')

    def charge_stop(self):
        return self.command('charge_stop')

    @property
    def charge_limit(self):
        return self.charge_state['charge_limit_soc']

    @charge_limit.setter
    def charge_limit(self, limit):
        self.command('set_charge_limit', percent = limit)

    def flash_lights(self):
        return self.command('flash_lights')

    def honk_horn(self):
        return self.command('honk_horn')

    def remote_start_drive(self, password):
        return self.command('remote_start_drive', password = password)

    @property
    def speed_limit(self):
        return self.vehicle_state['speed_limit_mode']

    @speed_limit.setter
    def speed_limit(self, limit):
        return self.command('speed_limit_set_limit', limit_mph = limit)

    def activate_speed_limit(self, pin):
        return self.command('speed_limit_activate', pin = pin)

    def deactivate_speed_limit(self, pin):
        return self.command('speed_limit_deactivate', pin = pin)

    def clear_speed_limit_pin(self, pin):
        return self.command('speed_limit_clear_pin', pin = pin)

    def valet_mode(self, on, pin):
        return self.command('set_valet_mode', on = on, pin = pin)

    def reset_valet_pin(self):
        return self.command('reset_valet_pin')

    def sentry_mode(self, on):
        return self.command('set_sentry_mode', on = on)

    @property
    def locked(self):
        return self.vehicle_state['locked']

    @locked.setter
    def locked(self, lock):
        if lock:
            return self.command('door_lock')
        else:
            return self.command('door_unlock')

    def actuate_trunk(self):
        return self.command('actuate_trunk', which_trunk = 'rear')

    def actuate_frunk(self):
        return self.command('actuate_trunk', which_trunk = 'front')

    def sun_roof_control(self, state, percent = None):
        """Control the sunroof; `percent` only applies to the 'move' state."""
        args = {'state': state}
        # idiom fix: compare against None with `is not`
        if state == 'move' and percent is not None:
            args['percent'] = percent
        if state not in ('open', 'close', 'move', 'comfort', 'vent'):
            raise ValueError("Invalid sunroof state")
        return self.command('sun_roof_control', **args)

    def set_temps(self, driver, passenger):
        return self.command('set_temps', driver_temp = driver,
                            passenger_temp = passenger)

    def remote_seat_heater(self, heater, level):
        """Set seat heater `heater` (0-5) to `level` (0-3)."""
        if heater not in range(0, 6):
            raise ValueError("Invalid seat heater: {}".format(heater))
        if level not in range(0, 4):
            raise ValueError("Invalid seat heater level: {}".format(level))
        return self.command('remote_seat_heater_request', heater = heater,
                            level = level)

    def remote_steering_wheel_heater(self, on):
        return self.command('remote_steering_wheel_heater_request', on = on)

    def auto_conditioning_start(self):
        return self.command('auto_conditioning_start')

    def auto_conditioning_stop(self):
        return self.command('auto_conditioning_stop')

    def media_toggle_playback(self):
        return self.command('media_toggle_playback')

    def media_next_track(self):
        return self.command('media_next_track')

    def media_prev_track(self):
        return self.command('media_prev_track')

    def media_next_fav(self):
        return self.command('media_next_fav')

    def media_prev_fav(self):
        return self.command('media_prev_fav')

    def media_volume_up(self):
        return self.command('media_volume_up')

    def media_volume_down(self):
        return self.command('media_volume_down')

    def navigation_request(self, where):
        """Send a destination string to the vehicle's navigation system."""
        # BUG FIX: `time` was used here without being imported anywhere in
        # this module; import it locally so the command actually works.
        import time
        return self.command('navigation_request',
                            type = 'share_ext_content_raw',
                            locale = 'en-US',
                            value = {
                                'android.intent.extra.TEXT': where
                            },
                            timestamp_ms = str(int(time.time())))

    def schedule_software_update(self, offset_sec):
        return self.command('schedule_software_update',
                            offset_sec = offset_sec)

    def cancel_software_update(self):
        # BUG FIX: the parameter was previously misnamed `self_sec`, so the
        # `self.command(...)` call below raised NameError when invoked.
        return self.command('cancel_software_update')

    def wake_up(self):
        """Wake the vehicle and refresh the cached streaming tokens."""
        d = self._conn.request('/api/1/vehicles/{}/wake_up' \
                               .format(self.id), {}).json()['response']

        # Update vehicle tokens if they're different from our cached
        # ones.
        tokens = d['tokens']
        if tokens != self._data['tokens']:
            self._data['tokens'] = tokens
            self._conn.save_state()

        return d
| 29.709091 | 76 | 0.611506 | 8,145 | 0.99694 | 0 | 0 | 1,888 | 0.231089 | 0 | 0 | 1,973 | 0.241493 |
0f9c4737e945f102d4edb66eb31fa3dc5f138f03 | 1,425 | py | Python | modules/utility_date_time.py | Alex-Nalin/Support-Bot-v3 | 3d73e0596a279a3ef1c9cde60b88fecb93c33832 | [
"MIT"
] | 2 | 2019-01-15T11:13:20.000Z | 2021-11-26T09:26:40.000Z | modules/utility_date_time.py | Alex-Nalin/Support-Bot-v3 | 3d73e0596a279a3ef1c9cde60b88fecb93c33832 | [
"MIT"
] | null | null | null | modules/utility_date_time.py | Alex-Nalin/Support-Bot-v3 | 3d73e0596a279a3ef1c9cde60b88fecb93c33832 | [
"MIT"
] | 1 | 2019-02-19T08:56:13.000Z | 2019-02-19T08:56:13.000Z | import datetime
import re
from dateutil.parser import parse
def ConvertDateTimeToMilliseconds(inputDT: datetime.datetime):
    """Return the POSIX timestamp of the given datetime in whole milliseconds."""
    seconds_since_epoch = inputDT.timestamp()
    return int(seconds_since_epoch * 1000)
def ConvertDTStringToMilliseconds(inputStr):
    """Parse a date/time string (via dateutil) and return its timestamp in milliseconds."""
    return ConvertDateTimeToMilliseconds(parse(inputStr))
def ConvertShorthandToSeconds(shorthandStr):
    """Convert a shorthand duration string into a number of seconds.

    Accepts concatenated integer/unit pairs such as "1d2h3m15s", where the
    units are d (days), h (hours), m (minutes) and s (seconds).  Text that
    does not match an integer+unit pair contributes nothing, so a string
    with no valid pairs yields 0 (same as the original behavior).
    """
    # Seconds represented by each recognized unit suffix.
    unit_to_seconds = {'s': 1, 'm': 60, 'h': 60 * 60, 'd': 24 * 60 * 60}

    # BUG FIX: the previous pattern '\d+[d,h,m,s]' also matched a literal
    # comma as a "unit" (character classes do not use comma separators);
    # such matches silently added 0 seconds.  '[dhms]' matches only real
    # units, and capturing groups replace the manual string surgery that
    # extracted the value and unit from each match.
    offsetVal = 0
    for match in re.finditer(r'(\d+)([dhms])', shorthandStr):
        value, unit = match.groups()
        offsetVal += int(value) * unit_to_seconds[unit]
    return offsetVal
0f9c5dc61dc683dbb784a1fe62685ebcd6df0bc2 | 232 | py | Python | Mundo-1-Fundamentos/016.py | TOPTOPUNIVERSE/CEV-PYTHON3 | 07e2c6b41cd33f3555e14545cdf6fc37325c8fd1 | [
"MIT"
] | null | null | null | Mundo-1-Fundamentos/016.py | TOPTOPUNIVERSE/CEV-PYTHON3 | 07e2c6b41cd33f3555e14545cdf6fc37325c8fd1 | [
"MIT"
] | null | null | null | Mundo-1-Fundamentos/016.py | TOPTOPUNIVERSE/CEV-PYTHON3 | 07e2c6b41cd33f3555e14545cdf6fc37325c8fd1 | [
"MIT"
] | null | null | null | """
Desafio 016
Problema: Crie um programa que leia um número Real qualquer
pelo teclado e mostre na tela a sua porção Inteira."""
# Read a real number from the keyboard and show its integer part.
n = float(input('Digite um valor:'))
# int() truncates toward zero, discarding the fractional portion.
print(f'O número {n} tem a parte inteira {int(n)}')
| 25.777778 | 64 | 0.681034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 206 | 0.872881 |
0f9d61f5e380aa3d41ab98b5c72a8faf138d7bbe | 586 | py | Python | algorithms/nesterov_random/nesterov_test.py | OptiMaL-PSE-Lab/Expensive-Black-Box-Optim-ChemEng | 19c34dcff8c983926df501b93152fa3b3b0305d6 | [
"MIT"
] | null | null | null | algorithms/nesterov_random/nesterov_test.py | OptiMaL-PSE-Lab/Expensive-Black-Box-Optim-ChemEng | 19c34dcff8c983926df501b93152fa3b3b0305d6 | [
"MIT"
] | null | null | null | algorithms/nesterov_random/nesterov_test.py | OptiMaL-PSE-Lab/Expensive-Black-Box-Optim-ChemEng | 19c34dcff8c983926df501b93152fa3b3b0305d6 | [
"MIT"
] | null | null | null | # import sys
# sys.path.insert(1, 'test_functions')
import numpy as np
from test_functions.quadratic_constrained import *
from test_functions.rosenbrock_constrained import *
from algorithms.nesterov_random.nesterov_random import nesterov_random
def problem_rosenbrock(x):
    """Evaluate the constrained Rosenbrock test problem.

    Returns:
        (tuple): objective value f(x) and a list of the two constraint
        values [g1(x), g2(x)], the shape expected by nesterov_random.
    """
    f = rosenbrock_f
    g1 = rosenbrock_g1
    g2 = rosenbrock_g2
    return f(x),[g1(x),g2(x)]

# Problem setup: 2 variables, each bounded to [-1.5, 1.5].
# NOTE(review): `d` is defined but not passed to the solver below.
d = 2
its = 50
bounds = np.array([[-1.5,1.5],[-1.5,1.5]])
x0 = [-0.5,1.5]
# Run the random Nesterov solver for `its` iterations with 2 constraints
# and smoothing parameter mu.
sol = nesterov_random(problem_rosenbrock,x0,bounds,max_iter=its,constraints=2, \
                      mu = 1e-3)
print(sol)
0f9d62c9fc903008351f564ece40e730aa1af67b | 412 | py | Python | old/accent_analyser/rules/VoicedFricative1.py | stefantaubert/eng2ipa-accent-transformer | d620c70b06c83119402e255085046747ade87444 | [
"MIT"
] | null | null | null | old/accent_analyser/rules/VoicedFricative1.py | stefantaubert/eng2ipa-accent-transformer | d620c70b06c83119402e255085046747ade87444 | [
"MIT"
] | null | null | null | old/accent_analyser/rules/VoicedFricative1.py | stefantaubert/eng2ipa-accent-transformer | d620c70b06c83119402e255085046747ade87444 | [
"MIT"
] | null | null | null | from accent_analyser.rules.IpaRule import IpaRule
def replace(word: str):
    """Return *word* with every 'v' substituted by 'w'."""
    return "w".join(word.split("v"))
class VoicedFricative1(IpaRule):
    """Accent rule substituting [w] for the voiced fricative /v/ in IPA words."""

    def __init__(self, likelihood=0.51):
        # likelihood is forwarded to the IpaRule base; presumably the
        # probability that this rule applies — TODO confirm in IpaRule.
        super().__init__(likelihood)
        self.name = "Voiced Fricative: [w] instead of /v/"

    def _convert_core(self, words: list, current_index: int):
        # Apply the v -> w substitution to the word at the current index.
        word = words[current_index].content
        word = replace(word)
        return word
| 24.235294 | 59 | 0.708738 | 301 | 0.730583 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.106796 |
0f9e4fd81922dc4d984f8126b9b15624ec6193f3 | 6,032 | py | Python | src/clients/python/api_v1/examples/ensemble_image_client.py | AliAzG/triton-inference-server | fbce250035d049d13f32c362e2d76a5cb787da51 | [
"BSD-3-Clause"
] | null | null | null | src/clients/python/api_v1/examples/ensemble_image_client.py | AliAzG/triton-inference-server | fbce250035d049d13f32c362e2d76a5cb787da51 | [
"BSD-3-Clause"
] | null | null | null | src/clients/python/api_v1/examples/ensemble_image_client.py | AliAzG/triton-inference-server | fbce250035d049d13f32c362e2d76a5cb787da51 | [
"BSD-3-Clause"
] | 1 | 2021-06-09T11:16:23.000Z | 2021-06-09T11:16:23.000Z | #!/usr/bin/env python
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import numpy as np
import os
from builtins import range
from PIL import Image
from tensorrtserver.api import *
import tensorrtserver.api.model_config_pb2 as model_config
FLAGS = None
def parse_model(url, protocol, model_name, verbose=False):
    """
    Check the configuration of a model to make sure it meets the
    requirements for an image classification network (as expected by
    this client)

    Returns:
        (tuple): input tensor name, output tensor name, max batch size.
    """
    ctx = ServerStatusContext(url, protocol, model_name, verbose)
    server_status = ctx.get_server_status()

    if model_name not in server_status.model_status:
        raise Exception("unable to get status for '" + model_name + "'")

    status = server_status.model_status[model_name]
    config = status.config

    # This client only supports models with exactly one input and one output.
    if len(config.input) != 1:
        raise Exception("expecting 1 input, got {}".format(len(config.input)))
    if len(config.output) != 1:
        raise Exception("expecting 1 output, got {}".format(len(config.output)))

    # NOTE: `input`/`output` shadow builtins; kept for byte-compatibility.
    input = config.input[0]
    output = config.output[0]

    return (input.name, output.name, config.max_batch_size)
def postprocess(results, filenames, batch_size):
    """
    Post-process results to show classifications.
    """
    if len(results) != 1:
        raise Exception("expected 1 result, got {}".format(len(results)))

    # Exactly one output tensor: grab its batched values.
    (batched_result,) = results.values()

    if len(batched_result) != batch_size:
        raise Exception("expected {} results, got {}".format(batch_size, len(batched_result)))
    if len(filenames) != batch_size:
        raise Exception("expected {} filenames, got {}".format(batch_size, len(filenames)))

    # Print each image's classes alongside the file it came from.
    for filename, result in zip(filenames, batched_result):
        print("Image '{}':".format(filename))
        for entry in result:
            print("    {} ({}) = {}".format(entry[0], entry[2], entry[1]))
if __name__ == '__main__':
    # Command-line entry point: classify one image or a folder of images
    # with the "preprocess_resnet50_ensemble" model on a Triton/TRT server.
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
                        help='Enable verbose output')
    parser.add_argument('-c', '--classes', type=int, required=False, default=1,
                        help='Number of class results to report. Default is 1.')
    parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8000',
                        help='Inference server URL. Default is localhost:8000.')
    parser.add_argument('-i', '--protocol', type=str, required=False, default='HTTP',
                        help='Protocol (HTTP/gRPC) used to ' +
                        'communicate with inference service. Default is HTTP.')
    parser.add_argument('image_filename', type=str, nargs='?', default=None,
                        help='Input image / Input folder.')
    FLAGS = parser.parse_args()

    protocol = ProtocolType.from_str(FLAGS.protocol)

    # Validate the model config and get its I/O names and max batch size.
    input_name, output_name, batch_size = parse_model(
        FLAGS.url, protocol, "preprocess_resnet50_ensemble", FLAGS.verbose)

    # Model version -1 — presumably "latest"; confirm with the client API docs.
    ctx = InferContext(FLAGS.url, protocol, "preprocess_resnet50_ensemble",
                       -1, FLAGS.verbose)

    # Collect input file paths: all files in the folder, or the single file.
    filenames = []
    if os.path.isdir(FLAGS.image_filename):
        filenames = [os.path.join(FLAGS.image_filename, f)
                     for f in os.listdir(FLAGS.image_filename)
                     if os.path.isfile(os.path.join(FLAGS.image_filename, f))]
    else:
        filenames = [FLAGS.image_filename,]

    filenames.sort()

    # Set batch size to the smaller value of image size and max batch size
    if len(filenames) <= batch_size:
        batch_size = len(filenames)
    else:
        print("The number of images exceeds maximum batch size," \
              "only the first {} images, sorted by name alphabetically," \
              " will be processed".format(batch_size))

    # Preprocess the images into input data according to model
    # requirements
    image_data = []
    for idx in range(batch_size):
        with open(filenames[idx], "rb") as fd:
            # Raw encoded bytes: the ensemble's preprocess step decodes them.
            image_data.append(np.array([fd.read()], dtype=bytes))

    # Send requests of batch_size images.
    input_filenames = []
    input_batch = []
    for idx in range(batch_size):
        input_filenames.append(filenames[idx])
        input_batch.append(image_data[idx])

    # Send request
    result = ctx.run(
        { input_name : input_batch },
        { output_name : (InferContext.ResultFormat.CLASS, FLAGS.classes) },
        batch_size)

    postprocess(result, input_filenames, batch_size)
| 41.888889 | 94 | 0.680869 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,703 | 0.44811 |
0f9f167571a194f1bc7acb964cd47daad5d38e0c | 146 | py | Python | mamonsu/plugins/system/linux/__init__.py | sgrinko/mamonsu | 03b8e1217b08e6580e08129fddd64dbc20265fd4 | [
"BSD-3-Clause"
] | 188 | 2016-01-31T09:05:59.000Z | 2022-03-22T16:49:12.000Z | mamonsu/plugins/system/linux/__init__.py | sgrinko/mamonsu | 03b8e1217b08e6580e08129fddd64dbc20265fd4 | [
"BSD-3-Clause"
] | 162 | 2016-02-02T13:49:14.000Z | 2022-02-22T08:45:42.000Z | mamonsu/plugins/system/linux/__init__.py | sgrinko/mamonsu | 03b8e1217b08e6580e08129fddd64dbc20265fd4 | [
"BSD-3-Clause"
] | 46 | 2016-01-31T21:23:37.000Z | 2022-02-07T10:59:54.000Z | __all__ = [
'proc_stat', 'disk_stats', 'disk_sizes',
'memory', 'uptime', 'open_files', 'net', 'la'
,'pg_probackup'
]
from . import *
| 18.25 | 49 | 0.582192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.589041 |
0f9f1cf0e2224146473d97b789d2a857c46d01b2 | 164 | py | Python | config.py | stultus/Flask-wado | 997c4b54640be121e9436950c053df60054b2c13 | [
"Apache-2.0"
] | 3 | 2015-07-09T18:49:20.000Z | 2021-04-19T03:20:12.000Z | config.py | stultus/Flask-wado | 997c4b54640be121e9436950c053df60054b2c13 | [
"Apache-2.0"
] | 1 | 2016-07-20T23:19:03.000Z | 2016-07-20T23:19:03.000Z | config.py | stultus/Flask-wado | 997c4b54640be121e9436950c053df60054b2c13 | [
"Apache-2.0"
] | 4 | 2015-05-13T07:26:57.000Z | 2022-01-25T23:19:00.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
# Absolute path of the directory containing this config module; useful for
# building file paths relative to the project root.
basedir = os.path.abspath(os.path.dirname(__file__))

CSRF_ENABLED = True  # enable CSRF protection (read by the Flask app config)
# NOTE(review): hard-coded secret key committed to source control; prefer
# loading it from an environment variable in production.
SECRET_KEY='parayan-manasilla'
| 16.4 | 52 | 0.707317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.390244 |
0f9ff91b0ba1a7d133ebeb2192bf15298558d0cf | 14,797 | py | Python | piper/mayapy/pipernode.py | MongoWobbler/piper | 8e5185a34962096155dcce65da641feac8ca15e1 | [
"MIT"
] | null | null | null | piper/mayapy/pipernode.py | MongoWobbler/piper | 8e5185a34962096155dcce65da641feac8ca15e1 | [
"MIT"
] | 2 | 2021-03-22T02:25:56.000Z | 2022-03-20T16:16:24.000Z | piper/mayapy/pipernode.py | MongoWobbler/piper | 8e5185a34962096155dcce65da641feac8ca15e1 | [
"MIT"
] | 1 | 2021-04-20T07:36:30.000Z | 2021-04-20T07:36:30.000Z | # Copyright (c) 2021 Christian Corsica. All Rights Reserved.
import pymel.core as pm
import piper_config as pcfg
import piper.mayapy.util as myu
import piper.mayapy.convert as convert
import piper.mayapy.attribute as attribute
from .rig import curve # must do relative import in python 2
def get(node_type, ignore=None, search=True):
    """
    Gets the selected given node type or all the given node types in the scene if none selected.

    Args:
        node_type (string): Type of node to get.

        ignore (string): If given and piper node is a child of given ignore type, do not return the piper node.

        search (boolean): If True, and nothing is selected, will attempt to search the scene for all of the given type.

    Returns:
        (list or set): All nodes of the given node type.
    """
    selection = pm.selected()

    if selection:
        # prefer piper nodes picked directly in the selection
        piper_nodes = pm.ls(selection, type=node_type)

        if not piper_nodes:
            # nothing matched directly, so walk up each selected node's hierarchy
            parents = (myu.getFirstTypeParent(node, node_type) for node in selection)
            piper_nodes = {parent for parent in parents if parent}
    elif search:
        # nothing selected, so grab every node of the given type in the scene
        piper_nodes = pm.ls(type=node_type)
    else:
        piper_nodes = []

    # drop any node that lives under the given ignore type
    if ignore:
        piper_nodes = [node for node in piper_nodes if not myu.getFirstTypeParent(node, ignore)]

    return piper_nodes
def multiply(transform, main_term=None, weight=None, inputs=None):
    """
    Creates a piperMultiply node and drives the given transform's scale with its output.

    Args:
        transform (pm.nodetypes.Transform): Node to hook multiply onto its scale.

        main_term (pm.general.Attribute): Attribute to connect onto the multiply main_term.

        weight (pm.general.Attribute): Attribute to connect onto the multiply weight.

        inputs (list): Attributes to connect to the input plug of the multiply node.

    Returns:
        (pm.nodetypes.piperMultiply): Multiply node created.
    """
    node = pm.createNode('piperMultiply', n=transform.name(stripNamespace=True) + '_scaleMultiply')
    node.output >> transform.scale

    if main_term:
        main_term >> node.mainTerm

    if weight:
        weight >> node.weight

    if inputs:
        for index, attr in enumerate(inputs):
            attr >> node.input[index]

    return node
def _nameAndConnect(value, plug):
    """
    Connects the given value to the given plug if it is an attribute, else sets the plug to it.

    Args:
        value (pm.general.Attribute or float): Attribute to connect to the plug, or value to set it to.

        plug (pm.general.Attribute): Input attribute on the node to drive.

    Returns:
        (string): Sanitized name describing the given value, used when naming the node.
    """
    if isinstance(value, pm.general.Attribute):
        value >> plug
        return value.name().split(':')[-1].replace('.', '_')

    plug.set(value)
    return str(value)


def divide(dividend=1.0, divisor=1.0, result_input=None):
    """
    Creates a node that divides the given dividend by the given divisor.

    Args:
        dividend (pm.general.Attribute or float): Number that will be divided.

        divisor (pm.general.Attribute or float): Number that will perform the division.

        result_input (pm.general.Attribute): Attribute to plug in division output into.

    Returns:
        (pm.nodetypes.piperSafeDivide): Division node created.
    """
    divide_node = pm.createNode('piperSafeDivide')

    # Both inputs share the same connect-or-set logic, deduplicated into a helper.
    dividend_name = _nameAndConnect(dividend, divide_node.input1)
    divisor_name = _nameAndConnect(divisor, divide_node.input2)

    if result_input:
        divide_node.output >> result_input

    divide_node.rename(dividend_name + '_DIV_' + divisor_name)
    return divide_node
def inputOutput(node_type, source=None, output=None):
    """
    Creates a node of the given type that exposes an input and an output attribute.

    Args:
        node_type (string): Type of node to create.

        source (pm.general.Attribute): Attribute to plug into node's input.

        output (pm.general.Attribute): Attribute to plug node's output into.

    Returns:
        (pm.nodetypes.DependNode): Node created.
    """
    # name the node after the driving node (if any) plus the type's suffix
    prefix = source.node().name(stripNamespace=True) + '_' if source else ''
    suffix = node_type.split('piper')[-1]
    node = pm.createNode(node_type, name=prefix + suffix)

    if source:
        source >> node.input

    if output:
        node.output >> output

    return node
def oneMinus(source=None, output=None):
    """
    Creates a one minus node that turns a 0 to 1 range into a 1 to 0 or vice versa.

    Args:
        source (pm.general.Attribute): Attribute to plug into one minus input.

        output (pm.general.Attribute): Attribute to plug one minus' output into.

    Returns:
        (pm.nodetypes.piperOneMinus): One minus node created.
    """
    # Thin wrapper: node creation and wiring is handled by inputOutput().
    return inputOutput('piperOneMinus', source=source, output=output)
def reciprocal(source=None, output=None):
    """
    Creates a node that takes in the given source attribute and output its reciprocal. Reciprocal == 1/X

    Args:
        source (pm.general.Attribute): Attribute to plug into reciprocal's input.

        output (pm.general.Attribute): Attribute to plug reciprocal's output into.

    Returns:
        (pm.nodetypes.piperReciprocal): Reciprocal node created.
    """
    # Thin wrapper: node creation and wiring is handled by inputOutput().
    return inputOutput('piperReciprocal', source=source, output=output)
def create(node_type, color=None, name=None, parent=None):
    """
    Creates the given node type with the given color and given name/parent.

    Args:
        node_type (string): Node type to create.

        color (string): Name of color to turn outliner text to. Currently supporting:
        cyan, pink.

        name (string): Name of node.

        parent (PyNode or string): Parent of new node.

    Returns:
        (PyNode): Node created.
    """
    node = pm.createNode(node_type, name=name or node_type, parent=parent, skipSelect=True)

    # only tint the outliner text when the color name resolved to an RGB value
    rgb = convert.colorToRGB(color)
    if rgb:
        node.useOutlinerColor.set(True)
        node.outlinerColor.set(rgb)

    return node
def createShaped(node_type, name=None, control_shape=curve.circle):
    """
    Creates a piper transform of the given type using the given control shape curve(s).

    Args:
        node_type (string): Name for the type of node to create.

        name (string): Name to give the transform node.

        control_shape (method): Method that generates nurbs curve the transform will use.

    Returns:
        (PyNode): Transform node created with control shape curves as child(ren).
    """
    transform = create(node_type, name=name)
    transform._.lock()

    # build the control curve, re-parent its shapes under our transform, then
    # throw away the now-empty source transform
    shape_holder = control_shape()
    nurbs_curves = shape_holder.getChildren(type='nurbsCurve')
    pm.parent(nurbs_curves, transform, shape=True, add=True)
    pm.delete(shape_holder)

    return transform
def createFK(name=None, control_shape=curve.circle):
    """
    Creates a piper FK transform shaped with the given control curve.
    Args:
        name (string): Name for the piper FK node.
        control_shape (method): Method that generates the nurbs curve the Piper FK transform will use.
    Returns:
        (pm.nodetypes.piperFK): Piper FK node created.
    """
    return createShaped('piperFK', name=name, control_shape=control_shape)
def createIK(name=None, control_shape=curve.circle):
    """
    Creates a piper IK transform shaped with the given control curve.
    Args:
        name (string): Name for the piper IK node.
        control_shape (method): Method that generates the nurbs curve the Piper IK transform will use.
    Returns:
        (pm.nodetypes.piperIK): Piper IK node created.
    """
    return createShaped('piperIK', name=name, control_shape=control_shape)
def createOrientMatrix(position, orientation, name=None):
    """
    Creates a piper orient matrix node that keeps the given position matrix while
    maintaining the given orientation matrix.
    Args:
        position (pm.general.Attribute or pm.dt.Matrix): Plug or value driving the node's positionMatrix.
        orientation (pm.general.Attribute or pm.dt.Matrix): Plug or value driving the node's orientMatrix.
        name (string): Name to give piper orient matrix node.
    Returns:
        (pm.nodetypes.piperOrientMatrix): Piper Orient Matrix node created.
    """
    orient_node = pm.createNode('piperOrientMatrix', name=name if name else 'orientMatrix')
    # Attributes get live-connected; raw matrices get set as static values.
    for attr_name, source in (('positionMatrix', position), ('orientMatrix', orientation)):
        plug = orient_node.attr(attr_name)
        if isinstance(source, pm.general.Attribute):
            source >> plug
        elif isinstance(source, pm.dt.Matrix):
            plug.set(source)
    return orient_node
def createSwingTwist(driver, target, axis='y', swing=0, twist=1):
    """
    Creates the swing twist node with given axis, swing, and twist attributes.
    Args:
        driver (pm.nodetypes.Transform): Node that will drive given target. Must have BIND used as rest matrix.
        target (pm.nodetypes.Transform): Node that will be driven with twist/swing through offsetParentMatrix.
        axis (string): Axis in which node will output twist.
        swing (float): Weight of swing rotation.
        twist (float): Weight of twist rotation.
    Returns:
        (pm.nodetypes.swingTwist): Swing Twist node created.
    """
    # Name the node after the target, e.g. "<target>_ST".
    name = target.name(stripNamespace=True) + '_ST'
    swing_twist = pm.createNode('swingTwist', n=name)
    axis_index = convert.axisToIndex(axis)
    swing_twist.twistAxis.set(axis_index)
    swing_twist.swing.set(swing)
    swing_twist.twist.set(twist)
    # Resolve the driver's BIND (rest) transform; fails through pm.error if missing.
    driver_bind = convert.toBind(driver, fail_display=pm.error)
    driver.matrix >> swing_twist.driverMatrix
    driver_bind.matrix >> swing_twist.driverRestMatrix
    offset_driver = swing_twist.outMatrix
    # If something already drives the target's offsetParentMatrix, multiply our
    # output with that existing driver instead of clobbering the connection.
    node_plug = attribute.getSourcePlug(target.offsetParentMatrix)
    if node_plug:
        mult_matrix = pm.createNode('multMatrix', n=name + '_MM')
        swing_twist.outMatrix >> mult_matrix.matrixIn[0]
        node_plug >> mult_matrix.matrixIn[1]
        offset_driver = mult_matrix.matrixSum
    offset_driver >> target.offsetParentMatrix
    return swing_twist
def createMesh():
    """
    Creates a piper mesh group(s) based on whether user has selection, shift held, and scene saved.
    Returns:
        (pm.nt.piperMesh or list): Usually PyNode created. If Shift held, will return list or all piperMesh(es) created.
    """
    selected = pm.selected()
    scene_name = pm.sceneName().namebase
    if selected:
        # if shift held, create a piper mesh for each selected object.
        if myu.isShiftHeld():
            piper_meshes = []
            for node in selected:
                parent = node.getParent()
                name = pcfg.mesh_prefix + node.nodeName()
                piper_mesh = create('piperMesh', 'cyan', name=name, parent=parent)
                pm.parent(node, piper_mesh)
                piper_meshes.append(piper_mesh)
            return piper_meshes
        else:
            # If user selected stuff that is not a mesh, warn the user.
            non_mesh_transforms = [node for node in selected if not node.getShapes()]
            if non_mesh_transforms:
                # str.join requires strings; joining PyNodes directly raises a
                # TypeError, so convert each node to its name first.
                names = '\n'.join([node.nodeName() for node in non_mesh_transforms])
                pm.warning('The following are not meshes! \n' + names)
            # Get the parent roots and parent them under the piper mesh node to not mess up any hierarchies.
            name = pcfg.mesh_prefix
            name += scene_name if scene_name else selected[-1].nodeName()
            piper_mesh = create('piperMesh', 'cyan', name=name)
            parents = myu.getRootParents(selected)
            pm.parent(parents, piper_mesh)
            return piper_mesh
    # No selection: group every mesh in the scene under a single piper mesh node.
    # Avoid doubling the prefix when the scene name already starts with it.
    name = '' if scene_name.startswith(pcfg.mesh_prefix) else pcfg.mesh_prefix
    name += scene_name if scene_name else 'piperMesh'
    piper_mesh = create('piperMesh', 'cyan', name=name)
    meshes = pm.ls(type='mesh')
    parents = myu.getRootParents(meshes)
    pm.parent(parents, piper_mesh)
    return piper_mesh
def createSkinnedMesh():
    """
    Creates a skinned mesh node for each root joint found in the skin clusters
    Returns:
        (list): PyNodes of nodes created.
    """
    selected = pm.selected()
    scene_name = pm.sceneName().namebase
    if selected:
        # Gather skin clusters from both direct connections and construction history.
        skin_clusters = set()
        skin_clusters.update(set(pm.listConnections(selected, type='skinCluster')))
        skin_clusters.update(set(pm.listHistory(selected, type='skinCluster')))
    else:
        skin_clusters = pm.ls(type='skinCluster')
    if not skin_clusters:
        # No skinning found: still create an (empty) skinned mesh group so
        # callers always get a list with one node back.
        pm.warning('No skin clusters found!')
        piper_skinned_mesh = create('piperSkinnedMesh', 'pink', name=pcfg.skinned_mesh_prefix + 'piperSkinnedMesh')
        return [piper_skinned_mesh]
    piper_skinned_meshes = []
    # Mapping of root joint -> geometry driven by it (per myu.getSkinnedMeshes).
    skinned_meshes = myu.getSkinnedMeshes(skin_clusters)
    for root_joint, geometry in skinned_meshes.items():
        # Avoid doubling the prefix when the scene name already starts with it.
        name = '' if scene_name.startswith(pcfg.skinned_mesh_prefix) else pcfg.skinned_mesh_prefix
        name += scene_name if scene_name else next(iter(geometry)).nodeName()
        piper_skinned_mesh = create('piperSkinnedMesh', 'pink', name=name)
        piper_skinned_meshes.append(piper_skinned_mesh)
        # Re-parent hierarchy roots (not individual nodes) so hierarchies stay intact.
        geometry_parents = myu.getRootParents(geometry)
        pm.parent(root_joint, geometry_parents, piper_skinned_mesh)
    return piper_skinned_meshes
def createRig(name=''):
    """
    Creates the node that houses all rig nodes.
    Args:
        name (string): If given, will use the given name as the name for the rig node.
    Returns:
        (pm.nodetypes.piperRig): Rig node created.
    """
    if not name:
        name = 'piperRig'
    rig = create('piperRig', 'burnt orange', name=name)
    rig.addAttr(pcfg.message_root_control, at='message')
    rig._.lock()
    # Keep the high-poly visibility toggle non-keyable, lock/hide the transform
    # compounds, and add a separator attribute for channel box tidiness.
    attribute.nonKeyable(rig.highPolyVisibility)
    attribute.lockAndHideCompound(rig)
    attribute.addSeparator(rig)
    return rig
def createAnimation():
    """
    Creates the node that houses a rig. Used to export animation.
    Returns:
        (pm.nodetypes.piperAnimation): Animation node created.
    """
    scene_name = pm.sceneName().namebase
    base_name = scene_name or 'piperAnimation'
    piper_animation = create('piperAnimation', 'dark green', name=pcfg.animation_prefix + base_name)
    attribute.lockAndHideCompound(piper_animation)
    # Only parent when exactly one rig exists; otherwise warn with the count found.
    rigs = get('piperRig', ignore='piperAnimation')
    if len(rigs) == 1:
        pm.parent(rigs[0], piper_animation)
    else:
        pm.warning('{} rigs found!'.format(len(rigs)))
    return piper_animation
| 33.029018 | 120 | 0.678246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,880 | 0.464959 |
0fa01c0382cec9db13e3b8d1049768cbc564bd7e | 9,449 | py | Python | ie_serving/server/rest_service.py | Doken-Tokuyama/OpenVINO-model-server | 06cdd2785fdf68cb089a8fb98840977eef0c77c2 | [
"Apache-2.0"
] | 1 | 2019-08-31T04:02:04.000Z | 2019-08-31T04:02:04.000Z | ie_serving/server/rest_service.py | Doken-Tokuyama/OpenVINO-model-server | 06cdd2785fdf68cb089a8fb98840977eef0c77c2 | [
"Apache-2.0"
] | null | null | null | ie_serving/server/rest_service.py | Doken-Tokuyama/OpenVINO-model-server | 06cdd2785fdf68cb089a8fb98840977eef0c77c2 | [
"Apache-2.0"
] | null | null | null | import datetime
import json
import falcon
from google.protobuf.json_format import MessageToJson
from tensorflow_serving.apis import get_model_metadata_pb2, \
get_model_status_pb2
from ie_serving.logger import get_logger
from ie_serving.server.constants import WRONG_MODEL_SPEC, INVALID_FORMAT, \
OUTPUT_REPRESENTATION
from ie_serving.server.get_model_metadata_utils import \
prepare_get_metadata_output
from ie_serving.server.predict_utils import prepare_input_data
from ie_serving.server.rest_msg_processing import preprocess_json_request, \
prepare_json_response
from ie_serving.server.rest_msg_validation import get_input_format
from ie_serving.server.service_utils import \
check_availability_of_requested_model, \
check_availability_of_requested_status, add_status_to_response
logger = get_logger(__name__)
class GetModelStatus(object):
    """Falcon resource reporting the status of a served model's version(s)."""

    def __init__(self, models):
        # models: mapping of model name -> model object with versions_statuses.
        self.models = models

    def on_get(self, req, resp, model_name, requested_version=0):
        """GET handler: reply with status for one version, or all versions when 0."""
        logger.debug("MODEL_STATUS, get request: {}, {}".format(
            model_name, requested_version))
        if not check_availability_of_requested_status(
                models=self.models, requested_version=requested_version,
                model_name=model_name):
            logger.debug("MODEL_STATUS, invalid model spec from request")
            resp.status = falcon.HTTP_NOT_FOUND
            resp.body = json.dumps({
                'error': WRONG_MODEL_SPEC.format(model_name,
                                                 requested_version)
            })
            return
        response = get_model_status_pb2.GetModelStatusResponse()
        requested_version = int(requested_version)
        if requested_version:
            # A specific version was requested.
            add_status_to_response(
                self.models[model_name].versions_statuses[requested_version],
                response)
        else:
            # Version 0 means "all versions".
            for version_status in \
                    self.models[model_name].versions_statuses.values():
                add_status_to_response(version_status, response)
        logger.debug("MODEL_STATUS created a response for {} - {}".format(
            model_name, requested_version))
        resp.status = falcon.HTTP_200
        resp.body = MessageToJson(response,
                                  including_default_value_fields=True)
class GetModelMetadata(object):
    """Falcon resource serving a model's input/output signature as JSON."""
    def __init__(self, models):
        # models: mapping of model name -> model object with per-version engines.
        self.models = models
    def on_get(self, req, resp, model_name, requested_version=0):
        """GET handler: respond with the model's serving_default signature def."""
        logger.debug("MODEL_METADATA, get request: {}, {}"
                     .format(model_name, requested_version))
        valid_model_spec, version = check_availability_of_requested_model(
            models=self.models, requested_version=requested_version,
            model_name=model_name)
        if not valid_model_spec:
            # Unknown model/version: reply 404 with an explanatory error body.
            resp.status = falcon.HTTP_NOT_FOUND
            logger.debug("MODEL_METADATA, invalid model spec from request")
            err_out_json = {
                'error': WRONG_MODEL_SPEC.format(model_name,
                                                 requested_version)
            }
            resp.body = json.dumps(err_out_json)
            return
        # Hold the engine's in_use lock while reading its tensors.
        # NOTE(review): presumably this guards against a concurrent version
        # reload swapping the tensors mid-read — confirm against model code.
        self.models[model_name].engines[version].in_use.acquire()
        inputs = self.models[model_name].engines[version].input_tensors
        outputs = self.models[model_name].engines[version].output_tensors
        signature_def = prepare_get_metadata_output(inputs=inputs,
                                                    outputs=outputs,
                                                    model_keys=self.models
                                                    [model_name].
                                                    engines[version].
                                                    model_keys)
        # Pack the signature into a TF Serving GetModelMetadataResponse proto.
        response = get_model_metadata_pb2.GetModelMetadataResponse()
        model_data_map = get_model_metadata_pb2.SignatureDefMap()
        model_data_map.signature_def['serving_default'].CopyFrom(
            signature_def)
        response.metadata['signature_def'].Pack(model_data_map)
        response.model_spec.name = model_name
        response.model_spec.version.value = version
        logger.debug("MODEL_METADATA created a response for {} - {}"
                     .format(model_name, version))
        self.models[model_name].engines[version].in_use.release()
        resp.status = falcon.HTTP_200
        resp.body = MessageToJson(response)
class Predict():
    """Falcon resource executing inference requests against a served model."""
    def __init__(self, models):
        # models: mapping of model name -> model object with per-version engines.
        self.models = models
    def on_post(self, req, resp, model_name, requested_version=0):
        """POST handler: validate inputs, run inference, return outputs as JSON."""
        valid_model_spec, version = check_availability_of_requested_model(
            models=self.models, requested_version=requested_version,
            model_name=model_name)
        if not valid_model_spec:
            resp.status = falcon.HTTP_NOT_FOUND
            logger.debug("PREDICT, invalid model spec from request, "
                         "{} - {}".format(model_name, requested_version))
            err_out_json = {
                'error': WRONG_MODEL_SPEC.format(model_name,
                                                 requested_version)
            }
            resp.body = json.dumps(err_out_json)
            return
        body = req.media
        if type(body) is not dict:
            resp.status = falcon.HTTP_400
            resp.body = json.dumps({'error': 'Invalid JSON in request body'})
            return
        # Determine the request's input representation (or reject it).
        input_format = get_input_format(body, self.models[
            model_name].engines[version].input_key_names)
        if input_format == INVALID_FORMAT:
            resp.status = falcon.HTTP_400
            resp.body = json.dumps({'error': 'Invalid inputs in request '
                                             'body'})
            return
        inputs = preprocess_json_request(body, input_format, self.models[
            model_name].engines[version].input_key_names)
        start_time = datetime.datetime.now()
        # On failure, inference_input holds the error text and code the HTTP status.
        occurred_problem, inference_input, batch_size, code = \
            prepare_input_data(models=self.models, model_name=model_name,
                               version=version, data=inputs, rest=True)
        deserialization_end_time = datetime.datetime.now()
        duration = \
            (deserialization_end_time - start_time).total_seconds() * 1000
        logger.debug("PREDICT; input deserialization completed; {}; {}; {}ms"
                     .format(model_name, version, duration))
        if occurred_problem:
            resp.status = code
            err_out_json = {'error': inference_input}
            logger.debug("PREDICT, problem with input data. Exit code {}"
                         .format(code))
            resp.body = json.dumps(err_out_json)
            return
        # Hold the engine's in_use lock for the duration of inference.
        self.models[model_name].engines[version].in_use.acquire()
        inference_start_time = datetime.datetime.now()
        try:
            inference_output = self.models[model_name].engines[version] \
                .infer(inference_input, batch_size)
        except ValueError as error:
            resp.status = falcon.HTTP_400
            err_out_json = {'error': 'Malformed input data'}
            logger.debug("PREDICT, problem with inference. "
                         "Corrupted input: {}".format(error))
            # Release the lock before bailing out so other requests can proceed.
            self.models[model_name].engines[version].in_use.release()
            resp.body = json.dumps(err_out_json)
            return
        inference_end_time = datetime.datetime.now()
        self.models[model_name].engines[version].in_use.release()
        duration = \
            (inference_end_time - inference_start_time).total_seconds() * 1000
        logger.debug("PREDICT; inference execution completed; {}; {}; {}ms"
                     .format(model_name, version, duration))
        # Output values expose .tolist() (array-like); convert for JSON serialization.
        for key, value in inference_output.items():
            inference_output[key] = value.tolist()
        response = prepare_json_response(
            OUTPUT_REPRESENTATION[input_format], inference_output,
            self.models[model_name].engines[version].model_keys['outputs'])
        resp.status = falcon.HTTP_200
        resp.body = json.dumps(response)
        serialization_end_time = datetime.datetime.now()
        duration = \
            (serialization_end_time -
             inference_end_time).total_seconds() * 1000
        logger.debug("PREDICT; inference results serialization completed;"
                     " {}; {}; {}ms".format(model_name, version, duration))
        return
def create_rest_api(models):
    """Builds the falcon WSGI app exposing model status, metadata and predict routes."""
    app = falcon.API()
    model_status = GetModelStatus(models)
    model_metadata = GetModelMetadata(models)
    predict = Predict(models)
    # Each resource is reachable both without and with an explicit version.
    routes = (
        ('/v1/models/{model_name}', model_status),
        ('/v1/models/{model_name}/versions/{requested_version}', model_status),
        ('/v1/models/{model_name}/metadata', model_metadata),
        ('/v1/models/{model_name}/versions/{requested_version}/metadata',
         model_metadata),
        ('/v1/models/{model_name}:predict', predict),
        ('/v1/models/{model_name}/versions/{requested_version}:predict',
         predict),
    )
    for uri, resource in routes:
        app.add_route(uri, resource)
    return app
| 42.755656 | 78 | 0.620489 | 7,813 | 0.82686 | 0 | 0 | 0 | 0 | 0 | 0 | 1,048 | 0.110911 |
0fa091d86b1a73176978c43ffd49893026ee0d83 | 11,373 | py | Python | spinup/utils/hyper_search.py | yjl450/spinningup-drl-prototyping | 88bbbca2ce48fcb879a6592f5bee1347ef52cf30 | [
"MIT"
] | 9 | 2019-11-07T07:05:25.000Z | 2021-08-23T09:33:28.000Z | spinup/utils/hyper_search.py | yjl450/spinningup-drl-prototyping | 88bbbca2ce48fcb879a6592f5bee1347ef52cf30 | [
"MIT"
] | null | null | null | spinup/utils/hyper_search.py | yjl450/spinningup-drl-prototyping | 88bbbca2ce48fcb879a6592f5bee1347ef52cf30 | [
"MIT"
] | 3 | 2020-06-25T16:08:30.000Z | 2020-12-18T16:59:32.000Z | import pandas as pd
import json
import os
import os.path as osp
import numpy as np
"""
python -m spinup.run hyper_search <files> -ae <start from which epoch>
Makes a report that orders the experiments in terms of their performance.
Use this to easily find good hyperparameter settings after a hyperparameter search.
This is intended as a one-off analysis helper: upload/commit it once it is ready.
"""
DIV_LINE_WIDTH = 50
# Global vars for tracking and labeling data at load time.
exp_idx = 0
units = dict()
def compute_hyper(data, xaxis='Epoch', value="AverageEpRet", condition="Condition1", smooth=1, no_legend=False,
                  legend_loc='best', color=None, linestyle=None, font_scale=1.5,
                  label_font_size=24, xlabel=None, ylabel=None, after_epoch=0, no_order=False,
                  **kwargs):
    """
    Score each experiment setting and print a ranking.

    For every unique name in the `condition` column, the mean and std of
    'AverageTestEpRet' over rows with Epoch >= `after_epoch` are computed and
    printed as tab-separated "name<TAB>mean<TAB>std" lines, best mean first
    (original order when `no_order` is True).

    Args:
        data: pandas DataFrame, or list of DataFrames to be concatenated.
        value: column smoothed when smooth > 1 (scoring always uses
            'AverageTestEpRet').
        condition: column holding the experiment/setting name.
        smooth: moving-average window width applied per-datum when > 1.
        after_epoch: only epochs >= this value contribute to the score.
        no_order: print settings in original order instead of best-first.

    The remaining keyword args (xaxis, no_legend, legend_loc, color, linestyle,
    font_scale, label_font_size, xlabel, ylabel, **kwargs) are accepted for
    signature compatibility with the plotting utilities and are unused here.
    """
    if smooth > 1:
        # Moving-average smoothing: smoothed_y[t] = mean(y[t-k] ... y[t+k])
        # where smooth == 2k+1; the z-convolution normalizes the window edges.
        y = np.ones(smooth)
        for datum in data:
            x = np.asarray(datum[value])
            z = np.ones(len(x))
            smoothed_x = np.convolve(x, y, 'same') / np.convolve(z, y, 'same')
            datum[value] = smoothed_x
    if isinstance(data, list):
        data = pd.concat(data, ignore_index=True)
    unique_names = data[condition].unique()  # experiment/setting names
    n_settings = len(unique_names)
    score_list = np.zeros(n_settings)
    std_list = np.zeros(n_settings)
    for i in range(n_settings):
        un = unique_names[i]
        print("\nunique name: ", un)
        exp_data = data.loc[data[condition] == un]
        # Final performance only considers the last epochs (>= after_epoch).
        final_performance_data = exp_data.loc[exp_data['Epoch'] >= after_epoch]
        average_test_epret_final = final_performance_data['AverageTestEpRet'].values
        mean_score = average_test_epret_final.mean()
        std_score = average_test_epret_final.std()
        score_list[i] = mean_score
        std_list[i] = std_score
        epoch_reached = final_performance_data['Epoch'].max()
        if np.isnan(mean_score):
            print('n/a')
        else:
            print('total epoch: %d, score: %.2f' % (epoch_reached, mean_score))
    # Order the hyper-settings by descending score so good settings are obvious.
    sorted_index = np.flip(np.argsort(score_list))
    if no_order:
        sorted_index = np.arange(len(sorted_index))
    for i in range(n_settings):
        setting_index = sorted_index[i]
        print('%s\t%.1f\t%.1f' % (unique_names[setting_index], score_list[setting_index], std_list[setting_index]))
def get_datasets(logdir, condition=None):
    """
    Recursively look through logdir for output files produced by
    spinup.logx.Logger.
    Assumes that any file "progress.txt" is a valid hit.
    Args:
        logdir (str): directory tree to walk.
        condition (str): optional label overriding the experiment name.
    Returns:
        (list): one pandas DataFrame per progress.txt found, each annotated
        with Unit, Condition1, Condition2 and Performance columns.
    """
    global exp_idx
    global units
    datasets = []
    for root, _, files in os.walk(logdir):
        if 'progress.txt' not in files:
            continue
        exp_name = None
        try:
            # Use a context manager so the config file handle is not leaked.
            with open(os.path.join(root, 'config.json')) as config_file:
                config = json.load(config_file)
            if 'exp_name' in config:
                exp_name = config['exp_name']
        except (OSError, ValueError):
            # Narrow instead of a bare except (which also swallowed
            # KeyboardInterrupt); json.JSONDecodeError subclasses ValueError.
            print('No file named config.json')
        condition1 = condition or exp_name or 'exp'
        condition2 = condition1 + '-' + str(exp_idx)
        exp_idx += 1
        # `units` counts how many runs share a condition (seed index).
        if condition1 not in units:
            units[condition1] = 0
        unit = units[condition1]
        units[condition1] += 1
        try:
            exp_data = pd.read_table(os.path.join(root, 'progress.txt'))
            # Off-policy algorithms log AverageTestEpRet; on-policy only AverageEpRet.
            performance = 'AverageTestEpRet' if 'AverageTestEpRet' in exp_data else 'AverageEpRet'
            exp_data.insert(len(exp_data.columns), 'Unit', unit)
            exp_data.insert(len(exp_data.columns), 'Condition1', condition1)
            exp_data.insert(len(exp_data.columns), 'Condition2', condition2)
            exp_data.insert(len(exp_data.columns), 'Performance', exp_data[performance])
            datasets.append(exp_data)
        except Exception as e:
            print(e)
    return datasets
def get_all_datasets(all_logdirs, legend=None, select=None, exclude=None):
    """
    Resolve every entry of all_logdirs to concrete directories and load them.

    An entry that is a real directory (ending with '/') is used as-is;
    otherwise it is treated as a prefix and all matching sibling directories
    are pulled in. select/exclude filter the result by substrings, then
    get_datasets loads each directory (optionally labeled with its legend).
    """
    logdirs = []
    for logdir in all_logdirs:
        if osp.isdir(logdir) and logdir[-1] == '/':
            logdirs.append(logdir)
        else:
            # Treat the entry as a prefix within its parent directory.
            basedir = osp.dirname(logdir)
            prefix = logdir.split('/')[-1]
            matches = [osp.join(basedir, entry)
                       for entry in os.listdir(basedir) if prefix in entry]
            logdirs += sorted(matches)
    # Selection rules make it easier to look at particular ablations when many
    # jobs with similar names were launched at once.
    if select is not None:
        logdirs = [log for log in logdirs if all(x in log for x in select)]
    if exclude is not None:
        logdirs = [log for log in logdirs if not any(x in log for x in exclude)]
    # Show which directories were resolved.
    print('Plotting from...\n' + '=' * DIV_LINE_WIDTH + '\n')
    for logdir in logdirs:
        print(logdir)
    print('\n' + '=' * DIV_LINE_WIDTH)
    assert not legend or (len(legend) == len(logdirs)), \
        "Must give a legend title for each set of experiments."
    data = []
    legends = legend if legend else [None] * len(logdirs)
    for log, leg in zip(logdirs, legends):
        data += get_datasets(log, leg)
    return data
def compare_performance(all_logdirs, legend=None, xaxis=None, values=None, count=False,
                        font_scale=1.5, smooth=1, select=None, exclude=None, estimator='mean', no_legend=False,
                        legend_loc='best', after_epoch=0,
                        save_name=None, xlimit=-1, color=None, linestyle=None, label_font_size=24,
                        xlabel=None, ylabel=None,
                        no_order=False):
    """Load every matching logdir and print a performance ranking per value column."""
    datasets = get_all_datasets(all_logdirs, legend, select, exclude)
    if not isinstance(values, list):
        values = [values]
    # count=True keeps each run separate (Condition2) instead of grouping by name.
    condition = 'Condition2' if count else 'Condition1'
    # Resolve the estimator name (e.g. 'mean') to the numpy callable.
    estimator = getattr(np, estimator)
    for value in values:
        compute_hyper(datasets, xaxis=xaxis, value=value, condition=condition,
                      smooth=smooth, no_legend=no_legend, legend_loc=legend_loc,
                      estimator=estimator, color=color, linestyle=linestyle,
                      font_scale=font_scale, label_font_size=label_font_size,
                      xlabel=xlabel, ylabel=ylabel, after_epoch=after_epoch,
                      no_order=no_order)
def main():
    """
    Command-line entry point.

    Positional logdir arguments (or prefixes of them, autocompleted
    internally) select experiment output directories; --select/--exclude
    filter directories by substrings and --legend labels each one. Scoring
    uses AverageTestEpRet averaged over epochs >= --after-epoch, and settings
    are printed best-first unless --no-order is given. --count scores each
    run separately instead of averaging runs sharing an exp_name; --smooth
    averages data over a window of the given width. --xaxis, --value and
    --est are accepted for compatibility with the plotting utilities.
    """
    import argparse
    parser = argparse.ArgumentParser()
    arg_specs = [
        (('logdir',), dict(nargs='*')),
        (('--legend', '-l'), dict(nargs='*')),
        (('--xaxis', '-x'), dict(default='TotalEnvInteracts')),
        (('--value', '-y'), dict(default='Performance', nargs='*')),
        (('--count',), dict(action='store_true')),
        (('--smooth', '-s'), dict(type=int, default=1)),
        (('--select',), dict(nargs='*')),
        (('--exclude',), dict(nargs='*')),
        (('--est',), dict(default='mean')),
        (('--after-epoch', '-ae'), dict(type=int, default=0)),
        (('-no', '--no-order'), dict(action='store_true')),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    args = parser.parse_args()
    compare_performance(args.logdir, args.legend, args.xaxis, args.value,
                        args.count, smooth=args.smooth, select=args.select,
                        exclude=args.exclude, estimator=args.est,
                        after_epoch=args.after_epoch, no_order=args.no_order)


if __name__ == "__main__":
    main()
0fa16ef2816f764151079f54a639f1a1c4d2374a | 262 | py | Python | piecrust/wsgiutil/__init__.py | airbornemint/PieCrust2 | bd8e44a1a3ba646a9ebfbb4d4f1fa01a1daa3beb | [
"Apache-2.0"
] | null | null | null | piecrust/wsgiutil/__init__.py | airbornemint/PieCrust2 | bd8e44a1a3ba646a9ebfbb4d4f1fa01a1daa3beb | [
"Apache-2.0"
] | null | null | null | piecrust/wsgiutil/__init__.py | airbornemint/PieCrust2 | bd8e44a1a3ba646a9ebfbb4d4f1fa01a1daa3beb | [
"Apache-2.0"
] | null | null | null | from piecrust.serving.server import WsgiServer
def get_app(root_dir, cache_key='prod', enable_debug_info=False):
    """Builds the PieCrust WSGI application serving the website rooted at root_dir."""
    return WsgiServer(root_dir,
                      cache_key=cache_key,
                      enable_debug_info=enable_debug_info)
| 26.2 | 65 | 0.671756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.022901 |
0fa1f574a4f8141432aee6c23b38e68e734cc0ee | 98 | py | Python | Main.py | mriduldhall/tic-tac-toe | 9487949edf84fddcdde09bdb8cb04dbfdc5f8faf | [
"MIT"
] | null | null | null | Main.py | mriduldhall/tic-tac-toe | 9487949edf84fddcdde09bdb8cb04dbfdc5f8faf | [
"MIT"
] | null | null | null | Main.py | mriduldhall/tic-tac-toe | 9487949edf84fddcdde09bdb8cb04dbfdc5f8faf | [
"MIT"
] | null | null | null | from Interfaces.CommandLineInterface import CLI
# Script entry point: build the command-line interface and start it.
# (Runs only when executed directly, not when imported.)
if __name__ == '__main__':
    CLI().initiate()
| 16.333333 | 47 | 0.734694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.102041 |
0fa25074fb0a3f8683d33c168968750fc928e7ea | 1,198 | py | Python | tests/data/view.py | Forumouth/flask-simple | 0764559fbbc4348cc146aa4dddbc1f90d91bc840 | [
"MIT",
"Unlicense"
] | null | null | null | tests/data/view.py | Forumouth/flask-simple | 0764559fbbc4348cc146aa4dddbc1f90d91bc840 | [
"MIT",
"Unlicense"
] | 5 | 2016-01-30T13:32:23.000Z | 2016-02-06T13:34:11.000Z | tests/data/view.py | Forumouth/flask-simple | 0764559fbbc4348cc146aa4dddbc1f90d91bc840 | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
from flask.ext.simple import FlaskEasyView
from .model import ExampleModel
from .form import generate_example_form
from .testapp import app
class SimpleView(FlaskEasyView):
'''
Simple Example View for test
'''
trailing_slash = False
model = ExampleModel
form = generate_example_form()
list_name = "simple"
class SimpleValidationFailureView(SimpleView):
    '''
    Simple example view whose form always fails validation, for testing the
    error path.
    '''
    route_base = "/invalid"
    # Error payload the failing form is built to report.
    # NOTE(review): "happend" typo is part of the expected test data — do not "fix" it.
    expected_err = {"error": ["Something happend"]}
    # generate_example_form(False, ...) produces a form that fails with expected_err.
    form = generate_example_form(False, expected_err)
    list_name = "simple_validation_failure"
class SimpleTemplateView(SimpleView):
    '''
    Simple example view that renders its responses through a template.
    '''
    # Served under /templated instead of the default route.
    route_base = "/templated"
    # Template file used for rendering — presumably resolved by FlaskEasyView; confirm.
    template = "test.jinja"
    list_name = "simple_templated"
class WithoutFormView(FlaskEasyView):
    '''
    Simple example view without a form, i.e. no input validation layer.
    '''
    route_base = "/noform"
    # Do not append a trailing slash to the generated routes.
    trailing_slash = False
    # Model class the view serves.
    model = ExampleModel
    list_name = "simple_no_form"
# Register every example view on the shared test application so their routes exist.
SimpleView.register(app)
SimpleTemplateView.register(app)
WithoutFormView.register(app)
SimpleValidationFailureView.register(app)
| 22.185185 | 53 | 0.716194 | 875 | 0.730384 | 0 | 0 | 0 | 0 | 0 | 0 | 381 | 0.31803 |
0fa2c01a7139bb57740679564f53ec3e25788f1f | 3,011 | py | Python | tests/job_concurrency1_test.py | moellep/sirepo | 4fd8b88b3b95921d50e6b225c02a46c00da16a27 | [
"Apache-2.0"
] | null | null | null | tests/job_concurrency1_test.py | moellep/sirepo | 4fd8b88b3b95921d50e6b225c02a46c00da16a27 | [
"Apache-2.0"
] | null | null | null | tests/job_concurrency1_test.py | moellep/sirepo | 4fd8b88b3b95921d50e6b225c02a46c00da16a27 | [
"Apache-2.0"
] | 2 | 2020-10-27T20:01:23.000Z | 2020-11-06T23:35:05.000Z | # -*- coding: utf-8 -*-
u"""Concurrency testing
This test does not always fail when there is a problem (false
positive), because it depends on a specific sequence of events
that can't be controlled by the test.
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
_REPORT = 'heightWeightReport'
def test_myapp(fc):
    """Regression test for https://github.com/radiasoft/sirepo/issues/2346

    Starts a long-running simulation (run 1), kicks off a concurrent request
    for the same report in a thread (run 2), cancels run 1, and then verifies
    a third run can still be started and canceled cleanly.
    """
    from pykern import pkunit
    import time
    import threading
    from pykern.pkdebug import pkdlog
    d = fc.sr_sim_data()
    d.models.simulation.name = 'srunit_long_run'
    def _t2():
        # Concurrent request for the same report, issued from a second thread.
        pkdlog('start 2')
        r2 = fc.sr_post(
            'runSimulation',
            dict(
                forceRun=False,
                models=d.models,
                report=_REPORT,
                simulationId=d.models.simulation.simulationId,
                simulationType=d.simulationType,
            ),
        )
        pkdlog(r2)
        for _ in range(20):
            # Pass r2 so the '{}' placeholder has an argument; without it the
            # failure path itself raised while formatting the message.
            pkunit.pkok(r2.state != 'error', 'unexpected error state: {}', r2)
            if r2.state == 'running':
                break
            if r2.state == 'canceled':
                pkdlog('canceled')
                break
            time.sleep(.1)
            pkdlog('runStatus 2')
            r2 = fc.sr_post('runStatus', r2.nextRequest)
        else:
            pkunit.pkfail('runStatus: failed to start running: {}', r2)
    pkdlog('start 1')
    r1 = fc.sr_post(
        'runSimulation',
        dict(
            forceRun=False,
            models=d.models,
            report=_REPORT,
            simulationId=d.models.simulation.simulationId,
            simulationType=d.simulationType,
        ),
    )
    # Poll until run 1 reports 'running' (or give up after ~1s).
    for _ in range(10):
        pkdlog(r1)
        pkunit.pkok(r1.state != 'error', 'unexpected error state: {}', r1)
        if r1.state == 'running':
            break
        time.sleep(.1)
        r1 = fc.sr_post('runStatus', r1.nextRequest)
    else:
        pkunit.pkfail('runStatus: failed to start running: {}', r1)
    # Start the concurrent request, then cancel run 1 while run 2 is in flight.
    t2 = threading.Thread(target=_t2)
    t2.start()
    time.sleep(.1)
    pkdlog('runCancel')
    c = fc.sr_post('runCancel', r1.nextRequest)
    pkunit.pkeq('canceled', c.state)
    # A fresh run must still be startable after the cancel.
    pkdlog('start 3')
    r1 = fc.sr_post(
        'runSimulation',
        dict(
            forceRun=False,
            models=d.models,
            report=_REPORT,
            simulationId=d.models.simulation.simulationId,
            simulationType=d.simulationType,
        ),
    )
    for _ in range(10):
        pkunit.pkok(r1.state != 'error', 'unexpected error state: {}', r1)
        if r1.state == 'running':
            break
        time.sleep(.1)
        r1 = fc.sr_post('runStatus', r1.nextRequest)
    else:
        pkunit.pkfail('runStatus: failed to start running: {}', r1)
    c = fc.sr_post('runCancel', r1.nextRequest)
    pkunit.pkeq('canceled', c.state)
0fa631a2baeb7f92993b24dab769fd0334e826c6 | 3,707 | py | Python | FF_HOI.py | hellocatfood/FontForge-Higher-Order-Interpolation | 56ecec8e14bd09490d31a8001482d972df5b5c76 | [
"Apache-2.0"
] | 14 | 2020-11-13T11:22:03.000Z | 2021-07-20T15:30:54.000Z | FF_HOI.py | hellocatfood/FontForge-Higher-Order-Interpolation | 56ecec8e14bd09490d31a8001482d972df5b5c76 | [
"Apache-2.0"
] | null | null | null | FF_HOI.py | hellocatfood/FontForge-Higher-Order-Interpolation | 56ecec8e14bd09490d31a8001482d972df5b5c76 | [
"Apache-2.0"
] | 1 | 2021-07-22T11:11:32.000Z | 2021-07-22T11:11:32.000Z | #!/usr/bin/env python3
import fontforge
import argparse
import itertools
import os
import shutil
from lib.util import *
# This handles the glyph's "HOIINFO" comment. Right now only deals with setting rbearing of each master.
from lib import HOIInfo
# Command-line interface: the only argument is the specially formatted SFD file.
parser = argparse.ArgumentParser(description='Given a specially formatted input SFD, output UFO files suitable for non-linear OpenType Font Variations, buildable with Fontmake and TTX.')
parser.add_argument('SFD', type=str)
args = parser.parse_args()
eprint("Performing sanity check on font: {} …".format(args.SFD))
font = fontforge.open(args.SFD)
# Parse the glyphs' "HOIINFO" comments (e.g. per-master rbearing settings).
HOIdict = HOIInfo.from_comments(font)
# Sanity tests
if not has_layers(font):
    exit("SFD does not have required layers; read documentation.")
assert all([not font.layers[l].is_quadratic for l in font.layers]), "Only cubic layers supported in this version"
for g in font.glyphs():
    eprint("Checking sanity of glyph: {} …".format(g.glyphname))
    for c in g.layers["HOI paths"]:
        # Each HOI contour's first point names (as an integer) the outline point it controls.
        assert c[0].name.isdigit(), "HOI path not integer name"
        # The contour must pass through its own first point's coordinates.
        assert coords(c[0]) in [coords(p) for p in c], "Misaligned HOI path"
eprint("Sane (🤞). Beginning UFO generation …")
eprint("Generating master: A")
def reset_ufo(which, font):
    """Generate a fresh UFO for master *which* and close the font.

    A UFO is a directory on disk; any stale build from a previous run is
    removed first so FontForge starts from a clean slate.
    """
    target = f"{font.fullname}-{which}.ufo"
    # os.path.isdir already implies existence, so one check suffices.
    if os.path.isdir(target):
        shutil.rmtree(target)
    font.generate(target, flags=("opentype", "no-hints", "omit-instructions"))
    font.close()
def del_unneeded_ufo_layers(font):
    """Strip every layer except the standard "Fore"/"Back" pair.

    UFO generation only understands the foreground and background layers;
    the auxiliary layers used for higher-order interpolation ("HOI paths",
    "End state") must be dropped first.
    """
    # Snapshot the layer names before deleting: removing entries from the
    # collection while iterating it directly can skip layers or raise.
    for layer_name in list(font.layers):
        if layer_name != "Fore" and layer_name != "Back":
            del font.layers[layer_name]
# Master A: the foreground as drawn (start state). Apply the "c1" right
# bearings, drop the auxiliary layers, and emit the first UFO.
HOIInfo.set_HOI_rbearing(font, HOIdict, "c1")
del_unneeded_ufo_layers(font)
reset_ufo("A", font)
eprint("Generating master: B")
# reset_ufo closed the font, so reopen the pristine SFD for the next master.
font = fontforge.open(args.SFD)
def make_master(g, d1, d2):
    """Shift glyph *g*'s foreground in place by the offset between HOI path
    positions *d1* and *d2*, producing one interpolation master.

    Each contour in the "HOI paths" layer describes the trajectory of one
    foreground point; the integer name of the contour's first point is the
    flat index of the controlled foreground point (validated earlier).
    """
    # Map: flat foreground point index -> (dx, dy) between the two HOI stops.
    deltas = dict()
    for c in g.layers["HOI paths"]:
        dx = c[d1].x - c[d2].x
        dy = c[d1].y - c[d2].y
        deltas[int(c[0].name)] = {"dx": dx, "dy": dy}
    # First pass: shift every explicitly controlled point.
    newfore = fontforge.layer()
    acc = 0
    for i, c in enumerate(g.foreground):
        for j, p in enumerate(c):
            if acc in deltas:
                p.x += deltas[acc]["dx"]
                p.y += deltas[acc]["dy"]
            acc+=1
        # NOTE(review): unlike the second pass below, the mutated point is not
        # written back via newfore[i][j]; this relies on fontforge contour
        # iteration yielding live point references — confirm against the
        # fontforge Python API before refactoring.
        newfore+=c
    # Second pass: drag along uncontrolled points (typically off-curve control
    # handles) that differ from the end state, using the delta of an adjacent
    # controlled on-curve point.
    pts_s = list(itertools.chain.from_iterable(g.foreground))
    pts_e = list(itertools.chain.from_iterable(g.layers["End state"]))
    deltas2 = dict()
    for i, pt in enumerate(pts_s):
        if i in deltas: continue
        lpt = pts_e[i]
        # Point does not move between start and end states: leave it alone.
        if pt.x == lpt.x and pt.y == lpt.y: continue
        if i-1 in deltas and pts_s[i-1].on_curve:
            deltas2[i] = deltas[i-1]
        elif i+1 in deltas and pts_s[i+1].on_curve:
            deltas2[i] = deltas[i+1]
        elif i == len(pts_s)-1 and 0 in deltas:
            # Last point wraps around to the first controlled point.
            deltas2[i] = deltas[0]
    acc = 0
    for i, c in enumerate(newfore):
        for j, p in enumerate(c):
            if acc in deltas2:
                p.x += deltas2[acc]["dx"]
                p.y += deltas2[acc]["dy"]
                newfore[i][j] = p
            acc+=1
    g.foreground = newfore
# Master B: shift each glyph by HOI position 1 relative to position 0.
for g in font.glyphs():
    eprint("Glyph: {}".format(g.glyphname))
    make_master(g, 1, 0)
HOIInfo.set_HOI_rbearing(font, HOIdict, "c2")
del_unneeded_ufo_layers(font)
reset_ufo("B", font)
eprint("Generating master: C")
# Master C: reopen the pristine SFD and shift by HOI position 2 instead.
font = fontforge.open(args.SFD)
for g in font.glyphs():
    eprint("Glyph: {}".format(g.glyphname))
    make_master(g, 2, 0)
HOIInfo.set_HOI_rbearing(font, HOIdict, "c3")
del_unneeded_ufo_layers(font)
reset_ufo("C", font)
eprint("Generating master: D")
# Master D: the end state becomes the foreground outright; no shifting needed.
font = fontforge.open(args.SFD)
for g in font.glyphs():
    g.foreground = g.layers["End state"]
del_unneeded_ufo_layers(font)
reset_ufo("D", font)
eprint("Done!")
| 28.29771 | 186 | 0.635554 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 821 | 0.220936 |
0fa8c715ee62ee11ebf3efaa36c899ff0cb9fc40 | 11,293 | py | Python | graphql_subscription_manager/__init__.py | Danielhiversen/PyGraphqlWebsocketManager | 7e6e82199d56def8dbb3f710174daa763acf7f74 | [
"MIT"
] | null | null | null | graphql_subscription_manager/__init__.py | Danielhiversen/PyGraphqlWebsocketManager | 7e6e82199d56def8dbb3f710174daa763acf7f74 | [
"MIT"
] | null | null | null | graphql_subscription_manager/__init__.py | Danielhiversen/PyGraphqlWebsocketManager | 7e6e82199d56def8dbb3f710174daa763acf7f74 | [
"MIT"
] | 1 | 2021-06-11T14:27:55.000Z | 2021-06-11T14:27:55.000Z | """Subscription manager for Graph QL websocket."""
import asyncio
import json
import logging
import socket
import sys
from time import time
import pkg_resources
import websockets
_LOGGER = logging.getLogger(__name__)
# Life-cycle states for SubscriptionManager._state.
STATE_STARTING = "starting"
STATE_RUNNING = "running"
STATE_STOPPED = "stopped"
try:
    VERSION = pkg_resources.require("graphql-subscription-manager")[0].version
except Exception: # pylint: disable=broad-except
    # Package metadata unavailable (e.g. running from a source checkout).
    VERSION = "dev"
class SubscriptionManager:
    """Subscription manager.

    Maintains a single GraphQL-subscriptions websocket connection, restarts
    it on failure (with exponential backoff on server-side socket limits),
    and dispatches incoming payloads to per-subscription callbacks.
    """

    # pylint: disable=too-many-instance-attributes

    def __init__(self, init_payload, url):
        """Create resources for websocket communication."""
        # Prefer the already-running loop; fall back for sync construction.
        try:
            self.loop = asyncio.get_running_loop()
        except RuntimeError:
            self.loop = asyncio.get_event_loop()
        # Maps subscription id -> (callback, query string).
        self.subscriptions = {}
        self._url = url
        self._state = None
        self.websocket = None
        self._retry_timer = None
        self._client_task = None
        # Seconds before reconnecting; doubled on "too many sockets" errors.
        self._wait_time_before_retry = 15
        self._session_id = 0
        self._init_payload = init_payload
        # First connection failure logs at ERROR, repeats at WARNING/DEBUG.
        self._show_connection_error = True
        self._is_running = False
        self._user_agent = "Python/{0[0]}.{0[1]} PyGraphqlWebsocketManager/{1}".format(
            sys.version_info, VERSION
        )

    def start(self):
        """Start websocket."""
        _LOGGER.debug("Start state %s.", self._state)
        if self._state == STATE_RUNNING:
            return
        self._state = STATE_STARTING
        self._cancel_client_task()
        self._client_task = self.loop.create_task(self.running())
        # Re-register existing subscriptions on the fresh connection; each is
        # popped and re-added so it receives a new session id.
        for subscription in self.subscriptions.copy():
            callback, sub_query = self.subscriptions.pop(subscription, (None, None))
            _LOGGER.debug("Removed, %s", subscription)
            if callback is None:
                continue
            _LOGGER.debug("Add subscription %s", callback)
            self.loop.create_task(self.subscribe(sub_query, callback))

    @property
    def is_running(self):
        """Return if client is running or not."""
        return self._is_running

    async def running(self):
        """Start websocket connection."""
        # pylint: disable=too-many-branches, too-many-statements
        await self._close_websocket()
        _LOGGER.debug("Starting")
        try:
            self.websocket = await asyncio.wait_for(
                websockets.connect(
                    self._url,
                    subprotocols=["graphql-subscriptions"],
                    extra_headers={"User-Agent": self._user_agent},
                ),
                timeout=30,
            )
        except Exception: # pylint: disable=broad-except
            _LOGGER.debug("Failed to connect. Reconnecting... ", exc_info=True)
            self._state = STATE_STOPPED
            self.retry()
            return
        self._state = STATE_RUNNING
        _LOGGER.debug("Running")
        # Handshake: send the init message with the auth payload.
        await self.websocket.send(
            json.dumps({"type": "init", "payload": self._init_payload})
        )
        try:
            # k counts consecutive 30-second receive timeouts.
            k = 0
            while self._state == STATE_RUNNING:
                try:
                    msg = await asyncio.wait_for(self.websocket.recv(), timeout=30)
                except asyncio.TimeoutError:
                    k += 1
                    if k > 10:
                        # ~5 minutes of silence: force a reconnect.
                        if self._show_connection_error:
                            _LOGGER.error("No data, reconnecting.")
                            self._show_connection_error = False
                        else:
                            _LOGGER.warning("No data, reconnecting.")
                        self._is_running = False
                        _LOGGER.debug("Reconnecting")
                        self._state = STATE_STOPPED
                        self.retry()
                    _LOGGER.debug(
                        "No websocket data in 30 seconds, checking the connection."
                    )
                    # Probe the connection with a ping before waiting again.
                    try:
                        pong_waiter = await self.websocket.ping()
                        await asyncio.wait_for(pong_waiter, timeout=10)
                    except asyncio.TimeoutError:
                        if self._show_connection_error:
                            _LOGGER.error(
                                "No response to ping in 10 seconds, reconnecting."
                            )
                            self._show_connection_error = False
                        else:
                            _LOGGER.warning(
                                "No response to ping in 10 seconds, reconnecting."
                            )
                        self._is_running = False
                        _LOGGER.debug("Reconnecting")
                        self._state = STATE_STOPPED
                        self.retry()
                    continue
                k = 0
                self._is_running = True
                await self._process_msg(msg)
                # A processed message proves the link is healthy again.
                self._show_connection_error = True
        except (websockets.exceptions.InvalidStatusCode, socket.gaierror):
            if self._show_connection_error:
                _LOGGER.error("Connection error", exc_info=True)
                self._show_connection_error = False
            else:
                _LOGGER.debug("Connection error", exc_info=True)
        except websockets.exceptions.ConnectionClosed:
            # A close during deliberate shutdown is expected; stay quiet then.
            if self._show_connection_error and self._state != STATE_STOPPED:
                _LOGGER.error("Connection error", exc_info=True)
                self._show_connection_error = False
            else:
                _LOGGER.debug("Connection error", exc_info=True)
        except Exception: # pylint: disable=broad-except
            _LOGGER.error("Unexpected error", exc_info=True)
        finally:
            await self._close_websocket()
            # Any exit not caused by stop() schedules a reconnect.
            if self._state != STATE_STOPPED:
                _LOGGER.debug("Reconnecting")
                self._state = STATE_STOPPED
                self.retry()
            _LOGGER.debug("Closing running task.")

    async def stop(self, timeout=10):
        """Close websocket connection."""
        _LOGGER.debug("Stopping client.")
        start_time = time()
        self._cancel_retry_timer()
        # NOTE(review): assumes subscription ids are the contiguous range
        # 0..n-1; ids freed by earlier unsubscribes would break this — verify.
        for subscription_id in range(len(self.subscriptions)):
            _LOGGER.debug("Sending unsubscribe: %s", subscription_id)
            await self.unsubscribe(subscription_id)
        # Give the server up to half the timeout to acknowledge unsubscribes.
        while (
            timeout > 0
            and self.websocket is not None
            and not self.subscriptions
            and (time() - start_time) < timeout / 2
        ):
            await asyncio.sleep(0.1)
        self._state = STATE_STOPPED
        await self._close_websocket()
        # Then wait for the socket to actually report closed.
        while (
            timeout > 0
            and self.websocket is not None
            and not self.websocket.closed
            and (time() - start_time) < timeout
        ):
            await asyncio.sleep(0.1)
        self._cancel_client_task()
        _LOGGER.debug("Server connection is stopped")

    def retry(self):
        """Retry to connect to websocket."""
        _LOGGER.debug("Retry, state: %s", self._state)
        # A start/run already in flight makes another retry redundant.
        if self._state in [STATE_STARTING, STATE_RUNNING]:
            _LOGGER.debug("Skip retry since state: %s", self._state)
            return
        _LOGGER.debug("Cancel retry timer")
        self._cancel_retry_timer()
        self._state = STATE_STARTING
        _LOGGER.debug("Restart")
        self._retry_timer = self.loop.call_later(
            self._wait_time_before_retry, self.start
        )
        _LOGGER.debug(
            "Reconnecting to server in %i seconds.", self._wait_time_before_retry
        )

    async def subscribe(self, sub_query, callback, timeout=3):
        """Add a new subscription."""
        current_session_id = self._session_id
        self._session_id += 1
        subscription = {
            "query": sub_query,
            "type": "subscription_start",
            "id": current_session_id,
        }
        json_subscription = json.dumps(subscription)
        # Register immediately so start() can replay it after a reconnect.
        self.subscriptions[current_session_id] = (callback, sub_query)
        start_time = time()
        # Poll until the socket is open and running, then send once.
        # Returns None implicitly if the connection never comes up in time.
        while time() - start_time < timeout:
            if (
                self.websocket is None
                or not self.websocket.open
                or not self._state == STATE_RUNNING
            ):
                await asyncio.sleep(1)
                continue
            await self.websocket.send(json_subscription)
            _LOGGER.debug("New subscription %s", current_session_id)
            return current_session_id

    async def unsubscribe(self, subscription_id):
        """Unsubscribe."""
        if self.websocket is None or not self.websocket.open:
            _LOGGER.warning("Websocket is closed.")
            return
        await self.websocket.send(
            json.dumps({"id": subscription_id, "type": "subscription_end"})
        )
        if self.subscriptions and subscription_id in self.subscriptions:
            self.subscriptions.pop(subscription_id)

    async def _close_websocket(self):
        # Idempotent close; always clears the reference even if close() fails.
        if self.websocket is None:
            return
        try:
            await self.websocket.close()
        finally:
            self.websocket = None

    async def _process_msg(self, msg):
        """Process received msg."""
        result = json.loads(msg)
        _LOGGER.debug("Recv, %s", result)
        if result.get("type") == "init_fail":
            if (
                result.get("payload", {}).get("error")
                == "Too many concurrent sockets for token"
            ):
                # Exponential backoff, capped at 600 s; log once past 120 s.
                self._wait_time_before_retry = self._wait_time_before_retry * 2
                if self._wait_time_before_retry >= 120:
                    _LOGGER.error(
                        "Connection is closed, too many concurrent sockets for token"
                    )
                self._wait_time_before_retry = min(self._wait_time_before_retry, 600)
                return
            _LOGGER.error(result.get("payload", {}).get("error"))
            return
        subscription_id = result.get("id")
        if subscription_id is None:
            return
        callback, _ = self.subscriptions.get(subscription_id, (None, None))
        if callback is None:
            _LOGGER.debug("Unknown id %s.", subscription_id)
            return
        if result.get("type", "") == "complete":
            _LOGGER.debug("Unsubscribe %s successfully.", subscription_id)
            return
        data = result.get("payload")
        if data is None:
            return
        # Successful data resets the backoff to its initial value.
        self._wait_time_before_retry = 15
        try:
            await callback(data)
        except TypeError as exp:
            # Support plain (non-async) callbacks: retry the call without
            # awaiting when the awaited result was None.
            if "object NoneType can't be used in 'await' expression" in str(exp):
                callback(data)
                return
            raise exp

    def _cancel_retry_timer(self):
        if self._retry_timer is None:
            return
        try:
            self._retry_timer.cancel()
        finally:
            self._retry_timer = None

    def _cancel_client_task(self):
        if self._client_task is None:
            return
        try:
            self._client_task.cancel()
        finally:
            self._client_task = None
| 34.854938 | 87 | 0.553352 | 10,834 | 0.959355 | 0 | 0 | 117 | 0.01036 | 8,083 | 0.715753 | 1,783 | 0.157885 |
0fa9623f1cbc23c68dedf361a5e9c2d89e9defba | 386 | py | Python | github_api/cli/env.py | kazamori/github-cli | f8b9f1ffedd12fe43664516dacc6c3a861ae4dc9 | [
"Apache-2.0"
] | null | null | null | github_api/cli/env.py | kazamori/github-cli | f8b9f1ffedd12fe43664516dacc6c3a861ae4dc9 | [
"Apache-2.0"
] | null | null | null | github_api/cli/env.py | kazamori/github-cli | f8b9f1ffedd12fe43664516dacc6c3a861ae4dc9 | [
"Apache-2.0"
] | null | null | null | import os
# Where users create Personal Access Tokens for the GitHub API.
_GITHUB_SETTING_SITE = 'https://github.com/settings/tokens'

# The token is mandatory: fail fast at import time with setup instructions.
TOKEN = os.environ.get('GITHUB_API_TOKEN')
if TOKEN is None:
    raise RuntimeError(
        'requires Personal access tokens. '
        'export GITHUB_API_TOKEN="***", '
        f'get from {_GITHUB_SETTING_SITE} if you do not have'
    )

# Default headers for authenticated GitHub API requests.
HEADERS = {'Authorization': f'token {TOKEN}'}
| 25.733333 | 68 | 0.699482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 206 | 0.533679 |
0fa981c50f162d9ee1e72a1a2524c381e554d62b | 20,124 | py | Python | methionineTest.py | Csernica/Isotomics | dffd518d479fcfaa40da5dd275631fe68332bfb4 | [
"MIT"
] | null | null | null | methionineTest.py | Csernica/Isotomics | dffd518d479fcfaa40da5dd275631fe68332bfb4 | [
"MIT"
] | null | null | null | methionineTest.py | Csernica/Isotomics | dffd518d479fcfaa40da5dd275631fe68332bfb4 | [
"MIT"
] | null | null | null | import copy
import json
import numpy as np
import pandas as pd
import basicDeltaOperations as op
import calcIsotopologues as ci
import fragmentAndSimulate as fas
import solveSystem as ss
'''
This is a set of functions to quickly initalize methionine molecules based on input delta values and to simulate its fragmentation. See runAllTests for implementation.
'''
def initializeMethionine(deltas, fragSubset=None, printHeavy=True):
    '''
    Initialize a methionine molecule description from site-specific deltas.

    Inputs:
        deltas: A list of 13 M1 delta values, giving the delta values by site for the
                13C, 17O, 15N, 33S, and 2H isotopes, ordered as in IDList below.
        fragSubset: A list giving the subset of fragments to observe; defaults to all
                    fragments. (A None sentinel is used so the default list is rebuilt
                    on every call instead of being a shared mutable default.)
        printHeavy: 17O is specified directly and 18O is set by mass scaling (see
                    basicDeltaOperations); when True, print delta 18O, 34S and 36S.

    Outputs:
        molecularDataFrame: A dataframe with one row per site (element, multiplicity,
                            delta) plus one column per fragment subgeometry.
        expandedFrags: An ATOM depiction of each fragment (one entry per atom rather
                       than per site); see fragmentAndSimulate.
        fragKeys: Strings such as '133_01' naming each fragment subgeometry.
        fragmentationDictionary: The fragment table restricted to fragSubset.
    '''
    # Fix for the mutable-default-argument pitfall: build the default fresh.
    if fragSubset is None:
        fragSubset = ['full','133','104','102','88','74High','74Low','61','56']

    ##### INITIALIZE SITES #####
    IDList = ['Cmethyl','Cgamma','Calphabeta','Ccarboxyl','Ocarboxyl','Ssulfur','Namine','Hmethyl','Hgamma',
             'Halphabeta','Hamine','Hhydroxyl','Hprotonated']
    elIDs = ['C','C','C','C','O','S','N','H','H','H','H','H','H']
    numberAtSite = [1,1,2,1,2,1,1,3,2,3,2,1,1]
    l = [elIDs, numberAtSite, deltas]
    cols = ['IDS','Number','deltas']
    condensedFrags =[]
    fragKeys = []
    # Subgeometries: 1 marks a site retained by the fragment, 'x' a lost site.
    # 88 and both 74 are conjecture. 74 High has only one oxygen, so we generally do not use it.
    allFragments = {'full':{'01':{'subgeometry':[1,1,1,1,1,1,1,1,1,1,1,1,1],'relCont':1}},
                    '133':{'01':{'subgeometry':[1,1,1,1,1,1,'x',1,1,1,'x',1,'x'],'relCont':1}},
                    '104':{'01':{'subgeometry':[1,1,1,'x','x',1,1,1,1,1,1,'x','x'],'relCont':1}},
                    '102':{'01':{'subgeometry':['x',1,1,1,1,'x',1,'x',1,1,1,1,'x'],'relCont':1}},
                    '88':{'01':{'subgeometry':[1,1,1,'x','x',1,'x',1,1,'x',1,'x','x'],'relCont':1}},
                    '74High':{'01':{'subgeometry':[1,'x',1,'x',1,'x',1,1,1,1,'x','x','x'],'relCont':1}},
                    '74Low':{'01':{'subgeometry':[1,1,'x','x',1,'x',1,'x',1,'x',1,'x','x'],'relCont':1}},
                    '61':{'01':{'subgeometry':[1,1,'x','x','x',1,'x',1,1,'x','x','x','x'],'relCont':1}},
                    '56':{'01':{'subgeometry':['x',1,1,'x','x','x',1,'x',1,1,'x',1,'x'],'relCont':1}}}
    fragmentationDictionary = {key: value for key, value in allFragments.items() if key in fragSubset}
    # One dataframe column (and one condensed geometry) per fragment subgeometry.
    for fragKey, subFragDict in fragmentationDictionary.items():
        for subFragNum, subFragInfo in subFragDict.items():
            l.append(subFragInfo['subgeometry'])
            cols.append(fragKey + '_' + subFragNum)
            condensedFrags.append(subFragInfo['subgeometry'])
            fragKeys.append(fragKey + '_' + subFragNum)
    molecularDataFrame = pd.DataFrame(l, columns = IDList)
    molecularDataFrame = molecularDataFrame.transpose()
    molecularDataFrame.columns = cols
    expandedFrags = [fas.expandFrag(x, numberAtSite) for x in condensedFrags]
    if printHeavy:
        # Heavy-isotope deltas follow from the specified 17O/33S by mass scaling.
        SConc = op.deltaToConcentration('S',deltas[5])
        del34 = op.ratioToDelta('34S',SConc[2]/SConc[0])
        del36 = op.ratioToDelta('36S',SConc[3]/SConc[0])
        OConc = op.deltaToConcentration('O',deltas[4])
        del18 = op.ratioToDelta('18O',OConc[2]/OConc[0])
        print("Delta 34S")
        print(del34)
        print("Delta 36S")
        print(del36)
        print("Delta 18O")
        print(del18)
    return molecularDataFrame, expandedFrags, fragKeys, fragmentationDictionary
def simulateMeasurement(molecularDataFrame, fragmentationDictionary, expandedFrags, fragKeys,
                        abundanceThreshold = 0, UValueList = None,
                        massThreshold = 4, clumpD = None, outputPath = None, disableProgress = False,
                        calcFF = False, fractionationFactors = None, omitMeasurements = None,
                        ffstd = 0.05, unresolvedDict = None, outputFull = False):
    '''
    Simulate M+N measurements of a methionine molecule whose site deltas are
    given by molecularDataFrame.

    Inputs:
        molecularDataFrame: A dataframe containing basic information about the molecule.
        fragmentationDictionary: Fragment subgeometries keyed by fragment (see
                                 initializeMethionine).
        expandedFrags: An ATOM depiction of each fragment (one entry per atom).
        fragKeys: Subgeometry labels such as '133_01'.
        abundanceThreshold: Omit beams below this M+N relative abundance (treated
                            as unmeasurable).
        UValueList: Substitutions to compute molecular average U values for
                    (e.g. '13C', '15N'); default empty.
        massThreshold: Compute M+N relative abundances for N <= massThreshold.
        clumpD: Optional clumped-substitution specification; empty/None keeps the
                stochastic assumption (see ci.introduceClump).
        outputPath: If a string, the simulated spectrum is also written to
                    "<outputPath>.json".
        disableProgress: Suppress progress output when True.
        calcFF / ffstd: When calcFF is True, draw fresh fractionation factors from
                        a normal distribution centered at 1 with sigma ffstd.
        fractionationFactors: Previously computed per-beam factors to reapply
                              (e.g. for a sample/standard pair).
        omitMeasurements: Beams to treat as unobserved regardless of abundance.
        unresolvedDict: Unresolved ion beams that add together.
        outputFull: Keep unobserved beams in the output (debugging only; breaks
                    the solver routine).

    Outputs:
        predictedMeasurement: M+N measurement information.
        MN: Per-mass-selection isotopologue information ("M1", "M2", ...).
        FF: Fractionation factors used (empty unless calcFF is True).

    Note: the mutable-container defaults were replaced with None sentinels so
    successive calls never share the same list/dict object.
    '''
    if UValueList is None:
        UValueList = []
    if clumpD is None:
        clumpD = {}
    if fractionationFactors is None:
        fractionationFactors = {}
    if omitMeasurements is None:
        omitMeasurements = {}
    if unresolvedDict is None:
        unresolvedDict = {}

    # M+1 experiments only require the singly substituted isotopologues.
    M1Only = massThreshold == 1

    byAtom = ci.inputToAtomDict(molecularDataFrame, disable = disableProgress, M1Only = M1Only)

    # Introduce any clumps of interest; otherwise the isotome is stochastic.
    if clumpD == {}:
        bySub = ci.calcSubDictionary(byAtom, molecularDataFrame, atomInput = True)
    else:
        print("Adding clumps")
        # Keep the stochastic distribution around so each clump can be checked.
        stochD = copy.deepcopy(byAtom)
        for clumpNumber, clumpInfo in clumpD.items():
            byAtom = ci.introduceClump(byAtom, clumpInfo['Sites'], clumpInfo['Amount'], molecularDataFrame)
        for clumpNumber, clumpInfo in clumpD.items():
            ci.checkClumpDelta(clumpInfo['Sites'], molecularDataFrame, byAtom, stochD)
        bySub = ci.calcSubDictionary(byAtom, molecularDataFrame, atomInput = True)

    # Initialize measurement output.
    if not disableProgress:
        print("Simulating Measurement")
    allMeasurementInfo = {}
    allMeasurementInfo = fas.UValueMeasurement(bySub, allMeasurementInfo, massThreshold = massThreshold,
                                               subList = UValueList)

    MN = ci.massSelections(byAtom, massThreshold = massThreshold)
    MN = fas.trackMNFragments(MN, expandedFrags, fragKeys, molecularDataFrame, unresolvedDict = unresolvedDict)

    predictedMeasurement, FF = fas.predictMNFragmentExpt(allMeasurementInfo, MN, expandedFrags, fragKeys, molecularDataFrame,
                                                         fragmentationDictionary,
                                                         abundanceThreshold = abundanceThreshold, calcFF = calcFF, ffstd = ffstd,
                                                         fractionationFactors = fractionationFactors,
                                                         omitMeasurements = omitMeasurements,
                                                         unresolvedDict = unresolvedDict, outputFull = outputFull)

    if outputPath is not None:
        # Context manager guarantees the file is closed even if dumping fails.
        with open(outputPath + ".json", "w") as f:
            f.write(json.dumps(predictedMeasurement))

    return predictedMeasurement, MN, FF
def updateAbundanceCorrection(latestDeltas, fragSubset, fragmentationDictionary, expandedFrags,
                              fragSubgeometryKeys, processStandard, processSample, isotopologuesDict, UValuesSmp, molecularDataFrame,
                              NUpdates = 30, breakCondition = 1, perturbTheoryOAmt = 0.002,
                              experimentalOCorrectList = None,
                              abundanceThreshold = 0,
                              massThreshold = 1,
                              omitMeasurements = None,
                              unresolvedDict = None,
                              UMNSub = None,
                              N = 100,
                              setSpreadByExtreme = False,
                              oACorrectBounds = False):
    '''
    Iterated O-value (abundance) correction for methionine M+1 measurements.

    Each of up to NUpdates iterations: (1) re-simulates the methionine
    measurement from the latest deltas and recomputes the theoretical O
    corrections; (2) turns each correction into a normal distribution with
    sigma = mean * perturbTheoryOAmt; (3) re-solves the site-specific
    structure by Monte Carlo; (4) stops once the summed squared change in
    deltas falls below breakCondition.

    Inputs:
        latestDeltas: starting per-site delta values.
        fragSubset / fragmentationDictionary / expandedFrags /
            fragSubgeometryKeys: fragment geometry (see initializeMethionine).
        processStandard / processSample: observed data keyed by file, M+N
            experiment, and fragment (see runAllTests for the exact layout).
        isotopologuesDict: isotopologues keyed by mass selection ("M0", "M1", ...).
        UValuesSmp: molecular-average U values and errors for the sample,
            e.g. {'13C': {'Observed': float, 'Error': float}}.
        molecularDataFrame: site-specific molecule description; updated in
            place each iteration.
        NUpdates: maximum number of iterations.
        breakCondition: residual threshold (sum of squared delta changes).
        perturbTheoryOAmt: relative sigma for each sampled O correction.
        experimentalOCorrectList: peaks to correct experimentally (default []).
        abundanceThreshold / massThreshold / omitMeasurements / unresolvedDict:
            forwarded to simulateMeasurement.
        UMNSub: substitutions used for the U^M+N routine (default ['13C']);
            use only fully solved, abundant substitutions.
        N: Monte Carlo iterations per update.
        setSpreadByExtreme, oACorrectBounds: accepted for interface
            compatibility; not used by this routine.

    Outputs:
        M1Results: final Monte Carlo results (None when NUpdates < 1).
        thisODict: per-iteration record of residuals, deltas, O factors, and
            sampled-O histograms (every 10th iteration and the final one).

    Notes on changes: mutable-container defaults replaced with None sentinels;
    the corrupted final return statement (stats-table text fused onto the
    line) was restored; M1Results is initialized so the return is always bound.
    '''
    if experimentalOCorrectList is None:
        experimentalOCorrectList = []
    if omitMeasurements is None:
        omitMeasurements = {}
    if unresolvedDict is None:
        unresolvedDict = {}
    if UMNSub is None:
        UMNSub = ['13C']

    # Track the output of the iterated correction process.
    thisODict = {'residual':[],
                 'delta':[],
                 'O':[],
                 'relDelta':[],
                 'relDeltaErr':[],
                 'Histogram':[]}

    M1Results = None  # bound even if the loop body never runs

    for i in range(NUpdates):
        oldDeltas = latestDeltas
        # Get new dataframe, simulate new measurement with the latest deltas.
        M1Df, expandedFrags, fragSubgeometryKeys, fragmentationDictionary = initializeMethionine(
            latestDeltas, fragSubset, printHeavy = False)
        predictedMeasurementUpdate, MNDictUpdate, FFUpdate = simulateMeasurement(
            M1Df, fragmentationDictionary,
            expandedFrags,
            fragSubgeometryKeys,
            abundanceThreshold = abundanceThreshold,
            massThreshold = massThreshold,
            calcFF = False,
            outputPath = None,
            disableProgress = True,
            fractionationFactors = {},
            omitMeasurements = omitMeasurements,
            unresolvedDict = unresolvedDict)

        # Generate new O corrections from the updated theoretical spectrum.
        OCorrectionUpdate = ss.OValueCorrectTheoretical(predictedMeasurementUpdate, processSample,
                                                        massThreshold = massThreshold)

        # Each O correction becomes (mu, sigma) with sigma set by
        # perturbTheoryOAmt; explicitOCorrect may optionally carry 'Bounds'
        # entries, but bounded extremes are not used by this routine.
        explicitOCorrect = {}
        for MNKey, MNData in OCorrectionUpdate.items():
            if MNKey not in explicitOCorrect:
                explicitOCorrect[MNKey] = {}
            for fragKey, fragData in MNData.items():
                if fragKey not in explicitOCorrect[MNKey]:
                    explicitOCorrect[MNKey][fragKey] = {}
                explicitOCorrect[MNKey][fragKey]['Mu,Sigma'] = (fragData, fragData * perturbTheoryOAmt)

        M1Results = ss.M1MonteCarlo(processStandard, processSample, OCorrectionUpdate, isotopologuesDict,
                                    fragmentationDictionary, perturbTheoryOAmt = perturbTheoryOAmt,
                                    experimentalOCorrectList = experimentalOCorrectList,
                                    N = N, GJ = False, debugMatrix = False, disableProgress = True,
                                    storePerturbedSamples = False, storeOCorrect = True,
                                    explicitOCorrect = explicitOCorrect, perturbOverrideList = ['M1'])

        processedResults = ss.processM1MCResults(M1Results, UValuesSmp, isotopologuesDict, molecularDataFrame,
                                                 disableProgress = True, UMNSub = UMNSub)

        ss.updateSiteSpecificDfM1MC(processedResults, molecularDataFrame)

        M1Df = molecularDataFrame.copy()
        M1Df['deltas'] = M1Df['VPDB etc. Deltas']

        thisODict['O'].append(copy.deepcopy(OCorrectionUpdate['M1']))
        thisODict['delta'].append(list(M1Df['deltas']))

        # Residual: sum of squared differences between successive delta sets.
        residual = ((np.array(M1Df['deltas']) - np.array(oldDeltas))**2).sum()
        thisODict['residual'].append(residual)
        latestDeltas = M1Df['deltas'].values

        thisODict['relDelta'].append(M1Df['Relative Deltas'].values)
        thisODict['relDeltaErr'].append(M1Df['Relative Deltas Error'].values)

        print(residual)

        # Store a histogram of the sampled O factors every 10th iteration and
        # on the final (converged) iteration.
        if i % 10 == 0 or residual <= breakCondition:
            correctVals = {'61':[],
                           '133':[],
                           'full':[]}
            for res in M1Results['Extra Info']['O Correct']:
                correctVals['full'].append(res['full'])
                correctVals['133'].append(res['133'])
                correctVals['61'].append(res['61'])
            thisODict['Histogram'].append(copy.deepcopy(correctVals))

        if residual <= breakCondition:
            break

    return M1Results, thisODict
0faccc6541fe716583eb34dadb7193b4db3cb766 | 11,704 | py | Python | neuro-cli/tests/unit/formatters/test_storage_formatters.py | neuro-inc/platform-client-python | 012e355249ea900b76f9ce4209fb9d029652f9b2 | [
"Apache-2.0"
] | 11 | 2020-10-11T15:38:11.000Z | 2021-11-09T11:29:50.000Z | neuro-cli/tests/unit/formatters/test_storage_formatters.py | neuro-inc/platform-client-python | 012e355249ea900b76f9ce4209fb9d029652f9b2 | [
"Apache-2.0"
] | 611 | 2020-09-30T21:27:52.000Z | 2022-01-10T10:44:44.000Z | neuro-cli/tests/unit/formatters/test_storage_formatters.py | neuro-inc/platform-client-python | 012e355249ea900b76f9ce4209fb9d029652f9b2 | [
"Apache-2.0"
] | 1 | 2020-10-05T15:10:24.000Z | 2020-10-05T15:10:24.000Z | import time
from typing import Any, List
import pytest
from yarl import URL
from neuro_sdk import Action, FileStatus, FileStatusType
from neuro_sdk.storage import DiskUsageInfo
from neuro_cli.formatters.storage import (
BaseFilesFormatter,
BSDAttributes,
BSDPainter,
DiskUsageFormatter,
FilesSorter,
GnuIndicators,
GnuPainter,
LongFilesFormatter,
NonePainter,
SimpleFilesFormatter,
VerticalColumnsFilesFormatter,
get_painter,
)
class TestNonePainter:
    """Snapshot test for the colorless (plain) painter."""

    def test_simple(self, rich_cmp: Any) -> None:
        # Fixed mtime keeps the rendered snapshot deterministic.
        mtime = int(
            time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))
        )
        status = FileStatus(
            "File1",
            2048,
            FileStatusType.FILE,
            mtime,
            Action.READ,
            uri=URL("storage://default/user/File1"),
        )
        rich_cmp(NonePainter().paint(status.name, status.type))
class TestGnuPainter:
def test_color_parsing_simple(self) -> None:
painter = GnuPainter("rs=1;0;1")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
painter = GnuPainter(":rs=1;0;1")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
painter = GnuPainter("rs=1;0;1:")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
painter = GnuPainter("rs=1;0;1:fi=32;42")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
assert painter.color_indicator[GnuIndicators.FILE] == "32;42"
painter = GnuPainter("rs=1;0;1:fi")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
assert painter.color_indicator[GnuIndicators.FILE] == ""
painter = GnuPainter("rs=1;0;1:fi=")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
assert painter.color_indicator[GnuIndicators.FILE] == ""
    @pytest.mark.parametrize(
        "escaped,result",
        [
            ("\\a", "\a"),
            ("\\b", "\b"),
            ("\\e", chr(27)),
            ("\\f", "\f"),
            ("\\n", "\n"),
            ("\\r", "\r"),
            ("\\t", "\t"),
            ("\\v", "\v"),
            ("\\?", chr(127)),
            ("\\_", " "),
            ("a\\n", "a\n"),
            ("a\\tb", "a\tb"),
            ("a\\t\\rb", "a\t\rb"),
            ("a\\=b", "a=b"),
        ],
    )
    def test_color_parsing_escaped_simple(self, escaped: str, result: str) -> None:
        # GNU LS_COLORS backslash escapes must decode identically whether they
        # appear as an indicator value (rs=<esc>) or as an extension key
        # (<esc>=...), in both key and value positions.
        painter = GnuPainter("rs=" + escaped)
        assert painter.color_indicator[GnuIndicators.RESET] == result
        painter = GnuPainter(escaped + "=1;2")
        assert painter.color_ext_type[result] == "1;2"
        painter = GnuPainter(escaped + "=" + escaped)
        assert painter.color_ext_type[result] == result
    @pytest.mark.parametrize(
        "escaped,result",
        [
            ("\\7", chr(7)),
            ("\\8", "8"),
            ("\\10", chr(8)),
            ("a\\2", "a" + chr(2)),
            ("a\\2b", "a" + chr(2) + "b"),
        ],
    )
    def test_color_parsing_escaped_octal(self, escaped: str, result: str) -> None:
        # Octal escapes (\NNN): digits 0-7 decode to the octal code point;
        # a non-octal digit like \8 is passed through literally.
        painter = GnuPainter("rs=" + escaped)
        assert painter.color_indicator[GnuIndicators.RESET] == result
        painter = GnuPainter(escaped + "=1;2")
        assert painter.color_ext_type[result] == "1;2"
        painter = GnuPainter(escaped + "=" + escaped)
        assert painter.color_ext_type[result] == result
@pytest.mark.parametrize(
"escaped,result",
[
("\\x7", chr(0x7)),
("\\x8", chr(0x8)),
("\\x10", chr(0x10)),
("\\XaA", chr(0xAA)),
("a\\x222", "a" + chr(0x22) + "2"),
("a\\x2z", "a" + chr(0x2) + "z"),
],
)
def test_color_parsing_escaped_hex(self, escaped: str, result: str) -> None:
painter = GnuPainter("rs=" + escaped)
assert painter.color_indicator[GnuIndicators.RESET] == result
painter = GnuPainter(escaped + "=1;2")
assert painter.color_ext_type[result] == "1;2"
painter = GnuPainter(escaped + "=" + escaped)
assert painter.color_ext_type[result] == result
@pytest.mark.parametrize(
"escaped,result",
[
("^a", chr(1)),
("^?", chr(127)),
("^z", chr(26)),
("a^Z", "a" + chr(26)),
("a^Zb", "a" + chr(26) + "b"),
],
)
def test_color_parsing_carret(self, escaped: str, result: str) -> None:
painter = GnuPainter("rs=" + escaped)
assert painter.color_indicator[GnuIndicators.RESET] == result
painter = GnuPainter(escaped + "=1;2")
assert painter.color_ext_type[result] == "1;2"
painter = GnuPainter(escaped + "=" + escaped)
assert painter.color_ext_type[result] == result
@pytest.mark.parametrize("escaped", [("^1"), ("^"), ("^" + chr(130))])
def test_color_parsing_carret_incorrect(self, escaped: str) -> None:
with pytest.raises(EnvironmentError):
GnuPainter("rs=" + escaped)
with pytest.raises(EnvironmentError):
GnuPainter(escaped + "=1;2")
@pytest.mark.parametrize(
"ls_colors",
[
"di=32;41:fi=0;44:no=0;46",
"di=32;41:no=0;46",
"no=0;46",
"*.text=0;46",
"*.txt=0;46",
],
)
def test_coloring(self, rich_cmp: Any, ls_colors: str) -> None:
file = FileStatus(
"test.txt",
1024,
FileStatusType.FILE,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/usertest.txt"),
)
folder = FileStatus(
"tmp",
0,
FileStatusType.DIRECTORY,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.WRITE,
uri=URL("storage://default/usertmp"),
)
painter = GnuPainter(ls_colors)
rich_cmp(painter.paint(file.name, file.type), index=0)
rich_cmp(painter.paint(folder.name, folder.type), index=1)
class TestBSDPainter:
    """Tests for BSDPainter, driven by a BSD-style ``LSCOLORS`` string."""

    def test_color_parsing(self) -> None:
        """The directory attribute maps to the first colour pair ("ex")."""
        painter = BSDPainter("exfxcxdxbxegedabagacad")
        directory_color = painter._colors[BSDAttributes.DIRECTORY]
        assert directory_color == "ex"

    @pytest.mark.parametrize(
        "ls_colors", ["exfxcxdxbxegedabagacad", "Eafxcxdxbxegedabagacad"]
    )
    def test_coloring(self, ls_colors: str, rich_cmp: Any) -> None:
        """Painted file and folder names match the stored snapshots."""
        # Both fixtures share the same modification time; compute it once.
        stamp = int(
            time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))
        )
        file = FileStatus(
            "test.txt",
            1024,
            FileStatusType.FILE,
            stamp,
            Action.READ,
            uri=URL("storage://default/usertest.txt"),
        )
        folder = FileStatus(
            "tmp",
            0,
            FileStatusType.DIRECTORY,
            stamp,
            Action.WRITE,
            uri=URL("storage://default/usertmp"),
        )
        painter = BSDPainter(ls_colors)
        rich_cmp(painter.paint(file.name, file.type), index=0)
        rich_cmp(painter.paint(folder.name, folder.type), index=1)
class TestPainterFactory:
    """Tests for get_painter(), which picks a painter from the environment."""

    def test_detection(self, monkeypatch: Any) -> None:
        """Painter selection honours LS_COLORS/LSCOLORS and the color flag."""
        # No colour configuration at all -> colourless painter even if
        # colour output was requested.
        monkeypatch.setenv("LS_COLORS", "")
        monkeypatch.setenv("LSCOLORS", "")
        painter = get_painter(True)
        assert isinstance(painter, NonePainter)
        # Both variables set: color=False forces NonePainter, color=True
        # selects a real (colouring) painter.
        monkeypatch.setenv("LSCOLORS", "exfxcxdxbxegedabagacad")
        monkeypatch.setenv("LS_COLORS", "di=32;41:fi=0;44:no=0;46")
        painter_without_color = get_painter(False)
        painter_with_color = get_painter(True)
        assert isinstance(painter_without_color, NonePainter)
        assert not isinstance(painter_with_color, NonePainter)
        # Only LS_COLORS set (GNU convention) -> GnuPainter.
        monkeypatch.setenv("LSCOLORS", "")
        monkeypatch.setenv("LS_COLORS", "di=32;41:fi=0;44:no=0;46")
        painter = get_painter(True)
        assert isinstance(painter, GnuPainter)
        # Only LSCOLORS set (BSD convention) -> BSDPainter.
        monkeypatch.setenv("LSCOLORS", "exfxcxdxbxegedabagacad")
        monkeypatch.setenv("LS_COLORS", "")
        painter = get_painter(True)
        assert isinstance(painter, BSDPainter)
class TestFilesFormatter:
    """Tests for the file-listing formatters and the FilesSorter keys."""

    # Fixture data: three regular files with distinct sizes and mtimes
    # (one name contains spaces to exercise quoting/width handling).
    files = [
        FileStatus(
            "File1",
            2048,
            FileStatusType.FILE,
            int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
            Action.READ,
            uri=URL("storage://default/userFile1"),
        ),
        FileStatus(
            "File2",
            1024,
            FileStatusType.FILE,
            int(time.mktime(time.strptime("2018-10-10 13:10:10", "%Y-%m-%d %H:%M:%S"))),
            Action.READ,
            uri=URL("storage://default/userFile2"),
        ),
        FileStatus(
            "File3 with space",
            1_024_001,
            FileStatusType.FILE,
            int(time.mktime(time.strptime("2019-02-02 05:02:02", "%Y-%m-%d %H:%M:%S"))),
            Action.READ,
            uri=URL("storage://default/userFile 3 with space"),
        ),
    ]
    # Two directories; mtimes differ by one second to give the TIME sorter
    # a deterministic order.
    folders = [
        FileStatus(
            "Folder1",
            0,
            FileStatusType.DIRECTORY,
            int(time.mktime(time.strptime("2017-03-03 06:03:03", "%Y-%m-%d %H:%M:%S"))),
            Action.MANAGE,
            uri=URL("storage://default/userFolder11"),
        ),
        FileStatus(
            "1Folder with space",
            0,
            FileStatusType.DIRECTORY,
            int(time.mktime(time.strptime("2017-03-03 06:03:02", "%Y-%m-%d %H:%M:%S"))),
            Action.MANAGE,
            uri=URL("storage://default/user1Folder with space"),
        ),
    ]
    files_and_folders = files + folders

    @pytest.mark.parametrize(
        "formatter",
        [
            (SimpleFilesFormatter(color=False)),
            (VerticalColumnsFilesFormatter(width=100, color=False)),
            (LongFilesFormatter(human_readable=False, color=False)),
        ],
    )
    def test_formatter_with_files_and_folders(
        self, formatter: BaseFilesFormatter, rich_cmp: Any
    ) -> None:
        """Each formatter renders the mixed listing to its stored snapshot."""
        rich_cmp(formatter(self.files_and_folders))

    @pytest.mark.parametrize(
        "formatter",
        [
            (SimpleFilesFormatter(color=False)),
            (VerticalColumnsFilesFormatter(width=100, color=False)),
            (LongFilesFormatter(human_readable=False, color=False)),
        ],
    )
    def test_formatter_with_empty_files(
        self, formatter: BaseFilesFormatter, rich_cmp: Any
    ) -> None:
        """Each formatter handles an empty listing without errors."""
        files: List[FileStatus] = []
        rich_cmp(formatter(files))

    def test_sorter(self) -> None:
        """FilesSorter.key() orders by name, size, and modification time."""
        # NAME: lexicographic — "1Folder..." sorts before the "File*" names,
        # "Folder1" after them.
        sorter = FilesSorter.NAME
        files = sorted(self.files_and_folders, key=sorter.key())
        assert files == [
            self.folders[1],
            self.files[0],
            self.files[1],
            self.files[2],
            self.folders[0],
        ]
        # SIZE: only the relative order of the real files is asserted.
        sorter = FilesSorter.SIZE
        files = sorted(self.files_and_folders, key=sorter.key())
        assert files[2:5] == [self.files[1], self.files[0], self.files[2]]
        # TIME: oldest (2017 folders) first, newest (2019 file) last.
        sorter = FilesSorter.TIME
        files = sorted(self.files_and_folders, key=sorter.key())
        assert files == [
            self.folders[1],
            self.folders[0],
            self.files[0],
            self.files[1],
            self.files[2],
        ]
class TestUsageFormatter:
    """Tests for DiskUsageFormatter rendering of disk-usage reports."""

    def test_formatter(self, rich_cmp: Any) -> None:
        """A basic usage record renders to the stored snapshot."""
        report = DiskUsageInfo(
            total=100000,
            used=80000,
            free=20000,
            cluster_name="default",
        )
        rich_cmp(DiskUsageFormatter()(report))
| 32.876404 | 88 | 0.550239 | 11,207 | 0.957536 | 0 | 0 | 6,027 | 0.514952 | 0 | 0 | 1,782 | 0.152256 |
0fad05bfe5b392dcaaa42d73399a9890ce38ef8d | 2,059 | py | Python | examples/example2d1many.py | elliottslaughter/cufinufft | bb1453dfe9dc12159e8e346eae79ad4d71fd566f | [
"Apache-2.0"
] | 56 | 2020-05-12T22:22:22.000Z | 2022-01-28T23:54:48.000Z | examples/example2d1many.py | elliottslaughter/cufinufft | bb1453dfe9dc12159e8e346eae79ad4d71fd566f | [
"Apache-2.0"
] | 108 | 2020-05-13T16:59:51.000Z | 2022-03-31T22:30:57.000Z | examples/example2d1many.py | elliottslaughter/cufinufft | bb1453dfe9dc12159e8e346eae79ad4d71fd566f | [
"Apache-2.0"
] | 15 | 2020-05-22T12:29:36.000Z | 2022-03-03T18:08:03.000Z | """
Demonstrate the type 1 NUFFT using cuFINUFFT
"""
import numpy as np
import pycuda.autoinit
from pycuda.gpuarray import GPUArray, to_gpu
from cufinufft import cufinufft
# Set up parameters for problem.
N1, N2 = 59, 61 # Size of uniform grid
M = 100 # Number of nonuniform points
n_transf = 2 # Number of input arrays
eps = 1e-6 # Requested tolerance
dtype = np.float32 # Datatype (real)
complex_dtype = np.complex64 # Datatype (complex)
# Generate coordinates of non-uniform points, uniformly random in [-pi, pi).
kx = np.random.uniform(-np.pi, np.pi, size=M)
ky = np.random.uniform(-np.pi, np.pi, size=M)
# Generate source strengths (complex standard normal), one row per transform.
c = (np.random.standard_normal((n_transf, M))
+ 1j * np.random.standard_normal((n_transf, M)))
# Cast to desired datatype.
kx = kx.astype(dtype)
ky = ky.astype(dtype)
c = c.astype(complex_dtype)
# Allocate memory for the uniform grid on the GPU.
fk_gpu = GPUArray((n_transf, N1, N2), dtype=complex_dtype)
# Initialize the plan and set the points.
plan = cufinufft(1, (N1, N2), n_transf, eps=eps, dtype=dtype)
plan.set_pts(to_gpu(kx), to_gpu(ky))
# Execute the plan, reading from the strengths array c and storing the
# result in fk_gpu.
plan.execute(to_gpu(c), fk_gpu)
# Retrieve the result from the GPU.
fk = fk_gpu.get()
# Check accuracy of the transform at position (nt1, nt2).
nt1 = int(0.37 * N1)
nt2 = int(0.26 * N2)
for i in range(n_transf):
    # Calculate the true value of the type 1 transform at the uniform grid
    # point (nt1, nt2), which corresponds to the coordinate nt1 - N1 // 2 and
    # nt2 - N2 // 2.
    x, y = nt1 - N1 // 2, nt2 - N2 // 2
    fk_true = np.sum(c[i] * np.exp(1j * (x * kx + y * ky)))
    # Calculate the absolute and relative error.
    err = np.abs(fk[i, nt1, nt2] - fk_true)
    rel_err = err / np.max(np.abs(fk[i]))
    print(f"[{i}] Absolute error on mode [{nt1}, {nt2}] is {err:.3g}")
    print(f"[{i}] Relative error on mode [{nt1}, {nt2}] is {rel_err:.3g}")
    # Accept up to 10x the requested tolerance.
    assert(rel_err < 10 * eps)
| 31.19697 | 77 | 0.645945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 914 | 0.443905 |
0fae0ba2824dc5a926e2388c414dc0455b2ced7b | 7,293 | py | Python | 2021/22/22b.py | kristianwiklund/AOC | be2f2e3a97b775281382dca8ea34a6522571052a | [
"MIT"
] | 3 | 2020-12-02T18:18:05.000Z | 2021-12-03T18:39:26.000Z | 2021/22/22b.py | kristianwiklund/AOC | be2f2e3a97b775281382dca8ea34a6522571052a | [
"MIT"
] | null | null | null | 2021/22/22b.py | kristianwiklund/AOC | be2f2e3a97b775281382dca8ea34a6522571052a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
import cut
from box import Box
from reactor import Reactor
from termcolor import colored
# -- assert helper
def check(what, output, f=None):
    """Assert helper: verify ``what`` is truthy, else dump debug info and exit.

    Args:
        what: Already-evaluated condition (any truthy/falsy value).
        output: Debug payload printed when the check fails.
        f: Optional zero-argument callback (e.g. ``lambda: R.savefig()``)
           invoked before exiting, typically to save a figure for inspection.
    """
    try:
        assert what
    except AssertionError:
        # Narrowed from a bare ``except:`` which would also swallow
        # KeyboardInterrupt/SystemExit; only the failed assert is handled.
        print("Assert failed, debug output: ", end="")
        print(output)
        if f is not None:
            f()
        sys.exit()
# -- end assert helper
# --- test cases
# Testcases 1-8: merging behaviour of the Reactor when adding "on" boxes.
print (colored("Testcase 1: adding two identical boxes results in only one box","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
R += Box("on x=1..3,y=1..3,z=1..3")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]")
print (colored("Testcase 2: adding various boxes that overlap but are on the edge with the first, also result in only one box","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
print (colored(" 2a: check that adding sliver on x doesn't cause more boxes","yellow"))
R += Box("on x=1..1,y=1..3,z=1..3")
check(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]",[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
print (colored(" 2c: check that adding sliver on z doesn't cause more boxes ","yellow"))
R += Box("on x=1..3,y=1..3,z=1..1")
check(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]",[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
print (colored(" 2b: check that adding sliver on y doesn't cause more boxes","yellow"))
R += Box("on x=1..3,y=1..1,z=1..3")
check(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]",[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
# Fix: the colour argument belongs inside colored(); previously this was
# print(colored("..."), "red"), which printed a literal " red" suffix
# instead of colouring the headline like every other testcase banner.
print (colored("Testcase 3: merge on X edge","red"))
R = Reactor()
R += Box("on x=1..1,y=1..1,z=1..1")
R += Box("on x=2..3,y=1..1,z=1..1")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..1,z=1..1]")
print ("Testcase 4: merge on Y edge")
R += Box("on x=1..3,y=2..3,z=1..1")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..1]")
print ("Testcase 5: merge on Z edge")
R += Box("on x=1..3,y=1..3,z=2..3")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]")
print ("Testcase 6: merge on (reverse) X edge")
R = Reactor()
R += Box("on x=2..3,y=2..3,z=2..3")
R += Box("on x=1..1,y=2..3,z=2..3")
assert(R.realcubes.__repr__()=="[on x=1..3,y=2..3,z=2..3]")
print ("Testcase 7: merge on (reverse) Y edge")
R += Box("on x=1..3,y=1..1,z=2..3")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=2..3]")
print ("Testcase 8: merge on (reverse) Z edge")
R += Box("on x=1..3,y=1..3,z=1..1")
assert(R.realcubes.__repr__()=="[on x=1..3,y=1..3,z=1..3]")
# Testcases 9-10: removal ("off" boxes) carving pieces out of existing blobs.
print (colored("Testcase 9a: Remove a 1x1x1 cube from a 3x3x3 cube","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
assert(R.size()==27)
R += Box("off x=1..1,y=1..1,z=1..1")
check(R.size()==26,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 9b: Remove a 1x1x1 cube from a 3x3x3 cube","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
assert(R.size()==27)
R += Box("off x=3..3,y=3..3,z=3..3")
check(R.size()==26,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 9c: Remove a 1x1x1 cube from a 3x3x3 cube","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
assert(R.size()==27)
R += Box("off x=2..2,y=2..2,z=2..2")
check(R.size()==26,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10x: Remove a slab from the upper end of a slab","red"))
R = Reactor()
R += Box("on x=1..3,y=1..1,z=1..1")
assert(R.size()==3)
R += Box("off x=3..3,y=1..1,z=1..1")
check(R.size()==2,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10y: Remove a slab from the upper end of a slab","red"))
R = Reactor()
R += Box("on x=1..1,y=1..3,z=1..1")
assert(R.size()==3)
R += Box("off x=1..1,y=3..3,z=1..1")
check(R.size()==2,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10z: Remove a slab from the upper end of a slab","red"))
R = Reactor()
R += Box("on x=1..1,y=1..1,z=1..3")
assert(R.size()==3)
R += Box("off x=1..1,y=1..1,z=3..3")
check(R.size()==2,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10xy: Remove a corner from the upper end of a slab","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..1")
assert(R.size()==9)
R += Box("off x=3..3,y=3..3,z=1..1")
check(R.size()==8,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10xy: Remove a corner from the upper end of a cube","red"))
R = Reactor()
R += Box("on x=1..3,y=1..3,z=1..3")
assert(R.size()==27)
R += Box("off x=3..3,y=3..3,z=3..3")
check(R.size()==26,[R.size(),R.realcubes],f=lambda : R.savefig())
print (colored("Testcase 10x-x: Remove a smaller part of the blob","red"))
R = Reactor()
R+= Box("on x=1..4,y=1..1,z=1..1")
R+= Box("off x=2..3,y=1..1,z=1..1")
check(R.size()==2,[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
print (colored("Testcase 10y-y: Remove a smaller part of the blob","red"))
R = Reactor()
R+= Box("on x=1..1,y=1..4,z=1..1")
R+= Box("off x=1..1,y=2..3,z=1..1")
check(R.size()==2,[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
print (colored("Testcase 10z-z: Remove a smaller part of the blob","red"))
R = Reactor()
R+= Box("on x=1..1,y=1..1,z=1..4")
R+= Box("off x=1..1,y=1..1,z=2..3")
check(R.size()==2,[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
# -----------------
print (colored("Testcase 10xyz: Remove a part of a blob","red"))
# off x=9..11,y=9..11,z=9..11 from on x=10..10,y=10..12,z=10..12
R = Reactor()
R+=Box("on x=10..10,y=10..12,z=10..12")
R+=Box("off x=11..11,y=9..11,z=9..11")
R.savefig()
# NOTE(review): debug early exit — everything below this line (testcase 11
# and the stdin-driven solver) is currently unreachable.
sys.exit()
# -----------------
# Testcase 11 (AOC 2021 day 22 first example), first in a hand-deconstructed
# form and then as the full sequence of steps.  NOTE(review): this section is
# dead code while the sys.exit() in testcase 10xyz above remains in place.
print(colored("Testcase 11, deconstructed","green"))
R = Reactor()
# steps 11a and 11b results in these cubes
a=Box("on x=10..10,y=10..12,z=10..12")
a.id="[1]"
b=Box("on x=11..12,y=10..10,z=10..12")
b.id="[2]"
c=Box("on x=11..12,y=11..12,z=10..10")
c.id="[3]"
d=Box("on x=11..13,y=11..13,z=11..13")
d.id="[4]"
R+=a
#R+=b
#R+=c
#R+=d
# steps 11c removes a cube
e=Box("off x=9..11,y=9..11,z=9..11")
e.id="[5]"
R+=e
R.savefig()
# NOTE(review): second debug early exit — the full testcase 11 below and the
# stdin solver never run in this snapshot of the script.
sys.exit()
print (colored("Testcase 11 : First example from AOC","red"))
R=Reactor()
print (" 11a: Check that first cube is size 27")
a=Box("on x=10..12,y=10..12,z=10..12")
a.id="Box1"
R+=a
assert(R.size()==27)
b = Box("on x=11..13,y=11..13,z=11..13")
b.id = "Box2"
R+=b
print (" 11b: Check that merging two cubes result in the correct size ("+str(27+19)+")")
check(R.size()==27+19,[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
c = Box("off x=9..11,y=9..11,z=9..11")
c.id = "Box3"
R+=c
print (" 11c: Check that removing a cube result in the correct size ("+str(27+19-8)+")")
check(R.size()==27+19-8,[R.size(),R.realcubes,[i.size() for i in R.realcubes]],f=lambda : R.savefig())
print (" 11d: Check that adding a cube result in the correct size ("+str(39)+")")
R+=Box("on x=10..10,y=10..10,z=10..10")
assert(R.size()==39)
sys.exit()
# - finally, read input from stdin and solve the problem
R = Reactor()
def readinaTOR():
    """Build a Reactor by applying every Box instruction read from stdin.

    Each input line is one reboot step (e.g. ``on x=1..3,y=1..3,z=1..3``).
    """
    reactor = Reactor()
    for raw_line in sys.stdin:
        reactor = reactor + Box(raw_line.strip())
    return reactor
| 29.526316 | 136 | 0.581791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,585 | 0.491567 |
0faef4c43d66c9f68163503158caeb67ad86c96d | 386 | py | Python | planner/progression/__init__.py | cyclone923/pyplanners | e2fe156ff00d0914cfb4f20afed158706f4d5d28 | [
"MIT"
] | null | null | null | planner/progression/__init__.py | cyclone923/pyplanners | e2fe156ff00d0914cfb4f20afed158706f4d5d28 | [
"MIT"
] | null | null | null | planner/progression/__init__.py | cyclone923/pyplanners | e2fe156ff00d0914cfb4f20afed158706f4d5d28 | [
"MIT"
] | null | null | null | from .hill_climbing import hill_climbing_search, strategies
from .best_first import path_cost_fn, greedy_cost_fn, weighted_cost_fn, macro_greedy_cost_fn, \
a_star_search, best_first_search, deferred_best_first_search, macro_deferred_best_first_search, semideferred_best_first_search
from .simple import dfs, bfs, srandom_walk, srrt
from .recurrent import rdfs, rbfs, random_walk, rrt
| 64.333333 | 128 | 0.860104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0fb0cca38d85a28606d607324eeae70804e43a93 | 18,289 | py | Python | domain_api/migrations/0001_initial.py | heytrav/drs-project | 68caa7e9bc8dea8963ddef561b42d05a7a88becd | [
"MIT"
] | 1 | 2020-07-01T02:35:14.000Z | 2020-07-01T02:35:14.000Z | domain_api/migrations/0001_initial.py | heytrav/drs-api | 68caa7e9bc8dea8963ddef561b42d05a7a88becd | [
"MIT"
] | 2 | 2020-06-05T17:25:14.000Z | 2021-03-19T21:53:44.000Z | domain_api/migrations/0001_initial.py | heytrav/drs-api | 68caa7e9bc8dea8963ddef561b42d05a7a88becd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-26 01:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial, auto-generated schema for the ``domain_api`` app.

    Creates the account/contact/registrant/domain tables, then adds the
    foreign keys that reference later-created models, and finally declares
    the composite uniqueness constraints.  Do not edit field definitions by
    hand; Django's migration autodetector generated them.
    """

    # First migration of this app.
    initial = True

    # The user model is swappable, so depend on whatever AUTH_USER_MODEL is.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # --- model tables -------------------------------------------------
        migrations.CreateModel(
            name='AccountDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=200)),
                ('surname', models.CharField(max_length=200)),
                ('middle_name', models.CharField(blank=True, max_length=200)),
                ('email', models.CharField(max_length=200)),
                ('email2', models.CharField(blank=True, max_length=200)),
                ('email3', models.CharField(blank=True, max_length=200)),
                ('telephone', models.CharField(blank=True, max_length=200)),
                ('fax', models.CharField(blank=True, max_length=200)),
                ('company', models.CharField(blank=True, max_length=200)),
                ('house_number', models.CharField(max_length=10)),
                ('street1', models.CharField(max_length=200)),
                ('street2', models.CharField(blank=True, max_length=200)),
                ('street3', models.CharField(blank=True, max_length=200)),
                ('city', models.CharField(max_length=200)),
                ('suburb', models.CharField(blank=True, max_length=200)),
                ('state', models.CharField(blank=True, max_length=200)),
                ('postcode', models.CharField(max_length=20)),
                ('country', models.CharField(max_length=2)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('postal_info_type', models.CharField(choices=[('int', 'international'), ('loc', 'local')], default='loc', max_length=3)),
                ('disclose_name', models.BooleanField(default=False)),
                ('disclose_company', models.BooleanField(default=False)),
                ('disclose_address', models.BooleanField(default=False)),
                ('disclose_telephone', models.BooleanField(default=False)),
                ('disclose_fax', models.BooleanField(default=False)),
                ('disclose_email', models.BooleanField(default=False)),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='personal_details', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, null=True)),
                ('email', models.CharField(max_length=200, null=True)),
                ('telephone', models.CharField(blank=True, max_length=200, null=True)),
                ('fax', models.CharField(blank=True, max_length=200, null=True)),
                ('company', models.CharField(blank=True, max_length=200, null=True)),
                ('house_number', models.CharField(blank=True, max_length=10, null=True)),
                ('street1', models.CharField(max_length=200, null=True)),
                ('street2', models.CharField(blank=True, max_length=200, null=True)),
                ('street3', models.CharField(blank=True, max_length=200, null=True)),
                ('city', models.CharField(max_length=200, null=True)),
                ('suburb', models.CharField(blank=True, max_length=200, null=True)),
                ('state', models.CharField(blank=True, max_length=200, null=True)),
                ('postcode', models.CharField(max_length=20, null=True)),
                ('country', models.CharField(max_length=2, null=True)),
                ('postal_info_type', models.CharField(choices=[('int', 'international'), ('loc', 'local')], default='loc', max_length=3)),
                ('authcode', models.CharField(blank=True, max_length=100, null=True)),
                ('roid', models.CharField(blank=True, max_length=100, null=True)),
                ('disclose_name', models.BooleanField(default=False)),
                ('disclose_company', models.BooleanField(default=False)),
                ('disclose_address', models.BooleanField(default=False)),
                ('disclose_telephone', models.BooleanField(default=False)),
                ('disclose_fax', models.BooleanField(default=False)),
                ('disclose_email', models.BooleanField(default=False)),
                ('status', models.CharField(max_length=200, null=True)),
                ('registry_id', models.CharField(max_length=200, unique=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('account_template', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.AccountDetail')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contacts', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ContactType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='DefaultAccountContact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mandatory', models.BooleanField(default=False)),
                ('account_template', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.AccountDetail')),
                ('contact_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.ContactType')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_account_contact', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='DefaultAccountTemplate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('account_template', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.AccountDetail')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_account', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='DefaultContact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mandatory', models.BooleanField(default=False)),
                ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.Contact')),
                ('contact_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.ContactType')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_contact', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='DefaultRegistrant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_registrant', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Domain',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='DomainContact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.NullBooleanField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.Contact')),
                ('contact_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.ContactType')),
            ],
        ),
        migrations.CreateModel(
            name='DomainProvider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True)),
                ('slug', models.CharField(max_length=100, unique=True)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='DomainRegistrant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.NullBooleanField()),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='RegisteredDomain',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.NullBooleanField()),
                ('auto_renew', models.BooleanField(default=True)),
                ('registration_period', models.IntegerField()),
                ('authcode', models.CharField(max_length=100, null=True)),
                ('roid', models.CharField(max_length=100, null=True)),
                ('status', models.CharField(max_length=200, null=True)),
                ('anniversary', models.DateTimeField(null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('domain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.Domain')),
            ],
        ),
        migrations.CreateModel(
            name='Registrant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('registry_id', models.CharField(max_length=200, unique=True)),
                ('name', models.CharField(max_length=200, null=True)),
                ('email', models.CharField(max_length=200, null=True)),
                ('telephone', models.CharField(blank=True, max_length=200, null=True)),
                ('fax', models.CharField(blank=True, max_length=200, null=True)),
                ('company', models.CharField(blank=True, max_length=200, null=True)),
                ('house_number', models.CharField(blank=True, max_length=10, null=True)),
                ('street1', models.CharField(max_length=200, null=True)),
                ('street2', models.CharField(blank=True, max_length=200, null=True)),
                ('street3', models.CharField(blank=True, max_length=200, null=True)),
                ('city', models.CharField(max_length=200, null=True)),
                ('suburb', models.CharField(blank=True, max_length=200, null=True)),
                ('state', models.CharField(blank=True, max_length=200, null=True)),
                ('status', models.CharField(max_length=200, null=True)),
                ('postcode', models.CharField(max_length=20, null=True)),
                ('country', models.CharField(max_length=2, null=True)),
                ('postal_info_type', models.CharField(choices=[('int', 'international'), ('loc', 'local')], default='loc', max_length=3)),
                ('authcode', models.CharField(blank=True, max_length=100, null=True)),
                ('roid', models.CharField(blank=True, max_length=100, null=True)),
                ('disclose_name', models.BooleanField(default=False)),
                ('disclose_company', models.BooleanField(default=False)),
                ('disclose_address', models.BooleanField(default=False)),
                ('disclose_telephone', models.BooleanField(default=False)),
                ('disclose_fax', models.BooleanField(default=False)),
                ('disclose_email', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('account_template', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.AccountDetail')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='registrants', to=settings.AUTH_USER_MODEL)),
                ('provider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider')),
            ],
        ),
        migrations.CreateModel(
            name='TopLevelDomain',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zone', models.CharField(max_length=100, unique=True)),
                ('description', models.TextField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='TopLevelDomainProvider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('anniversary_notification_period_days', models.IntegerField(default=30)),
                ('renewal_period', models.IntegerField(default=30)),
                ('grace_period_days', models.IntegerField(default=30)),
                ('provider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider')),
                ('zone', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.TopLevelDomain')),
            ],
        ),
        # --- foreign keys added after all models exist --------------------
        migrations.AddField(
            model_name='registereddomain',
            name='tld',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.TopLevelDomain'),
        ),
        migrations.AddField(
            model_name='registereddomain',
            name='tld_provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.TopLevelDomainProvider'),
        ),
        migrations.AddField(
            model_name='domainregistrant',
            name='registered_domain',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='registrant', to='domain_api.RegisteredDomain'),
        ),
        migrations.AddField(
            model_name='domainregistrant',
            name='registrant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.Registrant'),
        ),
        migrations.AddField(
            model_name='domaincontact',
            name='registered_domain',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contacts', to='domain_api.RegisteredDomain'),
        ),
        migrations.AddField(
            model_name='defaultregistrant',
            name='registrant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.Registrant'),
        ),
        migrations.AddField(
            model_name='defaultcontact',
            name='provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider'),
        ),
        migrations.AddField(
            model_name='defaultaccounttemplate',
            name='provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider'),
        ),
        migrations.AddField(
            model_name='defaultaccountcontact',
            name='provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider'),
        ),
        migrations.AddField(
            model_name='contact',
            name='provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider'),
        ),
        # --- composite uniqueness constraints -----------------------------
        migrations.AlterUniqueTogether(
            name='registereddomain',
            unique_together=set([('domain', 'tld', 'active')]),
        ),
        migrations.AlterUniqueTogether(
            name='domainregistrant',
            unique_together=set([('registered_domain', 'registrant', 'active')]),
        ),
        migrations.AlterUniqueTogether(
            name='domaincontact',
            unique_together=set([('registered_domain', 'contact_type', 'contact', 'active')]),
        ),
        migrations.AlterUniqueTogether(
            name='defaultregistrant',
            unique_together=set([('project_id', 'registrant')]),
        ),
        migrations.AlterUniqueTogether(
            name='defaultcontact',
            unique_together=set([('project_id', 'contact_type', 'contact', 'provider')]),
        ),
        migrations.AlterUniqueTogether(
            name='defaultaccounttemplate',
            unique_together=set([('project_id', 'provider', 'account_template')]),
        ),
        migrations.AlterUniqueTogether(
            name='defaultaccountcontact',
            unique_together=set([('project_id', 'contact_type', 'account_template', 'provider', 'mandatory')]),
        ),
    ]
| 57.694006 | 164 | 0.604844 | 18,065 | 0.987752 | 0 | 0 | 0 | 0 | 0 | 0 | 3,444 | 0.18831 |
0fb114654c966c204091aeb485fbf2b16514cab7 | 582 | py | Python | src/ralph/networks/migrations/0010_auto_20170216_1230.py | DoNnMyTh/ralph | 97b91639fa68965ad3fd9d0d2652a6545a2a5b72 | [
"Apache-2.0"
] | 1,668 | 2015-01-01T12:51:20.000Z | 2022-03-29T09:05:35.000Z | src/ralph/networks/migrations/0010_auto_20170216_1230.py | hq-git/ralph | e2448caf02d6e5abfd81da2cff92aefe0a534883 | [
"Apache-2.0"
] | 2,314 | 2015-01-02T13:26:26.000Z | 2022-03-29T04:06:03.000Z | src/ralph/networks/migrations/0010_auto_20170216_1230.py | hq-git/ralph | e2448caf02d6e5abfd81da2cff92aefe0a534883 | [
"Apache-2.0"
] | 534 | 2015-01-05T12:40:28.000Z | 2022-03-29T21:10:12.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (networks app).

    Alters Network.gateway to a nullable ForeignKey to networks.IPAddress
    with on_delete=SET_NULL and related_name 'gateway_network'.
    """

    dependencies = [
        ('networks', '0009_auto_20160823_0921'),
    ]

    operations = [
        migrations.AlterField(
            model_name='network',
            name='gateway',
            field=models.ForeignKey(to='networks.IPAddress', blank=True, on_delete=django.db.models.deletion.SET_NULL, verbose_name='Gateway address', null=True, related_name='gateway_network'),
        ),
    ]
| 27.714286 | 194 | 0.671821 | 440 | 0.756014 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.223368 |
0fb48e1799c5a6fbf154e674d62f184dedc8e4ed | 4,072 | py | Python | tests/tasks/dbt/test_dbt.py | andykawabata/prefect | a11061c19847beeea26616ccaf4b404ad939676b | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-09-28T16:24:02.000Z | 2020-10-08T17:08:19.000Z | tests/tasks/dbt/test_dbt.py | andykawabata/prefect | a11061c19847beeea26616ccaf4b404ad939676b | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2021-06-28T20:33:49.000Z | 2022-02-27T10:58:04.000Z | tests/tasks/dbt/test_dbt.py | yalaudah/prefect | 2f7f92c39a4575119c3268b0415841c6aca5df60 | [
"Apache-2.0"
] | 1 | 2019-12-27T15:57:54.000Z | 2019-12-27T15:57:54.000Z | import os
import sys
import pytest
from prefect import Flow
from prefect.tasks.dbt import DbtShellTask
# Module-level marker: skip every test in this file on Windows, where
# DbtShellTask's shell behaviour is unavailable.
pytestmark = pytest.mark.skipif(
    sys.platform == "win32", reason="DbtShellTask currently not supported on Windows"
)
def test_shell_result_from_stdout(tmpdir):
    """With the default configuration, run() returns stdout as a single,
    non-empty string."""
    profiles_dir = tmpdir.mkdir("dbt")
    snowflake_kwargs = {
        "type": "snowflake",
        "threads": 1,
        "account": "JH72176.us-east-1",
        "user": "jane@company.com",
        "role": "analyst",
        "database": "staging",
        "warehouse": "data_science",
        "schema": "analysis",
        "private_key_path": "/src/private_key.p8",
        "private_key_passphrase": "password123",
    }
    task = DbtShellTask(
        command="dbt --version",
        profile_name="default",
        environment="test",
        dbt_kwargs=snowflake_kwargs,
        overwrite_profiles=True,
        profiles_dir=str(profiles_dir),
    )
    result = task.run()
    # Default config returns a plain (non-empty) string.
    assert isinstance(result, str)
    assert len(result) > 0
def test_shell_result_from_stdout_with_full_return(tmpdir):
    """With return_all=True, run() returns every output line as a list."""
    profiles_dir = tmpdir.mkdir("dbt")
    snowflake_kwargs = {
        "type": "snowflake",
        "threads": 1,
        "account": "JH72176.us-east-1",
        "user": "jane@company.com",
        "role": "analyst",
        "database": "staging",
        "warehouse": "data_science",
        "schema": "analysis",
        "private_key_path": "/src/private_key.p8",
        "private_key_passphrase": "password123",
    }
    task = DbtShellTask(
        return_all=True,
        command="dbt --version",
        profile_name="default",
        environment="test",
        dbt_kwargs=snowflake_kwargs,
        overwrite_profiles=True,
        profiles_dir=str(profiles_dir),
    )
    result = task.run()
    # return_all=True yields a list with one entry per output line.
    assert isinstance(result, list)
    assert len(result) > 1
def test_shell_creates_profiles_yml_file(tmpdir):
    """With overwrite_profiles=True, the task writes a profiles.yml into the
    configured profiles_dir."""
    dbt_dir = tmpdir.mkdir("dbt")
    snowflake_kwargs = {
        "type": "snowflake",
        "threads": 1,
        "account": "JH72176.us-east-1",
        "user": "jane@company.com",
        "role": "analyst",
        "database": "staging",
        "warehouse": "data_science",
        "schema": "analysis",
        "private_key_path": "/src/private_key.p8",
        "private_key_passphrase": "password123",
    }
    with Flow(name="test") as flow:
        DbtShellTask(
            profile_name="default",
            environment="test",
            dbt_kwargs=snowflake_kwargs,
            overwrite_profiles=True,
            profiles_dir=str(dbt_dir),
        )(command="ls")
    state = flow.run()
    assert state.is_successful()
    # The task must have generated the profiles file inside profiles_dir.
    assert os.path.exists(dbt_dir.join("profiles.yml"))
def test_shell_uses_dbt_envar(tmpdir, monkeypatch):
    """With overwrite_profiles=False, the task honours DBT_PROFILES_DIR and
    does not write a profiles.yml into the configured profiles_dir."""
    dbt_project_path = tmpdir.mkdir("dbt_project")
    monkeypatch.setenv("DBT_PROFILES_DIR", str(dbt_project_path))
    # Pre-create the profiles.yml that the environment variable points at.
    real_profiles_path = dbt_project_path.join("profiles.yml")
    open(real_profiles_path, "a").close()
    snowflake_kwargs = {
        "type": "snowflake",
        "threads": 1,
        "account": "JH72176.us-east-1",
        "user": "jane@company.com",
        "role": "analyst",
        "database": "staging",
        "warehouse": "data_science",
        "schema": "analysis",
        "private_key_path": "/src/private_key.p8",
        "private_key_passphrase": "password123",
    }
    with Flow(name="test") as flow:
        DbtShellTask(
            profile_name="default",
            environment="test",
            dbt_kwargs=snowflake_kwargs,
            overwrite_profiles=False,
            profiles_dir=str(tmpdir),
        )(command="ls")
    state = flow.run()
    assert state.is_successful()
    # No new profiles.yml may appear in profiles_dir itself.
    assert not os.path.exists(tmpdir.join("profiles.yml"))
| 30.848485 | 85 | 0.561395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,350 | 0.331532 |
0fb6d81fdcc4a6f92216d10fda5840c9b15c50e3 | 295 | py | Python | tests/data/formulas/five/five-1.0.py | pmuller/ipkg | 9e3df803e0f16035a7552f63bbf54886244827d8 | [
"MIT"
] | 3 | 2016-04-20T21:24:59.000Z | 2021-11-17T10:58:49.000Z | tests/data/formulas/five/five-1.0.py | pmuller/ipkg | 9e3df803e0f16035a7552f63bbf54886244827d8 | [
"MIT"
] | null | null | null | tests/data/formulas/five/five-1.0.py | pmuller/ipkg | 9e3df803e0f16035a7552f63bbf54886244827d8 | [
"MIT"
] | null | null | null | from os.path import dirname
from ipkg.build import Formula, File
class five(Formula):
name = 'five'
version = '1.0'
sources = File(dirname(__file__) + '/../../sources/five-1.0.tar.gz')
platform = 'any'
dependencies = ('four > 1.0',)
def install(self):
pass
| 17.352941 | 72 | 0.6 | 226 | 0.766102 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.20339 |
0fb8484d9a2f11e82bc1b0d029f000c6539ffbe4 | 828 | py | Python | venv/Lib/site-packages/captcha/urls.py | bopopescu/diandian_online | 05eba122762c087623d42fad5352f6b67c73bcc5 | [
"BSD-3-Clause"
] | 3 | 2020-03-16T02:49:59.000Z | 2022-03-21T15:04:44.000Z | venv/Lib/site-packages/captcha/urls.py | bopopescu/diandian_online | 05eba122762c087623d42fad5352f6b67c73bcc5 | [
"BSD-3-Clause"
] | 1 | 2020-12-10T08:04:25.000Z | 2020-12-10T08:04:25.000Z | venv/Lib/site-packages/captcha/urls.py | bopopescu/diandian_online | 05eba122762c087623d42fad5352f6b67c73bcc5 | [
"BSD-3-Clause"
] | 2 | 2020-05-01T08:16:25.000Z | 2020-07-21T00:12:06.000Z | from django.conf.urls import url
from django.urls import path
from captcha import views
# urlpatterns = [
# url(r'image/(?P<key>\w+)/$', views.captcha_image, name='captcha-image', kwargs={'scale': 1}),
# url(r'image/(?P<key>\w+)@2/$', views.captcha_image, name='captcha-image-2x', kwargs={'scale': 2}),
# url(r'audio/(?P<key>\w+).wav$', views.captcha_audio, name='captcha-audio'),
# url(r'refresh/$', views.captcha_refresh, name='captcha-refresh'),
# ]
urlpatterns = [
path('image/<slug:key>/', views.captcha_image, name='captcha-image', kwargs={'scale': 1}),
path('image/<slug:key>@2/', views.captcha_image, name='captcha-image-2x', kwargs={'scale': 2}),
path('audio/<slug:key>.wav', views.captcha_audio, name='captcha-audio'),
path('refresh/', views.captcha_refresh, name='captcha-refresh'),
] | 51.75 | 104 | 0.660628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 526 | 0.635266 |
0fb9d85cce91d01ad8ccfcea025a3fb078866669 | 4,147 | py | Python | tacker/api/views/__init__.py | takahashi-tsc/tacker | a0ae01a13dcc51bb374060adcbb4fd484ab37156 | [
"Apache-2.0"
] | 116 | 2015-10-18T02:57:08.000Z | 2022-03-15T04:09:18.000Z | tacker/api/views/__init__.py | takahashi-tsc/tacker | a0ae01a13dcc51bb374060adcbb4fd484ab37156 | [
"Apache-2.0"
] | 6 | 2016-11-07T22:15:54.000Z | 2021-05-09T06:13:08.000Z | tacker/api/views/__init__.py | takahashi-tsc/tacker | a0ae01a13dcc51bb374060adcbb4fd484ab37156 | [
"Apache-2.0"
] | 166 | 2015-10-20T15:31:52.000Z | 2021-11-12T08:39:49.000Z | # Copyright (C) 2020 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.api.common import attribute_filter
from tacker.common import exceptions as exception
class BaseViewBuilder(object):
    """Base helper for API view builders.

    Validates request query parameters (filter expressions and field
    selection) against attribute sets declared by subclasses:
    FLATTEN_ATTRIBUTES, COMPLEX_ATTRIBUTES and FLATTEN_COMPLEX_ATTRIBUTES.
    """

    @classmethod
    def validate_filter(cls, filters=None):
        """Parse *filters* against the subclass's flattened attribute names.

        :param filters: raw filter expression from the request (falsy = none).
        :returns: the parsed filter rule, or None when no filters were given.
        """
        if not filters:
            return
        return attribute_filter.parse_filter_rule(filters,
            target=cls.FLATTEN_ATTRIBUTES)

    @classmethod
    def validate_attribute_fields(cls, all_fields=None, fields=None,
            exclude_fields=None, exclude_default=None):
        """Validate the field-selection query parameters.

        First rejects mutually exclusive parameter combinations, then checks
        every name in *fields* / *exclude_fields* against the declared
        complex-attribute sets.

        :raises exception.ValidationError: on an invalid combination or an
            unknown/ill-formed field name.
        """
        # Mutually exclusive combinations, checked in this fixed order so the
        # first conflicting pair determines the error message.
        if all_fields and (fields or exclude_fields or exclude_default):
            msg = ("Invalid query parameter combination: 'all_fields' "
                "cannot be combined with 'fields' or 'exclude_fields' "
                "or 'exclude_default'")
            raise exception.ValidationError(msg)
        if fields and (all_fields or exclude_fields):
            msg = ("Invalid query parameter combination: 'fields' "
                "cannot be combined with 'all_fields' or 'exclude_fields' ")
            raise exception.ValidationError(msg)
        if exclude_fields and (all_fields or fields or exclude_default):
            msg = ("Invalid query parameter combination: 'exclude_fields' "
                "cannot be combined with 'all_fields' or 'fields' "
                "or 'exclude_default'")
            raise exception.ValidationError(msg)
        if exclude_default and (all_fields or exclude_fields):
            msg = ("Invalid query parameter combination: 'exclude_default' "
                "cannot be combined with 'all_fields' or 'exclude_fields' ")
            raise exception.ValidationError(msg)

        def _validate_complex_attributes(query_parameter, fields):
            # A field is accepted when it is a known complex attribute, a
            # known flattened attribute, or matches a declared "<prefix>*"
            # key-value entry in FLATTEN_COMPLEX_ATTRIBUTES.
            msg = ("Invalid query parameter '%(query_parameter)s'. "
                "Value: %(field)s")
            for field in fields:
                if field in cls.COMPLEX_ATTRIBUTES:
                    continue
                elif '*' in field:
                    # Field should never contain '*' as it's reserved for
                    # special purpose for handling key-value pairs.
                    raise exception.ValidationError(msg %
                        {"query_parameter": query_parameter,
                        "field": field})
                elif field not in cls.FLATTEN_COMPLEX_ATTRIBUTES:
                    # Special case for field with key-value pairs.
                    # In this particular case, key will act as an attribute
                    # in structure so you need to treat it differently than
                    # other fields. All key-value pair field will be post-fix
                    # with '*' in FLATTEN_COMPLEX_ATTRIBUTES. Request
                    # with field which contains '*' will be treated as an
                    # error.
                    special_field = False
                    for attribute in cls.FLATTEN_COMPLEX_ATTRIBUTES:
                        if '*' in attribute and field.startswith(
                                attribute.split('*')[0]):
                            special_field = True
                    if not special_field:
                        raise exception.ValidationError(msg %
                            {"query_parameter": query_parameter,
                            "field": field})

        if fields:
            _validate_complex_attributes("fields", fields.split(','))
        elif exclude_fields:
            _validate_complex_attributes("exclude_fields",
                exclude_fields.split(","))
| 44.591398 | 79 | 0.594888 | 3,418 | 0.82421 | 0 | 0 | 3,376 | 0.814082 | 0 | 0 | 1,671 | 0.402942 |
0fba0a7eb708f5b3bddbe1370f87cfb260a40450 | 428 | py | Python | users/migrations/0002_auto_20190529_1832.py | VladaDidko/skill- | 861c08376e2bc9b9a5a44e3a8560324ee53ce2d0 | [
"Unlicense"
] | null | null | null | users/migrations/0002_auto_20190529_1832.py | VladaDidko/skill- | 861c08376e2bc9b9a5a44e3a8560324ee53ce2d0 | [
"Unlicense"
] | 18 | 2019-05-28T17:20:34.000Z | 2022-03-11T23:50:12.000Z | users/migrations/0002_auto_20190529_1832.py | VladaDidko/skill- | 861c08376e2bc9b9a5a44e3a8560324ee53ce2d0 | [
"Unlicense"
] | 3 | 2019-05-27T09:51:54.000Z | 2019-12-12T20:35:29.000Z | # Generated by Django 2.2.1 on 2019-05-29 15:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='gender',
field=models.CharField(choices=[('M', 'Male'), ('F', 'Female')], default='Ч', max_length=1),
),
]
| 22.526316 | 104 | 0.57243 | 336 | 0.783217 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.254079 |
0fbabd4ea1caf69d29462adc61c987bfc0ab93d1 | 4,343 | py | Python | SC001_2021.Jan-Feb/Assignment_3/hangman.py | pcdd05/stanCode_python | 9fa6d3abd7a1681a7a254e2dcbf3093eae25476f | [
"MIT"
] | null | null | null | SC001_2021.Jan-Feb/Assignment_3/hangman.py | pcdd05/stanCode_python | 9fa6d3abd7a1681a7a254e2dcbf3093eae25476f | [
"MIT"
] | null | null | null | SC001_2021.Jan-Feb/Assignment_3/hangman.py | pcdd05/stanCode_python | 9fa6d3abd7a1681a7a254e2dcbf3093eae25476f | [
"MIT"
] | null | null | null | """
File: hangman.py
Name: DiCheng
-----------------------------
This program plays the hangman game.
The user sees a dashed word and tries to
correctly figure out the hidden word by
inputting one character each round.
If the input is correct, the updated word
is shown on the console. Players have N_TURNS
chances to guess and win this game.
"""
import random
# This constant controls the number of guess the player has.
N_TURNS = 7
def main():
    """
    Run one game of hangman: repeatedly prompt for a letter, reveal matches
    and deduct a life on each miss, until the word is fully revealed (win)
    or all N_TURNS lives are spent (hung). The answer is always shown at
    the end.
    """
    ans = random_word()
    life_point = N_TURNS
    current_guess = default_current_guess(len(ans))
    while life_point > 0:
        guess = get_input_guess(current_guess, life_point)
        current_guess = match_ans(guess, current_guess, ans)
        life_point = check_guess(life_point, guess, current_guess)
        if "-" not in current_guess:
            print("You win!!")
            break
    if life_point == 0:
        print("You are completely hung : (")
    print("The word was: " + ans)
def default_current_guess(length):
    """
    Get the default guess hint for an answer of the given length.

    Replaces the original character-by-character concatenation loop
    (quadratic in the worst case) with the string repetition idiom.

    :param length: int, the length of the random answer.
    :return: str, the default guess hint -- all positions hidden by "-".
    """
    return "-" * length
def get_input_guess(current_guess, life_point):
    """
    Show the current hint and the number of guesses left, then read the
    player's next guess from stdin.

    :param current_guess: str, the partially revealed word.
    :param life_point: int, the number of wrong guesses still allowed.
    :return: str, the guess normalized to one upper-case letter by
        check_format().
    """
    print("The word looks like: " + current_guess)
    print("You have " + str(life_point) + " guesses left.")
    raw = input("Your guess: ")
    return check_format(raw)
def check_format(input_guess):
    """
    Normalize a guess to one upper-case letter, re-prompting until it is a
    single alphabetic character.

    :param input_guess: str, the candidate guess typed by the player.
    :return: str, a single alphabetic character in upper case.
    """
    guess = input_guess
    while not (guess.isalpha() and len(guess) == 1):
        guess = input("illegal format.\nYour guess: ")
    return guess.upper()
def match_ans(input_guess, current_guess, ans):
    """
    Reveal every position of the answer equal to the guessed letter.

    :param input_guess: str, the single guessed letter (upper case).
    :param current_guess: str, the hint revealed so far (same length as ans).
    :param ans: str, the hidden answer word.
    :return: str, the hint with all positions matching the guess uncovered.
    """
    return "".join(
        ans[i] if ans[i] == input_guess else current_guess[i]
        for i in range(len(ans))
    )
def check_guess(life_point, input_guess, current_guess):
    """
    Report whether the latest guess revealed anything and update the lives.

    Uses the `in` membership idiom instead of the original
    `str.find(...) != -1` check.

    :param life_point: int, the number of guesses left before this guess.
    :param input_guess: str, the new guess letter (upper case).
    :param current_guess: str, the hint after match_ans() applied the guess.
    :return: int, unchanged life_point on a correct guess, life_point - 1
        otherwise.
    """
    # If the guessed letter now appears in the hint, match_ans revealed it.
    if input_guess in current_guess:
        print("You are correct!")
        return life_point
    print("There is no " + input_guess + "'s in the word.")
    return life_point - 1
def random_word():
    """
    Get a random word as the hidden answer.

    Replaces the original choice(range(9)) + if/elif ladder with a direct
    random.choice over the word pool: same words, same ordering, same
    distribution.

    :return: str, one of nine upper-case candidate words.
    """
    words = (
        "NOTORIOUS",
        "GLAMOROUS",
        "CAUTIOUS",
        "DEMOCRACY",
        "BOYCOTT",
        "ENTHUSIASTIC",
        "HOSPITALITY",
        "BUNDLE",
        "REFUND",
    )
    return random.choice(words)
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
# Standard entry point: start the game only when run as a script.
if __name__ == '__main__':
    main()
| 30.801418 | 115 | 0.643564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,460 | 0.566429 |
0fbade66d2854d66ac77d55cf0a93e6da49f2f41 | 9,532 | py | Python | CyberPi/Python with CyberPi 064(面向对象 精灵游戏).py | SCSZCC/PythonWithHardware | 3e5ae890cb7a8e5e2c5a636092aca9ce21728ab2 | [
"MIT"
] | 2 | 2020-08-15T02:49:19.000Z | 2020-08-15T02:49:31.000Z | CyberPi/Python with CyberPi 064(面向对象 精灵游戏).py | SCSZCC/PythonWithHardware | 3e5ae890cb7a8e5e2c5a636092aca9ce21728ab2 | [
"MIT"
] | null | null | null | CyberPi/Python with CyberPi 064(面向对象 精灵游戏).py | SCSZCC/PythonWithHardware | 3e5ae890cb7a8e5e2c5a636092aca9ce21728ab2 | [
"MIT"
] | 1 | 2022-02-24T05:30:30.000Z | 2022-02-24T05:30:30.000Z | """"
名称:064 童芯派 面向对象 体感小飞机
硬件: 童芯派
功能介绍:
使用童芯派的精灵功能在屏幕上编写了一个基于陀螺仪控制的体感小游戏,控制角色飞机躲避空中的子弹。
难度:⭐⭐⭐
支持的模式:上传模式
无
"""
# ---------程序分割线----------------程序分割线----------------程序分割线----------
import cyberpi
import random
import math
import time
class Enemy:
    """A falling projectile sprite on the CyberPi 128x128 screen.

    Each enemy spawns just above the top edge at a random x position and
    falls by `speed` pixels per move() call; once past the bottom edge it
    respawns at the top with a fresh random color.
    """

    def __init__(self):
        # Spawn above the visible area at a random horizontal position.
        self.x = random.randint(0, 128)
        self.y = -1
        self.pix = cyberpi.sprite()
        # 16x16 (256-entry) pixel art: three orange (0xf5a623) pixels on a
        # black background.
        self.pix.draw_pixels(
            [0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0xf5a623, 0xf5a623, 0xf5a623, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0xf5a623,
             0xf5a623, 0xf5a623, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0xf5a623, 0xf5a623, 0xf5a623, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000])
        self.pix.set_align("center")
        # Random tint; re-randomized on every respawn in start().
        self.pix.set_brush(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        self.pix.move_to(self.x, self.y)
        # Fall speed in pixels per move() call.
        self.speed = random.randint(2, 6)

    def set_speed_up(self):
        # Increase the fall speed by one, capped at 30.
        self.speed += 1
        if self.speed >= 30:
            self.speed = 30

    def set_speed_down(self):
        # Decrease the fall speed by one, floored at 2.
        self.speed -= 1
        if self.speed <= 2:
            self.speed = 2

    def speed_re(self):
        # Re-randomize the speed (used when a new round starts).
        self.speed = random.randint(3, 8)

    def start(self):
        # Respawn: make visible, re-tint, and move back above the top edge.
        self.pix.show()
        self.pix.set_brush(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        self.x = random.randint(2, 125)
        self.y = -1
        self.pix.move_to(self.x, self.y)

    def move(self):
        # Respawn at the top once the sprite has fallen past the bottom edge,
        # then advance downward by `speed` pixels.
        if self.pix.get_y() > 128:
            self.start()
        self.pix.move_y(self.speed)
class Player:
    """The player's aircraft sprite, steered by tilting the CyberPi.

    `point` counts collisions with enemy sprites; LEDs are switched off as
    collisions accumulate, and control() returns False once 20 collisions
    have been taken (game over).
    """

    def __init__(self):
        # Lights the LEDs blue -- presumably the full "health bar"; the
        # exact cyberpi.led semantics are API-defined.
        cyberpi.led.on('b')
        self.point = 0  # number of enemy collisions so far
        self.player = cyberpi.sprite()
        # 16x16 (256-entry) pixel art of the aircraft in blue (0x1eaaff).
        self.player.draw_pixels(
            [0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff,
             0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x1eaaff, 0x1eaaff,
             0x1eaaff, 0x1eaaff, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000,
             0x000000, 0x000000, 0x000000, 0x000000, 0x000000, 0x000000])
        # Start at the center of the 128x128 screen.
        self.player.move_to(64, 64)

    def reset(self):
        # Restore the initial state for a new round.
        self.point = 0
        cyberpi.led.on('b')
        self.player.move_to(64, 64)

    def collide_detect(self):
        # Map gyro roll/pitch to a position around the screen center (64, 64),
        # then clamp to stay a few pixels inside the 128x128 screen.
        self.player_x = 64 - -cyberpi.get_roll()
        self.player_y = 64 - -cyberpi.get_pitch()
        if self.player_x < 0:
            self.player_x = 5
        if self.player_x > 128:
            self.player_x = 123
        if self.player_y < 0:
            self.player_y = 5
        if self.player_y > 128:
            self.player_y = 123
        self.player.move_to(self.player_x, self.player_y)

    def control(self, *para):
        # Update the player position, then test collision against every
        # enemy passed in. Returns False on game over, otherwise None.
        self.collide_detect()
        for item in para:
            if self.player.is_touch(item.pix):
                # Hit: hide the enemy, count the hit, and dim the LED bar.
                item.pix.hide()
                self.point += 1
                print(self.point)
                num = math.floor(self.point / 4)
                # One LED switched off per 4 hits (ids counted down from 6)
                # -- NOTE(review): exact LED id semantics depend on the
                # cyberpi API; confirm the intended mapping.
                cyberpi.led.off(id=6 - num)
                if self.point >= 20:
                    # 20 hits: play a tone and signal game over to the caller.
                    cyberpi.audio.play('prompt-tone')
                    return False
first = Player()
a = Enemy()
b = Enemy()
c = Enemy()
d = Enemy()
e = Enemy()
f = Enemy()
g = Enemy()
h = Enemy()
# NOTE(review): `d` is instantiated above but missing from enemy_list, so the
# fourth enemy never moves or collides -- looks like a typo; confirm intent.
enemy_list = [a, b, c, e, f, g, h]
# Intro text (Chinese): tilt-controlled plane; up/down keys change enemy
# speed; the LEDs act as the health bar; A restarts, B starts the game.
cyberpi.console.println("体感小飞机 上键加速 下键减速 灯光为血条")
cyberpi.console.println("A键重新开始")
cyberpi.console.println("B键开始游戏")
time.sleep(5)
# Title loop: keep tracking the tilt until B is pressed to start the game.
while True:
    first.control(*enemy_list)
    cyberpi.screen.render()
    if cyberpi.controller.is_press("B"):
        cyberpi.display.clear()
        break
cyberpi.screen.render()
time.sleep(1)
# Main game loop: Player.control() returns False after 20 collisions.
while True:
    if first.control(*enemy_list) is False:
        cyberpi.led.on('r')
        cyberpi.display.label("GameOver", 24, 'center')
        cyberpi.audio.play('prompt-tone')
        # Wait for A to restart: reset the player and respawn every enemy
        # with a fresh random speed.
        while True:
            if cyberpi.controller.is_press('A'):
                first.reset()
                cyberpi.screen.render()
                time.sleep(2)
                for i in enemy_list:
                    i.start()
                    i.speed_re()
                cyberpi.screen.render()
                break
    # Advance every enemy, then apply speed changes from the up/down keys.
    for i in enemy_list:
        i.move()
    if cyberpi.controller.is_press('up'):
        for i in enemy_list:
            i.set_speed_up()
    if cyberpi.controller.is_press('down'):
        for i in enemy_list:
            i.set_speed_down()
    cyberpi.screen.render()
0fbb3aad96ce58448158e08b16e3b6dc036a20e3 | 5,877 | pyw | Python | venv/Lib/site-packages/PyQt4/examples/designer/plugins/widgets/polygonwidget.pyw | prateekfxtd/ns_Startup | 095a62b3a8c7bf0ff7b767355d57d993bbd2423d | [
"MIT"
] | 1 | 2022-03-16T02:10:30.000Z | 2022-03-16T02:10:30.000Z | venv/Lib/site-packages/PyQt4/examples/designer/plugins/widgets/polygonwidget.pyw | prateekfxtd/ns_Startup | 095a62b3a8c7bf0ff7b767355d57d993bbd2423d | [
"MIT"
] | null | null | null | venv/Lib/site-packages/PyQt4/examples/designer/plugins/widgets/polygonwidget.pyw | prateekfxtd/ns_Startup | 095a62b3a8c7bf0ff7b767355d57d993bbd2423d | [
"MIT"
] | 2 | 2019-05-28T11:58:59.000Z | 2020-09-23T17:21:19.000Z | #!/usr/bin/env python
"""
polygonwidget.py
A PyQt custom widget example for Qt Designer.
Copyright (C) 2006 David Boddie <david@boddie.org.uk>
Copyright (C) 2005-2006 Trolltech ASA. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import math
from PyQt4 import QtCore, QtGui
class PolygonWidget(QtGui.QWidget):
    """PolygonWidget(QtGui.QWidget)

    Provides a custom widget to display a star polygon with properties and
    slots that can be used to customize its appearance: number of sides,
    inner/outer radius, rotation angle, and the two gradient colors.
    """

    def __init__(self, parent=None):
        super(PolygonWidget, self).__init__(parent)

        # Geometry of the star polygon.
        self._sides = 5
        self._innerRadius = 20
        self._outerRadius = 50
        self._angle = 0
        self.createPath()

        # Radial gradient used to fill the polygon.
        self._innerColor = QtGui.QColor(255, 255, 128)
        self._outerColor = QtGui.QColor(255, 0, 128)
        self.createGradient()

    def paintEvent(self, event):
        """Paint the flat background, then the rotated, gradient-filled star."""
        painter = QtGui.QPainter()
        painter.begin(self)
        painter.setRenderHint(QtGui.QPainter.Antialiasing)
        painter.setBrush(QtGui.QBrush(QtGui.QColor(192, 192, 255)))
        painter.drawRect(event.rect())
        # Draw the polygon around the widget's center, rotated by _angle.
        painter.translate(self.width()/2.0, self.height()/2.0)
        painter.rotate(self._angle)
        painter.setBrush(QtGui.QBrush(self.gradient))
        painter.drawPath(self.path)
        painter.end()

    def sizeHint(self):
        """Preferred size: the outer diameter plus a 10 px margin each side."""
        return QtCore.QSize(2*self._outerRadius + 20, 2*self._outerRadius + 20)

    def createPath(self):
        """Rebuild the star-shaped QPainterPath from the sides and radii."""
        self.path = QtGui.QPainterPath()
        angle = 2*math.pi/self._sides
        self.path.moveTo(self._outerRadius, 0)
        # Alternate an inner vertex (half a step ahead) with the next outer
        # vertex to produce the star outline.
        for step in range(1, self._sides + 1):
            self.path.lineTo(
                self._innerRadius * math.cos((step - 0.5) * angle),
                self._innerRadius * math.sin((step - 0.5) * angle)
                )
            self.path.lineTo(
                self._outerRadius * math.cos(step * angle),
                self._outerRadius * math.sin(step * angle)
                )
        self.path.closeSubpath()

    def createGradient(self):
        """Rebuild the radial fill gradient from the inner and outer colors."""
        center = QtCore.QPointF(0, 0)
        self.gradient = QtGui.QRadialGradient(center, self._outerRadius, center)
        self.gradient.setColorAt(0.5, QtGui.QColor(self._innerColor))
        self.gradient.setColorAt(1.0, QtGui.QColor(self._outerColor))

    # The angle property is implemented using the getAngle() and setAngle()
    # methods.

    def getAngle(self):
        return self._angle

    # The setAngle() setter method is also a slot.

    @QtCore.pyqtSlot(int)
    def setAngle(self, angle):
        # Clamp the rotation to the [0, 360] degree range.
        self._angle = min(max(0, angle), 360)
        self.update()

    angle = QtCore.pyqtProperty(int, getAngle, setAngle)

    # The innerRadius property is implemented using the getInnerRadius() and
    # setInnerRadius() methods.

    def getInnerRadius(self):
        return self._innerRadius

    # The setInnerRadius() setter method is also a slot.

    @QtCore.pyqtSlot(int)
    def setInnerRadius(self, radius):
        self._innerRadius = radius
        self.createPath()
        self.createGradient()
        self.update()

    innerRadius = QtCore.pyqtProperty(int, getInnerRadius, setInnerRadius)

    # The outerRadius property is implemented using the getOuterRadius() and
    # setOuterRadius() methods.

    def getOuterRadius(self):
        return self._outerRadius

    # The setOuterRadius() setter method is also a slot.

    @QtCore.pyqtSlot(int)
    def setOuterRadius(self, radius):
        self._outerRadius = radius
        self.createPath()
        self.createGradient()
        self.update()

    outerRadius = QtCore.pyqtProperty(int, getOuterRadius, setOuterRadius)

    # The numberOfSides property is implemented using the getNumberOfSides()
    # and setNumberOfSides() methods.

    def getNumberOfSides(self):
        return self._sides

    # The setNumberOfSides() setter method is also a slot.

    @QtCore.pyqtSlot(int)
    def setNumberOfSides(self, sides):
        # A polygon needs at least three sides.
        self._sides = max(3, sides)
        self.createPath()
        self.update()

    numberOfSides = QtCore.pyqtProperty(int, getNumberOfSides, setNumberOfSides)

    # The innerColor property is implemented using the getInnerColor() and
    # setInnerColor() methods.

    def getInnerColor(self):
        return self._innerColor

    def setInnerColor(self, color):
        # Bug fix: was `self._innerColor = max(3, color)` (copy-pasted from
        # setNumberOfSides), which compared an int with a QColor instead of
        # simply storing the new color.
        self._innerColor = color
        self.createGradient()
        self.update()

    innerColor = QtCore.pyqtProperty(QtGui.QColor, getInnerColor, setInnerColor)

    # The outerColor property is implemented using the getOuterColor() and
    # setOuterColor() methods.

    def getOuterColor(self):
        return self._outerColor

    def setOuterColor(self, color):
        self._outerColor = color
        self.createGradient()
        self.update()

    outerColor = QtCore.pyqtProperty(QtGui.QColor, getOuterColor, setOuterColor)
if __name__ == "__main__":
    # Stand-alone demo: show the widget in its own window.
    import sys

    app = QtGui.QApplication(sys.argv)
    window = PolygonWidget()
    window.show()
    sys.exit(app.exec_())
| 30.769634 | 80 | 0.650672 | 4,783 | 0.813851 | 0 | 0 | 608 | 0.103454 | 0 | 0 | 1,845 | 0.313936 |
0fbc19796e7acda6f3394479071dfd67a7de3e33 | 4,864 | py | Python | wflow-water-index/wflow-water-index.py | Fondus/Fondus-Python-SDK | ce6cfaffe5d978ac728f35b1b4bfcf49f7519576 | [
"Apache-2.0"
] | null | null | null | wflow-water-index/wflow-water-index.py | Fondus/Fondus-Python-SDK | ce6cfaffe5d978ac728f35b1b4bfcf49f7519576 | [
"Apache-2.0"
] | null | null | null | wflow-water-index/wflow-water-index.py | Fondus/Fondus-Python-SDK | ce6cfaffe5d978ac728f35b1b4bfcf49f7519576 | [
"Apache-2.0"
] | 1 | 2021-08-05T06:28:36.000Z | 2021-08-05T06:28:36.000Z | import argparse
import datetime
import math
from netCDF4 import Dataset
import numpy as np
import threading
import queue
# Worker 類別,負責處理資料
class Worker(threading.Thread):
    """Worker thread that turns grid-cell jobs into per-cell statistics.

    Each job is a tuple (x, y, cx, cy, array); for every job the worker
    pushes (x, y, max(array), number of values > 0.01) onto the result
    queue.
    """

    def __init__(self, req, res, num):
        """
        :param req: queue.Queue of pending (x, y, cx, cy, array) jobs.
        :param res: queue.Queue receiving (x, y, max, count) results.
        :param num: int, worker id used in log messages.
        """
        threading.Thread.__init__(self)
        self.req = req
        self.res = res
        self.num = num
        self.running = True

    def run(self):
        # Keep draining jobs until stop() is called AND the queue is empty,
        # so no queued work is lost at shutdown.
        while self.running or not self.req.empty():
            try:
                data = self.req.get(timeout=1)
                x, y, cx, cy, array = data
                # Bug fix: use the instance queues (self.req / self.res).
                # The original referenced the module-level globals `req` and
                # `res` here, which only worked by accident because globals
                # with those names happened to exist.
                self.res.put((x, y, np.max(array), np.count_nonzero(array > 0.01)))
                self.req.task_done()
                # print("Worker %d: %f,%f" % (self.num, x, y))
            except queue.Empty:
                print("Worker %d: Nothing to do, wait 1 second" % (self.num))

    def stop(self):
        # Ask the thread to finish once the request queue is drained.
        self.running = False
def get_range(v, low, high):
    """Return [start, end] indices of the cells of coordinate variable *v*
    whose extent (center +/- half cell spacing) overlaps [low, high].

    Assumes *v* is an evenly spaced, ascending netCDF coordinate variable
    (exposes get_dims() and numeric item access) -- TODO confirm.
    """
    dim = v.get_dims()[0].size
    d = (v[1]-v[0])/2  # half of the (assumed constant) cell spacing
    i0 = None
    i1 = dim
    for i in range(0, dim):
        # First cell whose upper edge reaches `low`.
        if i0 is None and v[i]+d >= low:
            i0 = i
        # Last cell whose lower edge is still at or below `high`.
        if v[i]-d <= high:
            i1 = i
    # NOTE(review): `if not i0` also fires when i0 == 0 (harmless here), and
    # i1 is the index *of* the last matching cell while callers slice with
    # [i0:i1], which excludes that cell -- possible off-by-one; confirm intent.
    if not i0:
        i0 = 0
    return [i0, i1]
def get_skip_index(v_time):
dim = v_time.get_dims()[0].size
n0 = v_time[0]
for i in range(1, dim):
if (v_time[i] - n0) >= 1440:
return i
return 0
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="source NC filename")
parser.add_argument("--output", help="output XYM ascii filename")
parser.add_argument(
"--bbox", help="bounding box to filter grids, format as x1,y1,x2,y2")
parser.add_argument(
"--skip", help="skip data of first N steps when model is warming up, default = 24 hour")
args = parser.parse_args()
input_filename = args.input if args.input else "wflow.nc"
print("Input file:", input_filename)
output_filename = args.output if args.output else "flow.asc"
print("Output file:", output_filename)
bbox = list(map(lambda s: float(s), args.bbox.split(','))
) if args.bbox else [-180.0, -90.0, 180, 90.0]
print("Bounding box:", bbox)
wflow = Dataset(input_filename, "r")
print("NETCDF metadata")
# print(wflow.dimensions)
# print(wflow.variables)
v_time = wflow.variables['time']
# print(v_time[:])
skip = args.skip if args.skip else get_skip_index(v_time)
print("Will skip first", skip, "steps")
v_lon = wflow.variables['x']
v_lat = wflow.variables['y']
dx = (v_lon[1]-v_lon[0])/2
dy = (v_lat[1]-v_lat[0])/2
lon_range = get_range(v_lon, bbox[0], bbox[2])
lat_range = get_range(v_lat, bbox[1], bbox[3])
v_data = wflow.variables['flow_simulated']
print("Loading data...")
# cache all data in memory
time = v_time[:]
lon = v_lon[:]
lat = v_lat[:]
data = v_data[:, lat_range[0]:lat_range[1], lon_range[0]:lon_range[1]]
print("Start processing...")
dim_t = wflow.dimensions['time'].size
tt = dim_t - skip
max_v = np.empty([lon.size, lat.size])
count_0_01_v = np.empty([lon.size, lat.size])
progress = dict(processed=0, queued=0, total_grid=(
lon_range[1]-lon_range[0])*(lat_range[1]-lat_range[0]))
# prepare for multithread
req = queue.Queue()
res = queue.Queue()
workers = [Worker(req, res, 1), Worker(req, res, 2)]
for i in range(0, len(workers)):
workers[i].start()
def processing_res():
while workers[0].is_alive() or workers[1].is_alive() or not res.empty():
try:
result = res.get(timeout=1)
(x, y, m, c) = result
max_v[x, y] = m
count_0_01_v[x, y] = 1.0 * c / tt
progress["processed"] += 1
res.task_done()
except queue.Empty:
print("Response worker: Nothing to do, wait 1 second")
if (progress["processed"] % 1000 == 0):
print("processed:", progress)
print("processed:", progress)
threading.Thread(target=processing_res, daemon=True).start()
for x in range(lon_range[0], lon_range[1]):
cx = lon[x]
print(cx, "queued:", progress)
for y in range(lat_range[0], lat_range[1]):
cy = lat[y]
id = 'x'+f'{cx:.4f}'+'y'+f'{cy:.4f}'
vt = np.array(data[skip:dim_t, y, x])
req.put((x, y, cx, cy, vt))
progress["queued"] += 1
# max_v[x,y]=np.max(vt)
# count_0_1_v=np.count_nonzero(vt>0.01)
wflow.close()
for i in range(0, len(workers)):
workers[i].stop()
for i in range(0, len(workers)):
workers[i].join()
res.join()
print("grid with flow value: ",np.count_nonzero(max_v > 0))
print("count with flow value: ",np.count_nonzero(count_0_01_v > 0))
with open(output_filename, 'w') as out_file:
for x in range(lon_range[0], lon_range[1]):
cx = lon[x]
for y in range(lat_range[0], lat_range[1]):
cy = lat[y]
m = max_v[x, y]
c = count_0_01_v[x, y]
if m > 0 or c > 0:
out_file.write("%f,%f,%f,%f\n" % (cx, cy, m, c))
| 27.480226 | 92 | 0.595189 | 720 | 0.147481 | 0 | 0 | 0 | 0 | 0 | 0 | 910 | 0.186399 |
0fbc4534160152a1c4aa5730f03972b8642ee17e | 8,532 | py | Python | plugins/input_fsx/datachannel.py | tunstek/hexi | ebb00e4e47ac90d96a26179a5786d768d95c4bd5 | [
"MIT"
] | 14 | 2017-10-07T23:19:09.000Z | 2021-10-08T12:13:59.000Z | plugins/input_fsx/datachannel.py | tunstek/hexi | ebb00e4e47ac90d96a26179a5786d768d95c4bd5 | [
"MIT"
] | 1 | 2018-07-16T17:03:43.000Z | 2018-07-16T17:03:43.000Z | plugins/input_fsx/datachannel.py | tunstek/hexi | ebb00e4e47ac90d96a26179a5786d768d95c4bd5 | [
"MIT"
] | 6 | 2018-05-18T14:25:26.000Z | 2021-03-28T12:37:21.000Z | import time
import asyncio
import random
import pyee
import logging
from plugins.input_fsx import fsx_pb2
from hexi.service import event
_logger = logging.getLogger(__name__)
class UDPServer(asyncio.DatagramProtocol):
def __init__(self, manager, token):
super().__init__()
self.manager = manager
self.token = token
self.sn = 0
def datagram_received(self, data, addr):
try:
# Note: there are no length prefix in UDP packets
msg = fsx_pb2.UdpResponseMessage()
msg.ParseFromString(data)
if msg.token != self.token:
_logger.warn('A message is discarded because of incorrect token')
self.manager.ee.emit('udp_discarded_message')
return
if msg.serialNumber <= self.sn:
_logger.warn('A message is discarded because of received newer message')
self.manager.ee.emit('udp_discarded_message')
return
self.sn = msg.serialNumber
self.manager.ee.emit('udp_received_message', msg)
except Exception as e:
_logger.warn(e)
self.manager.ee.emit('udp_discarded_message')
def connection_lost(self, exc):
self.manager.ee.emit('udp_closed')
class TCPClientManager(object):
def __init__(self, channel, host, port, retry_sec=2):
self.channel = channel
self.host = host
self.port = port
self.retry_sec = retry_sec
self.work_future = None
self.heartbeat_future = None
self.connect_future = None
self.reconnect_future = None
self.reader = None
self.writer = None
self.state = 'idle'
self.ee = channel.ee
async def connect_async(self):
while True and (self.state in ['connecting', 'reconnecting']):
try:
future = asyncio.open_connection(self.host, self.port)
reader, writer = await asyncio.wait_for(future, timeout=3)
_logger.info('Telemetry connected')
self.reader = reader
self.writer = writer
self.state = 'connected'
self.work_future = asyncio.ensure_future(self.work_async())
self.work_future.add_done_callback(self.on_work_done)
self.heartbeat_future = asyncio.ensure_future(self.heartbeat_async())
self.heartbeat_future.add_done_callback(self.on_heartbeat_done)
self.ee.emit('tcp_connected')
break
except (OSError, asyncio.TimeoutError):
#print('Server not connected, retry in {0} seconds'.format(self.retry_sec))
await asyncio.sleep(self.retry_sec)
def connect(self):
assert(self.state in ['idle', 'disconnected'])
assert(self.connect_future == None)
self.state = 'connecting'
self.connect_future = asyncio.ensure_future(self.connect_async())
self.connect_future.add_done_callback(self.on_connect_done)
return self.connect_future
def on_connect_done(self, future):
self.connect_future = None
async def heartbeat_async(self):
while True:
await asyncio.sleep(10)
msg = fsx_pb2.TcpRequestMessage()
msg.msgType = fsx_pb2.TcpRequestMessage.MSG_TYPE_PING
msg.pingBody.timeStamp = int(time.time())
self.write_message(msg)
def on_heartbeat_done(self, future):
self.heartbeat_future = None
async def work_async(self):
try:
while True:
size_buffer = await self.reader.readexactly(4)
size = int.from_bytes(size_buffer, byteorder='little')
body_buffer = await self.reader.readexactly(size)
msg = fsx_pb2.TcpResponseMessage()
msg.ParseFromString(body_buffer)
self.ee.emit('tcp_received_message', msg)
except (asyncio.IncompleteReadError, ConnectionResetError, ConnectionAbortedError):
pass
def on_work_done(self, future):
_logger.info('Telemetry connection lost')
self.work_future = None
if self.heartbeat_future != None:
self.heartbeat_future.cancel()
self.reader = None
self.writer = None
if self.state != 'disconnected':
self.reconnect()
async def reconnect_async(self):
await self.connect_async()
def reconnect(self):
assert(self.state == 'connected')
assert(self.reconnect_future == None)
_logger.info('Telemetry reconnecting')
self.state = 'reconnecting'
self.reconnect_future = asyncio.ensure_future(self.reconnect_async())
self.reconnect_future.add_done_callback(self.on_reconnect_done)
return self.reconnect_future
def on_reconnect_done(self, f):
self.reconnect_future = None
def disconnect(self):
assert(self.state in ['connecting', 'connected', 'reconnecting'])
self.state = 'disconnected'
if self.connect_future != None:
self.connect_future.cancel()
if self.reconnect_future != None:
self.reconnect_future.cancel()
if self.work_future != None:
self.work_future.cancel()
if self.heartbeat_future != None:
self.heartbeat_future.cancel()
if self.writer != None:
self.writer.close()
def write_message(self, msg):
data = msg.SerializeToString()
data = len(data).to_bytes(4, byteorder = 'little') + data
self.writer.write(data)
class UDPServerManager(object):
def __init__(self, channel, token, host, port):
self.channel = channel
self.token = token
self.host = host
self.port = port
self.transport = None
self.protocol = None
self.state = 'idle'
self.ee = channel.ee
def protocol_factory(self):
return UDPServer(self, self.token)
async def create_endpoint_async(self):
assert(self.state in ['idle', 'closed'])
self.state = 'opening'
loop = asyncio.get_event_loop()
transport, protocol = await loop.create_datagram_endpoint(
self.protocol_factory, local_addr=(self.host, self.port))
self.transport = transport
self.protocol = protocol
self.state = 'opened'
_logger.info('Telemetry receiver listening at {0}:{1}'.format(self.host, self.port))
def close(self):
assert(self.state in ['opening', 'opened'])
_logger.info('Telemetry receiver is closing')
self.state = 'closed'
if self.transport != None:
self.transport.close()
self.transport == None
self.protocol == None
class DataChannel(object):
def __init__(self, udp_port, tcp_host, tcp_port):
self.ee = pyee.EventEmitter()
self.udp_token = random.randint(0, 0x6FFFFFFF)
self.udp_port = udp_port
self.tcp = TCPClientManager(self, tcp_host, tcp_port)
self.udp = UDPServerManager(self, self.udp_token, '0.0.0.0', udp_port)
self.udp_receive_counter = 0
self.udp_discard_counter = 0
self.ee.on('tcp_connected', self.on_tcp_connected)
self.ee.on('tcp_received_message', self.on_tcp_received_message)
self.ee.on('udp_received_message', self.on_udp_received_message)
self.ee.on('udp_discarded_message', self.on_udp_discarded_message)
async def udp_analytics_async(self):
last_receive = 0
last_discard = 0
while True:
await asyncio.sleep(1)
delta_receive = self.udp_receive_counter - last_receive
delta_discard = self.udp_discard_counter - last_discard
last_receive = self.udp_receive_counter
last_discard = self.udp_discard_counter
self.ee.emit('udp_analytics_tick', {
'receive_all': last_receive,
'discard_all': last_discard,
'receive_tick': delta_receive,
'discard_tick': delta_discard})
def on_udp_analytics_done(self, future):
self.udp_analytics_future = None
async def start_async(self):
_logger.info('Starting telemetry channel')
self.udp_analytics_future = asyncio.ensure_future(self.udp_analytics_async())
self.udp_analytics_future.add_done_callback(self.on_udp_analytics_done)
await self.udp.create_endpoint_async()
await self.tcp.connect()
_logger.info('Telemetry channel started')
def stop(self):
_logger.info('Stopping telemetry channel')
if self.udp_analytics_future != None:
self.udp_analytics_future.cancel()
self.tcp.disconnect()
self.udp.close()
def on_tcp_connected(self):
self.udp.protocol.sn = 0
msg = fsx_pb2.TcpRequestMessage()
msg.msgType = fsx_pb2.TcpRequestMessage.MSG_TYPE_SET_CONFIG
msg.setConfigBody.udpPort = self.udp_port
msg.setConfigBody.udpToken = self.udp_token
self.tcp.write_message(msg)
def on_tcp_received_message(self, msg):
if msg.success != True:
_logger.error('Telemetry command failed')
def on_udp_received_message(self, msg):
self.udp_receive_counter = self.udp_receive_counter + 1
def on_udp_discarded_message(self):
self.udp_discard_counter = self.udp_discard_counter + 1
| 33.328125 | 88 | 0.703821 | 8,343 | 0.977848 | 0 | 0 | 0 | 0 | 2,995 | 0.351031 | 1,034 | 0.121191 |
0fbc5d08eb45c80c3b2464b89870b7a0e0d89550 | 4,722 | py | Python | shell/my_shell.py | utep-cs-systems-courses/os-shell-joshuaramoscs | 96b8bef864d097be693ec021783c9dcd4e08bb1d | [
"BSD-3-Clause"
] | null | null | null | shell/my_shell.py | utep-cs-systems-courses/os-shell-joshuaramoscs | 96b8bef864d097be693ec021783c9dcd4e08bb1d | [
"BSD-3-Clause"
] | null | null | null | shell/my_shell.py | utep-cs-systems-courses/os-shell-joshuaramoscs | 96b8bef864d097be693ec021783c9dcd4e08bb1d | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3
import os, sys, time, re
from my_readLine import my_readLine
# Set prompt to '$' if not set
def set_prompt():
# if not os.environ["PS1"]:
os.environ["PS1"] = '$'
# Change directory
def my_cd(args):
if len(args) == 1 or len(args) > 2: # If just cd or multiple args, then print invalid
os.write(2, "Invalid argument for command cd\n".encode())
else: # Try changing dir if available
try:
os.chdir(args[1])
except:
os.write(2, ("\"%s\" is not an available directory\n" % args[1]).encode())
# Determine input redirection and piping
def process_data(args):
pipeFlag = False
rc = os.fork() # create a new process to handle input
if rc < 0: # Failed forking, terminate with error 1
os.write(2, ("fork failed, returning %d\n" % rc).encode())
sys.exit(1)
elif rc == 0: # Child
if '>' in args: # If '>', redirect output
redirect_out(args)
if '<' in args: # If '<', redirect input
redirect_in(args)
if '|' in args: # If '|', pipe command
left = args[0:args.index("|")]
right = args[args.index("|")+1:]
pr, pw = os.pipe()
os.close(1) # close display fd
os.dup(pw) # duplicate pw
os.set_inheritable(1, True)
os.close(pr) # close pr
os.close(pw) # close pw
pipeFlag = True # Used to make sure parent closes pipe
execute_cmd(left)
os.write(2, ("Could not execute :(").encode())
sys.exit(1)
else:
execute_cmd(args) # execute command (args)
else: # Parent (forked ok)
if pipeFlag:
os.close(0) # close keyboard fd
os.dup(pr) # duplicate pr
os.set_inheritable(0, True)
os.close(pr) # close pr
os.close(pw) # close pw
pipeFlag = False
execute_cmd(right)
os.write(2, ("Could not execute :("). encode())
sys.exit(1)
# Redirects fd 1 to file
def redirect_out(args):
if ('|' in args) and (args.index('>') < args.index('|')): # check for a > f | b
os.write(2, "Output redirect, '>', can only be for the last subcommand of a pipe".encode())
sys.exit(3)
os.close(1) # Close display fd and replace it with file
os.open(args[args.index('>')+1], os.O_CREAT | os.O_WRONLY);
os.set_inheritable(1, True)
args.remove(args[args.index('>')+1]) # Remove '>' from argument
args.remove('>')
# Redirects fd 0 to file
def redirect_in(args):
if '|' in args and args.index('>') < args.index('|'): # check for a | b < f
os.write(1, "Input redirect, '<', can only be for the first subcommand of a pipe".encode())
sys.exit(4)
os.close(0) # Close keyboard fd and replace it with file
os.open(args[args.index('<')+1], os.O_RDONLY);
os.set_inheritable(0, True)
args.remove(args[args.index('<') + 1]) # Remove '<' from argument
args.remove('<')
# Executes given command if available
def execute_cmd(args): # Execute command
for dir in re.split(":", os.environ['PATH']): # Try each directory in the path
program = "%s/%s" % (dir, args[0])
try:
os.execve(program, args, os.environ) # Try to exec program
except FileNotFoundError: # ...expected
pass # ...fail quietly
os.write(2, ("\"%s\" is not an available command in your system\n" % args[0]).encode())
sys.exit(2) # If not a cmd, terminate with error 2
# MAIN
set_prompt()
while(True): # Loop forever
os.write(1,("%s" % os.environ["PS1"]).encode()) # Print shell prompt
args = my_readLine().split() # Get and tokenize input
if args == []: # If no input, continue
continue
elif args[0] == "exit": # If input is exit, then exit
sys.exit(0)
elif args[0] == "cd": # If input is cd, then change directory
my_cd(args)
continue
else: # Handle any other input
process_data(args)
| 42.540541 | 102 | 0.489199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,577 | 0.333969 |
0fbcbe12ef9216cd827006560b9e93c286ad369b | 531 | py | Python | activity/views.py | AkshayVKumar/ActivityManager | e87987a532b8441b41ecc038b51e4614b8d1a526 | [
"BSD-3-Clause"
] | null | null | null | activity/views.py | AkshayVKumar/ActivityManager | e87987a532b8441b41ecc038b51e4614b8d1a526 | [
"BSD-3-Clause"
] | 5 | 2020-06-06T01:47:24.000Z | 2022-02-10T14:42:15.000Z | activity/views.py | AkshayVKumar/ActivityManager | e87987a532b8441b41ecc038b51e4614b8d1a526 | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import viewsets
from .serializers import MemberSerializer, ActivityPeriodSerializer
from activity.models import UserProfile, ActivityPeriod
from django.http import JsonResponse
#class for displaying members
class MemberViewSet(viewsets.ModelViewSet):
queryset = UserProfile.objects.all()
serializer_class = MemberSerializer
#class for taking activity input
class ActivityPeriodViewSet(viewsets.ModelViewSet):
queryset = ActivityPeriod.objects.all()
serializer_class = ActivityPeriodSerializer
| 35.4 | 67 | 0.834275 | 267 | 0.502825 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.114878 |
0fbce71bbe2d1a218863819284d6848f63bba467 | 1,424 | py | Python | LJ_surrogates/parameter_modification.py | SimonBoothroyd/LJ_surrogates | 60b2c836ad571b384a7e962b7cec99db6e6fc7eb | [
"MIT"
] | null | null | null | LJ_surrogates/parameter_modification.py | SimonBoothroyd/LJ_surrogates | 60b2c836ad571b384a7e962b7cec99db6e6fc7eb | [
"MIT"
] | null | null | null | LJ_surrogates/parameter_modification.py | SimonBoothroyd/LJ_surrogates | 60b2c836ad571b384a7e962b7cec99db6e6fc7eb | [
"MIT"
] | null | null | null | from openff.toolkit.typing.engines.smirnoff.forcefield import ForceField
from simtk import openmm, unit
from scipy.stats import distributions
import copy
import numpy as np
import os
from smt.sampling_methods import LHS
def vary_parameters_lhc(filename, num_samples, output_directory):
forcefield = ForceField(filename, allow_cosmetic_attributes=True)
lj_params = forcefield.get_parameter_handler('vdW', allow_cosmetic_attributes=True)
smirks_types_to_change = ['[#6X4:1]', '[#1:1]-[#6X4]', '[#8X2H1+0:1]', '[#1:1]-[#8]']
param_range = np.asarray([0.75, 1.25])
n_dim = len(smirks_types_to_change) * 2
lj_sample_ranges = []
for i in range(n_dim):
lj_sample_ranges.append(param_range)
lj_sample_ranges = np.asarray(lj_sample_ranges)
sampling = LHS(xlimits=lj_sample_ranges)
values = sampling(num_samples)
os.makedirs(output_directory,exist_ok=True)
for i, value in enumerate(values):
reshape_values = value.reshape((int(n_dim/2), 2))
counter = 0
for lj in lj_params:
if lj.smirks in smirks_types_to_change:
lj.epsilon *= reshape_values[counter, 0]
lj.rmin_half *= reshape_values[counter, 1]
counter += 1
os.makedirs(os.path.join(output_directory,str(i+1)))
ff_name = 'force-field.offxml'
forcefield.to_file(os.path.join(output_directory, str(i+1),ff_name))
| 38.486486 | 89 | 0.691713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.054073 |
0fbe97554004825caf136fddc2974680e7d04552 | 635 | py | Python | order/models/customers.py | fbsamples/cp_reference | 028b384767d06158a64be8cbb1af613e2f3c881e | [
"MIT"
] | 2 | 2021-09-05T04:21:33.000Z | 2021-11-03T20:56:46.000Z | order/models/customers.py | fbsamples/cp_reference | 028b384767d06158a64be8cbb1af613e2f3c881e | [
"MIT"
] | null | null | null | order/models/customers.py | fbsamples/cp_reference | 028b384767d06158a64be8cbb1af613e2f3c881e | [
"MIT"
] | null | null | null | # Copyright 2004-present, Facebook. All Rights Reserved.
from django.db import models
from core.models import BaseModel
class Customer(BaseModel):
"""Represent a single customer instance
fields:
store: the store this customer belongs to
full_name: customer full name
email: customer email
addr: customer shipping addr
"""
store = models.ForeignKey("shop.Store", null=True, on_delete=models.SET_NULL)
full_name = models.CharField(max_length=255)
email = models.CharField(max_length=255)
addr = models.TextField(blank=True, null=True)
def __str__(self):
return self.full_name
| 27.608696 | 81 | 0.722835 | 512 | 0.806299 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.420472 |
0fbee81484a9cb1c52b3aeed6de4e2fee4bd9dfe | 8,601 | py | Python | kuzushiji/classify/level2.py | littlerain2310/japances_character | bdca6b30f3058af30462dcd5729eacb69f6fa83b | [
"MIT",
"BSD-3-Clause"
] | 81 | 2019-10-15T00:31:56.000Z | 2022-01-04T08:32:19.000Z | kuzushiji/classify/level2.py | littlerain2310/japances_character | bdca6b30f3058af30462dcd5729eacb69f6fa83b | [
"MIT",
"BSD-3-Clause"
] | 1 | 2019-11-26T16:24:56.000Z | 2019-11-28T09:01:37.000Z | kuzushiji/classify/level2.py | littlerain2310/japances_character | bdca6b30f3058af30462dcd5729eacb69f6fa83b | [
"MIT",
"BSD-3-Clause"
] | 16 | 2019-10-15T01:09:45.000Z | 2021-12-30T04:49:19.000Z | import argparse
from collections import defaultdict
import pickle
import re
import lightgbm as lgb
import pandas as pd
import numpy as np
import xgboost as xgb
from ..data_utils import SEG_FP, get_encoded_classes
from ..utils import print_metrics
from ..metric import get_metrics
from .blend import (
score_predictions_by_image_id, submission_from_predictions_by_image_id)
def main():
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('detailed_then_features', nargs='+',
help='detailed dataframes and the features in the same order')
arg('--use-xgb', type=int, default=1)
arg('--use-lgb', type=int, default=1)
arg('--num-boost-round', type=int, default=400)
arg('--lr', type=float, default=0.05, help='for lightgbm')
arg('--eta', type=float, default=0.15, help='for xgboost')
arg('--save-model')
arg('--load-model')
arg('--output')
arg('--n-folds', type=int, default=5)
arg('--seg-fp-adjust', type=float)
args = parser.parse_args()
if len(args.detailed_then_features) % 2 != 0:
parser.error('number of detailed and features must be equal')
n = len(args.detailed_then_features) // 2
detailed_paths, feature_paths = (args.detailed_then_features[:n],
args.detailed_then_features[n:])
if args.output:
if not args.load_model:
parser.error('--output needs --load-model')
elif len(feature_paths) == 1:
parser.error('need more than one feature df for train/valid split')
print('\n'.join(
f'{f} | {d}' for f, d in zip(detailed_paths, feature_paths)))
detailed_dfs = [pd.read_csv(path) for path in detailed_paths]
feature_dfs = [pd.read_csv(path) for path in feature_paths]
valid_df = feature_dfs[0]
assert valid_df.columns[0] == 'item'
assert valid_df.columns[-1] == 'y'
feature_cols = [
col for col in valid_df.columns[1:-1] if col not in {
'width', 'height', 'aspect',
'candidate_count', 'candidate_count_on_page',
'candidate_freq_on_page',
}]
top_cls_re = re.compile('^top_\d+_cls')
def build_features(df):
df = df[feature_cols].copy()
for col in feature_cols:
if top_cls_re.match(col):
df[f'{col}_is_candidate'] = df[col] == df['candidate_cls']
# del df[col]
print(' '.join(df.columns))
return df
classes = get_encoded_classes()
cls_by_idx = {idx: cls for cls, idx in classes.items()}
cls_by_idx[-1] = SEG_FP
y_preds = []
all_metrics = []
for fold_num in range(args.n_folds):
print(f'fold {fold_num}')
detailed = (detailed_dfs[fold_num if len(detailed_dfs) != 1 else 0]
.copy())
valid_df = feature_dfs[fold_num if len(feature_dfs) != 1 else 0].copy()
valid_features = build_features(valid_df)
xgb_valid_data = xgb.DMatrix(valid_features, label=valid_df['y'])
fold_path = lambda path, kind: f'{path}.{kind}.fold{fold_num}'
if args.load_model:
lgb_load_path = (fold_path(args.load_model, 'lgb')
if args.use_lgb else None)
xgb_load_path = (fold_path(args.load_model, 'xgb')
if args.use_xgb else None)
print(f'loading from {lgb_load_path}, {xgb_load_path}')
if lgb_load_path:
lgb_model = lgb.Booster(model_file=lgb_load_path)
if xgb_load_path:
with open(xgb_load_path, 'rb') as f:
xgb_model = pickle.load(f)
else:
train_df = pd.concat([df for i, df in enumerate(feature_dfs)
if i != fold_num])
train_features = build_features(train_df)
if args.use_lgb:
lgb_model = train_lgb(
train_features, train_df['y'],
valid_features, valid_df['y'],
lr=args.lr,
num_boost_round=args.num_boost_round)
if args.use_xgb:
xgb_model = train_xgb(
train_features, train_df['y'],
valid_features, valid_df['y'],
eta=args.eta,
num_boost_round=args.num_boost_round)
if args.save_model:
lgb_save_path = (fold_path(args.save_model, 'lgb')
if args.use_lgb else None)
xgb_save_path = (fold_path(args.save_model, 'xgb')
if args.use_xgb else None)
print(f'saving to {lgb_save_path}, {xgb_save_path}')
if lgb_save_path:
lgb_model.save_model(
lgb_save_path, num_iteration=lgb_model.best_iteration)
if xgb_save_path:
with open(xgb_save_path, 'wb') as f:
pickle.dump(xgb_model, f)
print('prediction')
predictions = []
if args.use_lgb:
predictions.append(lgb_model.predict(
valid_features, num_iteration=lgb_model.best_iteration))
if args.use_xgb:
predictions.append(xgb_model.predict(
xgb_valid_data, ntree_limit=xgb_model.best_ntree_limit))
valid_df['y_pred'] = np.mean(predictions, axis=0)
if args.seg_fp_adjust:
valid_df.loc[valid_df['candidate_cls'] == -1, 'y_pred'] += \
args.seg_fp_adjust
y_preds.append(valid_df['y_pred'].values)
max_by_item = get_max_by_item(valid_df)
print('scoring')
detailed['pred'] = \
max_by_item['candidate_cls'].apply(cls_by_idx.__getitem__)
print(f'SEG_FP ratio: {(detailed["pred"] == SEG_FP).mean():.5f}')
predictions_by_image_id = get_predictions_by_image_id(detailed)
if not args.output:
metrics = {
'accuracy': (detailed["pred"] == detailed["true"]).mean(),
}
metrics.update(
score_predictions_by_image_id(predictions_by_image_id))
print_metrics(metrics)
all_metrics.append(metrics)
if args.output:
valid_df['y_pred'] = np.mean(y_preds, axis=0)
max_by_item = get_max_by_item(valid_df)
detailed['pred'] = \
max_by_item['candidate_cls'].apply(cls_by_idx.__getitem__)
predictions_by_image_id = get_predictions_by_image_id(detailed)
submission = submission_from_predictions_by_image_id(
predictions_by_image_id)
submission.to_csv(args.output, index=False)
else:
print('\nAll folds:')
print_metrics(get_metrics(all_metrics))
def train_lgb(train_features, train_y, valid_features, valid_y, *,
lr, num_boost_round):
train_data = lgb.Dataset(train_features, train_y)
valid_data = lgb.Dataset(valid_features, valid_y, reference=train_data)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'learning_rate': lr,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'feature_fraction': 0.9,
'min_data_in_leaf': 20,
'num_leaves': 41,
'scale_pos_weight': 1.2,
'lambda_l2': 1,
}
print(params)
return lgb.train(
params=params,
train_set=train_data,
num_boost_round=num_boost_round,
early_stopping_rounds=20,
valid_sets=[valid_data],
verbose_eval=10,
)
def train_xgb(train_features, train_y, valid_features, valid_y, *,
eta, num_boost_round):
train_data = xgb.DMatrix(train_features, label=train_y)
valid_data = xgb.DMatrix(valid_features, label=valid_y)
params = {
'eta': eta,
'objective': 'binary:logistic',
'gamma': 0.01,
'max_depth': 8,
}
print(params)
eval_list = [(valid_data, 'eval')]
return xgb.train(
params, train_data, num_boost_round, eval_list,
early_stopping_rounds=20,
verbose_eval=10,
)
def get_max_by_item(df):
return (df.iloc[df.groupby('item')['y_pred'].idxmax()]
.reset_index(drop=True))
def get_predictions_by_image_id(detailed):
predictions_by_image_id = defaultdict(list)
for item in detailed.itertuples():
if item.pred != SEG_FP:
predictions_by_image_id[item.image_id].append({
'cls': item.pred,
'center': (item.x + item.w / 2, item.y + item.h / 2),
})
return predictions_by_image_id
if __name__ == '__main__':
main()
| 37.073276 | 79 | 0.598303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,189 | 0.13824 |
0fc20baeb9198c74f7c0542feb89fb8850d4a334 | 401 | py | Python | vkwave/vkscript/__init__.py | krasnovmv/vkwave | e0db86cc16f97797765aadfb811ec87ff7945b1f | [
"MIT"
] | 222 | 2020-03-30T18:09:20.000Z | 2022-03-27T18:25:04.000Z | vkwave/vkscript/__init__.py | krasnovmv/vkwave | e0db86cc16f97797765aadfb811ec87ff7945b1f | [
"MIT"
] | 62 | 2020-03-30T18:31:25.000Z | 2021-12-21T17:00:44.000Z | vkwave/vkscript/__init__.py | krasnovmv/vkwave | e0db86cc16f97797765aadfb811ec87ff7945b1f | [
"MIT"
] | 91 | 2020-03-30T18:34:49.000Z | 2022-03-23T12:58:49.000Z | import vkwave.vkscript.handlers.assignments
import vkwave.vkscript.handlers.blocks
import vkwave.vkscript.handlers.calls
import vkwave.vkscript.handlers.expressions
import vkwave.vkscript.handlers.statements
import vkwave.vkscript.handlers.types
from .converter import VKScriptConverter
from .execute import Execute
from .execute import execute
__all__ = ("execute", "Execute", "VKScriptConverter")
| 30.846154 | 53 | 0.842893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.092269 |
0fc3095b8ef6cc10b6acc06d5aa68037afc74cc5 | 692 | py | Python | greeting/cliente.py | javalisson/Sockets | 90068c0b5a4b2f21ca789177c3c445c671732a86 | [
"MIT"
] | 2 | 2017-04-26T11:17:56.000Z | 2017-12-05T01:55:20.000Z | greeting/cliente.py | javalisson/Sockets | 90068c0b5a4b2f21ca789177c3c445c671732a86 | [
"MIT"
] | 2 | 2017-02-22T12:35:13.000Z | 2017-03-29T12:44:22.000Z | greeting/cliente.py | javalisson/Sockets | 90068c0b5a4b2f21ca789177c3c445c671732a86 | [
"MIT"
] | 24 | 2017-02-22T12:26:04.000Z | 2020-10-13T05:19:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# adaptado de https://wiki.python.org/moin/TcpCommunication
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 5005
BUFFER_SIZE = 1024
NOME = "Javalisson"
print ("[CLIENTE] Iniciando")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print ("[CLIENTE] Conectando")
s.connect((TCP_IP, TCP_PORT))
print ("[CLIENTE] Enviando dados: \"" + NOME + "\"")
s.send(NOME.encode('utf-8'))
print ("[CLIENTE] Recebendo dados do servidor")
resposta = s.recv(BUFFER_SIZE)
print ("[CLIENTE] Dados recebidos em resposta do servidor: \"" + resposta.decode('utf-8') + "\"")
print ("[CLIENTE] Fechando conexão com o servidor")
s.close()
print ("[CLIENTE] Fim") | 25.62963 | 97 | 0.690751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 374 | 0.539683 |
0fc3569e087d76a290fb758960cbd352ee6f27dc | 2,132 | py | Python | blog/migrations/0001_initial.py | jxtxzzw/eoj3 | 468c16ed6de8b9b542972d0e83b02fd2cfa35e4f | [
"MIT"
] | 1 | 2020-11-17T13:08:07.000Z | 2020-11-17T13:08:07.000Z | blog/migrations/0001_initial.py | zerolfx/eoj3 | 156060399d1c3e5f7bcdbf34eaffbe2be66e1b20 | [
"MIT"
] | 2 | 2020-09-23T21:27:55.000Z | 2021-06-25T15:24:46.000Z | blog/migrations/0001_initial.py | zerolfx/eoj3 | 156060399d1c3e5f7bcdbf34eaffbe2be66e1b20 | [
"MIT"
] | 1 | 2019-07-13T00:44:39.000Z | 2019-07-13T00:44:39.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-06 16:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('problem', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128, verbose_name='Title')),
('text', models.TextField(verbose_name='Text')),
('visible', models.BooleanField(default=False, verbose_name='Visible')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Created time')),
('edit_time', models.DateTimeField(auto_now=True, verbose_name='Edit time')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-edit_time'],
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(verbose_name='Text')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Created time')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('blog', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Blog')),
('problem', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='problem.Problem')),
],
options={
'ordering': ['-create_time'],
},
),
]
| 42.64 | 125 | 0.604597 | 1,908 | 0.894934 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.169794 |
0fc40b5d3302c591d7c87cbcec40ba11831456cc | 1,187 | py | Python | crawler-pcstore/CrawlerForPcstore.py | hms5232/CaiBiBa | a5e9724dc3832d6f7bdd46ea69460c1112028278 | [
"MIT"
] | null | null | null | crawler-pcstore/CrawlerForPcstore.py | hms5232/CaiBiBa | a5e9724dc3832d6f7bdd46ea69460c1112028278 | [
"MIT"
] | null | null | null | crawler-pcstore/CrawlerForPcstore.py | hms5232/CaiBiBa | a5e9724dc3832d6f7bdd46ea69460c1112028278 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
# PEP 8: two blank lines before a top-level def, and one space after '#' in comments
# 搜尋
def search_title(html):
    """Prompt the user for a keyword and print the enclosing title text.

    Asks for a keyword on stdin, looks it up in *html*, and if found
    expands the match to the nearest tag boundaries: the last '>' before
    the keyword and the first '<' after it.  Only the text between those
    brackets is printed (the original also printed the bracket characters
    themselves).

    :param html: the HTML source text to search
    :returns: the index of the keyword in *html*, or the string "NULL"
        when the keyword is absent (kept for backward compatibility).
    """
    key = input("請輸入關鍵字:")  # ask the user for the search keyword
    starttag = html.find(key)  # index of the keyword, or -1 when missing
    if starttag == -1:
        return "NULL"
    # The title ends at the first '<' after the keyword (or at end of text).
    end = html.find("<", starttag)
    if end == -1:
        end = len(html)
    print("標題止於", end)
    # ...and starts right after the last '>' before the keyword
    # (start is 0 when there is no preceding tag).
    start = html.rfind(">", 0, starttag) + 1
    print("標題始於", start - 1)
    # Print the title text without the surrounding tag brackets.
    print(html[start:end])
    return starttag
"""
http://blog.castman.net/%E6%95%99%E5%AD%B8/2018/01/27/python-name-main.html
避免被引用的時候本身也執行一次
__name__:模組名稱。當被引用的時候就是模組名稱;如果是被直接執行就會變成__main__
藉此控制式判斷是被直接執行還是引用
"""
if __name__ == '__main__':
res = requests.get('https://www.pcstore.com.tw/')
res.encoding = 'big5' # 網站設定是 big5 編碼
print("\n關鍵字位於", search_title(res.text))
| 27.604651 | 75 | 0.53075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 794 | 0.513251 |
0fc78435f35004709d8d5a9264257cf022be0b7d | 4,337 | py | Python | Posts/migrations/0001_initial.py | rosekairu/Neighbourly | 3381b4472cd9d7de988ea57f1ecd931976e95853 | [
"MIT"
] | null | null | null | Posts/migrations/0001_initial.py | rosekairu/Neighbourly | 3381b4472cd9d7de988ea57f1ecd931976e95853 | [
"MIT"
] | 6 | 2021-03-30T13:56:43.000Z | 2021-09-22T19:26:50.000Z | Posts/migrations/0001_initial.py | rosekairu/Neighbourly | 3381b4472cd9d7de988ea57f1ecd931976e95853 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-07-19 17:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phone_field.models
class Migration(migrations.Migration):
    """Initial schema for the Posts app.

    Creates the Neighborhood, Profile, Police, Notice, HealthCenter and
    Business models.  Auto-generated by Django 3.0.8 — do not edit field
    definitions by hand; create a follow-up migration instead.
    """

    # First migration for this app.
    initial = True

    dependencies = [
        # The user model must exist before the ForeignKey/OneToOne fields
        # below can reference it.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # A geographic neighborhood, created/owned by a user.
        migrations.CreateModel(
            name='Neighborhood',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('location', models.CharField(max_length=100)),
                ('hood_name', models.CharField(max_length=100)),
                ('population', models.PositiveIntegerField(null=True)),
                # NOTE(review): default=True on a ForeignKey is unusual
                # (it coerces to pk=1); presumably null=True or no default
                # was intended — confirm against the current models.py.
                ('user', models.ForeignKey(default=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Per-user profile, linked one-to-one with the auth user and
        # optionally attached to a neighborhood.
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('prof_pic', models.ImageField(blank=True, upload_to='images/')),
                ('bio', models.CharField(max_length=250, null=True)),
                ('email', models.EmailField(max_length=100)),
                ('date_joined', models.DateTimeField(auto_now_add=True)),
                ('neighborhood', models.ForeignKey(default=2, null=True, on_delete=django.db.models.deletion.CASCADE, to='Posts.Neighborhood')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Police station contact details for a neighborhood.
        migrations.CreateModel(
            name='Police',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('station_name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=100)),
                ('tel', phone_field.models.PhoneField(blank=True, help_text='Police Station Phone Number', max_length=31)),
                ('neighborhood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Posts.Neighborhood')),
            ],
        ),
        # A notice/announcement posted by a user within a neighborhood.
        migrations.CreateModel(
            name='Notice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('notice_title', models.CharField(max_length=100, null=True)),
                ('notice_pic', models.ImageField(blank=True, null=True, upload_to='images/')),
                ('notice_details', models.CharField(max_length=250, null=True)),
                ('post_date', models.DateField(auto_now_add=True, null=True)),
                ('neighborhood', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Posts.Neighborhood')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Hospital/health-center contact details for a neighborhood.
        migrations.CreateModel(
            name='HealthCenter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hospital_name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=100)),
                ('tel', phone_field.models.PhoneField(blank=True, help_text='Hospital Phone Number', max_length=31)),
                ('neighborhood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Posts.Neighborhood')),
            ],
        ),
        # A business listed by a user within a neighborhood.
        migrations.CreateModel(
            name='Business',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bsns_name', models.CharField(max_length=250)),
                ('bsns_email', models.EmailField(max_length=100)),
                ('neighborhood', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Posts.Neighborhood')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 52.253012 | 144 | 0.611944 | 4,152 | 0.957344 | 0 | 0 | 0 | 0 | 0 | 0 | 619 | 0.142725 |
0fc80bccdde996736cc835d370d64e069c7c8b30 | 1,787 | py | Python | pip_services3_container/build/DefaultContainerFactory.py | banalna/pip-services3-container-python | d26aee1f49840eb0dac4ccb290b4808550494354 | [
"MIT"
] | null | null | null | pip_services3_container/build/DefaultContainerFactory.py | banalna/pip-services3-container-python | d26aee1f49840eb0dac4ccb290b4808550494354 | [
"MIT"
] | null | null | null | pip_services3_container/build/DefaultContainerFactory.py | banalna/pip-services3-container-python | d26aee1f49840eb0dac4ccb290b4808550494354 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pip_services3_container.build.DefaultContainerFactory
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Default container factory implementation
:copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from pip_services3_commons.refer import Descriptor
from pip_services3_components.build import CompositeFactory
from pip_services3_components.log import DefaultLoggerFactory
from pip_services3_components.count import DefaultCountersFactory
from pip_services3_components.config import DefaultConfigReaderFactory
from pip_services3_components.cache import DefaultCacheFactory
from pip_services3_components.auth import DefaultCredentialStoreFactory
from pip_services3_components.connect import DefaultDiscoveryFactory
from pip_services3_components.info._DefaultInfoFactory import DefaultInfoFactory
class DefaultContainerFactory(CompositeFactory):
    """
    Composite factory pre-loaded with the standard container component
    factories (context info, loggers, counters, config readers, caches,
    credential stores and discovery services), so those components can be
    created from their descriptors.
    """

    DefaultContainerFactoryDescriptor = Descriptor(
        "pip-services", "factory", "container", "default", "1.0"
    )

    def __init__(self, *factories):
        """
        Create a new instance, registering both the caller-supplied nested
        factories and the standard default component factories.

        :param factories: a list of nested factories
        """
        super(DefaultContainerFactory, self).__init__(factories)
        # Register the default component factories in a fixed order.
        default_factory_types = (
            DefaultInfoFactory,
            DefaultLoggerFactory,
            DefaultCountersFactory,
            DefaultConfigReaderFactory,
            DefaultCacheFactory,
            DefaultCredentialStoreFactory,
            DefaultDiscoveryFactory,
        )
        for factory_type in default_factory_types:
            self.add(factory_type())
| 39.711111 | 103 | 0.739228 | 847 | 0.473979 | 0 | 0 | 0 | 0 | 0 | 0 | 643 | 0.359821 |
0fc8765eef19cf60478c282b2ce414022ec0df47 | 5,148 | py | Python | com/vmware/vapi/protocol/server/transport/msg_handler.py | sumitrsystems/Vmware | 7705d9979bee71f02c71d63890616409044cba08 | [
"MIT"
] | null | null | null | com/vmware/vapi/protocol/server/transport/msg_handler.py | sumitrsystems/Vmware | 7705d9979bee71f02c71d63890616409044cba08 | [
"MIT"
] | null | null | null | com/vmware/vapi/protocol/server/transport/msg_handler.py | sumitrsystems/Vmware | 7705d9979bee71f02c71d63890616409044cba08 | [
"MIT"
] | null | null | null | """
Msg based protocol handler
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2015, 2017 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long
from collections import deque
from vmware.vapi.lib.log import get_vapi_logger
from vmware.vapi.protocol.server.api_handler import ApiHandler, AsyncApiHandler
from vmware.vapi.protocol.server.transport.async_protocol_handler import AsyncProtocolHandler
logger = get_vapi_logger(__name__)
def get_async_api_handler(api_handler):
    """
    Return an asynchronous api handler for the given handler.

    A synchronous :class:`vmware.vapi.protocol.server.api_handler.ApiHandler`
    is wrapped in a pooled async adapter backed by the shared 'api_handler'
    workers pool; any other handler is returned unchanged.

    :type  api_handler: :class:`vmware.vapi.protocol.server.api_handler.ApiHandler`
    :param api_handler: api handler instance
    :rtype: :class:`vmware.vapi.protocol.server.async_api_handler_adapter.PooledAsyncApiHandlerAdapter`
    :return: Threaded async api handler
    """
    if not isinstance(api_handler, ApiHandler):
        # Not a synchronous handler: use it as-is.
        return api_handler
    # Local imports: only needed on this branch.
    from vmware.vapi.protocol.server.async_api_handler_adapter import PooledAsyncApiHandlerAdapter
    from vmware.vapi.lib.workers_pool import get_workers_pool
    pool = get_workers_pool('api_handler')
    return PooledAsyncApiHandlerAdapter(api_handler, pool)
class MsgBasedProtocolHandler(AsyncProtocolHandler):
    """ Message based protocol handler.

    Buffers a complete request message per connection, dispatches it to an
    asynchronous api handler, and writes the response back on the same
    connection when the request reaches an end state.
    """

    def __init__(self, api_handler):
        """
        Message based protocol handler init

        :type  api_handler: :class:`vmware.vapi.protocol.server.api_handler.ApiHandler`
        :param api_handler: api handler instance
        """
        AsyncProtocolHandler.__init__(self)
        assert(api_handler)
        # Synchronous handlers are wrapped by get_async_api_handler so that
        # async_handle_request is always available below.
        self.api_handler = get_async_api_handler(api_handler)

    ## Begin AsyncProtocolHandler interface

    def get_data_handler(self, connection):
        """ Return a fresh per-connection data handler """
        data_handler = self.DataHandler(self, connection)
        return data_handler

    ## End AsyncProtocolHandler interface

    class DataHandler(AsyncProtocolHandler.DataHandler):
        """ Message based protocol data handler """

        def __init__(self, parent, connection):
            """ Message based protocol data handler init """
            AsyncProtocolHandler.DataHandler.__init__(self)
            self.parent = parent            # owning MsgBasedProtocolHandler
            self.connection = connection    # where the response will be written
            self.data = deque()             # received request chunks, in order

        ## Begin AsyncProtocolHandler.DataHandler interface
        def data_ready(self, data):
            # Buffer one chunk of the incoming request; empty chunks ignored.
            if data:
                self.data.append(data)

        def data_end(self):
            # End of request: join the buffered chunks and hand the full
            # message to the api handler.  The connection is captured in a
            # local so the callback still has it after _cleanup() clears
            # self.connection below.
            connection = self.connection

            def state_change_cb(*args, **kwargs):
                """ state change callback """
                self.request_state_change(connection, *args, **kwargs)
            self.parent.api_handler.async_handle_request(
                b''.join(self.data), state_change_cb)
            self._cleanup()

        def data_abort(self):
            # Request aborted before completion: drop buffered data.
            self._cleanup()

        # Used to throttle the lower layer from sending more data
        def can_read(self):
            # TODO: Throttle if needed
            return True
        ## End AsyncProtocolHandler.DataHandler interface

        def request_state_change(self, connection, state, response=None):    # pylint: disable=R0201
            """
            request state changed

            :type  connection: :class:`file`
            :param connection: response connection
            :type  state: :class:`int`
            :param state: refer to :class:`vmware.vapi.protocol.server.api_handler.\
                AsyncApiHandler.async_handle_request` state_change_cb
            :type  response: :class:`object`
            :param response: refer to :class:`vmware.vapi.protocol.server.api_handler.\
                AsyncApiHandler.async_handle_request` state_change_cb
            """
            if state in AsyncApiHandler.END_STATES:
                # Reached one of the end state.  Whatever happens, the
                # virtual connection is closed in the finally block.
                try:
                    if state == AsyncApiHandler.SUCCESS:
                        try:
                            connection.write(response)
                        except Exception as err:
                            # Connection closed
                            logger.error('write: Failed to write %s', err)
                    elif state == AsyncApiHandler.ERROR:
                        if response is None:
                            response = Exception("Error")
                        raise response    # pylint: disable=E0702
                    elif state == AsyncApiHandler.CANCELLED:
                        # Cancelled
                        pass
                    else:
                        # Unexpected state
                        raise NotImplementedError('Unexpected state %d' % state)
                finally:
                    connection.close()    # Close the virtual connection
                    connection = None
            else:
                # Transition state change
                pass

        def _cleanup(self):
            """ Cleanup: drop references so buffers/connection can be freed """
            self.data = None
            self.connection = None
            self.parent = None

        def __del__(self):
            self._cleanup()
| 37.576642 | 129 | 0.605089 | 3,933 | 0.763986 | 0 | 0 | 0 | 0 | 0 | 0 | 1,924 | 0.373737 |
0fc891bc1997dfbe1f1be5e5c5524986e39b1d74 | 5,345 | py | Python | validation_tests/case_studies/merewether/plot_results.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | 136 | 2015-05-07T05:47:43.000Z | 2022-02-16T03:07:40.000Z | validation_tests/case_studies/merewether/plot_results.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | 184 | 2015-05-03T09:27:54.000Z | 2021-12-20T04:22:48.000Z | validation_tests/case_studies/merewether/plot_results.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | 70 | 2015-03-18T07:35:22.000Z | 2021-11-01T07:07:29.000Z | from anuga.utilities import plot_utils as util
from matplotlib import pyplot as pyplot
import numpy
# Toggle progress messages on stdout.
verbose= True

# Read the ANUGA sww output and compute centroid values for plotting.
swwfile = 'merewether_1m.sww'
p=util.get_output(swwfile)
p2=util.get_centroids(p)

# Time index at last time
tindex = len(p2.time)-1

if verbose: print('calculating experimental transect')

# Field measurements along the transect: chainage (m), flow speed and
# depth.  The *_data lists are the reversed versions of the commented-out
# originals, so that values line up with increasing x_data.
x_data = [ 0.0, 3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0, 30.0, 33.0]
#vel = [ 0.0, 0.0, 1.1, 3.2, 3.4, 2.4, 3.2, 3.2, 3.7, 3.1, 0.4, 0.0]
vel_data = [ 0.0, 0.4, 3.1, 3.7, 3.2, 3.2, 2.4, 3.4, 3.2, 1.1, 0.0, 0.0]
#depth = [ 0.0, 0.0, 0.1, 0.5, 0.45, 0.4, 0.55, 0.1, 0.1, 0.05, 0.04, 0.0]
depth_data = [ 0.0, 0.04, 0.05, 0.1, 0.1, 0.55, 0.4, 0.45, 0.5, 0.1, 0.0, 0.0]

from scipy import interpolate
# 1D interpolators of the measured velocity/depth along the transect,
# used later to compare against the model values.
fvel = interpolate.interp1d(x_data, vel_data)
fdepth = interpolate.interp1d(x_data, depth_data)

if verbose: print('calculating model heights at observation points')
# Get nearest wet points to 'point observations'
point_observations = numpy.genfromtxt(
    'Observations/ObservationPoints.csv',
    delimiter=",",skip_header=1)

# One nearest-centroid index per observation point; initialised to -1.
nearest_points = point_observations[:,0]*0. - 1
for i in range(len(nearest_points)):
    # Compute distance of ANUGA points to observation, and
    # if the ANUGA point is dry then add a large value
    # Then find index of minimum
    n = ( (p2.x+p2.xllcorner-point_observations[i,0])**2 + \
        (p2.y+p2.yllcorner-point_observations[i,1])**2 + \
        (p2.stage[tindex,:] <= p2.elev)*1.0e+06).argmin()
    nearest_points[i] = n
# Write a CSV comparing the field observations, the ANUGA model stage and
# the TUFLOW reference values at each observation point.
#
# Fixes vs the original:
# * `print nearest_points.tolist()` was a Python-2 print statement — a
#   SyntaxError under Python 3 (the rest of the file uses print()).
# * nearest_points holds floats; indexing p2.stage with a float index is
#   rejected by modern numpy, so cast to int once up front (this also
#   avoids recomputing tolist() on every loop iteration).
# * The file is opened with a context manager, and write() is used instead
#   of writelines() on a plain string.
if verbose: print(nearest_points.tolist())

nearest_indices = [int(n) for n in nearest_points.tolist()]
with open('Stage_point_comparison.csv', 'w') as f:
    f.write('Field, ANUGA, TUFLOW, ANUGA minus Field, ANUGA minus TUFLOW \n')
    for i in range(len(nearest_indices)):
        po = point_observations[i, -2]  # field observation
        tu = point_observations[i, -1]  # TUFLOW model value
        anuga_data = p2.stage[tindex, nearest_indices[i]]
        newline = str(round(anuga_data - po, 2))
        f.write(str(round(po, 2)) + ', ' + str(round(anuga_data, 2)) + ', ' +
                str(tu) + ', ' + str(round(anuga_data - po, 2)) + ', ' +
                str(round(anuga_data - tu, 2)) + '\n')
if verbose: print('Plot transect')
## Plot transect 1 [need to guess appropriate end points as these are not so
## clear from the report]
xx=util.near_transect(p2,[103, 100.], [130.,80.],tol=0.5)
xx2=xx[0]

# Figure 1: transect location plus model-vs-data speed and depth profiles.
pyplot.clf()
pyplot.figure(figsize=(16,10.5))
pyplot.subplot(121)
pyplot.scatter(p2.x, p2.y, c=p2.elev,edgecolors='none')
# Add nice elevation data (clipped to [19, 25] for a readable colour range)
colVals = numpy.maximum(numpy.minimum(p2.elev, 25.), 19.)
util.plot_triangles(p, values = colVals, edgecolors='none')
pyplot.gca().set_aspect('equal')
pyplot.scatter(p2.x[xx2],p2.y[xx2],color='green')
pyplot.xlim( (40., 160.))
pyplot.ylim( (0.,140.))
pyplot.title('Transect points in green')

# Model vs measured flow speed along the transect.
pyplot.subplot(222)
pyplot.scatter(xx[1],p2.vel[tindex,xx[0]],color='green',label='model')
pyplot.scatter(xx[1],fvel(xx[1]),color='blue',label='data')
pyplot.legend(loc='upper left')
#pyplot.xlim(0,25)
pyplot.title('Final flow speed along the transect')

# Model vs measured depth along the transect.
pyplot.subplot(224)
pyplot.scatter(xx[1],p2.stage[tindex,xx[0]]-p2.elev[xx[0]],color='green',label='model')
pyplot.scatter(xx[1],fdepth(xx[1]),color='blue',label='data')
pyplot.legend(loc='upper left')
#pyplot.xlim(0,25)
pyplot.title('Final depth along the transect')
pyplot.savefig('Transect1.png', bbox_inches='tight')

if verbose: print('Plot velocity field')
pyplot.clf()

# Velocity vector plot
pyplot.figure(figsize=(16,22))
pyplot.scatter(p2.x,p2.y,c=(p2.elev>24.),edgecolors='none', s=0.2)
pyplot.gca().set_aspect('equal')
pyplot.xlim((100,180))
pyplot.ylim((100,210))
#k=range(0,len(p2.x),2) # Thin out the vectors for easier viewing
colVals = numpy.maximum(numpy.minimum(p2.elev, 25.), 19.)
util.plot_triangles(p, values = colVals, edgecolors='white')
k = range(len(p2.x))
# Thin out the triangles
#k = (((10.*(p2.x - p2.x.round())).round()%2 == 0.0)*((10.*(p2.y - p2.y.round())).round()%2 == 0.0)).nonzero()[0]
pyplot.quiver(p2.x[k],p2.y[k],p2.xvel[tindex,k], p2.yvel[tindex,k],
              scale_units='xy',units='xy',width=0.1,
              color='black',scale=1.0)
pyplot.savefig('velocity_stationary.png',dpi=100, bbox_inches='tight')

## Froude number plot
if verbose: print('Plot Froude number plot')
pyplot.clf()
pyplot.figure(figsize=(6,8))
# Froude number Fr = v / sqrt(g*h), with depth floored at 1 mm to avoid
# division by zero in dry cells.
froude_number = p2.vel[tindex]/(numpy.maximum(p2.height[tindex], 1.0e-03)*9.8)**0.5
# Category 0 = dry/still, 1 = subcritical (0,1], 2 = supercritical (>1).
froude_category = (froude_number>1.).astype(float) + (froude_number > 0.).astype(float)
pyplot.scatter(p2.x,p2.y,edgecolors='none', s=0.2)

## Fake additions to plot to hack matplotlib legend
pyplot.scatter(0.,0., color='FireBrick',label='>1', marker='s')
pyplot.scatter(0.,0., color='PaleGreen',label='0-1', marker='s')
pyplot.scatter(0.,0., color='blue',label='0',marker='s')
pyplot.gca().set_aspect('equal')
util.plot_triangles(p, values = froude_category, edgecolors='none')
pyplot.xlim((p.x.min(), p.x.max()))
pyplot.ylim((p.y.min(), p.y.max()))
pyplot.title("Froude Number zones: 0, (0,1], or >1")

import matplotlib.patches as mpatches
#red_patch = mpatches.Patch(color='red', label='>1')
#green_patch = mpatches.Patch(color='green', label='(0-1]')
#blue_patch = mpatches.Patch(color='blue', label='0.')
#pyplot.legend(handles=[red_patch, green_patch, blue_patch], labels=['>1', '(0-1]', '0.'], loc='best')
pyplot.legend(loc='upper left')
pyplot.savefig('froudeNumber.png',dpi=100,bbox_inches='tight')
| 35.397351 | 113 | 0.674088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,831 | 0.342563 |
0fc967fb38a574ad027030e148c321d06351ec3c | 2,287 | py | Python | examples/ignore_early_canceled_tests.py | adavidzh/openhtf | 0f34f6c0b53d8d0c42e2da1ddde2f07534d8286c | [
"Apache-2.0"
] | 1 | 2020-07-25T19:05:49.000Z | 2020-07-25T19:05:49.000Z | examples/ignore_early_canceled_tests.py | adavidzh/openhtf | 0f34f6c0b53d8d0c42e2da1ddde2f07534d8286c | [
"Apache-2.0"
] | 80 | 2020-09-15T04:44:28.000Z | 2021-03-16T19:26:25.000Z | examples/ignore_early_canceled_tests.py | adavidzh/openhtf | 0f34f6c0b53d8d0c42e2da1ddde2f07534d8286c | [
"Apache-2.0"
] | 2 | 2018-12-20T07:01:20.000Z | 2020-05-18T21:13:17.000Z | # Copyright 2018 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of excluding certain test records from the output callbacks.
In this case, we exclude tests which were aborted before a DUT ID was set, since
they are unlikely to contain any useful information. Note that "abort" refers to
a KeyboardInterrupt. If any other error occurs before the DUT ID is set, those
records are not excluded, since they may be relevant for debugging.
It may make sense to implement this check if your hardware tests follow the
common pattern of waiting for the DUT ID to be entered via a prompt at the test
start.
"""
import openhtf as htf
from openhtf.output.callbacks import json_factory
from openhtf.core import test_record
from openhtf.plugs import user_input
from openhtf.util import console_output
DEFAULT_DUT_ID = '<UNSET_DUT_ID>'
class CustomOutputToJSON(json_factory.OutputToJSON):
  """JSON output callback that drops records aborted before a DUT ID was set."""

  def __call__(self, record):
    aborted_before_dut_id = (
        record.outcome == test_record.Outcome.ABORTED
        and record.dut_id == DEFAULT_DUT_ID)
    if aborted_before_dut_id:
      # Nothing useful was recorded yet, so no JSON file is written.
      console_output.cli_print(
          'Test was aborted at test start. Skipping output to JSON.')
      return
    console_output.cli_print('Outputting test record to JSON.')
    super(CustomOutputToJSON, self).__call__(record)
@htf.plug(user=user_input.UserInput)
def HelloWorldPhase(test, user):
  """Log a greeting and show the operator the DUT ID that was recorded."""
  test.logger.info('Hello World!')
  user.prompt('The DUT ID is `%s`. Press enter to continue.' %
              test.test_record.dut_id)
def main():
  """Build and run the hello-world test, writing results to per-DUT JSON."""
  json_callback = CustomOutputToJSON('./{dut_id}.hello_world.json', indent=2)
  test = htf.Test(HelloWorldPhase)
  test.configure(default_dut_id=DEFAULT_DUT_ID)
  test.add_output_callbacks(json_callback)
  test.execute(test_start=user_input.prompt_for_test_start())
if __name__ == '__main__':
main()
| 35.184615 | 80 | 0.758636 | 416 | 0.181898 | 0 | 0 | 206 | 0.090074 | 0 | 0 | 1,335 | 0.583734 |