hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acea46e65b650c26d9f0d43e994933729bcbf8de | 1,303 | py | Python | RhinoMethod.py | AlanWu9296/Labyrinth_Python-Rhino | e2d162d43c2134ab1a1d32b894e2f3b3ee11965a | [
"MIT"
] | 3 | 2016-08-30T01:22:33.000Z | 2016-09-02T20:24:09.000Z | RhinoMethod.py | AlanWu9296/Labyrinth_Python-Rhino | e2d162d43c2134ab1a1d32b894e2f3b3ee11965a | [
"MIT"
] | null | null | null | RhinoMethod.py | AlanWu9296/Labyrinth_Python-Rhino | e2d162d43c2134ab1a1d32b894e2f3b3ee11965a | [
"MIT"
] | 1 | 2018-11-14T06:48:23.000Z | 2018-11-14T06:48:23.000Z | import rhinoscriptsyntax as rs
def drawAtLayer(drawMethod, rhinoObject, layer, color = None):
    """Draw a single Rhino object on the given layer.

    Creates the layer (with ``color``) if it does not exist yet; if it
    already exists its color is updated.  The layer is then made current
    and ``drawMethod(rhinoObject)`` is invoked, so the new geometry lands
    on ``layer``.

    Parameters:
        drawMethod:  one-argument rhinoscriptsyntax draw function
        rhinoObject: the object/data passed to ``drawMethod``
        layer:       layer name (str)
        color:       optional layer color; ``rs.LayerColor(layer, None)``
                     leaves the existing color unchanged
    """
    if rs.IsLayer(layer):
        # layer already exists: only (re)apply the color
        rs.LayerColor(layer, color)
    else:
        rs.AddLayer(layer, color)
    rs.CurrentLayer(layer)
    drawMethod(rhinoObject)
def drawAtLayer2(drawMethod, rhinoObject1, rhinoObject2, layer, color = None):
    """Two-argument variant of :func:`drawAtLayer`.

    Ensures ``layer`` exists (creating it with ``color`` if needed, or
    updating the color if it already exists), makes it current, and then
    calls ``drawMethod(rhinoObject1, rhinoObject2)`` so the resulting
    geometry is placed on that layer.
    """
    if rs.IsLayer(layer):
        # layer already exists: only (re)apply the color
        rs.LayerColor(layer, color)
    else:
        rs.AddLayer(layer, color)
    rs.CurrentLayer(layer)
    drawMethod(rhinoObject1, rhinoObject2)
def drawOpenLine(EndPoints, ratio, layer, color = None):
    """Draw a line with a central gap ("open" line) on the given layer.

    Only the two outer stubs of the segment ``EndPoints[0]``-``EndPoints[1]``
    are drawn; the middle fraction ``ratio`` of the segment is left open.
    E.g. ``ratio=0.5`` draws the outer quarter at each end.

    Parameters:
        EndPoints: sequence of two 3-D points (segment endpoints)
        ratio:     fraction (0..1) of the segment length left un-drawn
        layer:     target layer name; created with ``color`` if missing
        color:     optional color for a newly created layer
    """
    if not rs.IsLayer(layer):
        rs.AddLayer(layer, color)
    rs.CurrentLayer(layer)
    pt1 = EndPoints[0]
    pt2 = EndPoints[1]
    vector12 = rs.VectorCreate(pt2, pt1)
    # each stub covers half of the non-open portion of the segment
    vectorRatio = (1 - ratio) * 0.5
    pt1End = rs.VectorAdd(pt1, (vector12 * vectorRatio))
    pt2End = rs.VectorAdd(pt2, ((-vector12) * vectorRatio))
    rs.AddLine(pt1, pt1End)
    rs.AddLine(pt2, pt2End)
def setChoromeColor(index, max, colorHBL, isChromed = False):
    """Return an RGB color interpolated along a gradient in HLS space.

    Parameters:
        index:     position in the gradient, 0 .. ``max``
        max:       number of gradient steps (NOTE: shadows the builtin
                   ``max``; kept for backward compatibility with callers
                   using keyword arguments)
        colorHBL:  base color as an (H, L, S) triple
        isChromed: if True, scale the S component down as ``index`` grows;
                   otherwise scale the H component up with ``index``

    Returns:
        RGB color converted via ``rs.ColorHLSToRGB``.
    """
    if isChromed:
        colorNew = (colorHBL[0], colorHBL[1], colorHBL[2] / max * (max - index))
    else:
        colorNew = (colorHBL[0] / max * index, colorHBL[1], colorHBL[2])
    color = rs.ColorHLSToRGB(colorNew)
    return color
| 27.145833 | 78 | 0.656946 |
acea47259dad805fd8a2142cef570746e3283217 | 14,205 | py | Python | actions/autoSign.py | jzksnsjswkw/ruoli-sign-optimization | 8ef1f90613bfb94a7ceb1d870a29442673f6451b | [
"MIT"
] | 1 | 2022-02-21T10:18:18.000Z | 2022-02-21T10:18:18.000Z | actions/autoSign.py | jzksnsjswkw/ruoli-sign-optimization | 8ef1f90613bfb94a7ceb1d870a29442673f6451b | [
"MIT"
] | null | null | null | actions/autoSign.py | jzksnsjswkw/ruoli-sign-optimization | 8ef1f90613bfb94a7ceb1d870a29442673f6451b | [
"MIT"
] | null | null | null | import json
import re
from requests_toolbelt import MultipartEncoder
from todayLoginService import TodayLoginService
from liteTools import LL, DT, RT, MT, TaskError, CpdailyTools
class AutoSign:
    """Automated check-in ("sign") client for the campus-daily app.

    Wraps an authenticated session from ``TodayLoginService`` and walks
    through the full flow: pick a task, fetch its details, fill the form
    (optionally replaying a historical submission), and submit it with
    the encrypted Cpdaily headers.
    """

    # Initialise the sign-in helper with an authenticated session.
    def __init__(self, todayLoginService: TodayLoginService, userInfo):
        self.session = todayLoginService.session
        self.host = todayLoginService.host
        self.userInfo = userInfo
        self.taskInfo = None   # summary of the selected task (wids + name)
        self.task = None       # full task detail from the server
        self.form = {}         # form payload being assembled for submit
        self.fileName = None

    # Fetch the day's sign-in task list and select the task to handle.
    def getUnSignTask(self):
        LL.log(1, '获取未签到的任务')
        headers = self.session.headers
        headers['Content-Type'] = 'application/json'
        # First request only obtains the cookies (MOD_AUTH_CAS).
        url = f'{self.host}wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay'
        self.session.post(url, headers=headers,
                          data=json.dumps({}), verify=False)
        # Second request actually returns the task list.
        res = self.session.post(url, headers=headers,
                                data=json.dumps({}), verify=False)
        res = DT.resJsonEncode(res)
        LL.log(1, '返回的列表数据', res['datas'])
        # signLevel widens the candidate pool: 0 = unsigned only,
        # 1 = + leave tasks, 2 = + already-signed tasks.
        signLevel = self.userInfo.get('signLevel', 1)
        if signLevel >= 0:
            taskList = res['datas']['unSignedTasks']  # not yet signed
        if signLevel >= 1:
            taskList += res['datas']['leaveTasks']  # no sign-in required
        if signLevel == 2:
            taskList += res['datas']['signedTasks']  # already signed
        # Bail out when there is nothing to sign.
        if len(taskList) < 1:
            LL.log(1, '无需要签到的任务')
            raise TaskError('无需要签到的任务')
        if self.userInfo.get('title'):
            # Pick the task whose name matches the configured title regex.
            for righttask in taskList:
                if re.search(self.userInfo['title'], righttask['taskName']):
                    self.taskName = righttask['taskName']
                    LL.log(1, '匹配标题的任务', righttask['taskName'])
                    self.taskInfo = {'signInstanceWid': righttask['signInstanceWid'],
                                     'signWid': righttask['signWid'], 'taskName': righttask['taskName']}
                    return self.taskInfo
            # No task name matched the configured title.
            LL.log(1, '没有匹配标题的任务')
            raise TaskError('没有匹配标题的任务')
        else:  # no 'title' configured by the user
            # Fall back to the first task in the (combined) list.
            latestTask = taskList[0]
            self.taskName = latestTask['taskName']
            LL.log(1, '最后一个未签到的任务', latestTask['taskName'])
            self.taskInfo = {'signInstanceWid': latestTask['signInstanceWid'],
                             'signWid': latestTask['signWid'], 'taskName': latestTask['taskName']}
            return self.taskInfo

    # Look up the most recent historical submission of the same task.
    def getHistoryTaskInfo(self):
        '''Fetch the details of a matching, previously signed task.'''
        headers = self.session.headers
        headers['Content-Type'] = 'application/json;charset=UTF-8'
        # Fetch the list of months that contain sign-in records.
        url = f'{self.host}wec-counselor-sign-apps/stu/sign/getStuIntervalMonths'
        res = self.session.post(url, headers=headers,
                                data=json.dumps({}), verify=False)
        res = DT.resJsonEncode(res)
        monthList = [i['id'] for i in res['datas']['rows']]
        monthList.sort(reverse=True)  # most recent month first
        # Walk months from newest to oldest.
        for month in monthList:
            # Fetch that month's per-day sign-in records.
            req = {"statisticYearMonth": month}
            url = f'{self.host}wec-counselor-sign-apps/stu/sign/getStuSignInfosByWeekMonth'
            res = self.session.post(
                url, headers=headers, data=json.dumps(req), verify=False)
            res = DT.resJsonEncode(res)
            monthSignList = list(res['datas']['rows'])
            # Walk the days of the month, newest first.
            monthSignList.sort(
                key=lambda x: x['dayInMonth'], reverse=True)  # days descending
            for daySignList in monthSignList:
                # Look for a signed task matching the current task's signWid.
                for task in daySignList['signedTasks']:
                    if task['signWid'] == self.taskInfo['signWid']:
                        # Found a matching historical submission; fetch its form.
                        historyTaskId = {
                            "wid": task['signInstanceWid'], "content": task['signWid']}
                        # Refresh the cookie before the detail request.
                        url = f'{self.host}wec-counselor-sign-apps/stu/sign/getUnSeenQuestion'
                        self.session.post(url, headers=headers, data=json.dumps(
                            historyTaskId), verify=False)
                        # Fetch the historical task detail.
                        historyTaskId = {
                            "signInstanceWid": task['signInstanceWid'], "signWid": task['signWid']}
                        url = f'{self.host}wec-counselor-sign-apps/stu/sign/detailSignInstance'
                        res = self.session.post(
                            url, headers=headers, data=json.dumps(historyTaskId), verify=False)
                        res = DT.resJsonEncode(res)
                        # Extra requests mimicking the real app's traffic.
                        url = f'{self.host}wec-counselor-sign-apps/stu/sign/queryNotice'
                        self.session.post(url, headers=headers,
                                          data=json.dumps({}), verify=False)
                        url = f'{self.host}wec-counselor-sign-apps/stu/sign/getQAconfigration'
                        self.session.post(url, headers=headers,
                                          data=json.dumps({}), verify=False)
                        # Post-process the returned data.
                        result = res['datas']
                        # Randomise the coordinates within the configured range.
                        result['longitude'] = float(result['longitude'])
                        result['latitude'] = float(result['latitude'])
                        result['longitude'], result['latitude'] = RT.locationOffset(
                            result['longitude'], result['latitude'], self.userInfo['global_locationOffsetRange'])
                        result['photograph'] = result['photograph'] if len(
                            result['photograph']) != 0 else ""
                        result['extraFieldItems'] = [{"extraFieldItemValue": i['extraFieldItem'],
                                                      "extraFieldItemWid": i['extraFieldItemWid']} for i in result['signedStuInfo']['extraFieldItemVos']]
                        # Cache and return the reconstructed history info.
                        LL.log(1, '历史签到情况的详情', result)
                        self.historyTaskInfo = result
                        return result
        # No matching historical task found in any month.
        LL.log(2, "没有找到匹配的历史任务")
        raise TaskError("没有找到匹配的历史任务")

    def getDetailTask(self):
        # Fetch the full detail of the selected task into ``self.task``.
        LL.log(1, '获取具体的签到任务详情')
        url = f'{self.host}wec-counselor-sign-apps/stu/sign/detailSignInstance'
        headers = self.session.headers
        headers['Content-Type'] = 'application/json;charset=UTF-8'
        res = self.session.post(url, headers=headers, data=json.dumps(
            self.taskInfo), verify=False)
        res = DT.resJsonEncode(res)
        LL.log(1, '签到任务的详情', res['datas'])
        self.task = res['datas']

    # Fill in the submission form.
    def fillForm(self):
        LL.log(1, '填充表单')
        if self.userInfo['getHistorySign']:
            # Replay the user's previous submission of the same task.
            self.getHistoryTaskInfo()
            hti = self.historyTaskInfo
            self.form['isNeedExtra'] = self.task['isNeedExtra']
            self.form['signInstanceWid'] = self.task['signInstanceWid']
            self.form['signPhotoUrl'] = hti['signPhotoUrl']
            self.form['extraFieldItems'] = hti['extraFieldItems']
            self.form['longitude'], self.form['latitude'] = hti['longitude'], hti['latitude']
            # Check whether the coordinates fall inside a sign-in area.
            self.form['isMalposition'] = 1
            for place in self.task['signPlaceSelected']:
                if MT.geoDistance(self.form['longitude'], self.form['latitude'], place['longitude'], place['latitude']) < place['radius']:
                    self.form['isMalposition'] = 0
                    break
            self.form['abnormalReason'] = hti.get(
                'abnormalReason', '回家')  # WARNING: key not observed in history info
            self.form['position'] = hti['signAddress']
            self.form['uaIsCpadaily'] = True
            self.form['signVersion'] = '1.0.0'
        else:
            # Build the form from the user's configuration.
            self.form['position'] = self.userInfo['address']
            self.form['longitude'] = self.userInfo['lon']
            self.form['latitude'] = self.userInfo['lat']
            # Basic constant parameters.
            self.form['signVersion'] = '1.0.0'
            self.form['uaIsCpadaily'] = True
            # Upload a photo when the task requires one.
            if self.task.get('isPhoto') == 1:
                pic = self.userInfo['photo']
                picBlob, picType = RT.choicePhoto(pic, dirTimeFormat=True)
                # Upload the picture to object storage.
                url_getUploadPolicy = f'{self.host}wec-counselor-sign-apps/stu/obs/getUploadPolicy'
                ossKey = CpdailyTools.uploadPicture(
                    url_getUploadPolicy, self.session, picBlob, picType)
                # Resolve the uploaded picture's URL.
                url_previewAttachment = f'{self.host}wec-counselor-sign-apps/stu/sign/previewAttachment'
                imgUrl = CpdailyTools.getPictureUrl(
                    url_previewAttachment, self.session, ossKey)
                self.form['signPhotoUrl'] = imgUrl
            else:
                self.form['signPhotoUrl'] = ''
            # Fill the extra form fields when the task requires them.
            self.form['isNeedExtra'] = self.task['isNeedExtra']
            if self.task['isNeedExtra'] == 1:
                extraFields = self.task['extraField']
                userItems = self.userInfo['forms']
                extraFieldItemValues = []
                for i in range(len(extraFields)):
                    userItem = userItems[i]['form']
                    extraField = extraFields[i]
                    # Optionally verify the user's field title matches the server's.
                    if self.userInfo['checkTitle'] == 1:
                        if userItem['title'] != extraField['title']:
                            raise Exception(
                                f'\r\n第{i + 1}个配置出错了\r\n您的标题为:{userItem["title"]}\r\n系统的标题为:{extraField["title"]}')
                    extraFieldItems = extraField['extraFieldItems']
                    flag = False
                    for extraFieldItem in extraFieldItems:
                        if extraFieldItem['isSelected']:
                            # remember the previously selected option for error reporting
                            data = extraFieldItem['content']
                        if extraFieldItem['content'] == userItem['value']:
                            flag = True
                            extraFieldItemValue = {'extraFieldItemValue': userItem['value'],
                                                   'extraFieldItemWid': extraFieldItem['wid']}
                            # "Other" option carries free text from extraValue.
                            if extraFieldItem['isOtherItems'] == 1:
                                flag = True
                                extraFieldItemValue = {'extraFieldItemValue': userItem['extraValue'],
                                                       'extraFieldItemWid': extraFieldItem['wid']}
                            extraFieldItemValues.append(extraFieldItemValue)
                    if not flag:
                        raise Exception(
                            f'\r\n第{ i + 1 }个配置出错了\r\n表单未找到你设置的值:{userItem["value"]}\r\n,你上次系统选的值为:{ data }')
                self.form['extraFieldItems'] = extraFieldItemValues
            self.form['abnormalReason'] = self.userInfo['abnormalReason']
            # A non-empty qrUuid means this is a QR-code sign-in.
            if self.userInfo['qrUuid']:
                self.form['qrUuid'] = self.userInfo['qrUuid']
            else:
                self.form['signInstanceWid'] = self.task['signInstanceWid']
                # Check whether the coordinates fall inside a sign-in area.
                self.form['isMalposition'] = 1
                for place in self.task['signPlaceSelected']:
                    if MT.geoDistance(self.form['longitude'], self.form['latitude'], place['longitude'], place['latitude']) < place['radius']:
                        self.form['isMalposition'] = 0
                        break
        LL.log(1, "填充完毕的表单", self.form)

    def getSubmitExtension(self):
        '''Build the encrypted Cpdaily extension header and signed payload.'''
        extension = {
            "lon": self.form['longitude'],
            "lat": self.form['latitude'],
            "model": self.userInfo['model'],
            "appVersion": self.userInfo['appVersion'],
            "systemVersion": self.userInfo['systemVersion'],
            "userId": self.userInfo['username'],
            "systemName": self.userInfo['systemName'],
            "deviceId": self.userInfo['deviceId']
        }
        self.cpdailyExtension = CpdailyTools.encrypt_CpdailyExtension(
            json.dumps(extension))
        self.bodyString = CpdailyTools.encrypt_BodyString(
            json.dumps(self.form))
        self.submitData = {
            "lon": self.form['longitude'],
            "version": self.userInfo['signVersion'],
            "calVersion": self.userInfo['calVersion'],
            "deviceId": self.userInfo['deviceId'],
            "userId": self.userInfo['username'],
            "systemName": self.userInfo['systemName'],
            "bodyString": self.bodyString,
            "lat": self.form['latitude'],
            "systemVersion": self.userInfo['systemVersion'],
            "appVersion": self.userInfo['appVersion'],
            "model": self.userInfo['model'],
        }
        # Abstract signature over the payload, required by the server.
        self.submitData['sign'] = CpdailyTools.signAbstract(self.submitData)

    # Submit the filled-in sign-in form.
    def submitForm(self):
        LL.log(1, '提交签到信息')
        self.getSubmitExtension()
        headers = {
            'User-Agent': self.session.headers['User-Agent'],
            'CpdailyStandAlone': '0',
            'extension': '1',
            'Cpdaily-Extension': self.cpdailyExtension,
            'Content-Type': 'application/json; charset=utf-8',
            'Accept-Encoding': 'gzip',
            'Host': re.findall('//(.*?)/', self.host)[0],
            'Connection': 'Keep-Alive'
        }
        LL.log(1, '即将提交的信息', headers, self.submitData)
        res = self.session.post(f'{self.host}wec-counselor-sign-apps/stu/sign/submitSign', headers=headers,
                                data=json.dumps(self.submitData), verify=False)
        res = DT.resJsonEncode(res)
        LL.log(1, '提交后返回的信息', res['message'])
        return '[%s]%s' % (res['message'], self.taskInfo['taskName'])
| 46.57377 | 153 | 0.526223 |
acea47959c2a5636050390f0ab798993ee8d98fc | 1,415 | py | Python | mysite/polls/urls.py | jashburn8020/djangoproject-tutorial | f849c62a44ad3598a83201934c4e63d0f4ecc4af | [
"Apache-2.0"
] | null | null | null | mysite/polls/urls.py | jashburn8020/djangoproject-tutorial | f849c62a44ad3598a83201934c4e63d0f4ecc4af | [
"Apache-2.0"
] | null | null | null | mysite/polls/urls.py | jashburn8020/djangoproject-tutorial | f849c62a44ad3598a83201934c4e63d0f4ecc4af | [
"Apache-2.0"
] | null | null | null | """'include()'ed in mysite/urls.py"""
from django.urls import path
from . import views
# Namespace to differentiate URL names between apps in a Django project
# Templates refer to path names using app_name:path_name
# Namespace to differentiate URL names between apps in a Django project.
# Templates refer to path names using app_name:path_name.
app_name = "polls"
urlpatterns = [
    # path() takes four arguments, two required: route and view, and two
    # optional: kwargs and name
    # - route: a string that contains a URL pattern
    # - view: when Django finds a matching pattern, it calls the given view
    #   with an HttpRequest object as the first argument and any "captured"
    #   values from the route as keyword arguments
    # - kwargs: arbitrary keyword arguments passed in a dict to the view
    # - name: naming the URL lets you refer to it unambiguously elsewhere
    #   in Django, especially from within templates
    # path("", views.index, name="index"),
    # Generic list view replacing the function-based index view.
    path("", views.IndexView.as_view(), name="index"),
    # path("<int:question_id>/", views.detail, name="detail"),
    # Generic detail view - expects the primary key value captured from the
    # URL to be called "pk", so question_id was renamed to pk.
    path("<int:pk>/", views.DetailView.as_view(), name="detail"),  # Generic detail view
    path("<int:question_id>/results/", views.results, name="results"),
    path("<int:question_id>/vote/", views.vote, name="vote"),
]
| 47.166667 | 88 | 0.69894 |
acea48950032273dce8b9cc3f140ade350290237 | 4,798 | py | Python | rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py | Tao2301230/rasa_learn | 50093cbc696ee72fec81ab69d74a80399c6277ca | [
"Apache-2.0"
] | 1 | 2020-09-23T11:04:38.000Z | 2020-09-23T11:04:38.000Z | rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py | Tao2301230/rasa_learn | 50093cbc696ee72fec81ab69d74a80399c6277ca | [
"Apache-2.0"
] | null | null | null | rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py | Tao2301230/rasa_learn | 50093cbc696ee72fec81ab69d74a80399c6277ca | [
"Apache-2.0"
] | null | null | null | import numpy as np
import typing
from typing import Any, List, Text, Optional, Dict, Type, Tuple
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.components import Component
from rasa.nlu.featurizers.featurizer import DenseFeaturizer
from rasa.utils.features import Features
from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer
from rasa.nlu.utils.mitie_utils import MitieNLP
from rasa.nlu.training_data import Message, TrainingData
from rasa.nlu.constants import (
TEXT,
DENSE_FEATURIZABLE_ATTRIBUTES,
FEATURE_TYPE_SENTENCE,
FEATURE_TYPE_SEQUENCE,
FEATURIZER_CLASS_ALIAS,
TOKENS_NAMES,
)
from rasa.utils.tensorflow.constants import MEAN_POOLING, POOLING
if typing.TYPE_CHECKING:
import mitie
class MitieFeaturizer(DenseFeaturizer):
    """Dense featurizer backed by MITIE word feature vectors.

    For each dense-featurizable attribute of a message, it looks up a MITIE
    feature vector per token (sequence features) and pools them into a single
    sentence vector, attaching both as ``Features`` to the message.
    """

    @classmethod
    def required_components(cls) -> List[Type[Component]]:
        """Components that must run earlier in the pipeline."""
        return [MitieNLP, Tokenizer]

    defaults = {
        # Pooling operation used to collapse the per-token features into a
        # single sentence vector. Available options: 'mean' and 'max'.
        POOLING: MEAN_POOLING
    }

    def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None:
        super().__init__(component_config)
        self.pooling_operation = self.component_config["pooling"]

    @classmethod
    def required_packages(cls) -> List[Text]:
        """Python packages this component depends on."""
        return ["mitie", "numpy"]

    def ndim(self, feature_extractor: "mitie.total_word_feature_extractor") -> int:
        """Return the dimensionality of a single MITIE word vector."""
        return feature_extractor.num_dimensions

    def train(
        self,
        training_data: TrainingData,
        config: Optional[RasaNLUModelConfig] = None,
        **kwargs: Any,
    ) -> None:
        """Featurize every training example for all dense-featurizable attributes."""
        mitie_feature_extractor = self._mitie_feature_extractor(**kwargs)
        for example in training_data.training_examples:
            for attribute in DENSE_FEATURIZABLE_ATTRIBUTES:
                self.process_training_example(
                    example, attribute, mitie_feature_extractor
                )

    def process_training_example(
        self, example: Message, attribute: Text, mitie_feature_extractor: Any
    ) -> None:
        """Attach MITIE features for one attribute of a training example."""
        tokens = example.get(TOKENS_NAMES[attribute])
        # skip attributes that were not tokenized (tokens is None)
        if tokens is not None:
            sequence_features, sentence_features = self.features_for_tokens(
                tokens, mitie_feature_extractor
            )
            self._set_features(example, sequence_features, sentence_features, attribute)

    def process(self, message: Message, **kwargs: Any) -> None:
        """Featurize an incoming message at inference time."""
        mitie_feature_extractor = self._mitie_feature_extractor(**kwargs)
        for attribute in DENSE_FEATURIZABLE_ATTRIBUTES:
            tokens = message.get(TOKENS_NAMES[attribute])
            if tokens:
                sequence_features, sentence_features = self.features_for_tokens(
                    tokens, mitie_feature_extractor
                )
                self._set_features(
                    message, sequence_features, sentence_features, attribute
                )

    def _set_features(
        self,
        message: Message,
        sequence_features: np.ndarray,
        sentence_features: np.ndarray,
        attribute: Text,
    ) -> None:
        """Wrap the raw arrays in ``Features`` and add them to the message."""
        final_sequence_features = Features(
            sequence_features,
            FEATURE_TYPE_SEQUENCE,
            attribute,
            self.component_config[FEATURIZER_CLASS_ALIAS],
        )
        message.add_features(final_sequence_features)

        final_sentence_features = Features(
            sentence_features,
            FEATURE_TYPE_SENTENCE,
            attribute,
            self.component_config[FEATURIZER_CLASS_ALIAS],
        )
        message.add_features(final_sentence_features)

    def _mitie_feature_extractor(self, **kwargs: Any) -> Any:
        """Return the shared MITIE feature extractor provided by ``MitieNLP``.

        Raises:
            Exception: if no extractor was supplied, i.e. ``MitieNLP`` is
                missing from the pipeline configuration.
        """
        mitie_feature_extractor = kwargs.get("mitie_feature_extractor")
        if not mitie_feature_extractor:
            raise Exception(
                "Failed to train 'MitieFeaturizer'. "
                "Missing a proper MITIE feature extractor. "
                "Make sure this component is preceded by "
                "the 'MitieNLP' component in the pipeline "
                "configuration."
            )
        return mitie_feature_extractor

    def features_for_tokens(
        self,
        tokens: List[Token],
        feature_extractor: "mitie.total_word_feature_extractor",
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Compute per-token sequence features and a pooled sentence feature."""
        sequence_features = np.array(
            [feature_extractor.get_feature_vector(token.text) for token in tokens]
        )
        # Fixed typo: was `sentence_fetaures`.
        sentence_features = self._calculate_sentence_features(
            sequence_features, self.pooling_operation
        )
        return sequence_features, sentence_features
| 34.517986 | 88 | 0.666528 |
acea48a8caf82e190c0c0aa34a10350fcbc32535 | 2,519 | py | Python | HR App/airflow/dags/etl_employees_dataset.py | Rafaschreinert/portfolio_rafael | 008a4a607ac2bd726644cb5b5282a39847d90893 | [
"MIT"
] | null | null | null | HR App/airflow/dags/etl_employees_dataset.py | Rafaschreinert/portfolio_rafael | 008a4a607ac2bd726644cb5b5282a39847d90893 | [
"MIT"
] | null | null | null | HR App/airflow/dags/etl_employees_dataset.py | Rafaschreinert/portfolio_rafael | 008a4a607ac2bd726644cb5b5282a39847d90893 | [
"MIT"
] | null | null | null | from datetime import datetime,date, timedelta
import pandas as pd
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash import BashOperator
from airflow.models import Variable
from minio import Minio
# Default arguments applied to every task in the DAG.
DEFAULT_ARGS = {
    'owner': 'Airflow',
    'depends_on_past': False,
    'start_date': datetime(2021, 1, 13),
}

# One-shot ETL pipeline that consolidates the per-attribute employee
# parquet files from the data lake into a single dataset.
dag = DAG('etl_employees_dataset',
          default_args=DEFAULT_ARGS,
          schedule_interval="@once"
          )

# MinIO (data lake) connection settings stored as Airflow Variables.
data_lake_server = Variable.get("data_lake_server")
data_lake_login = Variable.get("data_lake_login")
data_lake_password = Variable.get("data_lake_password")

client = Minio(
    data_lake_server,
    access_key=data_lake_login,
    secret_key=data_lake_password,
    secure=False  # plain HTTP; presumably an internal network — confirm
)
def extract():
    """Download every object in the 'processing' bucket and stage them.

    Each parquet object is downloaded to a temp file and concatenated
    column-wise (axis=1) onto a single dataframe, which is then written
    to the staging area as /tmp/employees_dataset.csv.
    """
    # empty frame that will accumulate the columns
    df = pd.DataFrame(data=None)

    # list all objects in the data lake's processing bucket
    objects = client.list_objects('processing', recursive=True)

    # download each file and concatenate it onto the dataframe
    for obj in objects:
        print("Downloading file...")
        print(obj.bucket_name, obj.object_name.encode('utf-8'))

        client.fget_object(
            obj.bucket_name,
            obj.object_name.encode('utf-8'),
            "/tmp/temp_.parquet",
        )
        df_temp = pd.read_parquet("/tmp/temp_.parquet")
        # axis=1: files hold different attributes of the same employees
        df = pd.concat([df, df_temp], axis=1)

    # persist the combined file to the staging area
    df.to_csv("/tmp/employees_dataset.csv"
              , index=False
              )
def load():
    """Convert the staged CSV to parquet and upload it to the data lake."""
    # load the data from the staging area
    df_ = pd.read_csv("/tmp/employees_dataset.csv")

    # convert the data to parquet format
    df_.to_parquet(
        "/tmp/employees_dataset.parquet"
        , index=False
    )

    # upload the combined dataset back to the data lake
    client.fput_object(
        "processing",
        "employees_dataset.parquet",
        "/tmp/employees_dataset.parquet"
    )
# Task: pull every processed parquet file and stage a combined CSV.
extract_task = PythonOperator(
    task_id='extract_data_from_datalake',
    provide_context=True,
    python_callable=extract,
    dag=dag
)

# Task: convert the staged CSV to parquet and push it to the lake.
load_task = PythonOperator(
    task_id='load_file_to_data_lake',
    provide_context=True,
    python_callable=load,
    dag=dag
)

# Task: remove the temporary staging files from /tmp.
clean_task = BashOperator(
    task_id="clean_files_on_staging",
    bash_command="rm -f /tmp/*.csv;rm -f /tmp/*.json;rm -f /tmp/*.parquet;",
    dag=dag
)

# Pipeline order: extract -> load -> clean.
extract_task >> load_task >> clean_task
acea4992da86d6ca10fae06d0c3d48bd07b48e27 | 1,347 | py | Python | test/practice6_pystan_hmc_Qi_loop_test.py | LiwenxuanNJU/TVpgGLM | d07f81cf3a404474b640777a3ab01b0a79ad9187 | [
"MIT"
] | 1 | 2018-03-19T06:12:48.000Z | 2018-03-19T06:12:48.000Z | test/practice6_pystan_hmc_Qi_loop_test.py | LiwenxuanNJU/TVpgGLM | d07f81cf3a404474b640777a3ab01b0a79ad9187 | [
"MIT"
] | null | null | null | test/practice6_pystan_hmc_Qi_loop_test.py | LiwenxuanNJU/TVpgGLM | d07f81cf3a404474b640777a3ab01b0a79ad9187 | [
"MIT"
] | null | null | null | import pickle
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
from pyglm.utils.utils import expand_scalar, compute_optimal_rotation
# Problem size: N nodes embedded in `dim`-dimensional latent space.
dim = 2
N = 20

# Ground-truth layout: nodes placed on two concentric spiral arcs.
r = 1 + np.arange(N) // (N/2.)
th = np.linspace(0, 4 * np.pi, N, endpoint=False)
x = r * np.cos(th)
y = r * np.sin(th)
L = np.hstack((x[:, None], y[:, None]))  # (N, 2) true locations

# Random initial guess for the latent locations fed to the sampler.
L1 = np.random.randn(N, dim)

W = np.zeros((N, N))

# Distance matrix between true locations; closer nodes get larger
# weight variance via the squared-exponential kernel.
D = ((L[:, None, :] - L[None, :, :]) ** 2).sum(2)
sig = np.exp(-D/2)
Sig = np.tile(sig[:, :, None, None], (1, 1, 1, 1))
Mu = expand_scalar(0, (N, N, 1))

# Sample the weight matrix entry-by-entry from N(Mu[n,m], Sig[n,m]).
for n in range(N):
    for m in range(N):
        W[n, m] = npr.multivariate_normal(Mu[n, m], Sig[n, m])

# Hyperparameter initial values (bb, cc currently unused below).
aa = 1.0
bb = 1.0
cc = 1.0

# Load the pre-compiled Stan model (hard-coded local path).
sm = pickle.load(open('/Users/pillowlab/Dropbox/pyglm-master/Practices/model.pkl', 'rb'))

new_data = dict(N=N, W=W, B=dim)

# Repeatedly run short HMC chains, re-initialising each run from the
# posterior means of the previous one.
for i in range(100):
    fit = sm.sampling(data=new_data, iter=100, warmup=50, chains=1, init=[dict(l=L1, sigma=aa)],
                      control=dict(stepsize=0.001))
    samples = fit.extract(permuted=True)
    aa = np.mean(samples['sigma'])
    #aa = samples['sigma'][-1]
    #bb = np.mean(samples['eta'])
    #cc = np.mean(samples['rho'])
    L1 = np.mean(samples['l'], 0)
    #L1 = samples['l'][-1]
    # Align the recovered layout with the ground truth (rotation is
    # unidentifiable in the model, so solve for the optimal rotation).
    R = compute_optimal_rotation(L1, L)
    L1 = np.dot(L1, R)

# Plot recovered vs. true locations for visual comparison.
plt.scatter(L1[:,0],L1[:,1])
plt.scatter(L[:,0],L[:,1])
acea4a2c73a18f407e191b5254898e320c988946 | 19,182 | py | Python | polyxsim/help_input.py | jonwright/PolyXSim | 2a6a4826803a9aae52800c5f2794a4aa7addd321 | [
"BSD-3-Clause"
] | 6 | 2018-09-15T12:48:44.000Z | 2022-03-22T21:25:18.000Z | polyxsim/help_input.py | jonwright/PolyXSim | 2a6a4826803a9aae52800c5f2794a4aa7addd321 | [
"BSD-3-Clause"
] | 7 | 2017-11-07T16:43:00.000Z | 2022-03-24T19:16:37.000Z | polyxsim/help_input.py | jonwright/PolyXSim | 2a6a4826803a9aae52800c5f2794a4aa7addd321 | [
"BSD-3-Clause"
] | 6 | 2018-01-16T07:52:24.000Z | 2021-12-07T21:55:56.000Z | def show_input():
"""
This function returns information about the structure, keywords and syntax
of the PolyXSim input file as one string.
"""
return \
"""
The input to PolyXSim is given as an ascii file. The file can be given any name.
A detailed definition of the experimental setup and parameters can be found on the wiki http://fable.sourceforge.net.
1.0 INPUT SYNTAX
The input is given by a set of input keywords followed by one or several values dependent on the type of keyword specified.
The ordering of the keywords is irrelevant.
The syntax is as follows
* keyword ''values'' [units]
Followed by an input example:
keyword value # some explanation
if the value is a string in should be given in quotation marks 'value'
keyword 'value' # string values should be enclosed in quotation marks
if the value is a numeric number the format is free. [[br]]
E.g.
keyword 10
keyword 10.0
keyword 1e1
keyword 10**1
keyword 20/2.0
anything goes - or almost anything!
2.0 INPUT PARAMETERS
2.1 Instrumentation
2.1.1 Beam specs
* wavelength ''wavelength'' [AA]
* beam_width ''width-of-beam'' [mm]
* beamflux ''beam-flux'' [photons/sec/mm{{{^}}}2]
* beampol_factor ''polarisation-factor-of-beam'' [fraction]
* beampol_direct ''direction-of-polarisation-with-respect-to-rotation-axis'' [degrees]
wavelength 0.24344 # wavelength in Angstrom
beam_width 0.8 # Beam width (mm)
# If no beam width is specified it is assumed that the entire sample
# width is illuminated
beamflux 1e13 # Beam flux (Ph/s/mm2)
beampol_factor 1.0 # Beam polarisation factor: 1 = fully plane polarised, 0 = unpolarised
beampol_direct 0.0 # Direction of the normal to the plane of the primary beam polarisation
# with respect to the sample rotation axis (degrees) e.g. if the omega
# rotation axis is parallel to the laboratory z-axis the value is
# 0.0 degrees and if along y-axis it is 90.0 degrees
}}}
2.1.2 Detector specification
Beam center on detector -
* dety_center ''beam-center-in-y-direction'' [pixels]
* detz_center ''beam-center-in-y-direction'' [pixels]
E.g.
dety_center 1023.5 # beamcenter, y in pixel coordinates
detz_center 1023.5 # beamcenter, z in pixel coordinates
Detector pixel size -
* y_size ''pixel-size-y-direction'' [mm]
* z_size ''pixel-size-z-direction'' [mm]
E.g.
y_size 0.04677648 # Pixel size y (mm)
z_size 0.04808150 # Pixel size z (mm)
Detector size -
* dety_size ''detector-size-y-direction'' [pixels]
* detz_size ''detector-size-z-direction'' [pixels]
E.g.
dety_size 2048.0 # detector y size (pixels)
detz_size 2048.0 # detector z size (pixels)
Distance from sample to detector -
* distance ''distance-sample-to-detector'' [mm]
E.g.
distance 55.0 # sample-detector distance (mm)
Detector tilts -
* tilt_x ''tilt-of-detector-ccw-around-x-axis'' [radians]
* tilt_y ''tilt-of-detector-ccw-around-y-axis'' [radians]
* tilt_z ''tilt-of-detector-ccw-around-z-axis'' [radians]
The order of the tilts is Rx*Ry*Rz, thus the tilt (rotation)
about z is performed first, then about y and finally about x.
E.g.
tilt_x 0.0
tilt_y 0.01
tilt_z 0.0
OBS: If diffraction images are to be formed it is also possible to
simulate detector point spread, background and noise. See below.
Detector orientation -
Two things determine the detector orientation:
1. How it is mounted in the beam line setup
2. How the device server reads out the image.
To get the image in the standard FABLE geometry a orientation matrix o
can be specifed:
o = [o11 o21]
[o21 o22]
(see this document Geometry_version_1.0.7.pdf on the wiki)
There are eight possible '''o''' matrices for the eight possible orientations.
* o11 ''element-in-orientation-matrix'' [-1,0,1]
* o12 ''element-in-orientation-matrix'' [-1,0,1]
* o21 ''element-in-orientation-matrix'' [-1,0,1]
* o22 ''element-in-orientation-matrix'' [-1,0,1]
E.g.
o11 1 # Frelon2k, Frelon4m detector orientation
o12 0 #
o21 0 #
o22 -1 #
OBS: In principle the choice of matrix does not matter -
only if you want to get the images in the same orientation
as what you get on a specific setup.
Remember the values given here has to be the same if
analysing the data in ImageD11.
Background -
A constant background can be added to the diffraction images using:
* bg ''number_of_counts''
E.g.
bg 100 # Add 100 counts to background
Noise -
Random Poisson noise can be added to the diffraction images using:
* noise ''flag'' [0= no noise, 1= add Poisson noise]
E.g.
noise 1 # Add Poisson noise
Detector point spread -
A Gaussian detector point spread can be simulated in the diffraction
images by specifying the FWHM of the point spread in pixels:
* psf ''fwhm'' [in pixels]
E.g.
psf 2 # Add Gaussian detector psf with a FWHM of 2 pixels
Spatial distortion -
Spatial distortion of the detector can be taken into account
by specifying the corresponding spline. The spots will not be
deformed due to the operation, but merely the center position
of the peak is distorted.
* spatial '''spline file'''
E.g.
spatial 'frelon4m.spline' # Add spatial distortion
2.1.3 Omega scan range, step size and speed
Omega scan range -
* omega_start ''start-omega-value-of-scan'' [degrees]
* omega_end ''end-omega-value-of-scan'' [degrees]
E.g.
omega_start -45.0 # Minimum Omega in range of interest (in deg)
omega_end 45.0 # Maximum Omega in range of interest (in deg)
Omega step size -
* omega_step ''omega_step_size_of_each_frame'' [degrees]
E.g.
omega_step 1.0 # Omega step size (in deg)
Omega rotation direction -
* omega_sign ''omega_rotation_direction'' [+1/-1]
E.g.
omega_sign 1 # Sign of omega rotation (cw = +1, ccw = -1)
OBS: The above omega specifications will create
(omega_end - omega_start)/omega_step images, and the first image
will be centered at omega_start + 0.5*omega_step. In the file header
Omega will be set to the midpoint of the rotation interval for the frame.
Wedge angle of omega axis -
The angle between the omega rotation axis and the z-laboratory axis in the
plane of x and z, i.e. if 0 the rotation axis is perfectly aligned with the
z-axis. Hence the rotation of the rotation axis about the y-axis
(left-handed).
* wedge ''wedge_angle'' [degrees]
E.g.
wedge 0.023 # wedge of omega rotation axis (in deg)
2.2 CRYSTAL/GRAIN PARAMETERS
Grain/crystal number in sample to be simulated.
* no_grains ''number-of-simulated-grains''
The number is the total of all grains summed over all phases to be
simulated. This number needs to match the number of e.g.
U_grains_X keywords
E.g.
no_grains 10
Grain phase -
The phase of the individual grains can be specified or appointed by
PolyXSim. If you want to let the PolyXSim appoint which grain belongs
to which phase the following keyword can be used.
* gen_phase ''flag'' [0= do not, or 1= do] ''phase-id no-of-grains-of-this-id'' .... ''phase-id no-of-grains-of-this-id''
So if choosing to randomly appoint the phase a list of phase id's each
succeded by the number of grains of the present phase.
E.g. (example with three phases with id'd 0,1 and 2)
gen_phase 1 0 10 1 20 2 5
10 grains with phase 0,
20 grains having phase 1, and
5 grains with phase 2
Naturally the number of grains should match the number of grains specified
with keyword 'no_grains'
Alternatively the phase id of each phase can be specified
* phase_grains_X ''phase-id-of-grain''
where X is the grain id number.
Grain orientations -
Can be either randomly generated, or specific orientation matrices
can be input by the user.
* gen_U ''flag'' [0= do not, or 1= do]
If flag = 1 then 'no_grains' random orientations will be generated
E.g.
gen_U 1 # Generate orientations
If gen_U is set to 0 the orientations have to be provided by the user
* U_grains_''X'' ''U11 U12 U13 U21 U22 U23 U31 U32 U33
X needs to be an integer - it's used to make certain that a grain orientation
is correctly matched with its position, size etc.
E.g.
U_grains_0 -0.888246 0.411253 -0.204671 -0.201101 -0.748709 -0.631659 -0.413011 -0.519909 0.747741
U_grains_1 -0.158282 -0.986955 0.029458 -0.929214 0.158978 0.333597 -0.333929 0.025430 -0.942255
..........
Grain positions -
Can be either randomly generated or specific positions can be input by
the user.
* gen_pos value1 [0= do not, or 1= do] value2 [0= all at (0,0,0), 1= generate randomly within box or cylinder]
If ''value1''=1 and ''value2''=1, ''no_grains'' random positions will be generated
E.g.
gen_pos 1 1 # Generate random positions within box or cylinder
OBS: The function of gen_pos is dependent on other keywords (or lack
of keywords). For generation of a position different from (0,0,0)
one of the keywords sample_cyl or sample_xyz should be given in
order to define the borders of the sample area.
* pos_grains_X ''x y z'' [mm]
X needs to be an integer - it's used to make certain that a grain position
is correctly matched with its orientation, size etc.
E.g.
pos_grains_0 0 0 0
pos_grains_1 0.01 -0.05 0.2
.......
Sample shape and dimensions -
The sample can be specified to have either cylindrical or box shape:
* sample_cyl ''diameter height'' (dimensions given in mm)
or
* sample_xyz x_dimension y_dimension z_dimension (all in mm)
OBS: Only one of sample_cyl and sample_xyz can be given.
If no beam_width is given it is assumed that the entire
width of the sample is illuminated.
E.g.
sample_cyl 0.8 0.1 # Cylindrical sample shape
or
sample_xyz 0.5 0.5 0.5 # Box shaped sample
Grain strains -
Can be either randomly generated, or specific strains can be input
by the user. Note that the strain tensor is given in the Cartesian
grain coordinate system, which for each grain is related to the
overall sample system via the grain specific orienation matrix U.
* gen_eps flag [0= do not, or 1= do] mean-value-for-diagonal-elements-of-strain-tensor
spread-for-diagonal-elements-of-strain-tensor
mean-value-for-offdiagonal-elements-of-strain-tensor
spread-for-offdiagonal-elements-of-strain-tensor
If flag = 1 then no_grains strain tensors with elements from a normal distribution with the specified mean and spread will be generated
E.g.
gen_eps 1 0 0.001 0 0 # Generate random diagonal strain tensors
OBS: if a multiphase material is simulated using the above keyword the
strain for all grains independent of phase will be generated with the
same distribution. It is also possible to have different distributions
for every phase.
* gen_eps_phase_Y has the same entries as gen_eps given above
Y being the phase number id
E.g.
gen_eps_phase_0 1 0 0.001 0 0 # Generate random diagonal strain tensors
gen_eps_phase_1 1 0 0.02 0 0.01
The strain tensors can also been specifically input for every grain
* eps_grains_X eps11 eps12 eps13 eps22 eps23 eps33
X needs to be integer - its used to make certain that the strain tensor
of the grain is correctly matched with its position, size etc.
E.g.
eps_grains_0 0.001 0.0015 -0.005 0 0 0
eps_grains_1 0.001 -0.005 0.002 0.006 -0.005 -0.001
.......
Grain sizes -
Again these can either be user supplied or generated by PolyXSim.
The grain sizes will be simulated having a log-normal distribution
with a specified median grain size and optionally the distribution
tails can be cut off. If only one phase is to be simulated or one
wishes to use the same grain distribution for all structural phases,
the following keyword can be used to specify the distribution
* gen_size ''flag'' [0= do not, or 1= do] ''median-grain-size-of-distribution'' [mm] ''minimum-grain-size'' [mm] ''maximum-grain-size'' [mm]
OBS: if value ''median-grain-size-of-distribution'' is negative
the grain size of all grains will be the value of the median.
E.g.
gen_size 1 0.05 0.01 0.25
Different grain size distributions can be used for the different phase
(if more than one are present). This is specified as follows
* gen_size_phase_Y ''flag'' [0= do not, or 1= do] ''median-grain-size-of-distribution'' [mm] ''minimum-grain-size'' [mm] ''maximum-grain-size'' [mm]
where Y again is the phase number id. [[br]]
E.g.
So the input can look like this for two or more phases,
gen_size_phase_0 1 0.05 0.010 0.25
gen_size_phase_1 1 0.02 0.005 0.05
.....
Or the grain size of each grain can be specified
* size_grains_''X'' ''grain-diameter'' [mm]
E.g.
size_grains_0 0.04
size_grains_1 0.06
......
Structural parameters -
It is possible to simulate both mono- and multiphase polycrystalline
samples. If there is no interest in the actual peak intensities - only
the unit cell and space group have to be specified.
* unit_cell_phase_Y a [AA] b [AA] c [AA] alpha [deg] beta [deg] gamma [deg]
and
* sgno_phase_Y ''number-of-space-group'', Y being the phase number id
or
* sgname_phase_Y ''name-of-space-group'', Y being the phase number id
Y being the phase number id
Presently only the standard space groups can be used, i.e. P 21/n is
for example not a possibility.
E.g.
unit_cell_phase_0 8.531200 4.832100 10.125000 90.000000 92.031000 90.000000
sgno_phase_0 4 # space group number
or
sgname_phase_0 'P21' # remember to put quotation marks around the string
OBS: if more phases the next set will then have the keywords
unit_cell_phase_1, sgno_phase_1 etc.
OBS2: If monophase materials are simulated the old keyword (i.e.
without _phase_Y ) can still be used.
If 'real' intensities are to be calculated the structural parameters can
either be supplied as a [http://www.iucr.org/iucr-top/cif/index.html cif]
file or a [http://www.wwpdb.org/docs.html pdb] file.
* structure_phase_Y '''structure-file-name'''
The file can either be a .pdb or a .cif file
Y being the phase number id
E.g.
structure_phase_0 'glycine.cif'
Again if a monophase is simulated the old keyword - structure_file - can
be used instead.
If a structure file has information about unit cell and/or space group,
these parameter will be chosen over parameters introduced with the keywords
unit_cell and/or sgno.
File names and formats -
Directory to save output from PolyXSim -
* direc '''directory-name'''
If the specified directory does not exist it will be created.
E.g.
direc 'simulation_glycine'
Name stem -
The base of all output files
* stem '''name-stem'''
i.e. image files will get the names name-stem_frame0001.edf etc.
E.g.
stem 'glycine'
File formats to output -
There is a number of files which will be made by default. Whether one
likes it or not. But the simulated reflections can be output in
different file formats if requested. What can be chosen
1. '.edf' or '.tif' or '.edf.gz' - presently the supported diffraction image formats
2. '.flt' - a peak file definitions on wiki:"imaged11 - file formats" format
3. '.gve' - defined on the wiki:"imaged11 - file formats" g-vector file.
4. '.ubi' - grain orientations as inv(U*B)
5. '.par' - the input parameters for PolyXSim written in the par format
of ImageD11
The output files are specified as one after the other in the following manner
* output '''format1' 'format2' ......''
E.g.
output '.edf' '.flt' '.gve' '.ubi' '.par'
Peak intensities -
The total peak intensity can be either be a
1. constant value, or
2. based on the '''structure factor''' of the reflections
The following keyword is used to control this choice
* intensity_const ''constant_intensity_value'' [counts]
if the value is zero (0) the structure factor squared are used to
calculate the intensities. Otherwise the value given will be the
total intensity.
In the calculation of the intensity the effects of
1. the Lorentz factor and/or
2. the beam polarisation factor
can also be taken into account using
* lorentz_apply ''flag'' [0=do not,1=do]
* beampol_apply ''flag'' [0=do not,1=do]
Peak shapes -
Currently only three different peak shapes/profiles are available
* peakshape ''type''
Depending on the chosen type more parameters can be added after ''type''.
Type can be
* 0 - spike peaks
* 1 - Gaussian peak shape (isotropic), and Gaussian
rocking curve (spread along omega)
* 2 - peak shape calculated from a grain orientation distribution function
Below these three types are documented in more detail.
0. Spike peak - A square 2-by-2 spike peak
* peakshape 0
E.g.
peakshape 0
1. Gaussian peaks
Spot with one FWHM in the y and z detector directions (in pixels)
and another in omega (degrees)
* peakshape 1 ''spot-full-width-half-maximum''[pixels] ''spot-rocking-curve'' [degrees]
E.g.
peakshape 1 2 0.2
2. Orientation spread dependent peaks
Use the 'real' orientation spread of the crystal (mosaicity).
OBS: This will not smear intensity in the 2theta direction.
NB: Only one common ODF can presently be used for all grains
irrespective of phase.
E.g.
peakshape 2 # Make peak spread from orientation distribution function
There are two possibilities of providing/defining an orientation
distribution function (ODF)
* odf_type ''type-code'' [1,2, or 3]
1. The simplest one is to give the ODF as an isotropic Gaussian mosaic spread
* odf_type 1
the mosaicity is then given by the keyword:
* mosaicity ''mosaic-spread'' [degrees]
Optionally a scale of the ODF grid can be given - by default it is given
a value of half the angular size of a pixel having the smallest angular
dimension.
* odf_scale ''grid_voxel_size'' [degrees]
E.g.
odf_type 1
mosaicity 0.2 # The mosaic spread in degrees
If no odf_type or mosaicity the values above will be used by default.
2. The other is to give the ODF as voxelated grid defined in Rodrigues space.
The ODF has to read from a file [wiki:"PolyXSim - odf_file format" format]
* odf_type 2
* odf_file '''odf-data-file''' [file format]
E.g.
odf_type 2
odf_file 'my_odf.odf'
"""
| 34.562162 | 150 | 0.68361 |
acea4ab2521c71692be0360c82f9e8ed58c20517 | 4,625 | py | Python | stdplugins/got_thoughts.py | aashiq075/PepeBot | 5f40f4316c84ec3875bcbcd476e10448f9214f31 | [
"Apache-2.0"
] | null | null | null | stdplugins/got_thoughts.py | aashiq075/PepeBot | 5f40f4316c84ec3875bcbcd476e10448f9214f31 | [
"Apache-2.0"
] | null | null | null | stdplugins/got_thoughts.py | aashiq075/PepeBot | 5f40f4316c84ec3875bcbcd476e10448f9214f31 | [
"Apache-2.0"
] | null | null | null | # BY @Deonnn // Thanks from cHAuHaN
"""
Game of Thrones Thoughts plugin
Command .gott
"""
from telethon import events
import asyncio
import random
@borg.on(events.NewMessage(pattern=r"\.gott", outgoing=True))
async def _(event):
    """Handle the ``.gott`` command: replace the message with a random
    Game of Thrones quote.

    The triggering message is first edited to a typing placeholder and,
    after a two second pause, edited again to the chosen quote.
    Forwarded messages are ignored.
    """
    if event.fwd_from:
        return
    await event.edit("`Typing...`")
    await asyncio.sleep(2)
    # All quotes in one tuple so random.choice gives each an equal chance.
    # The original if-chain drew x = random.randrange(1, 40), which never
    # yields 40, so the final quote was unreachable — fixed here.
    quotes = (
        "`\"The man who passes the sentence should swing the sword.\"`",
        "`\"When the snows fall and the white winds blow, the lone wolf dies but the pack survives!\"`",
        "`\"The things I do for love!\"`",
        "`\"I have a tender spot in my heart for cripples, bastards and broken things.\"`",
        "`\"Death is so terribly final, while life is full of possibilities.\"`",
        "`\"Once you’ve accepted your flaws, no one can use them against you.\"`",
        "`\"If I look back I am lost.\"`",
        "`\"When you play the game of thrones, you win or you die.\"`",
        "`\"I grew up with soldiers. I learned how to die a long time ago.\"`",
        "`\"What do we say to the Lord of Death?\nNot Today!\"`",
        "`\"Every flight begins with a fall.\"`",
        "`\"Different roads sometimes lead to the same castle.\"`",
        "`\"Never forget what you are. The rest of the world will not. Wear it like armour, and it can never be used to hurt you.\"`",
        "`\"The day will come when you think you are safe and happy, and your joy will turn to ashes in your mouth.\"`",
        "`\"The night is dark and full of terrors.\"`",
        "`\"You know nothing, Jon Snow.\"`",
        "`\"Night gathers, and now my watch begins!\"`",
        "`\"A Lannister always pays his debts.\"`",
        "`\"Burn them all!\"`",
        "`\"What do we say to the God of death?\"`",
        "`\"There's no cure for being a c*nt.\"`",
        "`\"Winter is coming!\"`",
        "`\"That's what I do: I drink and I know things.\"`",
        "`\"I am the dragon's daughter, and I swear to you that those who would harm you will die screaming.\"`",
        "`\"A lion does not concern himself with the opinion of sheep.\"`",
        "`\"Chaos isn't a pit. Chaos is a ladder.\"`",
        "`\"I understand that if any more words come pouring out your c*nt mouth, I'm gonna have to eat every f*cking chicken in this room.\"`",
        "`\"If you think this has a happy ending, you haven't been paying attention.\"`",
        "`\"If you ever call me sister again, I'll have you strangled in your sleep.\"`",
        "`\"A girl is Arya Stark of Winterfell. And I'm going home.\"`",
        "`\"Any man who must say 'I am the King' is no true King.\"`",
        "`\"If I fall, don't bring me back.\"`",
        "`\"Lannister, Targaryen, Baratheon, Stark, Tyrell... they're all just spokes on a wheel. This one's on top, then that one's on top, and on and on it spins, crushing those on the ground.\"`",
        "`\"Hold the door!`",  # original literal lacks the closing quote mark
        "`\"When people ask you what happened here, tell them the North remembers. Tell them winter came for House Frey.\"`",
        "`\"Nothing f*cks you harder than time.\"`",
        "`\"There is only one war that matters. The Great War. And it is here.\"`",
        "`\"Power is power!\"`",
        "`\"I demand a trial by combat!\"`",
        "`\"I wish I was the monster you think I am!\"`",
    )
    await event.edit(random.choice(quotes))
| 47.193878 | 216 | 0.59027 |
acea4aec9e5a7f56ea2f8bb7f67fe7f9ee806cb4 | 8,735 | py | Python | tools/upgrade-generator.py | cryptoadvance/specter-bootloader | 6fa6157906dcb0018781432daac4a22e1bba86d7 | [
"MIT"
] | 4 | 2020-10-02T21:24:56.000Z | 2021-06-24T13:32:41.000Z | tools/upgrade-generator.py | cryptoadvance/specter-bootloader | 6fa6157906dcb0018781432daac4a22e1bba86d7 | [
"MIT"
] | 4 | 2020-10-19T20:53:32.000Z | 2021-01-24T17:19:07.000Z | tools/upgrade-generator.py | cryptoadvance/specter-bootloader | 6fa6157906dcb0018781432daac4a22e1bba86d7 | [
"MIT"
] | 3 | 2020-09-29T12:14:17.000Z | 2021-05-30T14:08:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Upgrade file generator"""
import getpass
from intelhex import IntelHex
import click
import core.signature as sig
from core.blsection import *
__author__ = "Mike Tolkachev <contact@miketolkachev.dev>"
__copyright__ = "Copyright 2020 Crypto Advance GmbH. All rights reserved"
__version__ = "1.0.0"
@click.group()
@click.version_option(__version__, message="%(version)s")
# @click.command(no_args_is_help=True)
def cli():
    """Upgrade file generator.

    Root Click command group; the subcommands (gen, sign, dump, message,
    import-sig) are registered below via ``@cli.command``.
    """
@cli.command(
    'gen',
    short_help='generate a new upgrade file'
)
@click.option(
    '-b', '--bootloader', 'bootloader_hex',
    type=click.File('r'),
    help='Intel HEX file containing the Bootloader.',
    metavar='<file.hex>'
)
@click.option(
    '-f', '--firmware', 'firmware_hex',
    type=click.File('r'),
    help='Intel HEX file containing the Main Firmware.',
    metavar='<file.hex>'
)
@click.option(
    '-k', '--private-key', 'key_pem',
    type=click.File('rb'),
    help='Private key in PEM container.',
    metavar='<file.pem>'
)
@click.option(
    '-p', '--platform',
    type=str,
    help='Platform identifier, i.e. stm32f469disco.',
    metavar='<platform>'
)
@click.argument(
    'upgrade_file',
    required=True,
    type=click.File('wb'),
    metavar='<upgrade_file.bin>'
)
def generate(upgrade_file, bootloader_hex, firmware_hex, platform, key_pem):
    """This command generates an upgrade file from given firmware files
    in Intel HEX format. It is required to specify at least one firmware
    file: Firmware or Bootloader.
    In addition, if a private key is provided it is used to sign produced
    upgrade file. Private key should be in PEM container with or without
    encryption.
    """
    # Load private key if needed (may prompt for a passphrase on stdin)
    seckey = None
    if key_pem:
        seckey = load_seckey(key_pem)
    # Create payload sections from HEX files; the 'boot' section, when
    # present, is placed before the 'main' firmware section.
    sections = []
    if bootloader_hex:
        sections.append(create_payload_section(
            bootloader_hex, 'boot', platform))
    if firmware_hex:
        sections.append(create_payload_section(
            firmware_hex, 'main', platform))
    if not len(sections):
        raise click.ClickException("No input file specified")
    # Sign firmware if requested
    if seckey:
        do_sign(sections, seckey)
    # Write upgrade file to disk
    write_sections(upgrade_file, sections)
@ cli.command(
    'sign',
    short_help='sign an existing upgrade file'
)
@ click.option(
    '-k', '--private-key', 'key_pem',
    required=True,
    type=click.File('rb'),
    help='Private key in PEM container used to sign produced upgrade file.',
    metavar='<filename.pem>'
)
@ click.argument(
    'upgrade_file',
    required=True,
    type=click.File('rb+'),
    metavar='<upgrade_file.bin>'
)
def sign(upgrade_file, key_pem):
    """This command adds a signature to an existing upgrade file. Private key
    should be provided in PEM container with or without encryption.
    The signature is checked for duplication, and any duplicating signatures
    are removed automatically.
    """
    # Load sections from firmware file
    sections = load_sections(upgrade_file)
    # Load private key (may prompt for a passphrase) and sign firmware
    seckey = load_seckey(key_pem)
    do_sign(sections, seckey)
    # Write new upgrade file to disk: the file was opened 'rb+', so it is
    # truncated and rewritten in place with the updated signature section.
    upgrade_file.truncate(0)
    upgrade_file.seek(0)
    write_sections(upgrade_file, sections)
@ cli.command(
    'dump',
    short_help='dump sections and signatures from upgrade file'
)
@ click.argument(
    'upgrade_file',
    required=True,
    type=click.File('rb'),
    metavar='<upgrade_file.bin>'
)
def dump(upgrade_file):
    """ This command dumps information regarding firmware sections and lists
    signatures with public key fingerprints.
    """
    sections = load_sections(upgrade_file)
    for sect in sections:
        print(f'SECTION "{sect.name}"')
        print(f' attributes: {sect.attributes_str}')
        if sect.version_str:
            print(f' version: {sect.version_str}')
        if isinstance(sect, SignatureSection):
            # One "fingerprint: signature" line per entry, both hex-encoded.
            # (Original code shadowed the loop variable `s` inside this
            # comprehension and kept an unused `version_str` local.)
            sigs = [f"{fp.hex()}: {sig_bytes.hex()}"
                    for fp, sig_bytes in sect.signatures.items()]
            print(" signatures:\n " + "\n ".join(sigs))
@ cli.command(
    'message',
    short_help='outputs a hash message to be signed externally'
)
@ click.argument(
    'upgrade_file',
    required=True,
    type=click.File('rb'),
    metavar='<upgrade_file.bin>'
)
def message(upgrade_file):
    """ This command outputs a message in Bech32 format containing payload
    version(s) and hash to be signed using external tools.
    """
    sections = load_sections(upgrade_file)
    # Only payload sections contribute to the signed message; the trailing
    # signature section (if any) is excluded.
    pl_sections = [s for s in sections if isinstance(s, PayloadSection)]
    message = make_signature_message(pl_sections)
    # The message is ASCII bytes (Bech32), so decode it for printing.
    print(message.decode('ascii'))
@ cli.command(
    'import-sig',
    short_help='imports a signature into upgrade file'
)
@ click.option(
    '-s', '--signature', 'b64_signature',
    required=True,
    help='Bitcoin message signature in Base64 format.',
    metavar='<signature_base64>'
)
@ click.argument(
    'upgrade_file',
    required=True,
    type=click.File('rb+'),
    metavar='<upgrade_file.bin>'
)
def import_sig(upgrade_file, b64_signature):
    """ This command imports an externally made signature into an upgrade file.
    The signature is expected to be a standard Bitcoin message signature in
    Base64 format.
    """
    sections = load_sections(upgrade_file)
    pl_sections, _ = parse_sections(sections)
    sig_message = make_signature_message(pl_sections)
    # parse_recoverable_sig needs the signed message to recover the public
    # key from the recoverable signature.
    signature, pubkey = parse_recoverable_sig(b64_signature, sig_message)
    add_signature(sections, signature, pubkey)
    # Write new upgrade file to disk (file opened 'rb+': truncate, rewind,
    # then rewrite all sections in place)
    upgrade_file.truncate(0)
    upgrade_file.seek(0)
    write_sections(upgrade_file, sections)
def create_payload_section(hex_file, section_name, platform):
    """Build a PayloadSection from an Intel HEX firmware file.

    :param hex_file: open text file object with Intel HEX content
    :param section_name: payload section name, e.g. 'boot' or 'main'
    :param platform: optional platform identifier stored as an attribute
    :raises click.ClickException: if the payload is oversized or the binary
        image does not match the expected address span
    :return: a new PayloadSection with base address and optional platform
        and entry point attributes
    """
    ih = IntelHex(hex_file)
    attr = {'bl_attr_base_addr': ih.minaddr()}
    if platform:
        attr['bl_attr_platform'] = platform
    # IntelHex.start_addr is None when the HEX file carries no start-address
    # record; guard against it to avoid an AttributeError.
    start_addr = ih.start_addr or {}
    entry = start_addr.get('EIP', start_addr.get('IP', None))
    if isinstance(entry, int):
        attr['bl_attr_entry_point'] = entry
    # Check the size limit before materializing the binary image.
    exp_len = ih.maxaddr() - ih.minaddr() + 1
    if exp_len > MAX_PAYLOAD_SIZE:
        raise click.ClickException(f"Error while parsing '{hex_file.name}'")
    pl_bytes = ih.tobinstr()
    # Sanity check: the produced image must cover the full address span.
    if len(pl_bytes) != exp_len:
        raise click.ClickException(f"Error while parsing '{hex_file.name}'")
    return PayloadSection(name=section_name, payload=pl_bytes, attributes=attr)
def load_seckey(key_pem):
    """Load a private key from an open PEM file, prompting for a passphrase
    when the container is encrypted.

    :param key_pem: binary file object containing the PEM data
    :raises click.ClickException: if the entered passphrase is wrong
    :return: the decoded private key
    """
    data = key_pem.read()
    if(sig.is_pem_encrypted(data)):
        password = getpass.getpass("Passphrase:").encode('ascii')
        try:
            seckey = sig.seckey_from_pem(data, password)
        # NOTE(review): InvalidPassword is not imported explicitly in this
        # file — presumably it comes in via `from core.blsection import *`;
        # TODO confirm, otherwise this handler raises NameError instead.
        except InvalidPassword:
            raise click.ClickException("Passphrase invalid")
        return seckey
    return sig.seckey_from_pem(data)
def write_sections(upgrade_file, sections):
    """Serialize every section, in order, into the open upgrade file."""
    payload = b''.join(section.serialize() for section in sections)
    upgrade_file.write(payload)
def load_sections(upgrade_file):
    """Read the whole upgrade file and deserialize every section in it."""
    blob = upgrade_file.read()
    sections = []
    pos = 0
    total = len(blob)
    while pos < total:
        section, pos = Section.deserialize(blob, pos)
        sections.append(section)
    return sections
def parse_sections(sections):
    """Split a section list into (payload_sections, signature_section).

    Appends an empty SignatureSection when the file does not already end
    with one, and validates that every preceding section is a
    PayloadSection.
    """
    if not sections:
        raise click.ClickException("Upgrade file is empty")
    if not isinstance(sections[-1], SignatureSection):
        sections.append(SignatureSection())
    pl_sections, sig_section = sections[:-1], sections[-1]
    if any(not isinstance(s, PayloadSection) for s in pl_sections):
        raise click.ClickException(
            "Unexpected section within payload sections")
    if not isinstance(sig_section, SignatureSection):
        raise click.ClickException(
            "Last section must be the Signature Section")
    return (pl_sections, sig_section)
def add_signature(sections, signature, pubkey):
    """Record *signature*, keyed by public key fingerprint, in the trailing
    Signature section; duplicates are rejected.
    """
    _, sig_section = parse_sections(sections)
    fingerprint = pubkey_fingerprint(pubkey)
    if fingerprint in sig_section.signatures:
        raise click.ClickException(
            "Upgrade file is already signed using this key")
    sig_section.signatures[fingerprint] = signature
def do_sign(sections, seckey):
    """Sign the payload sections with *seckey* and store the signature in
    the Signature section.
    """
    payloads, _ = parse_sections(sections)
    digest_msg = make_signature_message(payloads)
    new_signature = sig.sign(digest_msg, seckey)
    add_signature(sections, new_signature, pubkey_from_seckey(seckey))
if __name__ == '__main__':
    # Dispatch to the Click command group when run as a script.
    cli()
| 29.914384 | 79 | 0.679222 |
acea4b8190a1eab86b436eb8fa9e6a76f1fb5798 | 30 | py | Python | hello.py | BobbyJacobs/cs3240-demo | 53b6b1f4b7f58b1a7d748f67e220bd4da147df0e | [
"MIT"
] | null | null | null | hello.py | BobbyJacobs/cs3240-demo | 53b6b1f4b7f58b1a7d748f67e220bd4da147df0e | [
"MIT"
] | null | null | null | hello.py | BobbyJacobs/cs3240-demo | 53b6b1f4b7f58b1a7d748f67e220bd4da147df0e | [
"MIT"
] | null | null | null | def main():
print("Hello!")
| 10 | 17 | 0.566667 |
acea4c531d1a860fea87dadb4a23a248ef07d022 | 4,114 | py | Python | airflow/providers/google/marketing_platform/operators/analytics.py | Rcharriol/airflow | 9575706509aa72ea84e3c19ff2e738a9303c3c2f | [
"Apache-2.0"
] | null | null | null | airflow/providers/google/marketing_platform/operators/analytics.py | Rcharriol/airflow | 9575706509aa72ea84e3c19ff2e738a9303c3c2f | [
"Apache-2.0"
] | null | null | null | airflow/providers/google/marketing_platform/operators/analytics.py | Rcharriol/airflow | 9575706509aa72ea84e3c19ff2e738a9303c3c2f | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Analytics 360 operators.
"""
from airflow.models import BaseOperator
from airflow.providers.google.marketing_platform.hooks.analytics import GoogleAnalyticsHook
from airflow.utils.decorators import apply_defaults
class GoogleAnalyticsListAccountsOperator(BaseOperator):
    """
    Lists all accounts to which the user has access.
    .. seealso::
        Check official API docs:
        https://developers.google.com/analytics/devguides/config/mgmt/v3/mgmtReference/management/accounts/list
        and for python client
        http://googleapis.github.io/google-api-python-client/docs/dyn/analytics_v3.management.accounts.html#list
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleAnalyticsListAccountsOperator`
    :param api_version: The version of the api that will be requested for example 'v3'.
    :type api_version: str
    :param gcp_connection_id: The connection ID to use when fetching connection info.
    :type gcp_connection_id: str
    """

    # Both fields can be templated in the DAG definition.
    template_fields = (
        "api_version",
        "gcp_connection_id",
    )

    @apply_defaults
    def __init__(
        self,
        api_version: str = "v3",
        gcp_connection_id: str = "google_cloud_default",
        *args,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.api_version = api_version
        self.gcp_connection_id = gcp_connection_id

    def execute(self, context):
        # Delegate the actual API call to the Google Analytics hook.
        hook = GoogleAnalyticsHook(
            api_version=self.api_version, gcp_connection_id=self.gcp_connection_id
        )
        result = hook.list_accounts()
        return result
class GoogleAnalyticsRetrieveAdsLinksListOperator(BaseOperator):
    """
    Lists webProperty-Google Ads links for a given web property
    .. seealso::
        Check official API docs:
        https://developers.google.com/analytics/devguides/config/mgmt/v3/mgmtReference/management/webPropertyAdWordsLinks/list#http-request
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleAnalyticsListAccountsOperator`
    :param account_id: ID of the account which the given web property belongs to.
    :type account_id: str
    :param web_property_id: Web property UA-string to retrieve the Google Ads links for.
    :type web_property_id: str
    :param api_version: The version of the api that will be requested for example 'v3'.
    :type api_version: str
    :param gcp_connection_id: The connection ID to use when fetching connection info.
    :type gcp_connection_id: str
    """

    # All four fields can be templated in the DAG definition.
    template_fields = (
        "api_version",
        "gcp_connection_id",
        "account_id",
        "web_property_id",
    )

    @apply_defaults
    def __init__(
        self,
        account_id: str,
        web_property_id: str,
        api_version: str = "v3",
        gcp_connection_id: str = "google_cloud_default",
        *args,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.account_id = account_id
        self.web_property_id = web_property_id
        self.api_version = api_version
        self.gcp_connection_id = gcp_connection_id

    def execute(self, context):
        # Delegate the actual API call to the Google Analytics hook.
        hook = GoogleAnalyticsHook(
            api_version=self.api_version, gcp_connection_id=self.gcp_connection_id
        )
        result = hook.list_ad_words_links(
            account_id=self.account_id, web_property_id=self.web_property_id,
        )
        return result
| 33.447154 | 139 | 0.695673 |
acea4c9b5f1d71fa052527dcf3241e9c291af51a | 1,113 | py | Python | backend_django/type/migrations/0001_initial.py | mehranagh20/Typonent | 702ae3e018dcffb952c870b0a680f2475845b744 | [
"MIT"
] | 4 | 2017-03-24T19:14:29.000Z | 2017-04-02T18:23:25.000Z | backend_django/type/migrations/0001_initial.py | mehranagh20/typing_site | 702ae3e018dcffb952c870b0a680f2475845b744 | [
"MIT"
] | 4 | 2017-03-25T20:25:04.000Z | 2017-03-31T17:18:23.000Z | backend_django/type/migrations/0001_initial.py | mehranagh20/typing_site | 702ae3e018dcffb952c870b0a680f2475845b744 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-15 09:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial (auto-generated) migration: creates the custom ``User`` table.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # 'password' and 'last_login' mirror the fields Django's
                # AbstractBaseUser provides — presumably the model extends
                # it; TODO confirm against the app's models.py.
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('username', models.CharField(max_length=40, unique=True)),
                ('is_admin', models.BooleanField(default=False)),
                # Timestamps are maintained automatically on insert/update.
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 33.727273 | 114 | 0.58221 |
acea4cec192161b8ba9d186a1d42656767416e43 | 10,586 | py | Python | public/sdks/python/mastertemplate/mastertemplate.py | APEEYEDOTCOM/hapi-bells | 6c49518a075cc2246f803a3f9179f4b94846e421 | [
"MIT"
] | 1 | 2018-07-17T15:07:18.000Z | 2018-07-17T15:07:18.000Z | public/sdks/python/mastertemplate/mastertemplate.py | APEEYEDOTCOM/hapi-bells | 6c49518a075cc2246f803a3f9179f4b94846e421 | [
"MIT"
] | null | null | null | public/sdks/python/mastertemplate/mastertemplate.py | APEEYEDOTCOM/hapi-bells | 6c49518a075cc2246f803a3f9179f4b94846e421 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from msrest.exceptions import HttpOperationError
from .operations.get_system_api_operations import GetSystemApiOperations
from .operations.get_system_session_api_operations import GetSystemSessionApiOperations
from .operations.get_system_useragent_api_operations import GetSystemUseragentApiOperations
from .operations.get_system_generatesdk_api_operations import GetSystemGeneratesdkApiOperations
from . import models
class MASTERTEMPLATEConfiguration(Configuration):
    """Configuration for MASTERTEMPLATE
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param str base_url: Service URL
    """
    def __init__(
            self, base_url=None):
        # Fall back to the local development endpoint when no URL is given.
        if not base_url:
            base_url = 'http://localhost:8082/'
        super(MASTERTEMPLATEConfiguration, self).__init__(base_url)
        # Advertise the SDK name/version in the HTTP User-Agent header.
        self.add_user_agent('mastertemplate/{}'.format(VERSION))
class MASTERTEMPLATE(object):
"""Master API Teplate with cross cutting concerns baked into the template.
:ivar config: Configuration for client.
:vartype config: MASTERTEMPLATEConfiguration
:ivar get_system_api: GetSystemApi operations
:vartype get_system_api: silica.operations.GetSystemApiOperations
:ivar get_system_session_api: GetSystemSessionApi operations
:vartype get_system_session_api: silica.operations.GetSystemSessionApiOperations
:ivar get_system_useragent_api: GetSystemUseragentApi operations
:vartype get_system_useragent_api: silica.operations.GetSystemUseragentApiOperations
:ivar get_system_generatesdk_api: GetSystemGeneratesdkApi operations
:vartype get_system_generatesdk_api: silica.operations.GetSystemGeneratesdkApiOperations
:param str base_url: Service URL
"""
def __init__(
self, base_url=None):
self.config = MASTERTEMPLATEConfiguration(base_url)
self._client = ServiceClient(None, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '1'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.get_system_api = GetSystemApiOperations(
self._client, self.config, self._serialize, self._deserialize)
self.get_system_session_api = GetSystemSessionApiOperations(
self._client, self.config, self._serialize, self._deserialize)
self.get_system_useragent_api = GetSystemUseragentApiOperations(
self._client, self.config, self._serialize, self._deserialize)
self.get_system_generatesdk_api = GetSystemGeneratesdkApiOperations(
self._client, self.config, self._serialize, self._deserialize)
def post_user_refreshtokenforuseraccount(
self, body=None, custom_headers=None, raw=False, **operation_config):
"""Step 3. Create new user account bearer token.
Step 3. Create a new bearer token associated with the user account.
:param body:
:type body: :class:`Model1 <silica.models.Model1>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/User/RefreshTokenForUserAccount/'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if body is not None:
body_content = self._serialize.body(body, 'Model1')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post_user_registeraccounttoreceivetoken(
self, body=None, custom_headers=None, raw=False, **operation_config):
"""Step 1. Create a user account.
Step 1. Create a new user account to allow API interaction.
:param body:
:type body: :class:`Model1 <silica.models.Model1>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/User/RegisterAccountToReceiveToken/'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if body is not None:
body_content = self._serialize.body(body, 'Model1')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post_user_requestaudittrail(
self, body=None, custom_headers=None, raw=False, **operation_config):
"""Step 3. Create new user account bearer token.
Step 3. Create a new bearer token associated with the user account.
:param body:
:type body: :class:`Model1 <silica.models.Model1>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/User/RequestAuditTrail/'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if body is not None:
body_content = self._serialize.body(body, 'Model1')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post_user_retrievetokenforuseraccount(
self, body=None, custom_headers=None, raw=False, **operation_config):
"""Step 2. Retrieve user account bearer token.
Step 2. Return the bearer token associated with the user account.
:param body:
:type body: :class:`Model1 <silica.models.Model1>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/User/RetrieveTokenForUserAccount/'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if body is not None:
body_content = self._serialize.body(body, 'Model1')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
| 38.919118 | 95 | 0.673248 |
acea4d385a51f4dc62688482ba30dcb095fcacb0 | 3,045 | py | Python | problem_solving/cracking_the_code_interview/3_stacks_queues/stack_of_plates.py | umitkaanusta/stuff-im-learning | 9d10f681ad49d6e598ef90fdc3a16ed8f2809bbc | [
"MIT"
] | 1 | 2021-06-23T17:23:27.000Z | 2021-06-23T17:23:27.000Z | problem_solving/cracking_the_code_interview/3_stacks_queues/stack_of_plates.py | umitkaanusta/stuff-im-learning | 9d10f681ad49d6e598ef90fdc3a16ed8f2809bbc | [
"MIT"
] | null | null | null | problem_solving/cracking_the_code_interview/3_stacks_queues/stack_of_plates.py | umitkaanusta/stuff-im-learning | 9d10f681ad49d6e598ef90fdc3a16ed8f2809bbc | [
"MIT"
] | null | null | null | """
Stack of Plates: Imagine a (literal) stack of plates. If the stack gets too high, it might topple.
Therefore, in real life, we would likely start a new stack when the previous stack exceeds some
threshold. Implement a data structure SetOfStacks that mimics this. SetO-fStacks should be
composed of several stacks and should create a new stack once the previous one exceeds capacity.
SetOfStacks. push() and SetOfStacks. pop() should behave identically to a single stack
(that is, pop () should return the same values as it would if there were just a single stack).
FOLLOW UP
Implement a function popAt ( int index) which performs a pop operation on a specific sub-stack.
"""
from typing import List
class Stack:
def __init__(self):
self.data = []
self.capacity = 5
def push(self, el):
if len(self.data) < self.capacity:
self.data.append(el)
else:
return False
def pop(self):
if len(self.data) > 0:
return self.data.pop()
return False
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self.data)
class SetOfStacks:
def __init__(self, stacks=None):
# list of "Stack"s
self.stacks: List[Stack] = stacks if stacks is not None else []
def create_new_stack(self):
self.stacks.append(Stack())
def get_convenient_stack_idx_push(self):
convenient_idx = None
for i, st in enumerate(self.stacks):
if len(st.data) < st.capacity:
convenient_idx = i
break
if convenient_idx is None:
self.create_new_stack()
convenient_idx = len(self.stacks) - 1
return convenient_idx
def get_convenient_stack_idx_pop(self):
convenient_idx = None
for i in range(len(self.stacks) - 1, -1, -1):
if len(self.stacks[i].data) > 0:
convenient_idx = i
break
return convenient_idx
def push(self, el):
stack_idx = self.get_convenient_stack_idx_push()
self.stacks[stack_idx].push(el)
def pop(self):
pop_idx = self.get_convenient_stack_idx_pop()
if pop_idx is not None:
return self.stacks[pop_idx].pop()
return False
def pop_at(self, idx):
return self.stacks[idx].pop()
def __str__(self):
return str(self.stacks)
def __getitem__(self, item):
return self.stacks[item]
def test_solution():
case = SetOfStacks([Stack()])
case.push(3)
assert case[0].data == [3]
case.push(3)
case.push(3)
case.push(3)
case.push(3)
case.push(3)
assert case[0].data == [3, 3, 3, 3, 3] and case[1].data == [3]
case.pop()
assert case[1].data == []
case.push(5)
case.pop_at(0)
assert case[0].data == [3, 3, 3, 3] and case[1].data == [5]
if __name__ == '__main__':
test_solution()
| 29.278846 | 99 | 0.597044 |
acea4dffcc508d9d17f50f5818cfc370bab4f8b6 | 5,185 | py | Python | powerapi/dispatcher/handlers.py | Kayoku/powerapi | b66e013a830270cecd9a452acc0f12f15fdc4a9b | [
"BSD-3-Clause"
] | null | null | null | powerapi/dispatcher/handlers.py | Kayoku/powerapi | b66e013a830270cecd9a452acc0f12f15fdc4a9b | [
"BSD-3-Clause"
] | null | null | null | powerapi/dispatcher/handlers.py | Kayoku/powerapi | b66e013a830270cecd9a452acc0f12f15fdc4a9b | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2018, INRIA
Copyright (c) 2018, University of Lille
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from powerapi.handler import InitHandler, Handler, StartHandler
from powerapi.message import OKMessage, StartMessage
def _clean_list(id_list):
"""
return a list where all elements are unique
"""
id_list.sort()
r_list = []
last_element = None
for x in id_list:
if x != last_element:
r_list.append(x)
last_element = x
return r_list
class FormulaDispatcherReportHandler(InitHandler):
"""
Split received report into sub-reports (if needed) and return the sub
reports and formulas ids to send theses reports.
"""
def handle(self, msg):
"""
Split the received report into sub-reports (if needed) and send them to
their corresponding formula.
If the corresponding formula does not exist, create and return the
actor state, containing the new formula.
:param powerapi.Report msg: Report message
:param powerapi.State state: Actor state
:return: List of the (formula_id, report) where formula_id is a tuple
that identitfy the formula_actor
:rtype: list(tuple(formula_id, report))
"""
dispatch_rule = self.state.route_table.get_dispatch_rule(msg)
primary_dispatch_rule = self.state.route_table.primary_dispatch_rule
for formula_id in self._extract_formula_id(msg, dispatch_rule,
primary_dispatch_rule):
primary_rule_fields = primary_dispatch_rule.fields
if len(formula_id) == len(primary_rule_fields):
formula = self.state.get_direct_formula(formula_id)
formula.send_data(msg)
else:
for formula in self.state.get_corresponding_formula(
list(formula_id)):
formula.send(msg)
def _extract_formula_id(self, report, dispatch_rule, primary_dispatch_rule):
"""
Use the dispatch rule to extract formula_id from the given report.
Formula id are then mapped to an identifier that match the primary
report identifier fields
ex: primary dispatch_rule (sensor, socket, core)
second dispatch_rule (sensor)
The second dispatch_rule need to match with the primary if sensor are
equal.
:param powerapi.Report report: Report to split
:param powerapi.DispatchRule dispatch_rule: DispatchRule rule
:return: List of formula_id associated to a sub-report of report
:rtype: [tuple]
"""
# List of tuple (id_report, report)
id_list = dispatch_rule.get_formula_id(report)
if dispatch_rule.is_primary:
return id_list
return _clean_list(list(map(
lambda id: (self._match_report_id(id, dispatch_rule,
primary_dispatch_rule)),
id_list)))
def _match_report_id(self, report_id, dispatch_rule, primary_rule):
"""
Return the new_report_id with the report_id by removing
every "useless" fields from it.
:param tuple report_id: Original report id
:param powerapi.DispatchRule dispatch_rule: DispatchRule rule
"""
new_report_id = ()
for i in range(len(report_id)):
if i >= len(primary_rule.fields):
return new_report_id
if dispatch_rule.fields[i] == primary_rule.fields[i]:
new_report_id += (report_id[i],)
else:
return new_report_id
return new_report_id
| 38.407407 | 80 | 0.673867 |
acea4e4838b3c1132780a800e74fa44b8e490983 | 2,685 | py | Python | liminal/core/environment.py | michaelloewenstein/incubator-liminal | b8439cfac6cd892ed08283ae75b5ab6e01e13b2c | [
"Apache-2.0"
] | null | null | null | liminal/core/environment.py | michaelloewenstein/incubator-liminal | b8439cfac6cd892ed08283ae75b5ab6e01e13b2c | [
"Apache-2.0"
] | null | null | null | liminal/core/environment.py | michaelloewenstein/incubator-liminal | b8439cfac6cd892ed08283ae75b5ab6e01e13b2c | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
import logging
DEFAULT_DAGS_ZIP_NAME = 'liminal.zip'
DEFAULT_LIMINAL_HOME = os.path.expanduser('~/liminal_home')
DEFAULT_PIPELINES_SUBDIR = "pipelines"
LIMINAL_HOME_PARAM_NAME = "LIMINAL_HOME"
LIMINAL_VERSION_PARAM_NAME = 'LIMINAL_VERSION'
def get_liminal_home():
if not os.environ.get(LIMINAL_HOME_PARAM_NAME):
logging.info("no environment parameter called LIMINAL_HOME detected")
logging.info(f"registering {DEFAULT_LIMINAL_HOME} as the LIMINAL_HOME directory")
os.environ[LIMINAL_HOME_PARAM_NAME] = DEFAULT_LIMINAL_HOME
return os.environ.get(LIMINAL_HOME_PARAM_NAME, DEFAULT_LIMINAL_HOME)
def get_dags_dir():
# if we are inside airflow, we will take it from the configured dags folder
base_dir = os.environ.get("AIRFLOW__CORE__DAGS_FOLDER", get_liminal_home())
return os.path.join(base_dir, DEFAULT_PIPELINES_SUBDIR)
def get_liminal_version():
result = os.environ.get(LIMINAL_VERSION_PARAM_NAME, None)
if not result:
output = subprocess.run(['pip freeze | grep \'apache-liminal\''], capture_output=True,
env=os.environ, shell=True)
pip_res = output.stdout.decode('UTF-8').strip()
scripts_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'scripts'
)
whl_files = [file for file in os.listdir(scripts_dir) if file.endswith(".whl")]
if whl_files:
value = 'file://' + os.path.join(scripts_dir, whl_files[0])
elif ' @ ' in pip_res:
value = pip_res[pip_res.index(' @ ') + 3:]
else:
value = pip_res
logging.info(f'LIMINAL_VERSION not set. Setting it to currently installed version: {value}')
os.environ[LIMINAL_VERSION_PARAM_NAME] = value
return os.environ.get(LIMINAL_VERSION_PARAM_NAME, 'apache-liminal')
| 41.953125 | 100 | 0.716946 |
acea4e4e5c9c73400b11188e03b02d57cca283d6 | 35,921 | py | Python | cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_RTTMON_TC_MIB.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | 177 | 2016-03-15T17:03:51.000Z | 2022-03-18T16:48:44.000Z | cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_RTTMON_TC_MIB.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | 18 | 2016-03-30T10:45:22.000Z | 2020-07-14T16:28:13.000Z | cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_RTTMON_TC_MIB.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | 85 | 2016-03-16T20:38:57.000Z | 2022-02-22T04:26:02.000Z | """ CISCO_RTTMON_TC_MIB
This MIB contains textual conventions used by
CISCO\-RTTMON\-MIB, CISCO\-RTTMON\-RTP\-MIB and
CISCO\-RTTMON\-ICMP\-MIB, but they are not limited
to only these MIBs.
These textual conventions were originally defined in
CISCO\-RTTMON\-MIB.
Acronyms\:
FEC\: Forward Equivalence Class
LPD\: Label Path Discovery
LSP\: Label Switched Path
MPLS\: Multi Protocol Label Switching
RTT\: Round Trip Time
SAA\: Service Assurance Agent
VPN\: Virtual Private Network
CFM\: Connection Fault Management
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class RttMonCodecType(Enum):
    """
    RttMonCodecType (Enum Class)

    Specifies the voice codec type to be used with the jitter probe
    when simulating voice traffic.

    The following codec types are defined:

    notApplicable - no codec type is defined
    g711ulaw      - uses G.711 u-law at 64000 bps
    g711alaw      - uses G.711 A-law at 64000 bps
    g729a         - uses G.729A at 8000 bps

    .. data:: notApplicable = 0

    .. data:: g711ulaw = 1

    .. data:: g711alaw = 2

    .. data:: g729a = 3

    """

    notApplicable = Enum.YLeaf(0, "notApplicable")

    g711ulaw = Enum.YLeaf(1, "g711ulaw")

    g711alaw = Enum.YLeaf(2, "g711alaw")

    g729a = Enum.YLeaf(3, "g729a")
class RttMonLSPPingReplyMode(Enum):
    """
    RttMonLSPPingReplyMode (Enum Class)

    Specifies the reply mode for MPLS LSP Echo request packets.
    The following reply modes are supported:

    replyIpv4Udp(1)   - an MPLS echo request is normally replied to
                        via IPv4 UDP packets.

    replyIpv4UdpRA(2) - reply via IPv4 UDP with Router Alert; used
                        when the IPv4 return path is deemed
                        unreliable.

    .. data:: replyIpv4Udp = 1

    .. data:: replyIpv4UdpRA = 2

    """

    replyIpv4Udp = Enum.YLeaf(1, "replyIpv4Udp")

    replyIpv4UdpRA = Enum.YLeaf(2, "replyIpv4UdpRA")
class RttMonOperation(Enum):
    """
    RttMonOperation (Enum Class)

    Specific RTT operations for a particular probe type:

    notApplicable(0)      - this object is not applicable for the
                            probe type
    httpGet(1)            - HTTP GET request
    httpRaw(2)            - HTTP request with a user-defined payload
    ftpGet(3)             - FTP GET request
    ftpPassive(4)         - FTP passive mode
    ftpActive(5)          - FTP active mode
    voipDTAlertRinging(6) - VoIP post-dial-delay detect point:
                            Alerting / Ringing
    voipDTConnectOK(7)    - VoIP post-dial-delay detect point:
                            Connect / OK

    .. data:: notApplicable = 0

    .. data:: httpGet = 1

    .. data:: httpRaw = 2

    .. data:: ftpGet = 3

    .. data:: ftpPassive = 4

    .. data:: ftpActive = 5

    .. data:: voipDTAlertRinging = 6

    .. data:: voipDTConnectOK = 7

    """

    notApplicable = Enum.YLeaf(0, "notApplicable")

    httpGet = Enum.YLeaf(1, "httpGet")

    httpRaw = Enum.YLeaf(2, "httpRaw")

    ftpGet = Enum.YLeaf(3, "ftpGet")

    ftpPassive = Enum.YLeaf(4, "ftpPassive")

    ftpActive = Enum.YLeaf(5, "ftpActive")

    voipDTAlertRinging = Enum.YLeaf(6, "voipDTAlertRinging")

    voipDTConnectOK = Enum.YLeaf(7, "voipDTConnectOK")
class RttMonProtocol(Enum):
    """
    RttMonProtocol (Enum Class)

    Specifies the protocol to be used to perform the timed
    echo request/response. The following protocols are
    defined:

    NOTE: All protocols that end in 'Appl' support the
          asymmetric request/response (ARR) protocol.
          See the DESCRIPTION of ciscoRttMonMIB for a
          complete description of the asymmetric
          request/response protocol.

    notApplicable      - no protocol is defined
    ipIcmpEcho         - uses Echo Request/Reply as defined
                         in RFC 792 for Internet Protocol
                         networks
    ipUdpEchoAppl      - uses the UDP-based echo server
    snaRUEcho          - uses the REQECHO and ECHOTEST RUs
                         to an SSCP over an SNA LU-SSCP
                         session
    snaLU0EchoAppl     - uses test RUs sent to the Echo
                         Server over an SNA LU0-LU0 session
    snaLU2EchoAppl     - uses test RUs sent to the Echo
                         Server over an SNA LU2-LU2 session
    snaLU62Echo        - uses the native APPN ping, i.e. aping
    snaLU62EchoAppl    - uses test RUs sent to the ARR
                         Echo Server over an SNA LU6.2-LU6.2
                         session
    appleTalkEcho      - uses Echo Request/Reply as defined
                         for AppleTalk networks
    appleTalkEchoAppl  - uses the AppleTalk-based echo
                         server
    decNetEcho         - uses Echo Request/Reply as defined
                         for DECnet networks
    decNetEchoAppl     - uses the DECnet-based echo server
    ipxEcho            - uses Echo Request/Reply as defined
                         for Novell IPX networks
    ipxEchoAppl        - uses the Novell IPX-based echo
                         server
    isoClnsEcho        - uses Echo Request/Reply as defined
                         for ISO CLNS networks
    isoClnsEchoAppl    - uses the ISO CLNS-based echo
                         server
    vinesEcho          - uses Echo Request/Reply as defined
                         for VINES networks
    vinesEchoAppl      - uses the VINES-based echo server
    xnsEcho            - uses Echo Request/Reply as defined
                         for XNS networks
    xnsEchoAppl        - uses the XNS-based echo server
    apolloEcho         - uses Echo Request/Reply as defined
                         for APOLLO networks
    apolloEchoAppl     - uses the APOLLO-based echo
                         server
    netbiosEchoAppl    - uses the NetBIOS-based echo
                         server
    ipTcpConn          - uses TCP's connect mechanism
    httpAppl           - uses UDP for name resolution,
                         TCP connect and TCP data transfer
                         mechanisms for HTTP data download
                         from a particular HTTP server
    dnsAppl            - uses UDP for name resolution
    jitterAppl         - uses UDP for packet transfers
    dlswAppl           - uses TCP for sending keepalives
    dhcpAppl           - uses UDP for sending DHCP requests
    ftpAppl            - uses TCP for connect & data transfer
    mplsLspPingAppl    - uses MPLS Echo Request/Response per
                         the draft-ietf-mpls-lsp-ping-04 IETF
                         standard
    voipAppl           - uses Symphony infrastructure to measure
                         H.323/SIP call setup time
    rtpAppl            - uses Symphony infrastructure to measure
                         RTP packet delay variance
    icmpJitterAppl     - uses ICMP Timestamp for packet transfer
                         to measure jitter
    ethernetPingAppl   - uses regular 802.1ag loopback frames
    ethernetJitterAppl - uses CFM frames
    videoAppl          - uses synthetic traffic depending on
                         video profile
    y1731dmm           - used to measure Y.1731 delay
    y17311dm           - used to measure Y.1731 1DM
    y1731lmm           - used to measure Y.1731 loss
    y1731slm           - used to measure Y.1731 synthetic loss
    mcastJitterAppl    - uses UDP jitter to measure multicast
                         network performance

    .. data:: none = 0

    .. data:: notApplicable = 1

    .. data:: ipIcmpEcho = 2

    .. data:: ipUdpEchoAppl = 3

    .. data:: snaRUEcho = 4

    .. data:: snaLU0EchoAppl = 5

    .. data:: snaLU2EchoAppl = 6

    .. data:: snaLU62Echo = 7

    .. data:: snaLU62EchoAppl = 8

    .. data:: appleTalkEcho = 9

    .. data:: appleTalkEchoAppl = 10

    .. data:: decNetEcho = 11

    .. data:: decNetEchoAppl = 12

    .. data:: ipxEcho = 13

    .. data:: ipxEchoAppl = 14

    .. data:: isoClnsEcho = 15

    .. data:: isoClnsEchoAppl = 16

    .. data:: vinesEcho = 17

    .. data:: vinesEchoAppl = 18

    .. data:: xnsEcho = 19

    .. data:: xnsEchoAppl = 20

    .. data:: apolloEcho = 21

    .. data:: apolloEchoAppl = 22

    .. data:: netbiosEchoAppl = 23

    .. data:: ipTcpConn = 24

    .. data:: httpAppl = 25

    .. data:: dnsAppl = 26

    .. data:: jitterAppl = 27

    .. data:: dlswAppl = 28

    .. data:: dhcpAppl = 29

    .. data:: ftpAppl = 30

    .. data:: mplsLspPingAppl = 31

    .. data:: voipAppl = 32

    .. data:: rtpAppl = 33

    .. data:: icmpJitterAppl = 34

    .. data:: ethernetPingAppl = 35

    .. data:: ethernetJitterAppl = 36

    .. data:: videoAppl = 37

    .. data:: y1731dmm = 38

    .. data:: y17311dm = 39

    .. data:: y1731lmm = 40

    .. data:: mcastJitterAppl = 41

    .. data:: y1731slm = 42

    .. data:: y1731dmmv1 = 43

    """

    none = Enum.YLeaf(0, "none")

    notApplicable = Enum.YLeaf(1, "notApplicable")

    ipIcmpEcho = Enum.YLeaf(2, "ipIcmpEcho")

    ipUdpEchoAppl = Enum.YLeaf(3, "ipUdpEchoAppl")

    snaRUEcho = Enum.YLeaf(4, "snaRUEcho")

    snaLU0EchoAppl = Enum.YLeaf(5, "snaLU0EchoAppl")

    snaLU2EchoAppl = Enum.YLeaf(6, "snaLU2EchoAppl")

    snaLU62Echo = Enum.YLeaf(7, "snaLU62Echo")

    snaLU62EchoAppl = Enum.YLeaf(8, "snaLU62EchoAppl")

    appleTalkEcho = Enum.YLeaf(9, "appleTalkEcho")

    appleTalkEchoAppl = Enum.YLeaf(10, "appleTalkEchoAppl")

    decNetEcho = Enum.YLeaf(11, "decNetEcho")

    decNetEchoAppl = Enum.YLeaf(12, "decNetEchoAppl")

    ipxEcho = Enum.YLeaf(13, "ipxEcho")

    ipxEchoAppl = Enum.YLeaf(14, "ipxEchoAppl")

    isoClnsEcho = Enum.YLeaf(15, "isoClnsEcho")

    isoClnsEchoAppl = Enum.YLeaf(16, "isoClnsEchoAppl")

    vinesEcho = Enum.YLeaf(17, "vinesEcho")

    vinesEchoAppl = Enum.YLeaf(18, "vinesEchoAppl")

    xnsEcho = Enum.YLeaf(19, "xnsEcho")

    xnsEchoAppl = Enum.YLeaf(20, "xnsEchoAppl")

    apolloEcho = Enum.YLeaf(21, "apolloEcho")

    apolloEchoAppl = Enum.YLeaf(22, "apolloEchoAppl")

    netbiosEchoAppl = Enum.YLeaf(23, "netbiosEchoAppl")

    ipTcpConn = Enum.YLeaf(24, "ipTcpConn")

    httpAppl = Enum.YLeaf(25, "httpAppl")

    dnsAppl = Enum.YLeaf(26, "dnsAppl")

    jitterAppl = Enum.YLeaf(27, "jitterAppl")

    dlswAppl = Enum.YLeaf(28, "dlswAppl")

    dhcpAppl = Enum.YLeaf(29, "dhcpAppl")

    ftpAppl = Enum.YLeaf(30, "ftpAppl")

    mplsLspPingAppl = Enum.YLeaf(31, "mplsLspPingAppl")

    voipAppl = Enum.YLeaf(32, "voipAppl")

    rtpAppl = Enum.YLeaf(33, "rtpAppl")

    icmpJitterAppl = Enum.YLeaf(34, "icmpJitterAppl")

    ethernetPingAppl = Enum.YLeaf(35, "ethernetPingAppl")

    ethernetJitterAppl = Enum.YLeaf(36, "ethernetJitterAppl")

    videoAppl = Enum.YLeaf(37, "videoAppl")

    y1731dmm = Enum.YLeaf(38, "y1731dmm")

    y17311dm = Enum.YLeaf(39, "y17311dm")

    y1731lmm = Enum.YLeaf(40, "y1731lmm")

    mcastJitterAppl = Enum.YLeaf(41, "mcastJitterAppl")

    y1731slm = Enum.YLeaf(42, "y1731slm")

    y1731dmmv1 = Enum.YLeaf(43, "y1731dmmv1")
class RttMonReactVar(Enum):
    """
    RttMonReactVar (Enum Class)

    Specific reaction (threshold-monitored) variables for a
    particular probe type:

    rtt(1)                  - Round Trip Time
    jitterSDAvg(2)          - Jitter average from source to destination
    jitterDSAvg(3)          - Jitter average from destination to source
    packetLossSD(4)         - Packet loss from source to destination
    packetLossDS(5)         - Packet loss from destination to source
    mos(6)                  - Mean Opinion Score
    timeout(7)              - Timeout of the operation
    connectionLoss(8)       - Connection to the destination failed
    verifyError(9)          - Data corruption occurred
    jitterAvg(10)           - Jitter average in both directions
    icpif(11)               - Calculated Planning Impairment Factor
    packetMIA(12)           - Packets missing in action
    packetLateArrival(13)   - Packets arriving late
    packetOutOfSequence(14) - Packets arriving out of sequence
    maxOfPositiveSD(15)     - Maximum positive jitter from
                              source to destination
    maxOfNegativeSD(16)     - Maximum negative jitter from
                              source to destination
    maxOfPositiveDS(17)     - Maximum positive jitter from
                              destination to source
    maxOfNegativeDS(18)     - Maximum negative jitter from
                              destination to source
    iaJitterDS(19)          - Inter-arrival jitter from
                              destination to source
    frameLossDS(20)         - Number of frame losses recorded
                              at the source DSP
    mosLQDS(21)             - Listener-quality MOS at source
    mosCQDS(22)             - Conversational-quality MOS at source
    rFactorDS(23)           - R-Factor value at destination
    successivePacketLoss(24)- Successive dropped packets
    maxOfLatencyDS(25)      - Maximum latency from destination
                              to source
    maxOfLatencySD(26)      - Maximum latency from source
                              to destination
    latencyDSAvg(27)        - Latency average from destination
                              to source
    latencySDAvg(28)        - Latency average from source
                              to destination
    packetLoss(29)          - Packet loss in both directions
    iaJitterSD(30)          - Inter-arrival jitter from
                              source to destination
    mosCQSD(31)             - Conversational-quality MOS at
                              destination
    rFactorSD(32)           - R-Factor value at destination

    .. data:: rtt = 1

    .. data:: jitterSDAvg = 2

    .. data:: jitterDSAvg = 3

    .. data:: packetLossSD = 4

    .. data:: packetLossDS = 5

    .. data:: mos = 6

    .. data:: timeout = 7

    .. data:: connectionLoss = 8

    .. data:: verifyError = 9

    .. data:: jitterAvg = 10

    .. data:: icpif = 11

    .. data:: packetMIA = 12

    .. data:: packetLateArrival = 13

    .. data:: packetOutOfSequence = 14

    .. data:: maxOfPositiveSD = 15

    .. data:: maxOfNegativeSD = 16

    .. data:: maxOfPositiveDS = 17

    .. data:: maxOfNegativeDS = 18

    .. data:: iaJitterDS = 19

    .. data:: frameLossDS = 20

    .. data:: mosLQDS = 21

    .. data:: mosCQDS = 22

    .. data:: rFactorDS = 23

    .. data:: successivePacketLoss = 24

    .. data:: maxOfLatencyDS = 25

    .. data:: maxOfLatencySD = 26

    .. data:: latencyDSAvg = 27

    .. data:: latencySDAvg = 28

    .. data:: packetLoss = 29

    .. data:: iaJitterSD = 30

    .. data:: mosCQSD = 31

    .. data:: rFactorSD = 32

    """

    rtt = Enum.YLeaf(1, "rtt")

    jitterSDAvg = Enum.YLeaf(2, "jitterSDAvg")

    jitterDSAvg = Enum.YLeaf(3, "jitterDSAvg")

    packetLossSD = Enum.YLeaf(4, "packetLossSD")

    packetLossDS = Enum.YLeaf(5, "packetLossDS")

    mos = Enum.YLeaf(6, "mos")

    timeout = Enum.YLeaf(7, "timeout")

    connectionLoss = Enum.YLeaf(8, "connectionLoss")

    verifyError = Enum.YLeaf(9, "verifyError")

    jitterAvg = Enum.YLeaf(10, "jitterAvg")

    icpif = Enum.YLeaf(11, "icpif")

    packetMIA = Enum.YLeaf(12, "packetMIA")

    packetLateArrival = Enum.YLeaf(13, "packetLateArrival")

    packetOutOfSequence = Enum.YLeaf(14, "packetOutOfSequence")

    maxOfPositiveSD = Enum.YLeaf(15, "maxOfPositiveSD")

    maxOfNegativeSD = Enum.YLeaf(16, "maxOfNegativeSD")

    maxOfPositiveDS = Enum.YLeaf(17, "maxOfPositiveDS")

    maxOfNegativeDS = Enum.YLeaf(18, "maxOfNegativeDS")

    iaJitterDS = Enum.YLeaf(19, "iaJitterDS")

    frameLossDS = Enum.YLeaf(20, "frameLossDS")

    mosLQDS = Enum.YLeaf(21, "mosLQDS")

    mosCQDS = Enum.YLeaf(22, "mosCQDS")

    rFactorDS = Enum.YLeaf(23, "rFactorDS")

    successivePacketLoss = Enum.YLeaf(24, "successivePacketLoss")

    maxOfLatencyDS = Enum.YLeaf(25, "maxOfLatencyDS")

    maxOfLatencySD = Enum.YLeaf(26, "maxOfLatencySD")

    latencyDSAvg = Enum.YLeaf(27, "latencyDSAvg")

    latencySDAvg = Enum.YLeaf(28, "latencySDAvg")

    packetLoss = Enum.YLeaf(29, "packetLoss")

    iaJitterSD = Enum.YLeaf(30, "iaJitterSD")

    mosCQSD = Enum.YLeaf(31, "mosCQSD")

    rFactorSD = Enum.YLeaf(32, "rFactorSD")
class RttMonRttType(Enum):
"""
RttMonRttType (Enum Class)
Specifies the type of RTT operation to be performed.
The value 'echo' will cause the RTT application to
perform a timed echo request/response operation directed
at the 'RttMonTargetAddress'.
The value 'pathEcho' will cause the RTT application
to perform path discovery to the 'RttMonTargetAddress',
then it will perform a timed echo request/response
operation directed at the each hop along the path.
This operation will provide two types of information,
first the path and second the time delay along the path.
NOTE\: The 'pathEcho' time delay operation is a heuristic
measurement because an intermediate hop may forward
the different echo request/response at different
rates. Thus the time delay difference between two
hops along a path may contain very little 'true'
statistical meaning.
The value 'fileIO' will cause the RTT application to
write, read, or write/read a file to a preconfigured
file server.
The value 'script' will cause the RTT application to
execute a preconfigured script.
The value 'udpEcho' will cause the RTT application
to perform a timed udp packet send/receive operation
directed at the 'RttMonTargetAddress'.
The value 'tcpConnect' will cause the RTT application
to perform a timed tcp connect operation directed at the
'RttMonTargetAddress'.
The value 'http' will cause the RTT application
to perform a download of the object specified in the URL.
The value 'dns' will cause the RTT application
to perform a name lookup of an IP Address or a hostname.
The value 'jitter' will cause the RTT application
to perform delay variance analysis.
The value 'dlsw' will cause the RTT application
to perform a keepalive operation to measure the response
time of a DLSw peer.
The value 'dhcp' will cause the RTT application
to perform an IP Address lease request/teardown operation.
The value 'voip' will cause the RTT application
to perform call set up operation to measure the response.
The value 'rtp' will cause the RTT application to perform
delay variance analysis for RTP packet.
The value 'lspGroup' will cause the RTT application to logically
group Label Switched Paths discovered as part of LSP Path
Discovery to the target and perform an RTT operation end to end
over each path in the Group. The type of operation configured
is determined by rttMplsVpnMonCtrlRttType.
The value 'icmpjitter' will cause the RTT application
to perform delay variance analysis using ICMP timestamp packets.
The value of 'lspPingIpv4' will cause the RTT application to
perform ping over LSP path.
The value of 'lspTraceIpv4' will cause the RTT application to
perform trace over LSP path.
The value of 'ethernetPing' will cause the RTT application to
perform delay variance analysis using regular 802.1ag loopback
frame.
The value of 'ethernetJitter' will cause the RTT application to
perform delay variance analysis using CFM frame.
The value of 'lspPingPseudowire' will cause the RTT application
to
perform LSP Ping over Pseudowire and measure response time.
The value 'video' will cause the the RTT application to perform
a video stream analysis directed at the 'RttMonTargetAddress
The value 'y1731Delay' will cause the RTT application to perform a ITU\-T standard Y.1731 delay variance analysis
The value 'y1731Loss' will cause the RTT application to perform a ITU\-T standard Y.1731 loss measure analysis
The value 'mcastJitter' will cause the RTT application to perform
udp jitter stream analysis on a multicast network.
.. data:: none = 0
.. data:: echo = 1
.. data:: pathEcho = 2
.. data:: fileIO = 3
.. data:: script = 4
.. data:: udpEcho = 5
.. data:: tcpConnect = 6
.. data:: http = 7
.. data:: dns = 8
.. data:: jitter = 9
.. data:: dlsw = 10
.. data:: dhcp = 11
.. data:: ftp = 12
.. data:: voip = 13
.. data:: rtp = 14
.. data:: lspGroup = 15
.. data:: icmpjitter = 16
.. data:: lspPing = 17
.. data:: lspTrace = 18
.. data:: ethernetPing = 19
.. data:: ethernetJitter = 20
.. data:: lspPingPseudowire = 21
.. data:: video = 22
.. data:: y1731Delay = 23
.. data:: y1731Loss = 24
.. data:: mcastJitter = 25
"""
none = Enum.YLeaf(0, "none")
echo = Enum.YLeaf(1, "echo")
pathEcho = Enum.YLeaf(2, "pathEcho")
fileIO = Enum.YLeaf(3, "fileIO")
script = Enum.YLeaf(4, "script")
udpEcho = Enum.YLeaf(5, "udpEcho")
tcpConnect = Enum.YLeaf(6, "tcpConnect")
http = Enum.YLeaf(7, "http")
dns = Enum.YLeaf(8, "dns")
jitter = Enum.YLeaf(9, "jitter")
dlsw = Enum.YLeaf(10, "dlsw")
dhcp = Enum.YLeaf(11, "dhcp")
ftp = Enum.YLeaf(12, "ftp")
voip = Enum.YLeaf(13, "voip")
rtp = Enum.YLeaf(14, "rtp")
lspGroup = Enum.YLeaf(15, "lspGroup")
icmpjitter = Enum.YLeaf(16, "icmpjitter")
lspPing = Enum.YLeaf(17, "lspPing")
lspTrace = Enum.YLeaf(18, "lspTrace")
ethernetPing = Enum.YLeaf(19, "ethernetPing")
ethernetJitter = Enum.YLeaf(20, "ethernetJitter")
lspPingPseudowire = Enum.YLeaf(21, "lspPingPseudowire")
video = Enum.YLeaf(22, "video")
y1731Delay = Enum.YLeaf(23, "y1731Delay")
y1731Loss = Enum.YLeaf(24, "y1731Loss")
mcastJitter = Enum.YLeaf(25, "mcastJitter")
class RttMplsVpnMonLpdFailureSense(Enum):
"""
RttMplsVpnMonLpdFailureSense (Enum Class)
These are the defined values for the causes of failure in
LSP Path Discovery.
unknown(1) \- The cause of failure for the
LSP Path Discovery cannot be
determined. The discovery for
the target PE may not have
started.
noPath(2) \- No paths were found to the
target FEC while doing the
LSP Path Discovery.
allPathsBroken(3) \- All paths to the target FEC
are broken. This means an
untagged interface on the LSP
to the target.
allPathsUnexplorable(4) \- All paths to the target FEC are
unexplorable. This identifies
a case where there is some
problem in reaching the next
hop while doing Discovery.
allPathsBrokenOrUnexplorable(5) \- All paths to the target FEC are
are either broken or
unexplorable.
timeout(6) \- The LSP Path Discovery could
not be completed for the
target FEC within the
configured time.
error(7) \- Error occurred while
performing LSP Path Discovery.
It might be also due to some
reasons unrelated to LSP Path
Discovery.
.. data:: unknown = 1
.. data:: noPath = 2
.. data:: allPathsBroken = 3
.. data:: allPathsUnexplorable = 4
.. data:: allPathsBrokenOrUnexplorable = 5
.. data:: timeout = 6
.. data:: error = 7
"""
unknown = Enum.YLeaf(1, "unknown")
noPath = Enum.YLeaf(2, "noPath")
allPathsBroken = Enum.YLeaf(3, "allPathsBroken")
allPathsUnexplorable = Enum.YLeaf(4, "allPathsUnexplorable")
allPathsBrokenOrUnexplorable = Enum.YLeaf(5, "allPathsBrokenOrUnexplorable")
timeout = Enum.YLeaf(6, "timeout")
error = Enum.YLeaf(7, "error")
class RttMplsVpnMonLpdGrpStatus(Enum):
"""
RttMplsVpnMonLpdGrpStatus (Enum Class)
These are the defined values for the status of the LPD Group.
unknown(1) \- This indicates that some/all of the probes which are
part of the LPD group have not completed even
a single operation, so the group status cannot be
identified.
up(2) \- This state indicates that all the probes which are
part of the LPD group are up with latest return
code as 'ok'.
partial(3) \- This state indicates that some probes are up and
running fine and some are not 'ok'.
down(4) \- This state indicates that all the probes to the
target are not running fine. This state indicates
that there is connectivity problem to the target
PE.
.. data:: unknown = 1
.. data:: up = 2
.. data:: partial = 3
.. data:: down = 4
"""
unknown = Enum.YLeaf(1, "unknown")
up = Enum.YLeaf(2, "up")
partial = Enum.YLeaf(3, "partial")
down = Enum.YLeaf(4, "down")
class RttMplsVpnMonRttType(Enum):
"""
RttMplsVpnMonRttType (Enum Class)
Specifies the type of RTT operation to be performed for
Auto SAA L3 MPLS VPN.
The value 'jitter' will cause the Auto SAA L3 MPLS VPN to
automatically configure jitter operations.
The value 'echo' will cause the Auto SAA L3 MPLS VPN to
automatically configure jitter operations.
The value 'pathEcho' will cause the Auto SAA L3 MPLS VPN to
automatically configure jitter operations.
.. data:: jitter = 1
.. data:: echo = 2
.. data:: pathEcho = 3
"""
jitter = Enum.YLeaf(1, "jitter")
echo = Enum.YLeaf(2, "echo")
pathEcho = Enum.YLeaf(3, "pathEcho")
class RttReset(Enum):
"""
RttReset (Enum Class)
When the value set to 'reset', the entire RTT application
goes through a reset sequence, making a best
effort to revert to its startup condition. At other times,
the value is 'ready'.
.. data:: ready = 1
.. data:: reset = 2
"""
ready = Enum.YLeaf(1, "ready")
reset = Enum.YLeaf(2, "reset")
class RttResponseSense(Enum):
"""
RttResponseSense (Enum Class)
These are the defined values for a completion status
of a RTT operation.
other(0) \- the operation is not started or completed
or this object is not applicable for
the probe type.
ok(1) \- a valid completion occurred and
timed successfully
disconnected(2) \- the operation did not occur because
the connection to the target
was lost
overThreshold(3) \- a valid completion was received but
the completion time exceeded a
threshold value
timeout(4) \- an operation timed out; no completion
time recorded
busy(5) \- the operation did not occur because a
previous operation is still
outstanding
notConnected(6) \- the operation did not occur because no
connection (session) exists with the
target
dropped(7) \- the operation did not occur due to lack
of internal resource
sequenceError(8) \- a completed operation did not contain
the correct sequence id; no completion
time recorded
verifyError(9) \- a completed operation was received, but
the data it contained did not match
the expected data; no completion time
recorded
applicationSpecific(10)
\- the application generating the operation
had a specific error
dnsServerTimeout(11)
\- DNS Server Timeout
tcpConnectTimeout(12)
\- TCP Connect Timeout
httpTransactionTimeout(13)
\- HTTP Transaction Timeout
dnsQueryError(14)
\- DNS Query error (because of unknown address
etc.,)
httpError(15)
\- HTTP Response StatusCode is not OK (200),
or permenent redirect(301), temporary redirect
(302) then HTTP error is set.
error(16)
\- if there are socket failures or some other
errors not relavant to the actual probe, they
are recorded under this error
mplsLspEchoTxError(17)
\- MPLS echo request transmission failure.
mplsLspUnreachable(18)
\- MPLS Target FEC not reachable or unsupported
mpls echo reply code.
mplsLspMalformedReq(19)
\- MPLS echo request was malformalformed, pointed
out by the reply router.
mplsLspReachButNotFEC(20)
\- MPLS echo request processed by the downstream
router but not the target.
enableOk(21)
\- Control enable request OK
enableNoConnect(22)
\- Control enable request fail due to no connection to
the target.
enableVersionFail(23)
\- Control enable request version fail.
enableInternalError(24)
\- Control enable request internal error.
enableAbort(25)
\- Control enable request abort.
enableFail(26)
\- Control enable request fail.
enableAuthFail(27)
\- Control enable request fail due to authentication
fail.
enableFormatError(28)
\- Control enable request fail due to format error.
enablePortInUse(29)
\- Control enable request fail due to port in use.
statsRetrieveOk(30)
\- Stats retrieve request OK
statsRetrieveNoConnect(31)
\- Stats retrieve request fail due to no connection
to the target.
statsRetrieveVersionFail(32)
\- Stats retrieve request version fail.
statsRetrieveInternalError(33)
\- Stats retrieve request internal error.
statsRetrieveAbort(34)
\- Stats retrieve request abort.
statsRetrieveFail(35)
\- Stats retrieve request fail.
statsRetrieveAuthFail(36)
\- Stats retrieve request fail due to authentication fail.
statsRetrieveFormatError(37)
\- Stats retrieve request fail due to format error.
statsRetrievePortInUse(38)
\- Stats retrieve request fail due to port in use.
.. data:: other = 0
.. data:: ok = 1
.. data:: disconnected = 2
.. data:: overThreshold = 3
.. data:: timeout = 4
.. data:: busy = 5
.. data:: notConnected = 6
.. data:: dropped = 7
.. data:: sequenceError = 8
.. data:: verifyError = 9
.. data:: applicationSpecific = 10
.. data:: dnsServerTimeout = 11
.. data:: tcpConnectTimeout = 12
.. data:: httpTransactionTimeout = 13
.. data:: dnsQueryError = 14
.. data:: httpError = 15
.. data:: error = 16
.. data:: mplsLspEchoTxError = 17
.. data:: mplsLspUnreachable = 18
.. data:: mplsLspMalformedReq = 19
.. data:: mplsLspReachButNotFEC = 20
.. data:: enableOk = 21
.. data:: enableNoConnect = 22
.. data:: enableVersionFail = 23
.. data:: enableInternalError = 24
.. data:: enableAbort = 25
.. data:: enableFail = 26
.. data:: enableAuthFail = 27
.. data:: enableFormatError = 28
.. data:: enablePortInUse = 29
.. data:: statsRetrieveOk = 30
.. data:: statsRetrieveNoConnect = 31
.. data:: statsRetrieveVersionFail = 32
.. data:: statsRetrieveInternalError = 33
.. data:: statsRetrieveAbort = 34
.. data:: statsRetrieveFail = 35
.. data:: statsRetrieveAuthFail = 36
.. data:: statsRetrieveFormatError = 37
.. data:: statsRetrievePortInUse = 38
"""
other = Enum.YLeaf(0, "other")
ok = Enum.YLeaf(1, "ok")
disconnected = Enum.YLeaf(2, "disconnected")
overThreshold = Enum.YLeaf(3, "overThreshold")
timeout = Enum.YLeaf(4, "timeout")
busy = Enum.YLeaf(5, "busy")
notConnected = Enum.YLeaf(6, "notConnected")
dropped = Enum.YLeaf(7, "dropped")
sequenceError = Enum.YLeaf(8, "sequenceError")
verifyError = Enum.YLeaf(9, "verifyError")
applicationSpecific = Enum.YLeaf(10, "applicationSpecific")
dnsServerTimeout = Enum.YLeaf(11, "dnsServerTimeout")
tcpConnectTimeout = Enum.YLeaf(12, "tcpConnectTimeout")
httpTransactionTimeout = Enum.YLeaf(13, "httpTransactionTimeout")
dnsQueryError = Enum.YLeaf(14, "dnsQueryError")
httpError = Enum.YLeaf(15, "httpError")
error = Enum.YLeaf(16, "error")
mplsLspEchoTxError = Enum.YLeaf(17, "mplsLspEchoTxError")
mplsLspUnreachable = Enum.YLeaf(18, "mplsLspUnreachable")
mplsLspMalformedReq = Enum.YLeaf(19, "mplsLspMalformedReq")
mplsLspReachButNotFEC = Enum.YLeaf(20, "mplsLspReachButNotFEC")
enableOk = Enum.YLeaf(21, "enableOk")
enableNoConnect = Enum.YLeaf(22, "enableNoConnect")
enableVersionFail = Enum.YLeaf(23, "enableVersionFail")
enableInternalError = Enum.YLeaf(24, "enableInternalError")
enableAbort = Enum.YLeaf(25, "enableAbort")
enableFail = Enum.YLeaf(26, "enableFail")
enableAuthFail = Enum.YLeaf(27, "enableAuthFail")
enableFormatError = Enum.YLeaf(28, "enableFormatError")
enablePortInUse = Enum.YLeaf(29, "enablePortInUse")
statsRetrieveOk = Enum.YLeaf(30, "statsRetrieveOk")
statsRetrieveNoConnect = Enum.YLeaf(31, "statsRetrieveNoConnect")
statsRetrieveVersionFail = Enum.YLeaf(32, "statsRetrieveVersionFail")
statsRetrieveInternalError = Enum.YLeaf(33, "statsRetrieveInternalError")
statsRetrieveAbort = Enum.YLeaf(34, "statsRetrieveAbort")
statsRetrieveFail = Enum.YLeaf(35, "statsRetrieveFail")
statsRetrieveAuthFail = Enum.YLeaf(36, "statsRetrieveAuthFail")
statsRetrieveFormatError = Enum.YLeaf(37, "statsRetrieveFormatError")
statsRetrievePortInUse = Enum.YLeaf(38, "statsRetrievePortInUse")
| 23.264896 | 126 | 0.593024 |
acea4f4f4857dce8e02d2bd154c8e259a2c8be21 | 758 | py | Python | assume_role.py | rumblefishinc/aws-lambda-mirror-dns-function | 6e045f49edcde3ac03d3c60ca793a08c95f067c1 | [
"Apache-2.0"
] | null | null | null | assume_role.py | rumblefishinc/aws-lambda-mirror-dns-function | 6e045f49edcde3ac03d3c60ca793a08c95f067c1 | [
"Apache-2.0"
] | null | null | null | assume_role.py | rumblefishinc/aws-lambda-mirror-dns-function | 6e045f49edcde3ac03d3c60ca793a08c95f067c1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Assume AWS role based on role_arn
import boto3
def aws_session(client, role_arn=None, session_name='my_session'):
"""
If role_arn is given assumes a role and returns boto3 session
otherwise return a regular session with the current IAM user/role
"""
if role_arn:
# client = boto3.client('route53')
response = client.assume_role(RoleArn=role_arn, RoleSessionName=session_name)
session = boto3.Session(
aws_access_key_id=response['Credentials']['AccessKeyId'],
aws_secret_access_key=response['Credentials']['SecretAccessKey'],
aws_session_token=response['Credentials']['SessionToken'])
return session
else:
return boto3.Session()
| 34.454545 | 85 | 0.6781 |
acea50807c4cfca2eedbef077097c15693c3adc2 | 22,418 | py | Python | mmdet/datasets/coco.py | InnovationLab-Top/CBNetV2 | 853061018631a8fbe7f1f2452ff744978574eb47 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/coco.py | InnovationLab-Top/CBNetV2 | 853061018631a8fbe7f1f2452ff744978574eb47 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/coco.py | InnovationLab-Top/CBNetV2 | 853061018631a8fbe7f1f2452ff744978574eb47 | [
"Apache-2.0"
] | null | null | null | import itertools
import logging
import os.path as osp
import tempfile
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .api_wrappers import COCO, COCOeval
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CocoDataset(CustomDataset):
CLASSES = ("human", )
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
# The order of returned `cat_ids` will not
# change with the order of the CLASSES
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporal directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluating the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = OrderedDict()
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
iou_type = 'bbox' if metric == 'proposal' else metric
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
predictions = mmcv.load(result_files[metric])
if iou_type == 'segm':
# Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa
# When evaluating mask AP, if the results contain bbox,
# cocoapi will use the box area instead of the mask area
# for calculating the instance area. Though the overall AP
# is not affected, this leads to different
# small/medium/large mask AP results.
for x in predictions:
x.pop('bbox')
warnings.simplefilter('once')
warnings.warn(
'The key "bbox" is deleted for more accurate mask AP '
'of small/medium/large instances since v2.12.0. This '
'does not change the overall mAP calculation.',
UserWarning)
cocoDt = cocoGt.loadRes(predictions)
except IndexError:
print_log(
'The testing results of the whole dataset is empty.',
logger=logger,
level=logging.ERROR)
break
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
| 41.058608 | 124 | 0.527746 |
acea5157040adbbad0d6308d3037c6cef7450caf | 1,639 | py | Python | assigment#4.py | Docland-dev/Thinfik-Python-lerning-Course | 969170c34de364829d52938f361bd631aff89bbb | [
"BSD-2-Clause"
] | null | null | null | assigment#4.py | Docland-dev/Thinfik-Python-lerning-Course | 969170c34de364829d52938f361bd631aff89bbb | [
"BSD-2-Clause"
] | null | null | null | assigment#4.py | Docland-dev/Thinfik-Python-lerning-Course | 969170c34de364829d52938f361bd631aff89bbb | [
"BSD-2-Clause"
] | null | null | null | """Homework Assignment #4: Lists
Details:
Create a global variable called myUniqueList. It should be an empty list to start.
Next, create a function that allows you to add things to that list. Anything that's
passed to this function should get added to myUniqueList, unless its value already exists in myUniqueList.
If the value doesn't exist already,
it should be added and the function should return True. If the value does exist, it should not be added,
and the function should return False;
Finally, add some code below your function that tests it out. It should add a few different elements,
showcasing the different scenarios, and then finally it should print the value of myUniqueList to show that it worked.
Extra Credit:
Add another function that pushes all the rejected inputs into a separate global array called myLeftovers.
If someone tries to add a value to myUniqueList but it's rejected (for non-uniqueness), it should get added to myLeftovers instead."""
myUniqueList = []
myLeftOvers = []
"""Adding to items myUniqueList and checking existing values"""
def addList (students):
if students not in myUniqueList:
print(True)
myUniqueList.append(students)
print(myUniqueList)
else:
print(False)
myLeftOvers.append(students)
print(myLeftOvers)
addList("Joe")
addList("Joe")
addList("Sunday")
addList("Joe")
addList("Victor")
addList("Sam")
addList("Gilbert")
addList("Helen")
addList("Gilbert")
addList("Chuks")
addList("Joe")
addList("Han")
addList("Sam")
addList("Godin")
addList(4)
addList(4) | 29.267857 | 135 | 0.719341 |
acea5377b8b3ed28620e7c65b5399cafd795501c | 828 | py | Python | pinax/apps/photos/urls.py | jpic/pinax | a9b30e437e4298d74101b45cec0d4dba9702cdd1 | [
"MIT"
] | null | null | null | pinax/apps/photos/urls.py | jpic/pinax | a9b30e437e4298d74101b45cec0d4dba9702cdd1 | [
"MIT"
] | null | null | null | pinax/apps/photos/urls.py | jpic/pinax | a9b30e437e4298d74101b45cec0d4dba9702cdd1 | [
"MIT"
] | null | null | null | from django.conf.urls.defaults import *
urlpatterns = patterns("",
# all photos or latest photos
url(r"^$", "pinax.apps.photos.views.photos", name="photos"),
# a photos details
url(r"^details/(?P<id>\d+)/$", "pinax.apps.photos.views.details", name="photo_details"),
# upload photos
url(r"^upload/$", "pinax.apps.photos.views.upload", name="photo_upload"),
# your photos
url(r"^yourphotos/$", "pinax.apps.photos.views.yourphotos", name="photos_yours"),
# a members photos
url(r"^member/(?P<username>[\w]+)/$", "pinax.apps.photos.views.memberphotos", name="photos_member"),
#destory photo
url(r"^destroy/(?P<id>\d+)/$", "pinax.apps.photos.views.destroy", name="photo_destroy"),
#edit photo
url(r"^edit/(?P<id>\d+)/$", "pinax.apps.photos.views.edit", name="photo_edit"),
) | 41.4 | 104 | 0.644928 |
acea540f7613fb6f8c4efb01fec1e48702404fda | 227 | py | Python | polymetis/polymetis/python/torchcontrol/policies/__init__.py | ali-senguel/fairo | 1ec5d8ecbdfc782de63a92aad9bf8534110ce762 | [
"MIT"
] | 669 | 2020-11-21T01:20:20.000Z | 2021-09-13T13:25:16.000Z | polymetis/polymetis/python/torchcontrol/policies/__init__.py | ali-senguel/fairo | 1ec5d8ecbdfc782de63a92aad9bf8534110ce762 | [
"MIT"
] | 324 | 2020-12-07T18:20:34.000Z | 2021-09-14T17:17:18.000Z | polymetis/polymetis/python/torchcontrol/policies/__init__.py | ali-senguel/fairo | 1ec5d8ecbdfc782de63a92aad9bf8534110ce762 | [
"MIT"
] | 56 | 2021-01-04T19:57:40.000Z | 2021-09-13T21:20:08.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .impedance import *
from .trajectory import *
| 32.428571 | 65 | 0.762115 |
acea544c6212f608fe206ce766be8d237647eed1 | 2,830 | py | Python | load_and_save_coords.py | jadolfbr/protein_seq_des | 0b386a4d4dc98e99e628311fb025bc8e88ead3eb | [
"BSD-3-Clause"
] | 30 | 2021-01-20T08:21:09.000Z | 2022-03-28T23:16:56.000Z | load_and_save_coords.py | jadolfbr/protein_seq_des | 0b386a4d4dc98e99e628311fb025bc8e88ead3eb | [
"BSD-3-Clause"
] | null | null | null | load_and_save_coords.py | jadolfbr/protein_seq_des | 0b386a4d4dc98e99e628311fb025bc8e88ead3eb | [
"BSD-3-Clause"
] | 7 | 2021-06-24T14:51:40.000Z | 2022-03-21T03:52:26.000Z | import os
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
import common.run_manager
import glob
import seq_des.util.canonicalize as canonicalize
import pickle
import seq_des.util.data as datasets
from torch.utils import data
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
""" script to load PDB coords, canonicalize, save """
def main():
manager = common.run_manager.RunManager()
manager.parse_args()
args = manager.args
log = manager.log
dataset = datasets.PDB_domain_spitter(txt_file=args.txt, pdb_path=args.pdb_dir)
dataloader = data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=args.workers)
num_return = args.num_return
gen = iter(dataloader)
coords_out, data_out, ys, domain_ids, chis_out = [], [], [], [], []
cs = args.chunk_size
n = 0
for it in tqdm(range(len(dataloader)), desc="loading and saving coords"):
out = gen.next()
if len(out) == 0 or out is None:
print("out is none")
continue
atom_coords, atom_data, res_label, domain_id, chis = out
for i in range(len(atom_coords)):
coords_out.extend(atom_coords[i][0].cpu().data.numpy())
data_out.extend(atom_data[i][0].cpu().data.numpy())
ys.extend(res_label[i][0].cpu().data.numpy())
domain_ids.extend([domain_id[i][0]] * res_label[i][0].cpu().data.numpy().shape[0])
chis_out.extend(chis[i][0].cpu().data.numpy())
assert len(coords_out) == len(ys)
assert len(coords_out) == len(data_out)
assert len(coords_out) == len(domain_ids), (len(coords_out), len(domain_ids))
assert len(coords_out) == len(chis_out)
del atom_coords
del atom_data
del res_label
del domain_id
# intermittent save data
if len(coords_out) > cs or it == len(dataloader) - 1:
# shuffle then save
print(n, len(coords_out)) # -- NOTE keep this
idx = np.arange(min(cs, len(coords_out)))
np.random.shuffle(idx)
print(n, len(idx))
c, d, y, di, ch = map(lambda arr: np.array(arr[: len(idx)])[idx], [coords_out, data_out, ys, domain_ids, chis_out])
print("saving", args.save_dir + "/" + "data_%0.4d.pt" % (n))
torch.save((c, d, y, di, ch), args.save_dir + "/" + "data_%0.4d.pt" % (n))
print("Current num examples", (n) * cs + len(coords_out))
n += 1
coords_out, data_out, ys, domain_ids, chis_out = map(lambda arr: arr[len(idx) :], [coords_out, data_out, ys, domain_ids, chis_out])
if __name__ == "__main__":
main()
| 32.159091 | 143 | 0.621201 |
acea544cde0785236ea63b024462969db5107db5 | 7,285 | py | Python | ng_common_module.py | kotnyel/python-HTML-scraper-fastly-tutorial | 4bcdbf4529fce23068e47f1956d0d1187a818e52 | [
"MIT"
] | null | null | null | ng_common_module.py | kotnyel/python-HTML-scraper-fastly-tutorial | 4bcdbf4529fce23068e47f1956d0d1187a818e52 | [
"MIT"
] | null | null | null | ng_common_module.py | kotnyel/python-HTML-scraper-fastly-tutorial | 4bcdbf4529fce23068e47f1956d0d1187a818e52 | [
"MIT"
] | null | null | null | #news get common modul.py
# Usage import nc_common_module.py as ncm| ncm.foo(bar) | from ng_common_modul.py import foo()
# Basics:
# 1
#
#
#
#import libs
import urllib
import os
import time
from datetime import datetime
import os.path
#ssl support and context
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
#Bautiful Parser load
from bs4 import BeautifulSoup
#Word counter load and init
from collections import Counter
Text_Counter = Counter()
is_news_ok_banlist = ("megöl","elhunyt","meghalt","ölt meg","halálra","Gruevszki","megöltek")
def get_prev_file_content(filename):
#Check file exists and if it is
#read content, and write it out to log purposes
#Else eturn Flase
if os.path.exists(filename):
print("Previous file exists.")
#lets read to object
pftemp = open(filename,"r")
prevdata = pftemp.read()
print("File readed.")
pftemp.close()
print("Create old file.")
pftemp = open("old"+filename,"w")
pftemp.write(prevdata)
pftemp.close()
#print(prevdata)
return(prevdata)
else:
print("No preivous file exists.")
return(False)
# timer
def get_process_start_time():
process_start_time = int(time.time())
process_start_timestr = datetime.strftime(datetime.fromtimestamp(process_start_time),"%H:%M:%S")
print(process_start_timestr)
return(process_start_timestr)
def get_http_content(urllocal):
#urllocal = URL call with this
#import urllib
req = urllib.request.Request(urllocal)
req.add_header('User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36')
try:
resp = urllib.request.urlopen(req)
cs = resp.headers.get_content_charset()
if not cs:
cs = charset
#it is UTF 8 do not handle :)
data = resp.read()
print ("-------------------------------------------------------")
html = data.decode("UTF-8")
#extract to soup
soup = BeautifulSoup(html,"html.parser")
return(soup)
#else: exit("Exit: wrong response code!")
except (URLError,HTTPError) as e:
print("Dowload error:",e.reason)
html = None
return(false)
def get_prev_wrapper(localftempName):
prev_data_local = get_prev_file_content(localftempName)
if prev_data_local:
#only when it is valid
psoup = BeautifulSoup(prev_data_local,"html.parser")
if psoup.find_all(id='pst'):
print("Previous check time: "+psoup.find_all(id = 'pst')[0].string)
return(psoup)
return(False)
def write_news_file_header(localftempName,process_start_timestr_local):
ftemp = open(localftempName,"w")
ftemp.write('''
<!DOCTYPE html>
<html lang='en'>
<head>
<title>lenI scraper</title>
<meta charset='utf-8'>
<meta name='viewport' content='width=device-width, initial-scale=1'>
<link rel='stylesheet' href='https://maxcdn.bootstrapcdn.com/bootstrap/3.4.0/css/bootstrap.min.css'>
<script src='https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js'></script>
<script src='https://maxcdn.bootstrapcdn.com/bootstrap/3.4.0/js/bootstrap.min.js'></script>
</head>
<style>
body {font-size:1.8rem; font-family: 'Helvetica Neue', HelveticaNeue, Helvetica, Arial, sans-serif; background-color:#11; color:rgba(22,22,22,0.7)}
li { list-style-type: decimal-leading-zero; line-height:1.8em; }
a:link {color:black; text-decoration:none; }
a:visited:hover {color:red} a:link:hover {color:green} a:visited{color:gray; background-color:#dbffd6;}
.btn{margin-bottom:5px;}
</style>
<body class="container">
''')
ftemp.write('<section><h1> hlcombinator.hu '+'<span id="pst">' + process_start_timestr_local + '</span></h1>')
ftemp.write('<ul class="list-group">')
return(ftemp)
def is_news_ok(text):
#Article checker
banned = is_news_ok_banlist
for i in banned:
if i in text.lower():
return False
return True
def is_href_ok(text,banneds,banned_endings):
#Href checker
#banneds = ("_stamp","sport","mindekozben","banner","Fzene","joautok")
#banned_endings = ("blog.hu%2F","index.hu%2F")
if banneds:
for i in banneds:
if i in text:
return False
if banned_endings:
for i in banned_endings:
if text.endswith(i):
return False
return True
def is_class_ok(text,allowed):
#positive list
#allowed = ("inxcl","inxinx2","inxtc","inxpfb","inxngb","inxport")
if allowed :
for i in allowed:
if i in text:
return True
return False
else:
return True
def get_class_pref(text,thisdict):
#igazából a classal majd össze kell vonni
for i in thisdict:
if i in text:
return thisdict[i]
return ""
def bs4_checker(new_href,bs4_object):
for link in bs4_object.find_all('a'):
if link.get('href') == new_href:
return(True)
return(False)
#word cloud generator
def word_cloud(link_texts):
print('------ words start ---------')
#reset counter!
Text_Counter = Counter()
count_banned_words = ("az","a","meg","be","hogy","nem","itt","ha","ez","-","mi","van","így","el","ezek","le","ha","már","egy","is","és","ki","is","amit")
if link_texts:
link_texts = link_texts.lower()
Text_Counter.update(link_texts.split())
Words = Counter()
for key,value in Text_Counter.most_common(500):
if (value > 1) and (key not in count_banned_words):
print(key,value,end = ' ')
Words[key] = value
print(' ')
print('------ words end ---------')
return Words
def print_wc(cloud,ftemp):
#word cloud generator
ftemp.write('<section><h2>Word cloud:</h2><h4>')
for key,value in cloud.items():
ftemp.write('<button type="button" class="btn btn-primary">'+key+' <span class="badge badge-primary">'+str(value)+'</span></button> ')
ftemp.write('</h4></section>')
return(True)
def print_news(soup,psoup,ftemp,prefix_dict,allowed,banned_href_snipets,banned_href_endings,ico_url):
c=0
link_texts = ""
prev_href = ""
old_counter = 0
new_counter = 0
li_class=""
#chunks collect to a set to be able to detect duplicates
hrefs = set()
if ico_url:
icon_snipet = "<img src='"+ico_url+"'style='margin-right:5px;width:16px;height:16px;'>"
else:
icon_snipet = ""
for link in soup.find_all('a'):
if is_class_ok(link.get('href'),allowed) and link.string and is_href_ok(link.get('href'),banned_href_snipets,banned_href_endings) and is_news_ok(str(link.string)):
#print( link.get('href') )
li_id = str(c+1)
if psoup:
if bs4_checker(link.get('href'),psoup):
li_class = "list-group-item-info"
old_counter+=1
else:
li_class = ""
new_counter+=1
chunk = "<a class='list-group-item "+li_class+"' href=" + link.get('href') + ">" + icon_snipet+li_id +". "+ get_class_pref(link.get('href'),prefix_dict) +" " + str(link.string) + "</a>" + os.linesep
#print(chunk)
if link.get('href') not in hrefs:
c+=1
ftemp.write(chunk)
hrefs.add(link.get('href'))
#link_text collects all link string words for counting
link_texts = link_texts +' '+ str(link.string)
ftemp.write('</ul></section>')
return(link_texts,c,old_counter,new_counter);
def close_file(ftemp):
ftemp.write('</body></html>')
ftemp.close()
def write_final(ftemp,c,old_counter,new_counter):
print("Interesting articles: " + str(c))
print("New/Old articles:" + str(new_counter) + "/" + str(old_counter))
print ("Let's go to read!")
print ("-------------------------------------------------------")
| 27.69962 | 201 | 0.6814 |
acea5634bbb80aa81ea6922516323321a08bdc10 | 381 | py | Python | profiles_api/permissions.py | Raj1212/restapi_course | aa62df2e419fba9ed166737f6d500519b685cc46 | [
"MIT"
] | null | null | null | profiles_api/permissions.py | Raj1212/restapi_course | aa62df2e419fba9ed166737f6d500519b685cc46 | [
"MIT"
] | null | null | null | profiles_api/permissions.py | Raj1212/restapi_course | aa62df2e419fba9ed166737f6d500519b685cc46 | [
"MIT"
] | null | null | null | from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
"""allow user to edit their own profile"""
def has_object_permission(self, request, view, obj):
"""check user is trying to edit their own profile"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.id== request.user.id
| 25.4 | 60 | 0.695538 |
acea58af15b60322de3b2729477d4f4dd957eac5 | 263 | py | Python | AyubaML/ML/views.py | eugenennamdi/Machine-Learning | 39fee05a857b7bc9ffdd8a1c228719c0a32cbbaf | [
"Apache-2.0"
] | 23 | 2021-12-30T05:11:55.000Z | 2022-02-18T18:01:50.000Z | AyubaML/ML/views.py | eugenennamdi/Machine-Learning | 39fee05a857b7bc9ffdd8a1c228719c0a32cbbaf | [
"Apache-2.0"
] | 3 | 2022-01-02T00:35:46.000Z | 2022-01-18T05:03:45.000Z | AyubaML/ML/views.py | eugenennamdi/Machine-Learning | 39fee05a857b7bc9ffdd8a1c228719c0a32cbbaf | [
"Apache-2.0"
] | 6 | 2021-12-27T21:10:46.000Z | 2022-02-18T12:33:58.000Z | from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, 'index.html')
def get_choice(request):
return render(request,'get_choice.html')
def asset_opt_in(request):
return render(request,'opt_in.html') | 23.909091 | 44 | 0.749049 |
acea5914d12df5b8d933ca4438ff63304f3bdd6f | 2,036 | py | Python | tests/unit_test/iconscore/typing2/test__init__.py | bayeshack2016/icon-service | 36cab484d2e41548d7f2f74526f127ee3a4423fc | [
"Apache-2.0"
] | 52 | 2018-08-24T02:28:43.000Z | 2021-07-06T04:44:22.000Z | tests/unit_test/iconscore/typing2/test__init__.py | bayeshack2016/icon-service | 36cab484d2e41548d7f2f74526f127ee3a4423fc | [
"Apache-2.0"
] | 62 | 2018-09-17T06:59:16.000Z | 2021-12-15T06:02:51.000Z | tests/unit_test/iconscore/typing2/test__init__.py | bayeshack2016/icon-service | 36cab484d2e41548d7f2f74526f127ee3a4423fc | [
"Apache-2.0"
] | 35 | 2018-09-14T02:42:10.000Z | 2022-02-05T10:34:46.000Z | # -*- coding: utf-8 -*-
from typing import Union, List, Dict, Optional
import pytest
from typing_extensions import TypedDict
from iconservice.base.address import Address
from iconservice.iconscore.typing import (
get_origin,
get_args,
isinstance_ex,
)
class Person(TypedDict):
name: str
age: int
single: bool
@pytest.mark.parametrize(
"type_hint,expected",
[
(bool, bool),
(bytes, bytes),
(int, int),
(str, str),
(Address, Address),
("Address", Address),
(List[int], list),
(List[List[str]], list),
(Dict, dict),
(Dict[str, int], dict),
(Union[int, str], Union),
(Optional[int], Union),
(Person, Person),
]
)
def test_get_origin(type_hint, expected):
origin = get_origin(type_hint)
assert origin == expected
@pytest.mark.parametrize(
"type_hint,expected",
[
(bool, ()),
(bytes, ()),
(int, ()),
(str, ()),
(Address, ()),
(Person, ()),
(List[int], (int,)),
(List[List[str]], (List[str],)),
(Dict[str, int], (str, int)),
(Union[int, str, Address], (int, str, Address)),
(Optional[int], (int, type(None))),
(List[Person], (Person,)),
]
)
def test_get_args(type_hint, expected):
args = get_args(type_hint)
assert args == expected
def test_get_args_with_struct():
expected = {
"name": str,
"age": int,
"single": bool,
}
annotations = Person.__annotations__
assert len(annotations) == len(expected)
for name, type_hint in annotations.items():
assert type_hint == expected[name]
@pytest.mark.parametrize(
"value,_type,expected",
[
(True, int, False),
(False, int, False),
(0, bool, False),
(1, bool, False),
(True, bool, True),
(False, bool, True),
]
)
def test_isinstance_ex(value, _type, expected):
assert isinstance_ex(value, _type) == expected
| 21.431579 | 56 | 0.555501 |
acea5a3c403fbba0765f0a49e99c57462b846771 | 3,570 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/digital_ocean/digital_ocean_certificate_info.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/digital_ocean/digital_ocean_certificate_info.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/digital_ocean/digital_ocean_certificate_info.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: digital_ocean_certificate_info
short_description: Gather information about DigitalOcean certificates
description:
- This module can be used to gather information about DigitalOcean provided certificates.
- This module was called C(digital_ocean_certificate_facts) before Ansible 2.9. The usage did not change.
author: "Abhijeet Kasurde (@Akasurde)"
version_added: "2.6"
options:
certificate_id:
description:
- Certificate ID that can be used to identify and reference a certificate.
required: false
requirements:
- "python >= 2.6"
extends_documentation_fragment: digital_ocean.documentation
'''
EXAMPLES = '''
- name: Gather information about all certificates
digital_ocean_certificate_info:
oauth_token: "{{ oauth_token }}"
- name: Gather information about certificate with given id
digital_ocean_certificate_info:
oauth_token: "{{ oauth_token }}"
certificate_id: "892071a0-bb95-49bc-8021-3afd67a210bf"
- name: Get not after information about certificate
digital_ocean_certificate_info:
register: resp_out
- set_fact:
not_after_date: "{{ item.not_after }}"
loop: "{{ resp_out.data|json_query(name) }}"
vars:
name: "[?name=='web-cert-01']"
- debug: var=not_after_date
'''
RETURN = '''
data:
description: DigitalOcean certificate information
returned: success
type: list
sample: [
{
"id": "892071a0-bb95-49bc-8021-3afd67a210bf",
"name": "web-cert-01",
"not_after": "2017-02-22T00:23:00Z",
"sha1_fingerprint": "dfcc9f57d86bf58e321c2c6c31c7a971be244ac7",
"created_at": "2017-02-08T16:02:37Z"
},
]
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
certificate_id = module.params.get('certificate_id', None)
rest = DigitalOceanHelper(module)
base_url = 'certificates?'
if certificate_id is not None:
response = rest.get("%s/%s" % (base_url, certificate_id))
status_code = response.status_code
if status_code != 200:
module.fail_json(msg="Failed to retrieve certificates for DigitalOcean")
resp_json = response.json
certificate = resp_json['certificate']
else:
certificate = rest.get_paginated_data(base_url=base_url, data_key_name='certificates')
module.exit_json(changed=False, data=certificate)
def main():
argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
argument_spec.update(
certificate_id=dict(type='str', required=False),
)
module = AnsibleModule(argument_spec=argument_spec)
if module._name == 'digital_ocean_certificate_facts':
module.deprecate("The 'digital_ocean_certificate_facts' module has been renamed to 'digital_ocean_certificate_info'", version='2.13')
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
main()
| 30 | 141 | 0.713725 |
acea5a9dab51cad8efce05f230947186252b9f71 | 6,242 | py | Python | pysource/tests/test_run.py | dankilman/pysource | 1651b092eb4313fb00e9781d47eef0b26568ebf2 | [
"Apache-2.0"
] | null | null | null | pysource/tests/test_run.py | dankilman/pysource | 1651b092eb4313fb00e9781d47eef0b26568ebf2 | [
"Apache-2.0"
] | null | null | null | pysource/tests/test_run.py | dankilman/pysource | 1651b092eb4313fb00e9781d47eef0b26568ebf2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Dan Kilman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sh
from base import WithDaemonTestCase
from pysource.tests import command
class RunTest(WithDaemonTestCase):
def setUp(self):
super(RunTest, self).setUp()
self.piped = False
def test_run_explicit(self):
self._test_run_explicit(self._run_explicit_command(self.piped))
def test_run_no_args(self):
output = self.run_pysource_script([
command.source_def('function1(): return 1',
piped=self.piped),
command.run('function1')
])
self.assertEqual(output, '1')
def test_run_no_return_value(self):
output = self.run_pysource_script([
command.source_def('function1(): pass',
piped=self.piped),
command.run('function1')
], strip=False)
self.assertEqual(output, '')
def test_run_with_args(self):
name = 'john'
output = self.run_pysource_script([
command.source_def('function1(name): return name*2',
piped=self.piped),
command.run('function1', name)
])
self.assertEqual(output, name*2)
def test_run_with_typed_args(self):
number = 3
output = self.run_pysource_script([
command.source_def('function1(number=int): return number**3',
piped=self.piped),
command.run('function1', number)
])
self.assertEqual(output, str(number**3))
def test_run_with_varargs(self):
names = ['john', 'doe']
output = self.run_pysource_script([
command.source_def('function1(*names): return list(names+names)',
piped=self.piped),
command.run('function1', *names)
])
self.assertEqual(output, str(names+names))
def test_run_with_kwargs(self):
output = self.run_pysource_script([
command.source_def('function1(**kwargs): return 1',
piped=self.piped),
command.run('function1')
])
self.assertEqual(output, '1')
def test_run_with_args_and_varargs(self):
name = 'jane'
names = ['john', 'doe']
args = [name] + names
output = self.run_pysource_script([
command.source_def('''function1(name, *names):
return [name]+list(names)''', piped=self.piped),
command.run('function1', *args)
])
self.assertEqual(output, str(args))
def test_run_raises_exception(self):
self.assertRaises(
sh.ErrorReturnCode,
self.run_pysource_script,
[command.source_def('function1(): raise RuntimeError()',
piped=self.piped),
command.run('function1')])
def test_run_too_many_args_no_varargs(self):
self.assertRaises(sh.ErrorReturnCode,
self.run_pysource_script,
[
command.source_def('function1(): pass',
piped=self.piped),
command.run('function1', 'arg')
])
def test_run_too_few_args_no_varargs(self):
self.assertRaises(sh.ErrorReturnCode,
self.run_pysource_script,
[
command.source_def('function1(arg): pass',
piped=self.piped),
command.run('function1')
])
def test_run_too_few_args_with_varargs(self):
self.assertRaises(sh.ErrorReturnCode,
self.run_pysource_script,
[
command.source_def('function1(ar, *args): pass',
piped=self.piped),
command.run('function1')
])
def test_run_no_function(self):
self.assertRaises(sh.ErrorReturnCode,
self.run_pysource_script,
[command.source_named('function1',
piped=self.piped),
command.run('function1')])
def test_run_with_run_piped_mode(self):
self.assertRaises(sh.ErrorReturnCode,
self._test_run_explicit,
self._run_explicit_command(not self.piped))
def test_using_pipes_when_non_piped_mode(self):
if self.piped:
return
import_statement = 'from pysource import stdin, stdout'
self.run_pysource_script([command.source_inline(import_statement)])
self.assertRaises(
sh.ErrorReturnCode,
self.run_pysource_script,
[command.source_def('function1(): stdin.read()'),
command.run('function1')])
self.assertRaises(
sh.ErrorReturnCode,
self.run_pysource_script,
[command.source_def('function1(): stdout.write("1")'),
command.run('function1')])
def _test_run_explicit(self, run_explicit_command):
output = self.run_pysource_script([
command.source_def('function1(): return 1',
piped=self.piped),
run_explicit_command('function1')
])
self.assertEqual(output, '1')
def _run_explicit_command(self, piped):
if piped:
return command.run_piped_explicit
else:
return command.run_explicit
| 36.717647 | 78 | 0.553188 |
acea5b2b035394360e6eb3283df990118e54d0cd | 605 | py | Python | TrekBot2_WS/build/hector_map_tools/catkin_generated/pkg.develspace.context.pc.py | Rafcin/RescueRoboticsLHMV | d3dc63e6c16a040b16170f143556ef358018b7da | [
"Unlicense"
] | 1 | 2018-10-04T14:37:00.000Z | 2018-10-04T14:37:00.000Z | TrekBot2_WS/build/hector_map_tools/catkin_generated/pkg.develspace.context.pc.py | Rafcin/TrekBot | d3dc63e6c16a040b16170f143556ef358018b7da | [
"Unlicense"
] | null | null | null | TrekBot2_WS/build/hector_map_tools/catkin_generated/pkg.develspace.context.pc.py | Rafcin/TrekBot | d3dc63e6c16a040b16170f143556ef358018b7da | [
"Unlicense"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/xavier_ssd/TrekBot/TrekBot2_WS/src/hector_slam/hector_map_tools/include;/usr/include/eigen3".split(';') if "/xavier_ssd/TrekBot/TrekBot2_WS/src/hector_slam/hector_map_tools/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "nav_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_map_tools"
PROJECT_SPACE_DIR = "/xavier_ssd/TrekBot/TrekBot2_WS/devel/.private/hector_map_tools"
PROJECT_VERSION = "0.3.5"
| 67.222222 | 251 | 0.77686 |
acea5b3bbbaca7ae7f1dfed8a63554d512645c85 | 1,018 | py | Python | Rotate String.py | Jorewang/LeetCode_Solutions | 0c483a915e2a8b3bfc4bcb4b5a35df3dd0dbe8ba | [
"Apache-2.0"
] | 2 | 2020-06-13T06:37:50.000Z | 2020-06-13T06:37:52.000Z | Rotate String.py | Jorewang/LeetCode_Solutions | 0c483a915e2a8b3bfc4bcb4b5a35df3dd0dbe8ba | [
"Apache-2.0"
] | null | null | null | Rotate String.py | Jorewang/LeetCode_Solutions | 0c483a915e2a8b3bfc4bcb4b5a35df3dd0dbe8ba | [
"Apache-2.0"
] | null | null | null | class Solution(object):
def rotateString(self, A, offset):
if offset == 0:
return A
s = list(A)
c = 0
length = len(s)
while c < length:
k = offset*(c+1) % length
s[0], s[k] = s[k], s[0]
c += 1
return ''.join(s)
def rotateString_2(self, A, offset):
if offset == 0:
return A
offset %= len(A)
before = A[:len(A)-offset]
after = A[len(A)-offset:]
A = before[::-1] + after[::-1]
A = A[::-1]
return A
def rotateString_3(self, A, B):
if len(A) != len(B):
return False
for sli in range(len(A)):
print(A[:sli])
print(B[-(sli):])
print(A[sli:])
print(B[:-(sli)])
if A[:sli] == B[-(sli):] and A[sli:] == B[:-(sli)]:
return True
return False
if __name__ == '__main__':
A = ''
B = ''
print(Solution().rotateString_3(A, B))
| 22.622222 | 63 | 0.418468 |
acea5b7c886d6936ff5272d9458ef6af26fa5003 | 91 | py | Python | codes_auto/1642.water-bottles.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | codes_auto/1642.water-bottles.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | codes_auto/1642.water-bottles.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | #
# @lc app=leetcode.cn id=1642 lang=python3
#
# [1642] water-bottles
#
None
# @lc code=end | 13 | 42 | 0.67033 |
acea5bef1b3f2e46c10c9630df6ae197dfddfcc5 | 3,630 | py | Python | tests/test_import_fence.py | themarcelor/fence | 9417655f84752477399e71b58b92c4c333b9704c | [
"Apache-2.0"
] | 31 | 2018-01-05T22:49:33.000Z | 2022-02-02T10:30:23.000Z | tests/test_import_fence.py | themarcelor/fence | 9417655f84752477399e71b58b92c4c333b9704c | [
"Apache-2.0"
] | 737 | 2017-12-11T17:42:11.000Z | 2022-03-29T22:42:52.000Z | tests/test_import_fence.py | themarcelor/fence | 9417655f84752477399e71b58b92c4c333b9704c | [
"Apache-2.0"
] | 46 | 2018-02-23T09:04:23.000Z | 2022-02-09T18:29:51.000Z | """
NOTE: the tests use ``pytest.mark.filterwarnings('ignore')`` because SQLAlchemy
isn't happy if you try to load a table a second time, which we have to
inadvertently do when we reload all the fence submodules.
"""
import importlib
import sys
import pytest
from sqlalchemy.exc import InvalidRequestError
def reload_modules(module_name):
"""
Reload all of fence's submodules.
Use this after patching ``local_settings.py`` not existing, to make sure
that nothing will remember that it existed.
"""
# First we have to convince fence that ``local_settings.py`` does not
# actually exist, even if it does. To do this, patch-delete the attribute
# and reload all the fence modules.
fence_submodules = [
module for module in list(sys.modules.keys()) if module.startswith(module_name)
]
for module in fence_submodules:
if sys.modules[module]:
# SQLAlchemy gets upset when a table is loaded twice, so ignore
# that.
try:
importlib.reload(sys.modules[module])
except InvalidRequestError:
pass
class FakeModule(object):
"""
Define a context manager for instantiating a fake copy of a module under an
arbitrary module name.
We use this to make a copy of fence from which the local settings module is
removed, without disturbing the normal fence.
"""
def __init__(self, real_name, fake_name):
"""Save a copy of the real module."""
self.real_name = real_name
self.fake_name = fake_name
# Save a copy of the real module.
importlib.import_module(self.real_name)
self.real_module = sys.modules.pop(self.real_name)
def __enter__(self):
"""
Insert a copy of the real module into ``sys.modules`` under the fake
name.
"""
sys.modules[self.fake_name] = importlib.import_module(self.real_name)
def __exit__(self, type, value, traceback):
"""
Remove the fake module and put the real module back in ``sys.modules``.
(The arguments are required for a context manager.)
"""
sys.modules.pop(self.fake_name)
sys.modules[self.real_name] = self.real_module
@pytest.mark.filterwarnings("ignore")
def test_import_without_local_settings(app, monkeypatch):
    """
    Simply try to import fence when ``fence.local_settings`` doesn't exist.

    NOTE(review): the ``app`` fixture is never referenced in the body;
    presumably it is requested for its setup side effects — confirm before
    removing it.
    """
    with FakeModule("fence", "test_fence"):
        # Take out the local settings module and reload ``test_fence``.
        monkeypatch.delattr("test_fence.local_settings", raising=False)
        reload_modules("test_fence")
    # Now try to import fence.
    # This import happens outside the FakeModule block, after the real
    # ``fence`` entry has been restored in ``sys.modules``.
    import test_fence
    assert hasattr(test_fence, "app")
@pytest.mark.filterwarnings("ignore")
def test_import_fence_would_break(monkeypatch):
    """
    Sort of test the previous test by making sure that if ``local_settings.py``
    did not exist and we tried to use it, things would go horribly wrong.
    """
    with FakeModule("fence", "test_fence"):
        # Take out the local settings module and reload ``test_fence``.
        monkeypatch.delattr("test_fence.local_settings", raising=False)
        reload_modules("test_fence")
        # Import ``test_fence`` and make sure that using the local settings
        # would break things.
        # NOTE: unlike the previous test, the import happens while the fake
        # module is still installed under ``sys.modules``.
        import test_fence
        assert not hasattr(test_fence, "local_settings")
        # Try to get an arbitrary variable from ``local_settings`` and make
        # sure it fails.
        with pytest.raises(AttributeError):
            test_fence.local_settings.DB
| 34.571429 | 87 | 0.668595 |
acea5d18b40aa856d5dc9eab67c8e2c9ca646b32 | 7,706 | py | Python | other/Imaging-1.1.7/build/lib.macosx-10.7-intel-2.7/IptcImagePlugin.py | dguo/headlines | d6aa64ac88895b786acc7694045e6232bfc3229c | [
"MIT"
] | 112 | 2015-01-15T21:36:02.000Z | 2021-12-28T19:19:01.000Z | PIL/IptcImagePlugin.py | trezorg/PIL | 552d32ab04ce52a5eeba06ffa69709761d66d42a | [
"Python-2.0"
] | 118 | 2020-03-14T17:34:11.000Z | 2022-03-30T07:07:45.000Z | PIL/IptcImagePlugin.py | trezorg/PIL | 552d32ab04ce52a5eeba06ffa69709761d66d42a | [
"Python-2.0"
] | 35 | 2015-01-15T21:34:36.000Z | 2022-01-29T07:42:34.000Z | #
# The Python Imaging Library.
# $Id$
#
# IPTC/NAA file handling
#
# history:
# 1995-10-01 fl Created
# 1998-03-09 fl Cleaned up and added to PIL
# 2002-06-18 fl Added getiptcinfo helper
#
# Copyright (c) Secret Labs AB 1997-2002.
# Copyright (c) Fredrik Lundh 1995.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.3"
import Image, ImageFile
import os, tempfile
# Map of IPTC compression codes (stored in field (3, 120)) to decoder names.
COMPRESSION = {
    1: "raw",
    5: "jpeg"
}
# Four NUL characters used to left-pad short strings up to 32 bits in i().
PAD = chr(0) * 4
#
# Helpers
def i16(c):
    """Interpret the first two characters of *c* as a big-endian 16-bit int."""
    high = ord(c[0])
    low = ord(c[1])
    return (high << 8) + low
def i32(c):
    """Interpret the first four characters of *c* as a big-endian 32-bit int."""
    b0 = ord(c[0]) << 24
    b1 = ord(c[1]) << 16
    b2 = ord(c[2]) << 8
    b3 = ord(c[3])
    return b0 + b1 + b2 + b3
def i(c):
    """Interpret a variable-length string (up to 4 chars) as a big-endian int."""
    # NUL-pad on the left, then decode the last four characters.
    padded = PAD + c
    return i32(padded[-4:])
def dump(c):
    """Print the characters of *c* as space-separated hex bytes (debug aid)."""
    # NOTE(review): the loop variable shadows the module-level helper ``i()``.
    for i in c:
        print "%02x" % ord(i),
    print
##
# Image plugin for IPTC/NAA datastreams. To read IPTC/NAA fields
# from TIFF and JPEG files, use the <b>getiptcinfo</b> function.
class IptcImageFile(ImageFile.ImageFile):
    """Image plugin for IPTC/NAA datastreams.

    To read IPTC/NAA fields embedded in TIFF and JPEG files, use the
    module-level ``getiptcinfo`` helper instead.
    """
    format = "IPTC"
    format_description = "IPTC/NAA"
    def getint(self, key):
        # Decode the raw field bytes stored under *key* as a big-endian int.
        return i(self.info[key])
    def field(self):
        """Read one IPTC field header; return ((record, dataset), size).

        Returns (None, 0) at end of file.
        """
        #
        # get a IPTC field header
        s = self.fp.read(5)
        if not len(s):
            return None, 0
        tag = ord(s[1]), ord(s[2])
        # syntax
        # Every field starts with the 0x1C marker; record numbers 1-9 only.
        if ord(s[0]) != 0x1C or tag[0] < 1 or tag[0] > 9:
            raise SyntaxError, "invalid IPTC/NAA file"
        # field size
        size = ord(s[3])
        if size > 132:
            raise IOError, "illegal field length in IPTC/NAA file"
        elif size == 128:
            size = 0
        elif size > 128:
            # Extended field: the low 7 bits give the byte count of the
            # following length field.
            size = i(self.fp.read(size-128))
        else:
            # Standard field: 16-bit big-endian length.
            size = i16(s[3:])
        return tag, size
    def _is_raw(self, offset, size):
        #
        # check if the file can be mapped
        # DISABLED: the following only slows things down...
        # NOTE: the unconditional return below makes the rest of this
        # method dead code, kept for reference.
        return 0
        self.fp.seek(offset)
        t, sz = self.field()
        if sz != size[0]:
            return 0
        y = 1
        while 1:
            self.fp.seek(sz, 1)
            t, s = self.field()
            if t != (8, 10):
                break
            if s != sz:
                return 0
            y = y + 1
        return y == size[1]
    def _open(self):
        """Parse descriptive IPTC fields and set mode, size and tile."""
        # load descriptive fields
        while 1:
            offset = self.fp.tell()
            tag, size = self.field()
            # Stop at end of file or at the start of the image data
            # fields (record 8, dataset 10).
            if not tag or tag == (8,10):
                break
            if size:
                tagdata = self.fp.read(size)
            else:
                tagdata = None
            # Repeated tags are collected into a list.
            if tag in self.info.keys():
                if isinstance(self.info[tag], list):
                    self.info[tag].append(tagdata)
                else:
                    self.info[tag] = [self.info[tag], tagdata]
            else:
                self.info[tag] = tagdata
            # print tag, self.info[tag]
        # mode
        layers = ord(self.info[(3,60)][0])
        component = ord(self.info[(3,60)][1])
        if self.info.has_key((3,65)):
            id = ord(self.info[(3,65)][0])-1
        else:
            id = 0
        if layers == 1 and not component:
            self.mode = "L"
        elif layers == 3 and component:
            self.mode = "RGB"[id]
        elif layers == 4 and component:
            self.mode = "CMYK"[id]
        # size
        self.size = self.getint((3,20)), self.getint((3,30))
        # compression
        try:
            compression = COMPRESSION[self.getint((3,120))]
        except KeyError:
            raise IOError, "Unknown IPTC image compression"
        # tile
        # ``tag``/``offset`` refer to the first image data field found above.
        if tag == (8,10):
            if compression == "raw" and self._is_raw(offset, self.size):
                self.tile = [(compression, (offset, size + 5, -1),
                             (0, 0, self.size[0], self.size[1]))]
            else:
                self.tile = [("iptc", (compression, offset),
                             (0, 0, self.size[0], self.size[1]))]
    def load(self):
        """Load image data, spooling the IPTC payload through a temp file."""
        if len(self.tile) != 1 or self.tile[0][0] != "iptc":
            return ImageFile.ImageFile.load(self)
        type, tile, box = self.tile[0]
        encoding, offset = tile
        self.fp.seek(offset)
        # Copy image data to temporary file
        # NOTE(review): tempfile.mktemp() is race-prone; mkstemp() would be
        # safer — confirm before changing, PIL-era code relied on mktemp.
        outfile = tempfile.mktemp()
        o = open(outfile, "wb")
        if encoding == "raw":
            # To simplify access to the extracted file,
            # prepend a PPM header
            o.write("P5\n%d %d\n255\n" % self.size)
        while 1:
            type, size = self.field()
            if type != (8, 10):
                break
            # Copy this data field in 8 KB chunks.
            while size > 0:
                s = self.fp.read(min(size, 8192))
                if not s:
                    break
                o.write(s)
                size = size - len(s)
        o.close()
        try:
            try:
                # fast
                self.im = Image.core.open_ppm(outfile)
            except:
                # slightly slower
                im = Image.open(outfile)
                im.load()
                self.im = im.im
        finally:
            # Always remove the temporary spool file.
            try: os.unlink(outfile)
            except: pass
# Register the plugin with PIL's format dispatch and extension tables.
Image.register_open("IPTC", IptcImageFile)
Image.register_extension("IPTC", ".iim")
##
# Get IPTC information from TIFF, JPEG, or IPTC file.
#
# @param im An image containing IPTC data.
# @return A dictionary containing IPTC information, or None if
# no IPTC information block was found.
def getiptcinfo(im):
    """
    Get IPTC information from TIFF, JPEG, or IPTC file.

    @param im An image containing IPTC data.
    @return A dictionary containing IPTC information, or None if
        no IPTC information block was found.
    """
    import TiffImagePlugin, JpegImagePlugin
    import StringIO
    data = None
    if isinstance(im, IptcImageFile):
        # return info dictionary right away
        return im.info
    elif isinstance(im, JpegImagePlugin.JpegImageFile):
        # extract the IPTC/NAA resource
        try:
            app = im.app["APP13"]
            # Strip the Photoshop signature if present.
            if app[:14] == "Photoshop 3.0\x00":
                app = app[14:]
            # parse the image resource block
            offset = 0
            while app[offset:offset+4] == "8BIM":
                offset = offset + 4
                # resource code
                code = JpegImagePlugin.i16(app, offset)
                offset = offset + 2
                # resource name (usually empty)
                name_len = ord(app[offset])
                name = app[offset+1:offset+1+name_len]
                offset = 1 + offset + name_len
                # Resource blocks are word-aligned.
                if offset & 1:
                    offset = offset + 1
                # resource data block
                size = JpegImagePlugin.i32(app, offset)
                offset = offset + 4
                if code == 0x0404:
                    # 0x0404 contains IPTC/NAA data
                    data = app[offset:offset+size]
                    break
                offset = offset + size
                if offset & 1:
                    offset = offset + 1
        except (AttributeError, KeyError):
            pass
    elif isinstance(im, TiffImagePlugin.TiffImageFile):
        # get raw data from the IPTC/NAA tag (PhotoShop tags the data
        # as 4-byte integers, so we cannot use the get method...)
        try:
            type, data = im.tag.tagdata[TiffImagePlugin.IPTC_NAA_CHUNK]
        except (AttributeError, KeyError):
            pass
    if data is None:
        return None # no properties
    # create an IptcImagePlugin object without initializing it
    class FakeImage:
        pass
    im = FakeImage()
    im.__class__ = IptcImageFile
    # parse the IPTC information chunk
    im.info = {}
    im.fp = StringIO.StringIO(data)
    try:
        im._open()
    except (IndexError, KeyError):
        pass # expected failure
    return im.info
| 26.66436 | 73 | 0.495069 |
acea5d9ff705171335f83676787acc1a311d6e24 | 14,159 | py | Python | pipenv/patched/piptools/resolver.py | mikiec84/pipenv | 16d4c5267cfcceb86a150e2085c668c4ab0a37e9 | [
"MIT"
] | null | null | null | pipenv/patched/piptools/resolver.py | mikiec84/pipenv | 16d4c5267cfcceb86a150e2085c668c4ab0a37e9 | [
"MIT"
] | null | null | null | pipenv/patched/piptools/resolver.py | mikiec84/pipenv | 16d4c5267cfcceb86a150e2085c668c4ab0a37e9 | [
"MIT"
] | 2 | 2018-04-06T05:36:25.000Z | 2018-12-30T22:58:58.000Z | # coding: utf-8
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
from functools import partial
from itertools import chain, count
import os
from first import first
from notpip.req import InstallRequirement
from . import click
from .cache import DependencyCache
from .exceptions import UnsupportedConstraint
from .logging import log
from .utils import (format_requirement, format_specifier, full_groupby, dedup,
is_pinned_requirement, key_from_ireq, key_from_req, UNSAFE_PACKAGES)
green = partial(click.style, fg='green')
magenta = partial(click.style, fg='magenta')
class RequirementSummary(object):
    """
    Hashable snapshot of the properties of an ``InstallRequirement`` that
    matter when comparing two requirements between resolution rounds.
    """
    def __init__(self, ireq):
        self.key = key_from_req(ireq.req)
        self.req = ireq.req
        self.specifier = str(ireq.specifier)
        self.extras = str(sorted(ireq.extras))
        self.markers = ireq.markers
    def __eq__(self, other):
        # Equality (and hashing) is based purely on the string form.
        return str(self) == str(other)
    def __hash__(self):
        return hash(str(self))
    def __str__(self):
        parts = [self.key, self.specifier, self.extras]
        return repr(parts)
class Resolver(object):
    """
    Resolves a set of constraints (``InstallRequirement`` objects) to a set
    of pinned, concrete package versions by consulting the given repository
    and the dependency cache, one round at a time until stable.
    """
    def __init__(self, constraints, repository, cache=None, prereleases=False, clear_caches=False, allow_unsafe=False):
        """
        This class resolves a given set of constraints (a collection of
        InstallRequirement objects) by consulting the given Repository and the
        DependencyCache.

        :param constraints: iterable of primary InstallRequirements
        :param repository: package repository used for lookups
        :param cache: optional DependencyCache (a fresh one is created if None)
        :param prereleases: whether pre-release versions may be selected
        :param clear_caches: wipe the dependency/repository caches up front
        :param allow_unsafe: keep pip/setuptools-style unsafe packages pinned
        """
        self.our_constraints = set(constraints)
        self.their_constraints = set()
        self.repository = repository
        if cache is None:
            cache = DependencyCache() # pragma: no cover
        self.dependency_cache = cache
        self.prereleases = prereleases
        self.clear_caches = clear_caches
        self.allow_unsafe = allow_unsafe
        self.unsafe_constraints = set()
    @property
    def constraints(self):
        # Combined view of the user-supplied constraints and the secondary
        # constraints discovered during earlier resolution rounds.
        return set(self._group_constraints(chain(self.our_constraints,
                                                 self.their_constraints)))
    def resolve_hashes(self, ireqs):
        """
        Finds acceptable hashes for all of the given InstallRequirements.
        """
        with self.repository.allow_all_wheels():
            return {ireq: self.repository.get_hashes(ireq) for ireq in ireqs}
    def resolve(self, max_rounds=12):
        """
        Finds concrete package versions for all the given InstallRequirements
        and their recursive dependencies. The end result is a flat list of
        (name, version) tuples. (Or an editable package.)
        Resolves constraints one round at a time, until they don't change
        anymore. Protects against infinite loops by breaking out after a max
        number rounds.
        """
        if self.clear_caches:
            self.dependency_cache.clear()
            self.repository.clear_caches()
        self.check_constraints(chain(self.our_constraints,
                                     self.their_constraints))
        # Ignore existing packages
        os.environ[str('PIP_EXISTS_ACTION')] = str('i') # NOTE: str() wrapping necessary for Python 2/3 compat
        for current_round in count(start=1):
            if current_round > max_rounds:
                raise RuntimeError('No stable configuration of concrete packages '
                                   'could be found for the given constraints after '
                                   '%d rounds of resolving.\n'
                                   'This is likely a bug.' % max_rounds)
            log.debug('')
            log.debug(magenta('{:^60}'.format('ROUND {}'.format(current_round))))
            has_changed, best_matches = self._resolve_one_round()
            log.debug('-' * 60)
            log.debug('Result of round {}: {}'.format(current_round,
                                                      'not stable' if has_changed else 'stable, done'))
            if not has_changed:
                break
            # If a package version (foo==2.0) was built in a previous round,
            # and in this round a different version of foo needs to be built
            # (i.e. foo==1.0), the directory will exist already, which will
            # cause a pip build failure. The trick is to start with a new
            # build cache dir for every round, so this can never happen.
            self.repository.freshen_build_caches()
        del os.environ['PIP_EXISTS_ACTION']
        # Only include hard requirements and not pip constraints
        return {req for req in best_matches if not req.constraint}
    @staticmethod
    def check_constraints(constraints):
        """Reject non-editable URL requirements, which are unsupported."""
        for constraint in constraints:
            if constraint.link is not None and not constraint.editable:
                msg = ('pip-compile does not support URLs as packages, unless they are editable. '
                       'Perhaps add -e option?')
                raise UnsupportedConstraint(msg, constraint)
    def _group_constraints(self, constraints):
        """
        Groups constraints (remember, InstallRequirements!) by their key name,
        and combining their SpecifierSets into a single InstallRequirement per
        package. For example, given the following constraints:
            Django<1.9,>=1.4.2
            django~=1.5
            Flask~=0.7
        This will be combined into a single entry per package:
            django~=1.5,<1.9,>=1.4.2
            flask~=0.7
        """
        for _, ireqs in full_groupby(constraints, key=key_from_ireq):
            ireqs = list(ireqs)
            editable_ireq = first(ireqs, key=lambda ireq: ireq.editable)
            if editable_ireq:
                yield editable_ireq # ignore all the other specs: the editable one is the one that counts
                continue
            ireqs = iter(ireqs)
            # deepcopy the accumulator so as to not modify the self.our_constraints invariant
            combined_ireq = copy.deepcopy(next(ireqs))
            combined_ireq.comes_from = None
            for ireq in ireqs:
                # NOTE we may be losing some info on dropped reqs here
                combined_ireq.req.specifier &= ireq.req.specifier
                combined_ireq.constraint &= ireq.constraint
                combined_ireq.markers = ireq.markers
                # Return a sorted, de-duped tuple of extras
                combined_ireq.extras = tuple(sorted(set(tuple(combined_ireq.extras) + tuple(ireq.extras))))
            yield combined_ireq
    def _resolve_one_round(self):
        """
        Resolves one level of the current constraints, by finding the best
        match for each package in the repository and adding all requirements
        for those best package versions. Some of these constraints may be new
        or updated.
        Returns whether new constraints appeared in this round. If no
        constraints were added or changed, this indicates a stable
        configuration.
        """
        # Sort this list for readability of terminal output
        constraints = sorted(self.constraints, key=key_from_ireq)
        unsafe_constraints = []
        original_constraints = copy.copy(constraints)
        if not self.allow_unsafe:
            # Strip unsafe packages (pip, setuptools, ...) from the round,
            # but remember them so they can be reported separately.
            for constraint in original_constraints:
                if constraint.name in UNSAFE_PACKAGES:
                    constraints.remove(constraint)
                    constraint.req.specifier = None
                    unsafe_constraints.append(constraint)
        log.debug('Current constraints:')
        for constraint in constraints:
            log.debug('  {}'.format(constraint))
        log.debug('')
        log.debug('Finding the best candidates:')
        best_matches = {self.get_best_match(ireq) for ireq in constraints}
        # Find the new set of secondary dependencies
        log.debug('')
        log.debug('Finding secondary dependencies:')
        safe_constraints = []
        for best_match in best_matches:
            for dep in self._iter_dependencies(best_match):
                if self.allow_unsafe or dep.name not in UNSAFE_PACKAGES:
                    safe_constraints.append(dep)
        # Grouping constraints to make clean diff between rounds
        theirs = set(self._group_constraints(safe_constraints))
        # NOTE: We need to compare RequirementSummary objects, since
        # InstallRequirement does not define equality
        diff = {RequirementSummary(t) for t in theirs} - {RequirementSummary(t) for t in self.their_constraints}
        removed = ({RequirementSummary(t) for t in self.their_constraints} -
                   {RequirementSummary(t) for t in theirs})
        unsafe = ({RequirementSummary(t) for t in unsafe_constraints} -
                  {RequirementSummary(t) for t in self.unsafe_constraints})
        has_changed = len(diff) > 0 or len(removed) > 0 or len(unsafe) > 0
        if has_changed:
            log.debug('')
            log.debug('New dependencies found in this round:')
            for new_dependency in sorted(diff, key=lambda req: key_from_req(req.req)):
                log.debug('  adding {}'.format(new_dependency))
            log.debug('Removed dependencies in this round:')
            for removed_dependency in sorted(removed, key=lambda req: key_from_req(req.req)):
                log.debug('  removing {}'.format(removed_dependency))
            log.debug('Unsafe dependencies in this round:')
            for unsafe_dependency in sorted(unsafe, key=lambda req: key_from_req(req.req)):
                log.debug('  remembering unsafe {}'.format(unsafe_dependency))
        # Store the last round's results in the their_constraints
        self.their_constraints = theirs
        # Store the last round's unsafe constraints
        self.unsafe_constraints = unsafe_constraints
        return has_changed, best_matches
    def get_best_match(self, ireq):
        """
        Returns a (pinned or editable) InstallRequirement, indicating the best
        match to use for the given InstallRequirement (in the form of an
        InstallRequirement).
        Example:
        Given the constraint Flask>=0.10, may return Flask==0.10.1 at
        a certain moment in time.
        Pinned requirements will always return themselves, i.e.
            Flask==0.10.1 => Flask==0.10.1
        """
        if ireq.editable:
            # NOTE: it's much quicker to immediately return instead of
            # hitting the index server
            best_match = ireq
        elif is_pinned_requirement(ireq):
            # NOTE: it's much quicker to immediately return instead of
            # hitting the index server
            best_match = ireq
        else:
            best_match = self.repository.find_best_match(ireq, prereleases=self.prereleases)
        # Format the best match
        log.debug('  found candidate {} (constraint was {})'.format(format_requirement(best_match),
                                                                    format_specifier(ireq)))
        return best_match
    def _iter_dependencies(self, ireq):
        """
        Given a pinned or editable InstallRequirement, collects all the
        secondary dependencies for them, either by looking them up in a local
        cache, or by reaching out to the repository.
        Editable requirements will never be looked up, as they may have
        changed at any time.
        """
        if ireq.editable:
            for dependency in self.repository.get_dependencies(ireq):
                yield dependency
            return
        elif ireq.markers or ireq.extras:
            # FIX: the markers and extras cases previously had two identical,
            # duplicated branches; they are merged here (same behavior).
            for dependency in self.repository.get_dependencies(ireq):
                dependency.prepared = False
                yield dependency
            return
        elif not is_pinned_requirement(ireq):
            raise TypeError('Expected pinned or editable requirement, got {}'.format(ireq))
        # Now, either get the dependencies from the dependency cache (for
        # speed), or reach out to the external repository to
        # download and inspect the package version and get dependencies
        # from there
        if ireq not in self.dependency_cache:
            log.debug('  {} not in cache, need to check index'.format(format_requirement(ireq)), fg='yellow')
            dependencies = self.repository.get_dependencies(ireq)
            # FIX: this assignment was duplicated in a pointless
            # ``sys.version_info`` check (both branches identical); a single
            # statement is equivalent. The loop variable no longer shadows
            # the outer ``ireq``.
            self.dependency_cache[ireq] = sorted(format_requirement(dep) for dep in dependencies)
        # Example: ['Werkzeug>=0.9', 'Jinja2>=2.4']
        dependency_strings = self.dependency_cache[ireq]
        log.debug('  {:25} requires {}'.format(format_requirement(ireq),
                                               ', '.join(sorted(dependency_strings, key=lambda s: s.lower())) or '-'))
        from notpip._vendor.packaging.markers import InvalidMarker
        for dependency_string in dependency_strings:
            try:
                _dependency_string = dependency_string
                if ';' in dependency_string:
                    # split off markers and remove any duplicates by comparing against deps
                    _dependencies = [dep.strip() for dep in dependency_string.split(';')]
                    _dependency_string = '; '.join([dep for dep in dedup(_dependencies)])
                yield InstallRequirement.from_line(_dependency_string, constraint=ireq.constraint)
            except InvalidMarker:
                yield InstallRequirement.from_line(dependency_string, constraint=ireq.constraint)
    def reverse_dependencies(self, ireqs):
        """Return the reverse-dependency mapping for the non-editable ireqs."""
        non_editable = [ireq for ireq in ireqs if not ireq.editable]
        return self.dependency_cache.reverse_dependencies(non_editable)
| 43.036474 | 119 | 0.624832 |
acea5dd0e622f56bb2368d17fc4de3179ddecbf8 | 10,751 | py | Python | devday/devday/settings/base.py | jandd/devday_website | 8c78c276096124bcfd175632affdc3fbbb1224ab | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | devday/devday/settings/base.py | jandd/devday_website | 8c78c276096124bcfd175632affdc3fbbb1224ab | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | devday/devday/settings/base.py | jandd/devday_website | 8c78c276096124bcfd175632affdc3fbbb1224ab | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | """
Django settings for devday project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
Please keep this list of settings sorted alphabetically!
"""
import mimetypes
import os
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
import requests
def gettext(s):
    """Return *s* unchanged (translation marker used at settings load time)."""
    return s
def get_env_variable(var_name):
    """
    Get a setting from an environment variable.

    :param str var_name: variable name
    :raises ImproperlyConfigured: when the variable is not set
    """
    value = os.environ.get(var_name)
    if value is None:
        error_msg = "Set the %s environment variable" % var_name
        raise ImproperlyConfigured(error_msg)
    return value
# Lazily-filled module-level cache for the Vault secret payload
# (populated by _fetch_from_vault()).
_VAULT_DATA = None
_VAULT_BASE_URL = get_env_variable("VAULT_URL")
# KV v2 read endpoint for the "devday" secret.
_VAULT_URL = "{}/v1/secret/data/devday".format(_VAULT_BASE_URL)
# Endpoint used to renew our own token before reading the secret.
_VAULT_RENEW_URL = "{}/v1/auth/token/renew-self".format(_VAULT_BASE_URL)
def _fetch_from_vault():
    """
    Fetch (and memoize) the devday secret data from Vault.

    Renews the client token first, then reads the secret and caches the
    inner ``data`` mapping in the module-level ``_VAULT_DATA``.
    """
    global _VAULT_DATA
    if not _VAULT_DATA:
        s = requests.Session()
        s.headers.update({"x-vault-token": get_env_variable("VAULT_TOKEN")})
        # Renew our own token so it does not expire mid-build.
        r = s.post(_VAULT_RENEW_URL, data="{}")
        r.raise_for_status()
        r = s.get(_VAULT_URL)
        r.raise_for_status()
        # KV v2 responses wrap the secret as data.data.
        _VAULT_DATA = r.json()["data"]["data"]
        s.close()
    return _VAULT_DATA
def get_vault_variable(var_name):
    """
    Get a setting from vault
    :param var_name: variable name
    :return: variable data from vault /secret/data/devday
    """
    try:
        return _fetch_from_vault()[var_name]
    except KeyError:
        # Missing key: point the operator at the Vault path to fix.
        raise ImproperlyConfigured(
            "Define %s in Vault key at %s" % (var_name, _VAULT_URL)
        )
def get_variable_cascade(var_name, type=str, default_value=None):
    """
    Try to get a setting from Vault or the environment and fallback to
    default_value if it is defined.
    Variables are transformed to uppercase before they are looked up in the
    environment.
    If no default is defined and the variable cannot be found in either
    Vault or the environment an ImproperlyConfigured exception is raised.
    :param var_name: variable name
    :param type: result type
    :param default_value: default value
    :return: variable from Vault or the environment
    """
    try:
        value = _fetch_from_vault()[var_name]
    except KeyError:
        try:
            value = os.environ[var_name.upper()]
        except KeyError:
            if default_value is None:
                error_msg = (
                    "Define %s in Vault key at %s or set the" " environment variable %s"
                ) % (var_name, _VAULT_URL, var_name.upper())
                raise ImproperlyConfigured(error_msg)
            else:
                return default_value
    # BUG FIX: bool("False") is True, so boolean settings supplied via the
    # environment (always strings) were effectively always truthy. Interpret
    # the common textual representations explicitly instead.
    if type is bool and isinstance(value, str):
        return value.strip().lower() in ("1", "true", "yes", "on")
    try:
        return type(value)
    except ValueError:
        # BUG FIX: the message arguments were passed as extra positional
        # args (logging-style) and were never interpolated; format them.
        raise ImproperlyConfigured(
            "Cannot interpret value %s as %s" % (value, type.__name__)
        )
# Register the SVG mime type so static .svg files are served correctly.
mimetypes.add_type("image/svg+xml", ".svg", True)
# settings for django-django_registration
# see: https://django-registration.readthedocs.io/en/2.1.1/index.html
ACCOUNT_ACTIVATION_DAYS = 14
ALLOWED_HOSTS = []
# Custom user model used across the project.
AUTH_USER_MODEL = "attendee.DevDayUser"
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# django CMS language configuration (German only).
CMS_LANGUAGES = {
    1: [
        {
            "code": "de",
            "name": gettext("de"),
            "public": True,
            "hide_untranslated": False,
            "redirect_on_fallback": True,
        }
    ],
    "default": {
        "fallbacks": ["de"],
        "redirect_on_fallback": True,
        "public": True,
        "hide_untranslated": False,
    },
}
CMS_PLACEHOLDER_CONF = {}
# Bootstrap grid classes offered by the djangocms-style plugin.
DJANGOCMS_STYLE_CHOICES = ["row", "container", "col-xs-12", "col-md-12"]
DJANGOCMS_STYLE_TEMPLATES = [
    # styles for bootstrap grid model
    ("row", gettext("row")),
    ("container", gettext("container")),
    ("col-xs-12", gettext("col-xs-12")),
    ("col-md-12", gettext("col-md-12")),
]
DJANGOCMS_PICTURE_RESPONSIVE_IMAGES = False
DJANGOCMS_PICTURE_TEMPLATES = (
    ("carousel", _("Image in carousel")),
    ("carousel_first", _("First image in carousel")),
    ("gallery", _("Image in galery")),
)
# CMS page templates selectable in the admin.
CMS_TEMPLATES = (
    ("devday_no_cta.html", _("Dev Day Page")),
    ("devday.html", _("Dev Day Page with Call to Action area")),
    ("devday_index.html", _("Dev Day Home Page")),
    (
        "devday_all_static_placeholders.html",
        _("Page with all static placeholders not for menu"),
    ),
)
CMS_PERMISSION = True
CRISPY_TEMPLATE_PACK = "bootstrap3"
DATA_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# PostgreSQL connection; credentials come from Vault or the environment.
DATABASES = {
    "default": {
        "ENGINE": "psqlextra.backend",
        "NAME": get_variable_cascade("DEVDAY_PG_DBNAME"),
        "USER": get_variable_cascade("DEVDAY_PG_USER"),
        "PASSWORD": get_variable_cascade("postgresql_password"),
        "HOST": get_variable_cascade("DEVDAY_PG_HOST"),
        "PORT": get_variable_cascade("DEVDAY_PG_PORT"),
    }
}
DEBUG = False
# Social media links shown in the site footer/header.
DEVDAY_FACEBOOK_URL = "https://www.facebook.com/events/193156441425350/"
DEVDAY_TWITTER_URL = "https://twitter.com/devdaydresden"
DEVDAY_XING_URL = "https://www.xing.com/events/dev-day-2018-1897927"
DEFAULT_EMAIL_SENDER = "info-bounce@devday.de"
INSTALLED_APPS = [
    "ckeditor",
    "djangocms_admin_style",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.admin",
    "django.contrib.sites",
    "django.contrib.sitemaps",
    "django.contrib.staticfiles",
    "django.contrib.messages",
    "rest_framework",
    "devday",
    "event.apps.EventsConfig",
    "attendee.apps.AttendeeConfig",
    "talk.apps.SessionsConfig",
    "sponsoring",
    "cms",
    "menus",
    "sekizai",
    "treebeard",
    "easy_thumbnails",
    "filer",
    "djangocms_text_ckeditor",
    "djangocms_style",
    "djangocms_column",
    "djangocms_file",
    "djangocms_link",
    "djangocms_picture",
    "djangocms_video",
    "crispy_forms",
    "django_file_form",
    "django_file_form.ajaxuploader",
    "twitterfeed",
    "speaker.apps.SpeakerConfig",
    "django.contrib.postgres",
    "psqlextra",
]
LANGUAGE_CODE = "de"
LANGUAGES = (
    ("de", gettext("de")),
    # ('en', gettext('en')),
)
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
# NOTE: ApphookReloadMiddleware must come first for django CMS apphooks.
MIDDLEWARE = [
    "cms.middleware.utils.ApphookReloadMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.locale.LocaleMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "cms.middleware.user.CurrentUserMiddleware",
    "cms.middleware.page.CurrentPageMiddleware",
    "cms.middleware.toolbar.ToolbarMiddleware",
]
MIGRATION_MODULES = {}
# BUG FIX: REST_FRAMEWORK was assigned twice; the second literal silently
# discarded the authentication classes from the first. Merge both settings
# into a single dictionary.
REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": [
        "rest_framework.authentication.BasicAuthentication",
        "rest_framework.authentication.SessionAuthentication",
    ],
    "DEFAULT_PERMISSION_CLASSES": ["rest_framework.permissions.IsAuthenticated"],
}
ROOT_URLCONF = "devday.urls"
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_vault_variable("secret_key")
# Sponsoring form toggle and mail routing.
SPONSORING_OPEN = get_variable_cascade("sponsoring_open", bool, False)
SPONSORING_FROM_EMAIL = "info@devday.de"
SPONSORING_RECIPIENTS = ["info@devday.de"]
# The app runs behind a TLS-terminating proxy.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
CSRF_COOKIE_AGE = None
SITE_ID = 1
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATIC_URL = "/static/"
STATICFILES_DIRS = (os.path.join(BASE_DIR, "devday", "static"),)
# Days an attendee has to confirm a talk reservation.
TALK_RESERVATION_CONFIRMATION_DAYS = 5
CONFIRMATION_SALT = get_vault_variable("confirmation_salt")
# Pixel dimensions for published speaker images and thumbnails.
TALK_PUBLIC_SPEAKER_IMAGE_HEIGHT = 960
TALK_PUBLIC_SPEAKER_IMAGE_WIDTH = 636
TALK_THUMBNAIL_HEIGHT = 320
# Feedback for talks is allowed when that many minutes passed since the talk started
TALK_FEEDBACK_ALLOWED_MINUTES = 30
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [os.path.join(BASE_DIR, "devday", "templates")],
        "OPTIONS": {
            "context_processors": [
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                "django.template.context_processors.i18n",
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.template.context_processors.media",
                "django.template.context_processors.csrf",
                "django.template.context_processors.tz",
                "sekizai.context_processors.sekizai",
                "django.template.context_processors.static",
                "cms.context_processors.cms_settings",
                "devday.contextprocessors.devdaysettings_contextprocessor",
                "talk.context_processors.committee_member_context_processor",
                "talk.context_processors.reservation_context_processor",
                "twitterfeed.contextprocessors.twitter_feed_context_processor",
                "event.contextprocessors.current_event_contextprocessor",
            ],
            "loaders": [
                "django.template.loaders.filesystem.Loader",
                "django.template.loaders.app_directories.Loader",
            ],
        },
    }
]
THUMBNAIL_HIGH_RESOLUTION = True
# easy-thumbnails processing pipeline (filer subject-location aware).
THUMBNAIL_PROCESSORS = (
    "easy_thumbnails.processors.colorspace",
    "easy_thumbnails.processors.autocrop",
    # 'easy_thumbnails.processors.scale_and_crop',
    "filer.thumbnail_processors.scale_and_crop_with_subject_location",
    "easy_thumbnails.processors.filters",
)
TIME_ZONE = "Europe/Berlin"
TWITTERFEED_PROXIES = {}
TWITTERFEED_PATHS = ["/"]
# Console-only logging configuration.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "verbose": {
            "format": (
                "%(levelname)s %(asctime)s %(module)s %(process)d"
                " %(thread)s %(message)s"
            )
        },
        "simple": {"format": "%(asctime)s %(levelname)s %(message)s"},
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "simple",
            "level": "INFO",
        }
    },
    "loggers": {},
}
USE_I18N = True
USE_L10N = True
USE_TZ = True
| 30.284507 | 88 | 0.666078 |
acea5fc24dd17d82327a27d9f6b6061d37b30ac9 | 105 | py | Python | problem/01000~09999/02407/2407.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/02407/2407.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/02407/2407.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | from math import factorial
n,r=map(int,input().split())
print(factorial(n)//factorial(r)//factorial(n-r)) | 35 | 49 | 0.733333 |
acea600570b32951aa1d7c83f787ef959f757c4d | 123 | py | Python | schoolapp/users/api/urls.py | shiyanshirani/schoolappAPI | 9f36988adc249b9d36c457229575a2786869aa2a | [
"MIT"
] | null | null | null | schoolapp/users/api/urls.py | shiyanshirani/schoolappAPI | 9f36988adc249b9d36c457229575a2786869aa2a | [
"MIT"
] | null | null | null | schoolapp/users/api/urls.py | shiyanshirani/schoolappAPI | 9f36988adc249b9d36c457229575a2786869aa2a | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('users/', views.UserProfileViewSet.as_view())
] | 17.571429 | 54 | 0.723577 |
acea609e646a79b83f619514738556dbcd2f4d0a | 5,966 | py | Python | local/bin/py/build/update_pre_build.py | jong82/documentation | 635633c0670ad41dbc26c94fb29674c622d62b54 | [
"BSD-3-Clause"
] | null | null | null | local/bin/py/build/update_pre_build.py | jong82/documentation | 635633c0670ad41dbc26c94fb29674c622d62b54 | [
"BSD-3-Clause"
] | null | null | null | local/bin/py/build/update_pre_build.py | jong82/documentation | 635633c0670ad41dbc26c94fb29674c622d62b54 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys
import yaml
from pull_and_push_file import pull_and_push_file
from pull_and_push_folder import pull_and_push_folder
from content_manager import prepare_content
from integrations import Integrations
from security_rules import security_rules
from collections import OrderedDict
from optparse import OptionParser
from os import sep, getenv
from os.path import (
curdir,
join,
abspath,
normpath
)
class Build:
    """
    Orchestrates the documentation pre-build: loads the build configuration,
    collects the list of content sources, and processes each source into the
    Hugo content tree.
    """
    def __init__(self, opts, tempdir):
        """
        :param opts: parsed command-line options (must expose ``source``
            and ``token``)
        :param tempdir: working directory for downloaded/extracted content
        """
        super().__init__()
        self.options = opts
        self.list_of_contents = []
        self.tempdir = tempdir
        # BUG FIX: this previously read the module-level ``options`` name
        # (only defined inside the ``__main__`` block), which raised a
        # NameError whenever Build was imported as a module. Use the
        # ``opts`` parameter instead.
        self.content_dir = "{0}{1}{2}".format(
            abspath(normpath(opts.source)),
            sep,
            "content" + sep + "en" + sep,
        )
        self.extract_dir = "{0}".format(
            join(self.tempdir, "extracted") + sep
        )
        self.build_configuration = []
    # Loads the configurations in the configuration/ folder and attaches it to the Build Class
    def load_config(self, build_configuration_file_path, integration_merge_configuration_file_path):
        """Load the build and integration-merge YAML configuration files."""
        self.build_configuration = yaml.safe_load(open(build_configuration_file_path))
        self.integration_mutations = OrderedDict(yaml.safe_load(open(integration_merge_configuration_file_path)))
    # Get the list of content to work with after it gets updated with the local globs or the
    # downloaded globs from Github.
    def get_list_of_content(self, configuration):
        """Resolve the configured content sources into concrete file lists."""
        self.list_of_contents = prepare_content(
            configuration, self.options.token, self.extract_dir)
    # Build the documentation by injecting content from other repository.
    def build_documentation(self, list_of_contents):
        """Dispatch every content entry to its processing function by action."""
        # Instanciation of the integrations class since it's needed for content management below.
        Int = Integrations(self.options.source, self.tempdir,
                           self.integration_mutations)
        # Depending of the action attached to the content the proper function is called
        for content in list_of_contents:
            try:
                if content["action"] == "integrations":
                    Int.process_integrations(content)
                elif content["action"] == "marketplace-integrations":
                    Int.process_integrations(content, marketplace=True)
                elif (content["action"] == "pull-and-push-folder"):
                    pull_and_push_folder(content, self.content_dir)
                elif content["action"] == "npm-integrations":
                    Int.process_integrations(content)
                elif content["action"] == "pull-and-push-file":
                    pull_and_push_file(content, self.content_dir)
                elif content["action"] in ("security-rules", "compliance-rules"):
                    security_rules(content, self.content_dir)
                elif content["action"] == "Not Available":
                    # Content could not be fetched; only tolerated locally.
                    if getenv("LOCAL") == 'True':
                        print("\x1b[33mWARNING\x1b[0m: Processing of {} canceled, since content is not available. Documentation is in degraded mode".format(
                            content["repo_name"]))
                else:
                    print(
                        "\x1b[31mERROR\x1b[0m: Action {} unknown for {}".format(content["action"], content))
                    raise ValueError
            except Exception as e:
                print(e)
                # Local builds degrade gracefully; CI builds fail hard.
                if getenv("LOCAL") == 'True':
                    print(
                        "\x1b[33mWARNING\x1b[0m: Unsuccessful processing of {}".format(content))
                else:
                    print(
                        "\x1b[31mERROR\x1b[0m: Unsuccessful processing of {}".format(content))
                    raise ValueError
        # Once all the content is processed integrations are merged according to the integration_merge.yaml
        # configuration file. This needs to happen after all content is processed to avoid flacky integration merge
        try:
            Int.merge_integrations()
        except Exception as e:
            print(e)
            if getenv("LOCAL") == 'True':
                print(
                    "\x1b[33mWARNING\x1b[0m: Integration merge failed, documentation is now in degraded mode.")
            else:
                print(
                    "\x1b[31mERROR\x1b[0m: Integration merge failed, stopping build.")
                sys.exit(1)
if __name__ == "__main__":
    # NOTE(review): optparse has been deprecated since Python 2.7 in favor of
    # argparse; consider migrating when this script is next touched.
    parser = OptionParser(
        usage="usage: %prog [options] link_type"
    )
    parser.add_option(
        "-t",
        "--token",
        help="github access token",
        default=None,
    )
    parser.add_option(
        "-s",
        "--source",
        help="location of src files",
        default=curdir,
    )
    options, args = parser.parse_args()
    # Fall back to the GITHUB_TOKEN environment variable when no --token flag
    # was supplied on the command line.
    options.token = (
        getenv("GITHUB_TOKEN", options.token)
        if not options.token
        else options.token
    )
    # These hard-coded variables should be set in the Makefile config later down the road.
    build_configuration_file_path = getenv("CONFIGURATION_FILE")
    integration_merge_configuration_file_path = "./local/bin/py/build/configurations/integration_merge.yaml"
    temp_directory = "./integrations_data"
    # Documentation build process:
    # 1. Instantiation of the Build class with the options (GitHub token) and the temp directory to work with
    # 2. Load all configuration needed to build the doc
    # 3. Retrieve the list of content to work with and update it based on the configuration specification
    # 4. Actually build the documentation with the updated list of content.
    build = Build(options, temp_directory)
    build.load_config(build_configuration_file_path,
                      integration_merge_configuration_file_path)
    build.get_list_of_content(build.build_configuration)
    build.build_documentation(build.list_of_contents)
| 40.310811 | 156 | 0.629232 |
acea60acc127bf66e007d569973ebdaca14d7130 | 2,552 | py | Python | api_service/common.py | rohittuli5/fras | ff0ab3e535daabcf5d3bedbab5bba8bbd6f4a170 | [
"MIT"
] | null | null | null | api_service/common.py | rohittuli5/fras | ff0ab3e535daabcf5d3bedbab5bba8bbd6f4a170 | [
"MIT"
] | null | null | null | api_service/common.py | rohittuli5/fras | ff0ab3e535daabcf5d3bedbab5bba8bbd6f4a170 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Common functions that are used by other modules.
:Authors: Balwinder Sodhi
"""
import logging
import random
import string
from datetime import datetime as DT
from functools import wraps
from flask import (redirect, session, url_for, flash)
from models import db
from playhouse.shortcuts import *
# Canonical timestamp format used across this module, e.g. "20240131_235959".
TS_FORMAT = "%Y%m%d_%H%M%S"
def inject_user():
    """Template context processor: expose the session user as ``user``.

    Returns an empty context when no user is stored in the Flask session.
    """
    if "user" not in session:
        logging.info("User not found in session!")
        return dict()
    logging.info("Found user in session: {}".format(session["user"]))
    return dict(user=session["user"])
def auth_check(_func=None, *, roles=None):
    """Decorator that requires a logged-in user (and optionally a role) for a view.

    Works both bare (``@auth_check``) and with arguments
    (``@auth_check(roles=[...])``). Unauthenticated or unauthorized requests
    get a flashed message and a redirect to the login view.

    _func: the wrapped view when used without parentheses, else None.
    roles: optional collection of role names allowed to access the view.
    """
    def decor_auth(func):
        @wraps(func)
        def wrapper_auth(*args, **kwargs):
            if "user" not in session:
                msg = "Illegal access to operation. Login required."
                logging.warning(msg)
                flash(msg)
                return redirect(url_for('login view'))
            user_role = session["user"]["role"]
            # FIX: was a bare print() left over from debugging; log at debug
            # level instead of writing to stdout in production.
            logging.debug("User role: %s", user_role)
            if roles and (user_role not in roles):
                msg = "You do not have required permissions to access."
                logging.warning(msg)
                flash(msg)
                return redirect(url_for('login view'))
            return func(*args, **kwargs)
        return wrapper_auth

    # Support both @auth_check and @auth_check(roles=...).
    if _func is None:
        return decor_auth
    else:
        return decor_auth(_func)
def filter_date_using_format(date_time, given_format=None):
    """Render a timestamp as ``DD-MM-YYYY@HH:MM:SS AM/PM``.

    date_time: either a ``datetime`` instance or a string; strings are parsed
        with *given_format* (falling back to ``TS_FORMAT``).
    Any timezone information is discarded before formatting.
    """
    parse_format = given_format if given_format else TS_FORMAT
    if isinstance(date_time, str):
        date_time = DT.strptime(date_time, parse_format)
    naive = date_time.replace(tzinfo=None)
    return naive.strftime('%d-%m-%Y@%I:%M:%S %p')
def get_ts_str():
    """Return the current local time rendered with ``TS_FORMAT``."""
    # format() delegates to datetime.__format__, i.e. strftime.
    return format(DT.now(), TS_FORMAT)
def random_str(size=10):
    """Return a random string of *size* uppercase letters and digits.

    Not cryptographically secure (uses the ``random`` module).
    """
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choices(alphabet, k=size))
def merge_form_to_model(mod, frm):
    """Copy matching fields from submitted form data onto a peewee model.

    Arguments:
        mod {Model} -- peewee Model instance, updated in place
        frm {dict} -- dict parsed from the submitted JSON form; keys with no
            corresponding model field are ignored (ignore_unknown=True)
    """
    update_model_from_dict(mod, frm, ignore_unknown=True)
def db_connect():
    """Open the shared peewee database connection (request setup hook)."""
    db.connect()
def db_close(http_resp):
    """Close the database connection and pass the response through unchanged.

    Intended as a Flask after-request hook, which must return the response.
    """
    db.close()
    return http_resp
| 26.309278 | 84 | 0.628918 |
acea63ce83e008af258809c54ee1a117eaa10d66 | 22,281 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/operations/_security_rules_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2019-08-23T21:14:00.000Z | 2021-09-07T18:32:34.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/operations/_security_rules_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/operations/_security_rules_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    # These typing names are only needed for the comment-style annotations
    # below, so they are imported lazily to avoid runtime cost.
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    # Shape of the optional ``cls`` response-hook callback accepted by every operation.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SecurityRulesOperations(object):
    """SecurityRulesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2017_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: this class is generated by AutoRest; manual edits will be lost on
    # regeneration. The ``_*_initial`` helpers issue the raw HTTP calls, and
    # the public ``begin_*`` methods wrap them in long-running-operation
    # (LRO) pollers.

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Keep handles on the pipeline client and the (de)serializers that the
        # generated service client supplies.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    # Raw DELETE call; polling/completion is handled by begin_delete().
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        network_security_group_name,  # type: str
        security_rule_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202/204 indicate an accepted/in-progress async delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore

    def begin_delete(
        self,
        resource_group_name,  # type: str
        network_security_group_name,  # type: str
        security_rule_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified network security rule.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                security_rule_name=security_rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Select the polling strategy (ARM LRO polling by default).
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore

    def get(
        self,
        resource_group_name,  # type: str
        network_security_group_name,  # type: str
        security_rule_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.SecurityRule"
        """Get the specified network security rule.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SecurityRule, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2017_06_01.models.SecurityRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01"
        accept = "application/json, text/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('SecurityRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore

    # Raw PUT call; polling/completion is handled by begin_create_or_update().
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        network_security_group_name,  # type: str
        security_rule_name,  # type: str
        security_rule_parameters,  # type: "_models.SecurityRule"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.SecurityRule"
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing rule, 201 = created a new rule.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('SecurityRule', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('SecurityRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore

    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        network_security_group_name,  # type: str
        security_rule_name,  # type: str
        security_rule_parameters,  # type: "_models.SecurityRule"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.SecurityRule"]
        """Creates or updates a security rule in the specified network security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :param security_rule_parameters: Parameters supplied to the create or update network security
         rule operation.
        :type security_rule_parameters: ~azure.mgmt.network.v2017_06_01.models.SecurityRule
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either SecurityRule or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_06_01.models.SecurityRule]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRule"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                security_rule_name=security_rule_name,
                security_rule_parameters=security_rule_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('SecurityRule', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Select the polling strategy (ARM LRO polling by default).
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore

    def list(
        self,
        resource_group_name,  # type: str
        network_security_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.SecurityRuleListResult"]
        """Gets all security rules in a network security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.SecurityRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01"
        accept = "application/json, text/json"

        # Build the request for the first page (service URL) or a continuation
        # page (next_link returned by the previous response).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        # Turn one page response into (next_link, iterator of items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        # Fetch one page and raise mapped errors on failure.
        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'}  # type: ignore
| 50.638636 | 244 | 0.674027 |
acea65afbe4bd1fc12d8d727e392d81a9b9dfce2 | 509 | py | Python | blender/arm/logicnode/postprocess_get_ssao.py | ValtoGameEngines/Armory | ad3d3c63e64e9225e62b414b7ec4dd9fb93fab32 | [
"Zlib"
] | 1 | 2021-03-17T05:51:45.000Z | 2021-03-17T05:51:45.000Z | blender/arm/logicnode/postprocess_get_ssao.py | ValtoGameEngines/Armory | ad3d3c63e64e9225e62b414b7ec4dd9fb93fab32 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/postprocess_get_ssao.py | ValtoGameEngines/Armory | ad3d3c63e64e9225e62b414b7ec4dd9fb93fab32 | [
"Zlib"
] | 1 | 2020-06-29T07:54:21.000Z | 2020-06-29T07:54:21.000Z | import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SSAOGetNode(Node, ArmLogicTreeNode):
    """Logic node exposing the SSAO post-process settings as float outputs."""
    bl_idname = 'LNSSAOGetNode'
    bl_label = 'Get SSAO'
    bl_icon = 'QUESTION'

    def init(self, context):
        # Create one float output socket per SSAO parameter, in display order.
        for output_name in ('Radius', 'Strength', 'Max Steps'):
            self.outputs.new('NodeSocketFloat', output_name)
# Register this node under the "Postprocess" category in Armory's node menu.
add_node(SSAOGetNode, category='Postprocess')
| 28.277778 | 56 | 0.699411 |
acea66486a5217d20835e1148ad2bf9cde1bf9db | 4,648 | py | Python | src/backend/partaj/core/api/referral_message.py | MTES-MCT/partaj | 0025c17a96d9212430d18ec36f6a2474c4609738 | [
"MIT"
] | 2 | 2020-10-15T11:28:26.000Z | 2021-06-25T15:24:33.000Z | src/backend/partaj/core/api/referral_message.py | MTES-MCT/partaj | 0025c17a96d9212430d18ec36f6a2474c4609738 | [
"MIT"
] | 7 | 2020-10-01T14:49:51.000Z | 2022-01-24T09:44:10.000Z | src/backend/partaj/core/api/referral_message.py | MTES-MCT/partaj | 0025c17a96d9212430d18ec36f6a2474c4609738 | [
"MIT"
] | 3 | 2020-03-18T15:53:26.000Z | 2021-09-16T14:39:27.000Z | """
Referral message related API endpoints.
"""
from rest_framework import viewsets
from rest_framework.response import Response
from .. import models
from ..email import Mailer
from ..forms import ReferralMessageForm
from ..serializers import ReferralMessageSerializer
from . import permissions
class ReferralMessageViewSet(viewsets.ModelViewSet):
    """
    API endpoints for referral messages.
    """

    permission_classes = [permissions.NotAllowed]
    queryset = models.ReferralMessage.objects.all()
    serializer_class = ReferralMessageSerializer

    def get_permissions(self):
        """
        Manage permissions for default methods separately, delegating to @action defined
        permissions for other actions.
        """
        if self.action in ["create", "list"]:
            permission_classes = [
                permissions.IsRequestReferralLinkedUser
                | permissions.IsRequestReferralLinkedUnitMember
            ]
        elif self.action in ["retrieve"]:
            permission_classes = [
                permissions.IsLinkedReferralLinkedUser
                | permissions.IsLinkedReferralLinkedUnitMember
            ]
        else:
            # Custom @action methods declare their own permission_classes;
            # fall back to the class default when they do not.
            try:
                permission_classes = getattr(self, self.action).kwargs.get(
                    "permission_classes"
                )
            except AttributeError:
                permission_classes = self.permission_classes

        return [permission() for permission in permission_classes]

    def create(self, request, *args, **kwargs):
        """
        Create a new referral message as the client issues a POST on the referralmessages endpoint.

        Saves the message and its file attachments, then notifies by email the
        referral requester and the relevant unit members (assignees if any,
        otherwise the owners of the linked units).
        """
        try:
            referral = models.Referral.objects.get(id=request.data.get("referral"))
        except models.Referral.DoesNotExist:
            # NOTE(review): the stray "f" in "Referral f{...}" below looks like
            # a typo in the error message — confirm before changing it.
            return Response(
                status=400,
                data={
                    "errors": [
                        f"Referral f{request.data.get('referral')} does not exist."
                    ]
                },
            )

        form = ReferralMessageForm(
            {
                "content": request.data.get("content") or "",
                "referral": referral,
                "user": request.user,
            },
            request.FILES,
        )

        if not form.is_valid():
            return Response(status=400, data=form.errors)

        # Create the referral message from incoming data, and attachment instances for the files
        referral_message = form.save()
        files = request.FILES.getlist("files")
        for file in files:
            referral_message_attachment = models.ReferralMessageAttachment(
                file=file, referral_message=referral_message
            )
            referral_message_attachment.save()

        # Define all users who need to receive emails for this referral
        targets = [referral.user]
        if referral.assignees.count() > 0:
            targets = targets + list(referral.assignees.all())
        else:
            # No assignees yet: notify the owners of every linked unit instead.
            for unit in referral.units.all():
                targets = targets + [
                    membership.user
                    for membership in unit.get_memberships().filter(
                        role=models.UnitMembershipRole.OWNER
                    )
                ]

        # The user who sent the message should not receive an email
        targets = [target for target in targets if target != referral_message.user]

        # Iterate over targets
        # NOTE(review): targets may contain duplicates (e.g. a user owning
        # several linked units), which would send duplicate emails — confirm
        # whether deduplication is wanted.
        for target in targets:
            if target == referral.user:
                Mailer.send_new_message_for_requester(referral, referral_message)
            else:
                Mailer.send_new_message_for_unit_member(
                    target, referral, referral_message
                )

        return Response(
            status=201, data=ReferralMessageSerializer(referral_message).data
        )

    def list(self, request, *args, **kwargs):
        """
        Return a list of referral messages. The list is always filtered by referral as there's
        no point in shuffling together messages that belong to different referrals.
        """
        queryset = self.get_queryset().filter(
            referral__id=request.query_params.get("referral")
        )

        page = self.paginate_queryset(queryset.order_by("created_at"))
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)

        serializer = self.get_serializer(queryset.order_by("created_at"), many=True)
        return Response(serializer.data)
| 35.212121 | 99 | 0.598967 |
acea666c768d04f6781784ad88861f66a72ff654 | 721 | py | Python | weasyl/useralias.py | akash143143/weasyl | be42a2313e657e97c4a48432379e37b6a3d4a4af | [
"Apache-2.0"
] | null | null | null | weasyl/useralias.py | akash143143/weasyl | be42a2313e657e97c4a48432379e37b6a3d4a4af | [
"Apache-2.0"
] | null | null | null | weasyl/useralias.py | akash143143/weasyl | be42a2313e657e97c4a48432379e37b6a3d4a4af | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from weasyl import define as d
from weasyl import login
from weasyl.error import WeasylError
def select(userid):
    """Return the premium custom alias for *userid*, or None when not set."""
    return d.engine.scalar("SELECT alias_name FROM useralias WHERE userid = %(user)s AND settings ~ 'p'", user=userid)
def set(userid, username):
    """Replace the premium custom alias for *userid*.

    Pass a falsy *username* to clear the alias. Raises
    WeasylError("usernameExists") when the name is already taken and
    WeasylError("InsufficientPermissions") when the user is not premium.
    """
    # Guard clauses: validate name availability and premium status up front.
    if username and login.username_exists(username):
        raise WeasylError("usernameExists")
    if not d.get_premium(userid):
        raise WeasylError("InsufficientPermissions")

    # Drop any existing premium alias, then store the new one (if any).
    d.engine.execute("DELETE FROM useralias WHERE userid = %(user)s AND settings ~ 'p'", user=userid)
    if username:
        d.engine.execute("INSERT INTO useralias VALUES (%(user)s, %(name)s, 'p')", user=userid, name=username)
| 32.772727 | 118 | 0.723994 |
acea674fa9154ed93f146d34cdd29eea16a8dd82 | 6,939 | py | Python | blue_devices_screen/devices.py | ShareASmile/car-locator | 765d26ad414ab86e4d93bc5338868769e8b3e90f | [
"MIT"
] | 21 | 2020-09-08T21:03:25.000Z | 2022-02-15T07:08:04.000Z | blue_devices_screen/devices.py | ShareASmile/car-locator | 765d26ad414ab86e4d93bc5338868769e8b3e90f | [
"MIT"
] | 3 | 2021-04-13T09:40:20.000Z | 2021-05-28T20:53:07.000Z | blue_devices_screen/devices.py | ShareASmile/car-locator | 765d26ad414ab86e4d93bc5338868769e8b3e90f | [
"MIT"
] | 9 | 2020-12-11T09:01:42.000Z | 2022-03-28T00:55:59.000Z | from random import choice
from kivymd.uix.screen import MDScreen
from kivymd.uix.list import OneLineListItem
from kivymd.uix.list import MDList
from kivymd.uix.toolbar import MDToolbar
from kivymd.uix.button import MDFloatingActionButton
from kivymd.app import MDApp
from kivy.utils import platform, get_hex_from_color
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.scrollview import ScrollView
from kivy.clock import Clock
from kivy.animation import Animation
from kivy.metrics import dp
from constants.colors import DECORATION_COLORS
if platform == 'android':
    from jnius import autoclass
    from android_toast.toast import android_toast
    # Bind the Android framework classes we need through PyJNIus.  These
    # lookups only succeed on a real device, hence the platform guard.
    BluetoothAdapter = autoclass('android.bluetooth.BluetoothAdapter')
    BluetoothDevice = autoclass('android.bluetooth.BluetoothDevice')
    Intent = autoclass('android.content.Intent')
    mActivity = autoclass('org.kivy.android.PythonActivity').mActivity
    ContextCompat = autoclass('androidx.core.content.ContextCompat')
    Manifest = autoclass('android.Manifest$permission')
    PackageManager = autoclass('android.content.pm.PackageManager')
class BlueDevicesScreen(MDScreen):
    """Screen listing the phone's bonded Bluetooth devices so the user can
    pick which car to listen for.

    Only functional on Android (PyJNIus bindings); elsewhere the adapter
    attribute stays None and the screen is inert.
    """
    def __init__(self, **kw):
        # Widget references are created later in post_init (scheduled below,
        # once the app object exists); only bookkeeping is set up here.
        self.devices = []
        self.list_of_devices = None
        self.toolbar = None
        self.choosen_color = None
        self.app = MDApp.get_running_app()
        self.number_of_devices = 0
        self.count_animations = 0
        if platform == 'android':
            self.bluetoothAdapter = BluetoothAdapter.getDefaultAdapter()
            if not self.bluetoothAdapter:
                android_toast("This device doesn't support bluetooth", True)
        else:
            self.bluetoothAdapter = None
        Clock.schedule_once(self.post_init, 0)
        super().__init__(**kw)
    def post_init(self, dt):
        # Build the widget tree: toolbar on top, scrollable device list in
        # the middle, refresh button at the bottom.
        scroll = ScrollView(always_overscroll=False)
        self.list_of_devices = MDList()
        scroll.add_widget(self.list_of_devices)
        box = BoxLayout()
        box.add_widget(scroll)
        self.refresh_btn = MDFloatingActionButton(
            icon='refresh',
            pos_hint={'center_x': .5, 'center_y': .5},
            md_bg_color=self.app.theme_cls.primary_color,
            opacity=0
        )
        self.refresh_btn.bind(on_release=self.get_bluetooth_devices)
        btn_layout = FloatLayout(size_hint_y=None, height=dp(100))
        btn_layout.add_widget(self.refresh_btn)
        self.container = BoxLayout(orientation='vertical')
        self.toolbar = MDToolbar(pos_hint={'top': 1})
        # NOTE(review): the trailing commas below make each assignment a
        # tuple wrapping a flat list; KivyMD normally expects a list of
        # [icon, callback] lists -- confirm this renders as intended.
        self.toolbar.left_action_items = [
            'chevron-left', lambda x: self.switch_screen()
        ],
        self.toolbar.right_action_items = [
            'bluetooth-off', lambda x: self.clear_device()
        ],
        self.container.add_widget(self.toolbar)
        self.container.add_widget(box)
        self.container.add_widget(btn_layout)
        self.add_widget(self.container)
    def enable_bluetooth(self):
        # Ask Android to turn Bluetooth on (shows the system dialog).
        enableAdapter = Intent(BluetoothAdapter.ACTION_REQUEST_ENABLE)
        mActivity.startActivityForResult(enableAdapter, 0)
    def post_background_permissions(self):
        # Called once background permissions are granted: make sure the
        # adapter is enabled, then populate the list and start color cycling.
        if self.bluetoothAdapter:
            if not self.bluetoothAdapter.isEnabled():
                self.enable_bluetooth()
                return
        self.get_bluetooth_devices()
        Clock.schedule_once(self.animate_button_colors, 0)
    def on_enter(self, *args):
        # On Android, fall back to the main screen if permissions are missing.
        if platform == 'android':
            granted = self.app.check_background()
            if granted:
                self.post_background_permissions()
            else:
                self.app.root.ids.sm.current = 'scr 1'
        return super().on_enter(*args)
    def on_leave(self, *args):
        # Reset transient state and restore the theme colors that the
        # color animation may have changed.
        self.devices = []
        self.number_of_devices = 0
        self.list_of_devices.clear_widgets()
        Clock.schedule_once(self.app.save_theme, 0)
        Animation.cancel_all(self.refresh_btn)
        Animation.cancel_all(self.toolbar)
        self.toolbar.md_bg_color = self.app.theme_cls.primary_color
        self.refresh_btn.md_bg_color = self.app.theme_cls.primary_color
        return super().on_leave(*args)
    def on_pre_enter(self, *args):
        # Fade the refresh button in as the screen appears.
        Animation(opacity=1, d=1.5).start(self.refresh_btn)
        return super().on_pre_enter(*args)
    def on_pre_leave(self, *args):
        self.app.set_decorations()
        self.refresh_btn.opacity = 0
        return super().on_pre_leave(*args)
    def get_bluetooth_devices(self, *_):
        # Rebuild the list of bonded (paired) devices; tapping a row selects
        # that device via save_device_name.
        if self.bluetoothAdapter:
            if self.bluetoothAdapter.isEnabled():
                results = self.bluetoothAdapter.getBondedDevices()
                self.devices = results.toArray()
                self.list_of_devices.clear_widgets()
                for device in self.devices:
                    name = OneLineListItem(
                        text=device.getName(), opacity=0
                    )
                    name.bind(on_release=self.save_device_name)
                    self.list_of_devices.add_widget(name)
                self.count_animations = len(self.list_of_devices.children)
                Clock.schedule_once(self.animate_items_opacity, 0)
            else:
                self.enable_bluetooth()
    def save_device_name(self, widget):
        # Remember the tapped device as the paired car and mirror its name
        # into the navigation drawer's last list item.
        self.app.paired_car = widget.text
        self.app.root.ids.content_drawer\
            .ids.md_list.children[0].text = widget.text
        android_toast(f'Listening for {widget.text}', True)
    def switch_screen(self):
        # Back to the main screen.
        self.app.root.ids.sm.current = 'scr 1'
    def clear_device(self):
        # Forget the selected car.
        self.app.paired_car = ''
        self.app.root.ids.content_drawer\
            .ids.md_list.children[0].text = 'Choose car'
    def change_decorations(self, *_):
        # Tint the Android status/navigation bars with the animation color
        # (alpha channel stripped before converting to hex).
        if platform == 'android':
            statusbar = get_hex_from_color(self.choosen_color[:-1])
            navbar = get_hex_from_color(self.choosen_color[:-1])
            self.app.statusbar(statusbar, navbar)
    def animate_button_colors(self, *_):
        # Animate toolbar and refresh button to a random decoration color.
        self.choosen_color = choice(DECORATION_COLORS)
        a = Animation(md_bg_color=self.choosen_color, d=.3)
        b = Animation(md_bg_color=self.choosen_color, d=.3)
        a.bind(on_start=self.change_decorations)
        a.start(self.refresh_btn)
        b.start(self.toolbar)
    def animate_items_opacity(self, *_):
        # Fade the list rows in one at a time; when the index underflows
        # (IndexError) simply make everything visible.
        try:
            a = Animation(opacity=1, d=.25, t='out_bounce')
            a.bind(on_complete=self.decrease_children)
            a.start(self.list_of_devices.children[self.count_animations-1])
        except IndexError as e:
            print(e)
            for child in self.list_of_devices.children:
                child.opacity = 1
    def decrease_children(self, *_):
        # Step to the next row, stopping once all rows have been animated.
        self.count_animations -= 1
        if self.count_animations < 0:
            self.count_animations = 0
            return
        self.animate_items_opacity()
acea67ddbe278f465293e1045467406a5fb18f5a | 1,957 | py | Python | tools/test_selfgcn.py | lxc86739795/vehicle_reid_by_parsing | a96496e11124d47d08a478696e0d3deb1e9b0c1a | [
"Apache-2.0"
] | 36 | 2020-11-20T05:40:14.000Z | 2022-02-18T10:15:23.000Z | tools/test_selfgcn.py | lxc86739795/vehicle_reid_by_parsing | a96496e11124d47d08a478696e0d3deb1e9b0c1a | [
"Apache-2.0"
] | 3 | 2021-06-25T07:51:12.000Z | 2021-12-05T09:44:26.000Z | tools/test_selfgcn.py | lxc86739795/vehicle_reid_by_parsing | a96496e11124d47d08a478696e0d3deb1e9b0c1a | [
"Apache-2.0"
] | 5 | 2020-12-14T02:19:11.000Z | 2022-02-18T10:15:47.000Z | # encoding: utf-8
"""
@author: l1aoxingyu
@contact: sherlockliao01@gmail.com
"""
import argparse
import os
import sys
import torch
from torch.backends import cudnn
from torch import nn
# Changed by Xinchen Liu
sys.path.append('.')
from config import cfg
from data import get_test_dataloader_mask
from engine.inference_selfgcn import inference
from modeling import build_model_selfgcn
from utils.logger import setup_logger
def main():
    """CLI entry point: load config, build the self-GCN ReID model, and run inference.

    Reads an optional YAML config file plus command-line overrides, restores
    the trained weights from cfg.TEST.WEIGHT, and evaluates on the masked
    test dataloaders.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # GPU list comes from the environment; default to a single GPU "0".
    gpus = os.environ["CUDA_VISIBLE_DEVICES"] if "CUDA_VISIBLE_DEVICES" in os.environ else '0'
    gpus = [int(i) for i in gpus.split(',')]
    num_gpus = len(gpus)
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrian = False to avoid loading weight repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()
    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))
    cudnn.benchmark = True
    # Build the model and load weights, skipping the classifier head (wo_fc).
    model = build_model_selfgcn(cfg, 0)
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.cuda()
    print('prepare test set ...')
    test_dataloader_collection, num_query_collection, _ = get_test_dataloader_mask(cfg)
    inference(cfg, model, test_dataloader_collection, num_query_collection, use_mask=True)
if __name__ == '__main__':
    main()
| 27.56338 | 98 | 0.703628 |
acea68cb4a3baaebd16e24dd879bd000e81184b7 | 6,148 | py | Python | exercise-10-advanced-sessions/server.py | agdonovan98/web-programming | 210cc49943630364a2a5b1363c658a264966f807 | [
"Unlicense"
] | null | null | null | exercise-10-advanced-sessions/server.py | agdonovan98/web-programming | 210cc49943630364a2a5b1363c658a264966f807 | [
"Unlicense"
] | null | null | null | exercise-10-advanced-sessions/server.py | agdonovan98/web-programming | 210cc49943630364a2a5b1363c658a264966f807 | [
"Unlicense"
] | null | null | null | from bottle import route, get, post
from bottle import run, debug
from bottle import request, response, redirect, template
from bottle import static_file
import dataset
import json
from bottle import default_app
import random
import string
# http://localhost:8080/
def write(key, data):
    """Serialize the session dict *data* to data/session.<key>.json.

    Raises:
        TypeError: if *data* is not a dict.  (The original used ``assert``,
        which silently disappears under ``python -O``.)
    """
    if not isinstance(data, dict):
        raise TypeError("session data must be a dict")
    with open(f"data/session.{key}.json", "w") as f:
        json.dump(data, f)
    return
def read(key):
    """Load and return the session dict stored at data/session.<key>.json.

    Raises:
        TypeError: if the file does not contain a JSON object.  (The original
        used ``assert``, which silently disappears under ``python -O``.)
        FileNotFoundError: if no session file exists for *key*.
    """
    with open(f"data/session.{key}.json", "r") as f:
        data = json.load(f)
    if not isinstance(data, dict):
        raise TypeError("session file must contain a JSON object")
    return data
def new_session_id():
    """Return a fresh 32-character session id drawn from [a-z0-9]."""
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(32))
def get_session(request):
    """Return the caller's session dict, creating a fresh one when the
    session cookie is absent or the stored file cannot be loaded."""
    def fresh_session():
        sid = new_session_id()
        print("new session id = ", sid)
        return {"session_id": sid, "username": ''}
    sid = request.get_cookie("session_id", default=None)
    if sid == None:
        session = fresh_session()
    else:
        try:
            session = read(sid)
        except:
            session = fresh_session()
    print("loaded session = ", [session])
    return session
def save_session(response, session):
    """Persist *session* to disk and (re)issue the session-id cookie."""
    sid = session['session_id']
    write(sid, session)
    print("saved session = ",[session])
    response.set_cookie("session_id", sid, path="/") #, secret='some-secret-key')
@route("/table")
def get_table():
    """Render a tiny fixed demo table."""
    rows = [{'a': 1, 'b': 'alpha'}, {'a': 2, 'b': 'beta'}]
    return template('table', table=rows)
@route("/request")
def get_request():
    """Dump the WSGI environ as key/value rows in the table template."""
    environ = request.environ
    rows = []
    for key in environ:
        rows.append({"key": key, "value": environ[key]})
    return template('table', table=rows)
@get("/login")
def get_login():
    """Render the login form."""
    page = template("login")
    return page
@post("/login")
def post_login():
    """Store the submitted username in the session, then go home."""
    submitted_name = request.forms.get('username')
    session = get_session(request)
    session['username'] = submitted_name
    save_session(response, session)
    return redirect('/')
@get("/logout")
def get_logout():
    """Log the user out by blanking the session's username, then go home."""
    sess = get_session(request)
    sess.update(username='')
    save_session(response, sess)
    return redirect('/')
@route('/restricted')
def restricted_area():
    """Greet the logged-in user, or deny access.

    Fixed: the original read a bare ``username`` cookie, but this app never
    sets one -- the username lives in the file-backed session written by
    POST /login.  Read it from the session instead, consistent with the
    rest of this module.
    """
    session = get_session(request)
    username = session['username']
    if username:
        return template("Hello {{name}}. Welcome back.", name=username)
    else:
        return "You are not logged in. Access denied."
@route("/")
def get_todo_list():
    """Home page: list the current user's todo items."""
    session = get_session(request)
    print("session = ", [session])
    current_user = session['username']
    db = dataset.connect('sqlite:///todo_list.db')
    rows = db.get_table('todo').find()
    # Keep only rows belonging to the logged-in user.
    items = []
    for row in rows:
        if row['user'] == current_user:
            items.append(dict(row))
    page = template("todo_list", items=items, message="Logged in as " + current_user)
    save_session(response, session)
    return page
@route("/data")
def get_data():
    """Return a small hard-coded pet list as JSON."""
    names_and_kinds = [
        ("Dorothy", "dog"),
        ("Squeakers", "guinea pig"),
        ("Sandy", "cat"),
    ]
    pets = [{"name": n, "kind": k} for n, k in names_and_kinds]
    response.content_type = 'application/json'
    return json.dumps({"pets": pets})
@route("/static/png/<filename:re:.*\.png>")
@route("/image/<filename:re:.*\.png>")
def get_image(filename):
    """Serve a PNG from static/images under two URL prefixes."""
    return static_file(
        filename=filename,
        root="static/images",
        mimetype="image/png",
    )
@route("/static/<filename:path>")
def get_static(filename):
    """Serve any file from the local ``static`` directory."""
    root_dir = "static"
    return static_file(filename=filename, root=root_dir)
@route("/show")
def get_show():
    """Render the ``show`` template."""
    page = template("show")
    return page
@route('/counter')
def get_counter():
    """Increment a per-browser visit counter kept in a signed cookie."""
    previous = int(request.get_cookie("count", default='0', secret="Elephant12"))
    count = previous + 1
    response.set_cookie("count", str(count), secret="Elephant12")
    return template("counter", count=count)
@route("/delete/<id>")
def get_delete(id):
    """Delete todo item *id*; report 409 on any failure.

    Fixed: ``int(id)`` now runs inside the try, so a non-numeric id yields
    a 409 response instead of an unhandled 500.
    """
    try:
        id = int(id)
        todo_list_db = dataset.connect('sqlite:///todo_list.db')
        todo_table = todo_list_db.get_table('todo')
        print(f"We need to delete id# {id}...")
        todo_table.delete(id=id)
    except Exception as e:
        response.status="409 Bad Request:"+str(e)
        return
    return template("deleted", id=id)
@get("/insert")
def get_insert():
    """Render the task-entry form (also resets the module-level message)."""
    global message
    message = "A task was added"
    page = template("insert")
    return page
@post("/insert")
def post_insert():
    """Create a new todo row from the submitted form, then go home."""
    task = request.forms.get('task')
    print("task=", task)
    try:
        db = dataset.connect('sqlite:///todo_list.db')
        # .strip() stays inside the try so a missing form field (None)
        # surfaces as a 409, as before.
        db.get_table('todo').insert({'task': task.strip(), 'done': 0})
    except Exception as e:
        response.status="409 Bad Request:"+str(e)
        return
    return redirect('/')
@get("/edit/<id>")
def get_edit(id):
    """Render the edit form for todo item *id* (404 if absent, 409 on error)."""
    try:
        db = dataset.connect('sqlite:///todo_list.db')
        matches = list(db.get_table('todo').find(id=id))
        if len(matches) != 1:
            response.status="404 Not Found:"+str(id)
            return
        items = [dict(row) for row in matches]
        print(items)
        print(items[0])
    except Exception as e:
        print(e)
        response.status="409 Bad Request:"+str(e)
        return
    return template("edit", item=items[0])
@post("/edit")
def post_edit():
    """Update an existing todo row from the submitted form, then go home.

    Fixed: parsing (``int`` of the id, ``.strip`` of the task) now happens
    inside the try, so malformed form data yields a 409 instead of an
    unhandled 500 -- consistent with the delete handler.
    """
    try:
        id = int(request.forms.get('id'))
        task = request.forms.get('task')
        print("task=", task)
        todo_list_db = dataset.connect('sqlite:///todo_list.db')
        todo_table = todo_list_db.get_table('todo')
        todo_table.update({
            'id' : id,
            'task' : task.strip(),
        }, ['id'])
    except Exception as e:
        response.status="409 Bad Request:"+str(e)
        return
    return redirect('/')
if __name__ == "__main__":
    # Run the development server directly (``python server.py``).
    debug(True)
    run(host="localhost", port=8080)
else:
    # Imported by a WSGI container (mod_wsgi, gunicorn, ...): expose the app.
    application = default_app()
acea6a35c4bbc15542303267343dbfd78b7c0f44 | 3,994 | py | Python | benchmark/startQiskit_Class2331.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_Class2331.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_Class2331.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=29
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two bit strings position-wise; the result string is reversed.

    Indexes by the length of *s*, so *t* must be at least as long.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product modulo 2 of two bit strings, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle O_f on ``n`` control qubits plus one target qubit.

    For every n-bit input ``rep`` with ``f(rep) == "1"`` the circuit flips
    the target: X gates select the bit pattern around a multi-controlled
    Toffoli, then undo themselves.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Wrap the MCT in X gates so it fires exactly on input ``rep``.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the benchmark circuit: a machine-generated gate sequence
    around the oracle for *f* on *n* qubits.

    The ``# number=k`` comments are generator bookkeeping; the exact gate
    order is the test fixture, so do not reorder these lines.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=14
    prog.x(input_qubit[3]) # number=15
    prog.rx(1.8001325905069514,input_qubit[3]) # number=18
    prog.cx(input_qubit[0],input_qubit[3]) # number=16
    prog.h(input_qubit[1]) # number=22
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5
    prog.x(input_qubit[3]) # number=24
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.x(input_qubit[1]) # number=25
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.cx(input_qubit[1],input_qubit[0]) # number=26
    prog.z(input_qubit[1]) # number=27
    prog.cx(input_qubit[1],input_qubit[0]) # number=28
    prog.h(input_qubit[0]) # number=9
    prog.cx(input_qubit[2],input_qubit[0]) # number=10
    prog.x(input_qubit[1]) # number=17
    prog.cx(input_qubit[2],input_qubit[0]) # number=11
    prog.y(input_qubit[0]) # number=12
    prog.y(input_qubit[0]) # number=13
    prog.cx(input_qubit[2],input_qubit[1]) # number=23
    prog.x(input_qubit[0]) # number=19
    prog.x(input_qubit[0]) # number=20
    # circuit end
    return prog
if __name__ == '__main__':
    # Bernstein-Vazirani-style target function: f(rep) = <a, rep> xor b.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Exact statevector simulation of the circuit.
    backend = BasicAer.get_backend('statevector_simulator')
    sample_shot =8000  # NOTE: unused with the statevector backend
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Collapse amplitudes to per-basis-state probabilities, rounded to 3 dp.
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Transpile against a fake device to record the compiled circuit size.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit_Class2331.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
acea6aacb3db615fb9dd9d2a4eca0c15f1d8068a | 538 | py | Python | emplog/manage.py | tsitsiflora/newapi | 2f1c85b6b529c246fa1c890303f40b7308177d73 | [
"Apache-2.0"
] | null | null | null | emplog/manage.py | tsitsiflora/newapi | 2f1c85b6b529c246fa1c890303f40b7308177d73 | [
"Apache-2.0"
] | null | null | null | emplog/manage.py | tsitsiflora/newapi | 2f1c85b6b529c246fa1c890303f40b7308177d73 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django entry point: point Django at this project's settings
    # module before dispatching the management command.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'emplog.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; the original error stays chained.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
acea6ca8d24590d53b24d1efa8e1505907938cb1 | 12,384 | py | Python | OpenAttack2/victim/classifiers/transformers.py | NingNing-C/OpenAttack | b49c7907c791d1a95acb222560b03884833b2745 | [
"MIT"
] | null | null | null | OpenAttack2/victim/classifiers/transformers.py | NingNing-C/OpenAttack | b49c7907c791d1a95acb222560b03884833b2745 | [
"MIT"
] | null | null | null | OpenAttack2/victim/classifiers/transformers.py | NingNing-C/OpenAttack | b49c7907c791d1a95acb222560b03884833b2745 | [
"MIT"
] | null | null | null | import numpy as np
from .base import Classifier
from ...utils import language_by_name, HookCloser
from ...text_process.tokenizer import TransformersTokenizer,ProteinTokenizer
from ...attack_assist.word_embedding import WordEmbedding
import transformers
import torch
class TransformersClassifier(Classifier):
    """Victim classifier backed by a Huggingface transformers model.

    A forward hook (HookCloser) on the embedding layer captures the input
    embeddings of each forward pass so ``predict`` can return gradients of
    the loss with respect to them.
    """
    @property
    def TAGS(self):
        if self.__lang_tag is None:
            return super().TAGS
        return super().TAGS.union({ self.__lang_tag })
    def __init__(self,
            model : transformers.PreTrainedModel,
            tokenizer : transformers.PreTrainedTokenizer,
            embedding_layer,
            device : torch.device = None,
            max_length : int = 128,
            batch_size : int = 8,
            lang = None
        ):
        """
        Args:
            model: Huggingface model for classification.
            tokenizer: Huggingface tokenizer for classification. **Default:** None
            embedding_layer: The module of embedding_layer used in transformers models. For example, ``BertModel.bert.embeddings.word_embeddings``. **Default:** None
            device: Device of pytorch model. **Default:** "cpu" if cuda is not available else "cuda"
            max_len: Max length of input tokens. If input token list is too long, it will be truncated. Uses None for no truncation. **Default:** None
            batch_size: Max batch size of this classifier.
            lang: Language of this classifier. If is `None` then `TransformersClassifier` will intelligently select the language based on other parameters.
        """
        self.model = model
        if lang is not None:
            self.__lang_tag = language_by_name(lang)
        else:
            self.__lang_tag = None
        if device is None:
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.to(device)
        # curr_embedding is populated by the forward hook during each pass.
        self.curr_embedding = None
        self.hook = embedding_layer.register_forward_hook( HookCloser(self) )
        self.embedding_layer = embedding_layer
        # Build a token -> id map over the whole vocabulary.
        self.word2id = dict()
        for i in range(tokenizer.vocab_size):
            self.word2id[tokenizer.convert_ids_to_tokens(i)] = i
        self.__tokenizer = tokenizer
        self.embedding = embedding_layer.weight.detach().cpu().numpy()
        self.token_unk = tokenizer.unk_token
        self.token_unk_id = tokenizer.unk_token_id
        self.max_length = max_length
        self.batch_size = batch_size
    @property
    def tokenizer(self):
        # Wrap the raw Huggingface tokenizer in the framework's interface.
        return TransformersTokenizer(self.__tokenizer, self.__lang_tag)
    def to(self, device : torch.device):
        """
        Args:
            device: Device that moves model to.
        """
        self.device = device
        self.model = self.model.to(device)
        return self
    def get_pred(self, input_):
        # Predicted class = argmax of class probabilities.
        return self.get_prob(input_).argmax(axis=1)
    def get_prob(self, input_):
        # Tokenize raw sentences, then reuse the gradient path (labels are
        # dummies here; only the probabilities are returned).
        return self.get_grad([
            self.__tokenizer.tokenize(sent) for sent in input_
        ], [0] * len(input_))[0]
    def get_grad(self, input_, labels):
        v = self.predict(input_, labels)
        return v[0], v[1]
    def predict(self, sen_list, labels=None):
        """Run batched forward/backward passes over tokenized sentences.

        Returns (probabilities, embedding gradients without [CLS]/[SEP],
        last-layer hidden states), all as numpy arrays.
        """
        # Truncate to leave room for the [CLS]/[SEP] specials.
        sen_list = [
            sen[:self.max_length - 2] for sen in sen_list
        ]
        sent_lens = [ len(sen) for sen in sen_list ]
        batch_len = max(sent_lens) + 2
        # Attention mask: 1 over real tokens + specials, 0 over padding.
        attentions = np.array([
            [1] * (len(sen) + 2) + [0] * (batch_len - 2 - len(sen))
            for sen in sen_list
        ], dtype='int64')
        sen_list = [
            self.__tokenizer.convert_tokens_to_ids(sen)
            for sen in sen_list
        ]
        tokeinzed_sen = np.array([
            [self.__tokenizer.cls_token_id] + sen + [self.__tokenizer.sep_token_id] + ([self.__tokenizer.pad_token_id] * (batch_len - 2 - len(sen)))
            for sen in sen_list
        ], dtype='int64')
        result = None
        result_grad = None
        all_hidden_states = None
        if labels is None:
            labels = [0] * len(sen_list)
        labels = torch.LongTensor(labels).to(self.device)
        for i in range( (len(sen_list) + self.batch_size - 1) // self.batch_size):
            curr_sen = tokeinzed_sen[ i * self.batch_size: (i + 1) * self.batch_size ]
            curr_mask = attentions[ i * self.batch_size: (i + 1) * self.batch_size ]
            xs = torch.from_numpy(curr_sen).long().to(self.device)
            masks = torch.from_numpy(curr_mask).long().to(self.device)
            outputs = self.model(input_ids = xs,attention_mask = masks, output_hidden_states=True, labels=labels[ i * self.batch_size: (i + 1) * self.batch_size ])
            if i == 0:
                # First batch initializes the accumulators.
                all_hidden_states = outputs.hidden_states[-1].detach().cpu()
                loss = outputs.loss
                logits = outputs.logits
                logits = torch.nn.functional.softmax(logits,dim=-1)
                # Negated loss so the gradient points toward increasing loss.
                loss = - loss
                loss.backward()
                # curr_embedding was stashed by the forward hook.
                result_grad = self.curr_embedding.grad.clone().cpu()
                self.curr_embedding.grad.zero_()
                self.curr_embedding = None
                result = logits.detach().cpu()
            else:
                all_hidden_states = torch.cat((all_hidden_states, outputs.hidden_states[-1].detach().cpu()), dim=0)
                loss = outputs.loss
                logits = outputs.logits
                logits = torch.nn.functional.softmax(logits,dim=-1)
                loss = - loss
                loss.backward()
                result_grad = torch.cat((result_grad, self.curr_embedding.grad.clone().cpu()), dim=0)
                self.curr_embedding.grad.zero_()
                self.curr_embedding = None
                result = torch.cat((result, logits.detach().cpu()))
        result = result.numpy()
        all_hidden_states = all_hidden_states.numpy()
        # Drop the [CLS]/[SEP] positions from the gradient.
        result_grad = result_grad.numpy()[:, 1:-1]
        return result, result_grad, all_hidden_states
    def get_hidden_states(self, input_, labels=None):
        """
        :param list input_: A list of sentences of which we want to get the hidden states in the model.
        :rtype torch.tensor
        """
        return self.predict(input_, labels)[2]
    def get_embedding(self):
        # Expose the embedding matrix with its token -> id map.
        return WordEmbedding(self.word2id, self.embedding)
class ProteinClassifier(Classifier):
    """Victim classifier for protein-sequence models.

    NOTE(review): near-duplicate of TransformersClassifier above -- the only
    differences are the vocabulary source (``tokenizer.tok_to_idx``) and the
    ``tokenizer`` property; a shared base class would remove the duplication.
    """
    @property
    def TAGS(self):
        if self.__lang_tag is None:
            return super().TAGS
        return super().TAGS.union({ self.__lang_tag })
    def __init__(self,
            model : transformers.PreTrainedModel,
            tokenizer : transformers.PreTrainedTokenizer,
            embedding_layer,
            device : torch.device = None,
            max_length : int = 128,
            batch_size : int = 8,
            lang = None
        ):
        """
        Args:
            model: Huggingface model for classification.
            tokenizer: Huggingface tokenizer for classification. **Default:** None
            embedding_layer: The module of embedding_layer used in transformers models. For example, ``BertModel.bert.embeddings.word_embeddings``. **Default:** None
            device: Device of pytorch model. **Default:** "cpu" if cuda is not available else "cuda"
            max_len: Max length of input tokens. If input token list is too long, it will be truncated. Uses None for no truncation. **Default:** None
            batch_size: Max batch size of this classifier.
            lang: Language of this classifier. If is `None` then `TransformersClassifier` will intelligently select the language based on other parameters.
        """
        self.model = model
        if lang is not None:
            self.__lang_tag = language_by_name(lang)
        else:
            self.__lang_tag = None
        if device is None:
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.to(device)
        # curr_embedding is populated by the forward hook during each pass.
        self.curr_embedding = None
        self.hook = embedding_layer.register_forward_hook( HookCloser(self) )
        self.embedding_layer = embedding_layer
        # Protein tokenizers expose their vocabulary directly.
        self.word2id = tokenizer.tok_to_idx
        self.__tokenizer = tokenizer
        self.embedding = embedding_layer.weight.detach().cpu().numpy()
        self.token_unk = tokenizer.unk_token
        self.token_unk_id = tokenizer.unk_token_id
        self.max_length = max_length
        self.batch_size = batch_size
    @property
    def tokenizer(self):
        return ProteinTokenizer()
    def to(self, device : torch.device):
        """
        Args:
            device: Device that moves model to.
        """
        self.device = device
        self.model = self.model.to(device)
        return self
    def get_pred(self, input_):
        # Predicted class = argmax of class probabilities.
        return self.get_prob(input_).argmax(axis=1)
    def get_prob(self, input_):
        # Tokenize raw sequences, then reuse the gradient path (labels are
        # dummies here; only the probabilities are returned).
        return self.get_grad([
            self.__tokenizer.tokenize(sent) for sent in input_
        ], [0] * len(input_))[0]
    def get_grad(self, input_, labels):
        v = self.predict(input_, labels)
        return v[0], v[1]
    def predict(self, sen_list, labels=None):
        """Run batched forward/backward passes over tokenized sequences.

        Returns (probabilities, embedding gradients without the special
        boundary tokens, last-layer hidden states) as numpy arrays.
        """
        # Truncate to leave room for the two special boundary tokens.
        sen_list = [
            sen[:self.max_length - 2] for sen in sen_list
        ]
        sent_lens = [ len(sen) for sen in sen_list ]
        batch_len = max(sent_lens) + 2
        # Attention mask: 1 over real tokens + specials, 0 over padding.
        attentions = np.array([
            [1] * (len(sen) + 2) + [0] * (batch_len - 2 - len(sen))
            for sen in sen_list
        ], dtype='int64')
        sen_list = [
            self.__tokenizer.convert_tokens_to_ids(sen)
            for sen in sen_list
        ]
        tokeinzed_sen = np.array([
            [self.__tokenizer.cls_token_id] + sen + [self.__tokenizer.sep_token_id] + ([self.__tokenizer.pad_token_id] * (batch_len - 2 - len(sen)))
            for sen in sen_list
        ], dtype='int64')
        result = None
        result_grad = None
        all_hidden_states = None
        if labels is None:
            labels = [0] * len(sen_list)
        labels = torch.LongTensor(labels).to(self.device)
        for i in range( (len(sen_list) + self.batch_size - 1) // self.batch_size):
            curr_sen = tokeinzed_sen[ i * self.batch_size: (i + 1) * self.batch_size ]
            curr_mask = attentions[ i * self.batch_size: (i + 1) * self.batch_size ]
            xs = torch.from_numpy(curr_sen).long().to(self.device)
            masks = torch.from_numpy(curr_mask).long().to(self.device)
            outputs = self.model(input_ids = xs,attention_mask = masks, output_hidden_states=True, labels=labels[ i * self.batch_size: (i + 1) * self.batch_size ])
            if i == 0:
                # First batch initializes the accumulators.
                all_hidden_states = outputs.hidden_states[-1].detach().cpu()
                loss = outputs.loss
                logits = outputs.logits
                logits = torch.nn.functional.softmax(logits,dim=-1)
                # Negated loss so the gradient points toward increasing loss.
                loss = - loss
                loss.backward()
                # curr_embedding was stashed by the forward hook.
                result_grad = self.curr_embedding.grad.clone().cpu()
                self.curr_embedding.grad.zero_()
                self.curr_embedding = None
                result = logits.detach().cpu()
            else:
                all_hidden_states = torch.cat((all_hidden_states, outputs.hidden_states[-1].detach().cpu()), dim=0)
                loss = outputs.loss
                logits = outputs.logits
                logits = torch.nn.functional.softmax(logits,dim=-1)
                loss = - loss
                loss.backward()
                result_grad = torch.cat((result_grad, self.curr_embedding.grad.clone().cpu()), dim=0)
                self.curr_embedding.grad.zero_()
                self.curr_embedding = None
                result = torch.cat((result, logits.detach().cpu()))
        result = result.numpy()
        all_hidden_states = all_hidden_states.numpy()
        # Drop the special boundary positions from the gradient.
        result_grad = result_grad.numpy()[:, 1:-1]
        return result, result_grad, all_hidden_states
    def get_hidden_states(self, input_, labels=None):
        """
        :param list input_: A list of sentences of which we want to get the hidden states in the model.
        :rtype torch.tensor
        """
        return self.predict(input_, labels)[2]
    def get_embedding(self):
        # Expose the embedding matrix with its token -> id map.
        return WordEmbedding(self.word2id, self.embedding)
acea6cac697c8f7cefb6879e5aeab092eb9c0805 | 284 | py | Python | Chapter02/HelloOpenCVPy/HelloOpenCVPy.py | PacktPublishing/Hands-On-Algorithms-for-Computer-Vision | 204c69a357f42ac9b7ac641df697cf52ced96416 | [
"MIT"
] | 36 | 2018-06-03T13:53:48.000Z | 2022-03-15T13:52:47.000Z | Chapter02/HelloOpenCVPy/HelloOpenCVPy.py | PacktPublishing/Hands-On-Algorithms-for-Computer-Vision | 204c69a357f42ac9b7ac641df697cf52ced96416 | [
"MIT"
] | null | null | null | Chapter02/HelloOpenCVPy/HelloOpenCVPy.py | PacktPublishing/Hands-On-Algorithms-for-Computer-Vision | 204c69a357f42ac9b7ac641df697cf52ced96416 | [
"MIT"
] | 21 | 2018-08-07T11:04:05.000Z | 2021-04-15T12:51:41.000Z | # To be able to omit cv2 in code, use the following import statement (not recommended)
#from cv2 import *
# Otherwise use the following
import cv2

# Load the test image from the working directory; imread returns None on
# failure (missing or unreadable file) instead of raising.
image = cv2.imread("Test.png")
if image is not None:
    cv2.imshow("image", image)
    # Block until any key is pressed so the window stays visible.
    cv2.waitKey()
else:
    print("Empty image!")
acea6d66ff931c861f00bb58e690214586e6b26c | 703 | py | Python | src/opera/operations.py | sstanovnik/xopera-opera | 06031d37268913c6ba6dbc30ec6b4acb3a17dc5a | [
"Apache-2.0"
] | null | null | null | src/opera/operations.py | sstanovnik/xopera-opera | 06031d37268913c6ba6dbc30ec6b4acb3a17dc5a | [
"Apache-2.0"
] | null | null | null | src/opera/operations.py | sstanovnik/xopera-opera | 06031d37268913c6ba6dbc30ec6b4acb3a17dc5a | [
"Apache-2.0"
] | null | null | null | from opera import ansible
class Operation(object):
    """A single node operation bound to a concrete instance.

    Wraps the operation's implementation artifact and its declared inputs;
    ``run`` evaluates the inputs against the instance and executes the
    implementation through Ansible.
    """
    def __init__(self, instance, implementation, inputs):
        self.instance = instance
        self.implementation = implementation
        self.inputs = inputs
    def run(self):
        """Execute this operation; return (success: bool, attributes: dict)."""
        # Resolve which host the operation should run on.
        host = self.instance.get_host()
        # print("HOST: {}".format(host))
        if not self.implementation:
            # Nothing to execute counts as success with no new attributes.
            return True, {}
        print("  Executing {} ...".format(self.implementation))
        # Evaluate each declared input in the context of this instance.
        evaled_inputs = {
            k: v.eval(self.instance) for k, v in self.inputs.items()
        }
        status, attrs = ansible.run(
            host, self.implementation.primary.data, evaled_inputs,
        )
        # Ansible convention: exit status 0 means success.
        return status == 0, attrs
| 29.291667 | 68 | 0.590327 |
acea6e564672ee67e0d69d902f6a51fba221870a | 11,526 | py | Python | tests/test_global_minimize_marker_velocity.py | lvayssac/bioptim | 526abff72a8a1b2cb84ccc40c6067b7a18f537e3 | [
"MIT"
] | null | null | null | tests/test_global_minimize_marker_velocity.py | lvayssac/bioptim | 526abff72a8a1b2cb84ccc40c6067b7a18f537e3 | [
"MIT"
] | null | null | null | tests/test_global_minimize_marker_velocity.py | lvayssac/bioptim | 526abff72a8a1b2cb84ccc40c6067b7a18f537e3 | [
"MIT"
] | null | null | null | """
Test for file IO
"""
import pytest
import numpy as np
import biorbd_casadi as biorbd
from bioptim import (
OptimalControlProgram,
DynamicsList,
DynamicsFcn,
ObjectiveList,
ObjectiveFcn,
BoundsList,
QAndQDotBounds,
InitialGuessList,
ControlType,
OdeSolver,
Node,
)
from .utils import TestUtils
def prepare_ocp(
biorbd_model_path: str,
final_time: float,
n_shooting: int,
marker_velocity_or_displacement: str,
marker_in_first_coordinates_system: bool,
control_type: ControlType,
ode_solver: OdeSolver = OdeSolver.RK4(),
) -> OptimalControlProgram:
"""
Prepare an ocp that targets some marker velocities, either by finite differences or by jacobian
Parameters
----------
biorbd_model_path: str
The path to the bioMod file
final_time: float
The time of the final node
n_shooting: int
The number of shooting points
marker_velocity_or_displacement: str
which type of tracking: finite difference ('disp') or by jacobian ('velo')
marker_in_first_coordinates_system: bool
If the marker to track should be expressed in the global or local reference frame
control_type: ControlType
The type of controls
ode_solver: OdeSolver
The ode solver to use
Returns
-------
The OptimalControlProgram ready to be solved
"""
biorbd_model = biorbd.Model(biorbd_model_path)
# Add objective functions
if marker_in_first_coordinates_system:
# Marker should follow this segment (0 velocity when compare to this one)
coordinates_system_idx = 0
else:
# Marker should be static in global reference frame
coordinates_system_idx = None
objective_functions = ObjectiveList()
if marker_velocity_or_displacement == "disp":
objective_functions.add(
ObjectiveFcn.Lagrange.MINIMIZE_MARKERS,
derivative=True,
reference_jcs=coordinates_system_idx,
marker_index=6,
weight=1000,
)
elif marker_velocity_or_displacement == "velo":
objective_functions.add(
ObjectiveFcn.Lagrange.MINIMIZE_MARKERS_VELOCITY, node=Node.ALL, marker_index=6, weight=1000
)
else:
raise RuntimeError(
f"Wrong choice of marker_velocity_or_displacement, actual value is "
f"{marker_velocity_or_displacement}, should be 'velo' or 'disp'."
)
# Make sure the segments actually moves (in order to test the relative speed objective)
objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_STATE, key="qdot", node=Node.ALL, index=[2, 3], weight=-1)
# Dynamics
dynamics = DynamicsList()
expand = False if isinstance(ode_solver, OdeSolver.IRK) else True
dynamics.add(DynamicsFcn.TORQUE_DRIVEN, expand=expand)
# Path constraint
nq = biorbd_model.nbQ()
x_bounds = BoundsList()
x_bounds.add(bounds=QAndQDotBounds(biorbd_model))
x_bounds[0].min[nq:, :] = -10
x_bounds[0].max[nq:, :] = 10
# Initial guess
x_init = InitialGuessList()
x_init.add([1.5, 1.5, 0.0, 0.0, 0.7, 0.7, 0.6, 0.6])
# Define control path constraint
tau_min, tau_max, tau_init = -100, 100, 0
u_bounds = BoundsList()
u_bounds.add([tau_min] * biorbd_model.nbGeneralizedTorque(), [tau_max] * biorbd_model.nbGeneralizedTorque())
u_init = InitialGuessList()
u_init.add([tau_init] * biorbd_model.nbGeneralizedTorque())
return OptimalControlProgram(
biorbd_model,
dynamics,
n_shooting,
final_time,
x_init,
u_init,
x_bounds,
u_bounds,
objective_functions,
control_type=control_type,
ode_solver=ode_solver,
)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.RK8, OdeSolver.IRK])
def test_track_and_minimize_marker_displacement_global(ode_solver):
# Load track_and_minimize_marker_velocity
ode_solver = ode_solver()
ocp = prepare_ocp(
biorbd_model_path=TestUtils.bioptim_folder() + "/examples/track/cube_and_line.bioMod",
n_shooting=5,
final_time=1,
marker_velocity_or_displacement="disp",
marker_in_first_coordinates_system=False,
control_type=ControlType.CONSTANT,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], -143.5854887928483)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (40, 1))
np.testing.assert_almost_equal(g, np.zeros((40, 1)))
# Check some of the results
q, qdot, tau = sol.states["q"], sol.states["qdot"], sol.controls["tau"]
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.37791617, 3.70167396, 10.0, 10.0]), decimal=2)
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.37675299, -3.40771446, 10.0, 10.0]), decimal=2)
# initial and final controls
np.testing.assert_almost_equal(
tau[:, 0], np.array([-4.52595667e-02, 9.25475333e-01, -4.34001849e-08, -9.24667407e01]), decimal=2
)
np.testing.assert_almost_equal(
tau[:, -2], np.array([4.42976253e-02, 1.40077846e00, -7.28864793e-13, 9.24667396e01]), decimal=2
)
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.RK8, OdeSolver.IRK])
def test_track_and_minimize_marker_displacement_RT(ode_solver):
# Load track_and_minimize_marker_velocity
ode_solver = ode_solver()
ocp = prepare_ocp(
biorbd_model_path=TestUtils.bioptim_folder() + "/examples/track/cube_and_line.bioMod",
n_shooting=5,
final_time=1,
marker_velocity_or_displacement="disp",
marker_in_first_coordinates_system=True,
control_type=ControlType.CONSTANT,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], -200.80194174353494)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (40, 1))
np.testing.assert_almost_equal(g, np.zeros((40, 1)))
# Check some of the results
q, qdot, tau = sol.states["q"], sol.states["qdot"], sol.controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0.07221334, -0.4578082, -3.00436948, 1.57079633]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.05754807, -0.43931116, 2.99563057, 1.57079633]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([5.17192208, 2.3422717, 10.0, -10.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([-3.56965109, -4.36318589, 10.0, 10.0]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-3.21817755e01, 1.55202948e01, 7.42730542e-13, 2.61513401e-08]))
np.testing.assert_almost_equal(
tau[:, -2], np.array([-1.97981112e01, -9.89876772e-02, 4.34033234e-08, 2.61513636e-08])
)
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.RK8, OdeSolver.IRK])
def test_track_and_minimize_marker_velocity(ode_solver):
# Load track_and_minimize_marker_velocity
ode_solver = ode_solver()
ocp = prepare_ocp(
biorbd_model_path=TestUtils.bioptim_folder() + "/examples/track/cube_and_line.bioMod",
n_shooting=5,
final_time=1,
marker_velocity_or_displacement="velo",
marker_in_first_coordinates_system=True,
control_type=ControlType.CONSTANT,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], -80.20048585400944)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (40, 1))
np.testing.assert_almost_equal(g, np.zeros((40, 1)))
# Check some of the results
q, qdot, tau = sol.states["q"], sol.states["qdot"], sol.controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([7.18708669e-01, -4.45703930e-01, -3.14159262e00, 0]))
np.testing.assert_almost_equal(q[:, -1], np.array([1.08646846e00, -3.86731175e-01, 3.14159262e00, 0]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([3.78330878e-01, 3.70214281, 10, 0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([3.77168521e-01, -3.40782793, 10, 0]))
# # initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-4.52216174e-02, 9.25170010e-01, 0, 0]))
np.testing.assert_almost_equal(tau[:, -2], np.array([4.4260355e-02, 1.4004583, 0, 0]))
# # save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.RK8, OdeSolver.IRK])
def test_track_and_minimize_marker_velocity_linear_controls(ode_solver):
# Load track_and_minimize_marker_velocity
ode_solver = ode_solver()
if isinstance(ode_solver, OdeSolver.IRK):
with pytest.raises(
NotImplementedError, match="ControlType.LINEAR_CONTINUOUS ControlType not implemented yet with IRK"
):
prepare_ocp(
biorbd_model_path=TestUtils.bioptim_folder() + "/examples/track/cube_and_line.bioMod",
n_shooting=5,
final_time=1,
marker_velocity_or_displacement="velo",
marker_in_first_coordinates_system=True,
control_type=ControlType.LINEAR_CONTINUOUS,
ode_solver=ode_solver,
)
else:
ocp = prepare_ocp(
biorbd_model_path=TestUtils.bioptim_folder() + "/examples/track/cube_and_line.bioMod",
n_shooting=5,
final_time=1,
marker_velocity_or_displacement="velo",
marker_in_first_coordinates_system=True,
control_type=ControlType.LINEAR_CONTINUOUS,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (40, 1))
np.testing.assert_almost_equal(g, np.zeros((40, 1)))
# Check some of the results
q, qdot, tau = sol.states["q"], sol.states["qdot"], sol.controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[2:, 0], np.array([-3.14159264, 0]))
np.testing.assert_almost_equal(q[2:, -1], np.array([3.14159264, 0]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[2:, 0], np.array([10, 0]))
np.testing.assert_almost_equal(qdot[2:, -1], np.array([10, 0]))
# initial and final controls
np.testing.assert_almost_equal(tau[2:, 0], np.array([-8.495542, 0]), decimal=5)
np.testing.assert_almost_equal(tau[2:, -1], np.array([8.495541, 0]), decimal=5)
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol)
| 36.245283 | 120 | 0.670137 |
acea6ef184038746588de2cf84606bab92122dea | 1,539 | py | Python | sask_glacier_prob1.py | Intro-Quantitative-Geology/Exercise-12 | fbbb65f2b7d9424be35c4df5ab3859e72fd7cbbc | [
"MIT"
] | null | null | null | sask_glacier_prob1.py | Intro-Quantitative-Geology/Exercise-12 | fbbb65f2b7d9424be35c4df5ab3859e72fd7cbbc | [
"MIT"
] | null | null | null | sask_glacier_prob1.py | Intro-Quantitative-Geology/Exercise-12 | fbbb65f2b7d9424be35c4df5ab3859e72fd7cbbc | [
"MIT"
] | null | null | null | """
sask_glacier_prob1.py
This script ...
@author: NAME - DD.MM.YYYY
"""
# Import NumPy
import numpy as np
import matplotlib.pyplot as plt
#--- User-defined variables
a = # Viscosity [1/(Pa**3.0 s)]
h = # Channel width
y = # Range of values across channel for velocity calculation
umax = # Velocity at center of channel
n_prof = 4 # Number of velocity profiles to calculate
# Open and read input file
data = np.loadtxt(fname='sask_glacier_velo.txt', delimiter=',')
# Create zeros arrays
data_y = np.zeros(len(data)) # Create empty array for data y-values
data_u_ma = np.zeros(len(data)) # Create empty array for data velocities [m/a]
data_u_ms = np.zeros(len(data)) # Create empty array for data velocities [m/s]
# Loop over lines in file and split into different variables
for line in data:
data_y[linecount] = line[0]
data_u_ma[linecount] = line[1]
data_u_ms[linecount] = line[2]
linecount = linecount + 1
# Equations
u = np.zeros([n_prof,len()])
n = 1
# Velocity profile for a Newtonian or non-Newtonian fluid
for i in range():
n =
p = # Equation 10 rearranged to solve for (p1-p0)/L
for j in range():
if :
# Equation 10
else:
# Equation 10
# Plot predicted velocity profiles
plt.plot()
# Plot observed velocities
plt.plot()
# Add axis labels and a title
plt.xlabel("")
plt.ylabel("")
plt.title("")
plt.show()
| 25.65 | 80 | 0.615335 |
acea6f54ec82ed62867d2409bd27c9877b8d4c45 | 317 | py | Python | ec2ssh2/exceptions.py | scivey/ec2ssh2 | 905aa86d077033f237c8f7eb86ac9021af7b6762 | [
"MIT"
] | null | null | null | ec2ssh2/exceptions.py | scivey/ec2ssh2 | 905aa86d077033f237c8f7eb86ac9021af7b6762 | [
"MIT"
] | null | null | null | ec2ssh2/exceptions.py | scivey/ec2ssh2 | 905aa86d077033f237c8f7eb86ac9021af7b6762 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
class EC2SSH2Exception(Exception):
pass
class InvalidInput(EC2SSH2Exception):
pass
class UnsupportedMethod(EC2SSH2Exception):
pass
class NoInstancesFound(EC2SSH2Exception, LookupError):
pass
| 15.85 | 55 | 0.757098 |
acea6fa7ce0df93b77287bbc0e4b7cfb5f960db5 | 2,103 | py | Python | python2.7/site-packages/twisted/python/runtime.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
] | 4 | 2020-10-31T19:52:05.000Z | 2021-09-22T11:39:27.000Z | python2.7/site-packages/twisted/python/runtime.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
] | null | null | null | python2.7/site-packages/twisted/python/runtime.py | 84KaliPleXon3/sslstrip-hsts-openwrt | f875ded48078a3ed84bffef1e69dcbeaf2e77ae3 | [
"MIT"
] | 2 | 2020-02-27T08:28:35.000Z | 2020-09-13T12:39:26.000Z |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
# System imports
import os
import sys
import time
import imp
def shortPythonVersion():
hv = sys.hexversion
major = (hv & 0xff000000L) >> 24
minor = (hv & 0x00ff0000L) >> 16
teeny = (hv & 0x0000ff00L) >> 8
return "%s.%s.%s" % (major,minor,teeny)
knownPlatforms = {
'nt': 'win32',
'ce': 'win32',
'posix': 'posix',
'java': 'java',
'org.python.modules.os': 'java',
}
_timeFunctions = {
#'win32': time.clock,
'win32': time.time,
}
class Platform:
"""Gives us information about the platform we're running on"""
type = knownPlatforms.get(os.name)
seconds = staticmethod(_timeFunctions.get(type, time.time))
def __init__(self, name=None):
if name is not None:
self.type = knownPlatforms.get(name)
self.seconds = _timeFunctions.get(self.type, time.time)
def isKnown(self):
"""Do we know about this platform?"""
return self.type != None
def getType(self):
"""Return 'posix', 'win32' or 'java'"""
return self.type
def isMacOSX(self):
"""Return if we are runnng on Mac OS X."""
return sys.platform == "darwin"
def isWinNT(self):
"""Are we running in Windows NT?"""
if self.getType() == 'win32':
import _winreg
try:
k=_winreg.OpenKeyEx(_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows NT\CurrentVersion')
_winreg.QueryValueEx(k, 'SystemRoot')
return 1
except WindowsError:
return 0
# not windows NT
return 0
def isWindows(self):
return self.getType() == 'win32'
def supportsThreads(self):
"""Can threads be created?
"""
try:
return imp.find_module('thread')[0] is None
except ImportError:
return False
platform = Platform()
platformType = platform.getType()
seconds = platform.seconds
| 25.337349 | 84 | 0.571089 |
acea6ff139d373bbd7c41e26d3e0dac09499a61a | 1,252 | py | Python | xicam/gui/tests/test_imageviewmixins.py | ihumphrey/Xi-cam-test-actions | 2b37e58ca559e35c5228a0a7de58e47d823a5759 | [
"BSD-3-Clause-LBNL"
] | null | null | null | xicam/gui/tests/test_imageviewmixins.py | ihumphrey/Xi-cam-test-actions | 2b37e58ca559e35c5228a0a7de58e47d823a5759 | [
"BSD-3-Clause-LBNL"
] | null | null | null | xicam/gui/tests/test_imageviewmixins.py | ihumphrey/Xi-cam-test-actions | 2b37e58ca559e35c5228a0a7de58e47d823a5759 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from pytestqt import qtbot
def test_logIntensity(qtbot):
    """Smoke-test LogScaleIntensity with data spanning negative and large values."""
    from xicam.gui.widgets.imageviewmixins import LogScaleIntensity
    import numpy as np

    exponential = np.fromfunction(lambda x, y: np.exp((x ** 2 + y ** 2) / 10000.0), (100, 100)) - 2
    uniform = np.random.random((100, 100))
    wide_range = np.random.random((100, 100)) * 1000 - 2
    wide_range[:10, :10] = np.random.random((10, 10)) * 10 - 2

    # Keep references so the widgets stay alive while shown.
    windows = []
    for dataset in (exponential, uniform, wide_range):
        widget = LogScaleIntensity()
        widget.setImage(dataset)
        widget.show()
        windows.append(widget)
def test_xarrayview(qtbot):
    """Smoke-test XArrayView with a labeled 3-D DataArray."""
    from xicam.gui.widgets.imageviewmixins import XArrayView
    from xarray import DataArray
    import numpy as np

    volume = np.random.random((100, 10, 10,))
    labeled = DataArray(
        volume,
        dims=['E (eV)', 'y (μm)', 'x (μm)'],
        coords=[np.arange(100) * 100, np.arange(10) / 10., np.arange(10) / 10.],
    )
    view = XArrayView()
    view.setImage(labeled)
    view.show()
    # qtbot.stopForInteraction()
def test_betterlayout(qtbot):
    """Smoke-test BetterLayout with a small random image."""
    from xicam.gui.widgets.imageviewmixins import BetterLayout
    from xarray import DataArray
    import numpy as np

    image = np.random.random((10, 10,))
    view = BetterLayout()
    view.setImage(image)
    view.show()
    # qtbot.stopForInteraction()
acea70478bd66f7a77f983987114fffce7635272 | 17,848 | py | Python | src/util.py | weizhao-BME/metis-project3 | ed814d5fd5121d7089a0def963ba1719cfdb73ed | [
"MIT"
] | null | null | null | src/util.py | weizhao-BME/metis-project3 | ed814d5fd5121d7089a0def963ba1719cfdb73ed | [
"MIT"
] | null | null | null | src/util.py | weizhao-BME/metis-project3 | ed814d5fd5121d7089a0def963ba1719cfdb73ed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file includes utilities used in data analysis.
@author: Wei Zhao @ Metis, 01/27/2021
"""
#%%
import pickle
from collections import defaultdict
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import RandomizedSearchCV
import seaborn as sns
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.metrics import precision_score, recall_score, precision_recall_curve,f1_score
from sklearn.metrics import roc_auc_score, roc_curve
from yellowbrick.model_selection import FeatureImportances
#%%
#--------------------------------------------------------
def save_as_pickle(fn, data):
    """
    Serialize an object to disk as a pickle file.

    Parameters
    ----------
    fn : str
        Destination file path.
    data : any picklable object (a dictionary is recommended)
        Object to serialize.

    Returns
    -------
    None.
    """
    with open(fn, 'wb') as fout:
        pickle.dump(data, fout)
    print('Saved data to "' + fn + '"')
#--------------------------------------------------------
def read_from_pickle(fn):
    """
    Load a Python object from a pickle file.

    Parameters
    ----------
    fn : str
        Path of the pickle file.

    Returns
    -------
    object
        The unpickled data.
    """
    with open(fn, 'rb') as fin:
        data = pickle.load(fin)
    print('Read data from "' + fn + '"')
    return data
#--------------------------------------------------------
def gen_engine(name_of_db):
    """
    Create a SQLAlchemy engine for a local PostgreSQL database.

    Parameters
    ----------
    name_of_db : str
        Name of the database.

    Returns
    -------
    sqlalchemy.engine.base.Engine
        Engine suitable for use with pd.read_sql.
    """
    # Connection string: user 'weizhao', password 'localhost', host
    # localhost, default PostgreSQL port 5432.
    connection_url = ('postgresql://weizhao:'
                      'localhost@localhost:5432/' + name_of_db)
    return create_engine(connection_url)
#--------------------------------------------------------
def do(query, name_of_db='refinance'):
    """
    Run a SQL query against a local database and return the result.

    Parameters
    ----------
    query : str
        SQL statement to execute.
    name_of_db : str, optional
        Name of the database. The default is 'refinance'.

    Returns
    -------
    pandas.DataFrame
        Query result set.
    """
    return pd.read_sql(query, gen_engine(name_of_db))
#--------------------------------------------------------
def race_approval_rate(df):
    """
    Compute the loan-approval rate for each applicant race.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'applicant_race_1' (race code) and 'action_taken'
        (code 1 indicates an approved application).

    Returns
    -------
    d : collections.defaultdict(list)
        Maps each race code to a one-element list holding its approval
        rate in percent; keys are visited from most to least frequent race.
    """
    d = defaultdict(list)
    # value_counts() orders the race codes from most to least frequent.
    for race in df['applicant_race_1'].value_counts().index:
        app_stat = df.loc[df['applicant_race_1'] == race, 'action_taken']
        # Count approvals with an explicit comparison. The original
        # summed the raw values of the approved rows, which only gave a
        # count because the "approved" action code happens to equal 1.
        d[race].append(100.0 * (app_stat == 1).mean())
    return d
#--------------------------------------------------------
def ohe_data(x_cat):
    """
    One-hot encode a data frame of categorical variables.

    The first level of each variable is dropped to avoid collinearity
    among the dummy columns.

    Parameters
    ----------
    x_cat : pandas.DataFrame
        Categorical variables.

    Returns
    -------
    pandas.DataFrame
        Dummy variables, with generated column names and the original
        row index.
    """
    encoder = OneHotEncoder(sparse=False, drop='first')
    encoder.fit(x_cat)
    dummy_names = encoder.get_feature_names(x_cat.columns)
    encoded = encoder.transform(x_cat)
    return pd.DataFrame(encoded,
                        columns=dummy_names,
                        index=x_cat.index)
#--------------------------------------------------------
def std_data(x_cont):
    """
    Standardize a data frame of continuous variables (zero mean, unit
    variance per column).

    Parameters
    ----------
    x_cont : pandas.DataFrame
        Continuous variables.

    Returns
    -------
    pandas.DataFrame
        Standardized variables with the original columns and index.
    """
    scaler = StandardScaler()
    scaler.fit(x_cont)
    return pd.DataFrame(scaler.transform(x_cont),
                        columns=x_cont.columns,
                        index=x_cont.index)
#--------------------------------------------------------
def gen_cat(df):
    """
    Select only the categorical feature columns from the full data frame.

    Parameters
    ----------
    df : pandas.DataFrame
        Whole data frame.

    Returns
    -------
    pandas.DataFrame
        Data frame restricted to the categorical variables.
    """
    categorical_features = [
        'derived_msa_md', 'county_code',
        'conforming_loan_limit',
        'derived_race', 'derived_sex',
        'hoepa_status',
        'interest_only_payment',
        'balloon_payment', 'occupancy_type',
        'total_units', 'applicant_race_1', 'applicant_sex',
        'applicant_age_above_62', 'co_applicant_age_above_62',
        'derived_loan_product_type',
        'lien_status', 'open_end_line_of_credit',
        'business_or_commercial_purpose',
    ]
    return df[categorical_features]
#--------------------------------------------------------
def gen_cont(df):
    """
    Select only the continuous feature columns from the full data frame.

    Parameters
    ----------
    df : pandas.DataFrame
        Whole data frame.

    Returns
    -------
    pandas.DataFrame
        Data frame restricted to the continuous variables.
    """
    continuous_features = [
        'loan_term', 'loan_amount',
        'property_value', 'loan_to_value_ratio',
        'income', 'debt_to_income_ratio',
        'total_age', 'applicant_age',
        'co_applicant_age',
    ]
    return df[continuous_features]
#--------------------------------------------------------
def gen_hypergrid_for_rf_cv():
    """
    Build the hyperparameter grid for random-forest randomized search.

    Key names carry the ``classifier__`` prefix expected by the
    pipelines built in this module.

    Returns
    -------
    dict
        Hyperparameter name -> list of candidate values.
    """
    random_grid = {
        # Number of trees: 100 to 500 in steps of 100.
        'classifier__n_estimators': [int(v) for v in
                                     np.linspace(start=100, stop=500, num=5)],
        # Features considered at each split.
        'classifier__max_features': ['auto', 'sqrt', 'log2'],
        # Tree depth limits 10..50, plus unlimited (None).
        'classifier__max_depth': [int(v) for v in
                                  np.linspace(10, 50, num=5)] + [None],
        # Minimum samples to split an internal node.
        'classifier__min_samples_split': [2, 5, 10, 15, 20],
        # Minimum samples required at a leaf.
        'classifier__min_samples_leaf': [1, 2, 4],
        # Whether trees are trained on bootstrap samples.
        'classifier__bootstrap': [True, False],
    }
    print(random_grid)
    return random_grid
#--------------------------------------------------------
def get_logreg_models():
    """
    Build logistic regression models over a range of penalty strengths.

    Smaller C means stronger L2 regularization; the 0.0 entry is the
    unregularized baseline.

    Returns
    -------
    dict
        Maps the formatted penalty value (e.g. '0.0100') to an unfitted
        LogisticRegression.
    """
    models = dict()
    for strength in [0.0, 0.0001, 0.001, 0.01, 0.1, 1.0]:
        key = '%.4f' % strength
        if strength == 0.0:
            # No penalty for the baseline model.
            models[key] = LogisticRegression(solver='newton-cg',
                                             penalty='none',
                                             n_jobs=-1)
        else:
            models[key] = LogisticRegression(solver='newton-cg',
                                             penalty='l2', C=strength,
                                             n_jobs=-1)
    return models
#--------------------------------------------------------
def evaluate_model(model, X, y, scoring):
    """
    Score a model with repeated stratified 10-fold cross-validation
    (3 repeats, fixed random seed).

    Parameters
    ----------
    model : sklearn estimator
        Model to evaluate.
    X : array-like
        Training features.
    y : array-like
        Training labels.
    scoring : str
        Scoring metric name understood by scikit-learn.

    Returns
    -------
    numpy.ndarray
        One score per fold/repeat (30 values).
    """
    folds = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    return cross_val_score(model, X, y, scoring=scoring, cv=folds, n_jobs=-1)
#--------------------------------------------------------
def make_cv_pipelinie(classifier,
                      categorical_columns,
                      numerical_columns,
                      random_grid,
                      scoring='roc_auc'
                      ):
    """
    Build a randomized-search cross-validation pipeline.

    Categorical columns are one-hot encoded (unknown levels ignored)
    and numerical columns standardized before the classifier is fit.

    Parameters
    ----------
    classifier : sklearn estimator
        Base classifier to tune.
    categorical_columns : list
        Names of the categorical columns.
    numerical_columns : list
        Names of the numerical columns.
    random_grid : dict
        Hyperparameter grid, e.g. the output of gen_hypergrid_for_rf_cv.
    scoring : str, optional
        Scoring metric; available scores are listed at
        https://scikit-learn.org/stable/modules/model_evaluation.html.
        The default is 'roc_auc'.

    Returns
    -------
    RandomizedSearchCV
        Unfitted search object. After fitting, the tuned model is
        available via ``best_estimator_.named_steps['classifier']``.
    """
    preprocessing = ColumnTransformer(
        [('cat', OneHotEncoder(handle_unknown='ignore'), categorical_columns),
         ('num', StandardScaler(), numerical_columns)])
    search_pipeline = Pipeline([
        ('preprocess', preprocessing),
        ('classifier', classifier),
    ])
    # Randomized search over 100 parameter draws with stratified 5-fold
    # cross-validation, using all available cores.
    # NOTE(review): StratifiedKFold(5, random_state=15) without
    # shuffle=True raises in newer scikit-learn versions -- confirm the
    # pinned scikit-learn version supports this call.
    return RandomizedSearchCV(estimator=search_pipeline,
                              param_distributions=random_grid,
                              n_iter=100,
                              cv=StratifiedKFold(5,
                                                 random_state=15),
                              verbose=2, random_state=15,
                              scoring=scoring,
                              n_jobs=-1)
#--------------------------------------------------------
def make_data_dict(x_train, y_train,
                   x_test, y_test,
                   x_val, y_val,
                   categorical_columns=None,
                   numerical_columns=None
                   ):
    """
    Bundle a train/test/validation split into one dictionary for saving.

    Parameters
    ----------
    x_train, y_train : pandas.DataFrame
        Training features and labels.
    x_test, y_test : pandas.DataFrame
        Testing features and labels.
    x_val, y_val : pandas.DataFrame
        Validation features and labels.
    categorical_columns : list, optional
        Names of the categorical columns.
    numerical_columns : list, optional
        Names of the numerical columns.

    Returns
    -------
    dict
        Keys: 'x_train', 'y_train', 'x_test', 'y_test', 'x_val',
        'y_val', 'cat_col', 'num_col'.
    """
    return {
        'x_train': x_train, 'y_train': y_train,
        'x_test': x_test, 'y_test': y_test,
        'x_val': x_val, 'y_val': y_val,
        'cat_col': categorical_columns,
        'num_col': numerical_columns,
    }
#--------------------------------------------------------
def make_mdl_eval_pipeline(classifier,
                           categorical_columns,
                           numerical_columns
                           ):
    """
    Build a preprocessing + classifier pipeline for model evaluation.

    Parameters
    ----------
    classifier : sklearn estimator
        Base classifier.
    categorical_columns : list
        Names of categorical columns (one-hot encoded; unknown levels
        are ignored at transform time).
    numerical_columns : list
        Names of numerical columns (standardized).

    Returns
    -------
    sklearn.pipeline.Pipeline
        The assembled, unfitted pipeline.
    """
    preprocessing = ColumnTransformer(
        [('cat', OneHotEncoder(handle_unknown='ignore'), categorical_columns),
         ('num', StandardScaler(), numerical_columns)])
    return Pipeline([
        ('preprocess', preprocessing),
        ('classifier', classifier),
    ])
#--------------------------------------------------------
def disp_confusion_matrix(cf_matrix,
                          vmin, vmax,
                          cmap='Blues',
                          annot_kws={"size": 15}
                          ):
    """
    Plot a 2x2 confusion matrix annotated with quadrant names, counts
    and row-normalized percentages.

    Parameters
    ----------
    cf_matrix : numpy.ndarray
        Confusion matrix from sklearn.
    vmin, vmax : float
        Color-scale limits for the heatmap.
    cmap : str, optional
        Colormap name. The default is 'Blues'.
    annot_kws : dict, optional
        Text styling for the annotations. The default is {"size": 15}.
        NOTE(review): mutable default; harmless here since it is only
        passed through, never mutated.

    Returns
    -------
    ax : matplotlib axes
        Handle returned by seaborn.heatmap.
    """
    quadrant_names = ['True Neg', 'False Pos', 'False Neg', 'True Pos']
    counts = ['{0:0.0f}'.format(value) for value in cf_matrix.flatten()]
    # Percentages are normalized within each true-label row.
    row_totals = np.sum(cf_matrix, axis=1)[:, None]
    percentages = ['{0:.2%}'.format(value)
                   for value in (cf_matrix / row_totals).flatten()]
    labels = np.asarray(
        [f'{name}\n{count}\n{pct}'
         for name, count, pct in zip(quadrant_names, counts, percentages)]
    ).reshape(2, 2)
    ax = sns.heatmap(cf_matrix, annot=labels, annot_kws=annot_kws,
                     fmt='', cmap=cmap, vmin=vmin, vmax=vmax,
                     xticklabels=True, yticklabels=True)
    return ax
#--------------------------------------------------------
class multi_metrics:
    """
    Bundle common classification metrics for one set of predictions.

    Parameters
    ----------
    y_true : array-like
        Ground-truth labels.
    y_pred : array-like
        Predicted labels.
    y_score : array-like of shape (n_samples, 2), optional
        Predicted class probabilities; when given, ROC/PR curves and
        ROC AUC are computed from the positive-class column.
    """

    def __init__(self, y_true, y_pred, y_score=None):
        # Threshold-based metrics computed from the hard predictions.
        self.confusion_matrix = confusion_matrix(y_true, y_pred)
        self.confusion_matrix_norm_by_true = confusion_matrix(
            y_true, y_pred, normalize='true')
        self.accuracy = accuracy_score(y_true, y_pred)
        self.precision = precision_score(y_true, y_pred)
        self.recall = recall_score(y_true, y_pred)
        self.f1 = f1_score(y_true, y_pred)
        # Score-based metrics require class probabilities.
        if y_score is not None:
            positive_scores = y_score[:, 1]
            self.roc_auc = roc_auc_score(y_true, positive_scores)
            self.roc_curve = roc_curve(y_true, positive_scores)
            self.precision_recall_curve = precision_recall_curve(
                y_true, positive_scores)
#--------------------------------------------------------
def print_metrics(metrics):
    """
    Print roc_auc, accuracy, precision, recall and f1 of a
    ``multi_metrics`` object, each rounded to two decimals.

    Parameters
    ----------
    metrics : multi_metrics
        The output of multi_metrics (with y_score supplied, so roc_auc
        is available).

    Returns
    -------
    None.
    """
    print(f'roc_auc = {metrics.roc_auc:.2f}',
          f'\naccuracy = {metrics.accuracy:.2f}',
          f'\nprecision = {metrics.precision:.2f}',
          f'\nrecall = {metrics.recall:.2f}',
          f'\nf1 = {metrics.f1:.2f}',
          )
#--------------------------------------------------------
def get_feat_importance_logreg(mdl_logreg, x, y):
    """
    Compute feature importances for a logistic regression model using
    yellowbrick's FeatureImportances visualizer (analogous to random
    forest feature importance).

    Parameters
    ----------
    mdl_logreg : sklearn classifier
        Logistic regression model.
    x : pandas.DataFrame
        Features.
    y : array-like
        Labels.

    Returns
    -------
    tuple
        (importances reversed relative to the visualizer's ranking,
        the fitted visualizer).
    """
    visualizer = FeatureImportances(mdl_logreg,
                                    title='Logistic regression')
    visualizer.fit(x, y)
    # The plot itself is not needed; drop the axes and keep the numbers.
    visualizer.ax.remove()
    return visualizer.feature_importances_[::-1], visualizer
| 29.746667 | 90 | 0.552891 |
acea7053668717896c62137c4a091403804d1250 | 897 | py | Python | highton/call_mixins/list_comment_call_mixin.py | seibert-media/Highton | 1519e4fb105f62882c2e7bc81065d994649558d8 | [
"Apache-2.0"
] | 18 | 2015-06-24T02:33:12.000Z | 2022-02-11T10:33:58.000Z | highton/call_mixins/list_comment_call_mixin.py | seibert-media/Highton | 1519e4fb105f62882c2e7bc81065d994649558d8 | [
"Apache-2.0"
] | 13 | 2016-01-14T19:11:24.000Z | 2020-04-21T08:53:27.000Z | highton/call_mixins/list_comment_call_mixin.py | seibert-media/Highton | 1519e4fb105f62882c2e7bc81065d994649558d8 | [
"Apache-2.0"
] | 15 | 2015-04-15T15:08:31.000Z | 2022-02-11T15:34:19.000Z | from highton.call_mixins import Call
from highton import fields
class ListCommentCallMixin(Call):
    """
    Mixin that adds comment listing to classes that can carry comments,
    such as notes or emails.
    """
    # Comments are paged in groups of this size by the API.
    COMMENT_OFFSET = 25

    def list_comments(self, page=0):
        """
        Fetch the comments attached to the current object.

        :param page: the page starting at 0
        :return: the comments on that page
        :rtype: list
        """
        from highton.models.comment import Comment

        # The API expects an item offset rather than a page number.
        params = {'page': int(page) * self.COMMENT_OFFSET}
        endpoint = self.ENDPOINT + '/' + str(self.id) + '/' + Comment.ENDPOINT
        response = self._get_request(endpoint=endpoint, params=params)
        return fields.ListField(
            name=self.ENDPOINT,
            init_class=Comment,
        ).decode(self.element_from_string(response.text))
| 24.916667 | 89 | 0.554069 |
acea71fa824a26ce9b2227ae9e23247a7467debf | 4,160 | py | Python | examples/cheatsheet.py | vittot/pyCeterisParibus | efe5835574026fe6b1a6993cc08cc34e67b8e018 | [
"Apache-2.0"
] | 22 | 2019-04-06T17:33:12.000Z | 2021-12-13T21:46:47.000Z | examples/cheatsheet.py | vittot/pyCeterisParibus | efe5835574026fe6b1a6993cc08cc34e67b8e018 | [
"Apache-2.0"
] | 15 | 2018-11-27T17:50:16.000Z | 2019-04-23T17:07:43.000Z | examples/cheatsheet.py | vittot/pyCeterisParibus | efe5835574026fe6b1a6993cc08cc34e67b8e018 | [
"Apache-2.0"
] | 8 | 2018-12-12T12:24:21.000Z | 2022-02-06T21:09:55.000Z | import os
import pandas as pd
from sklearn import ensemble, svm
from sklearn.datasets import load_iris
from sklearn.linear_model import LinearRegression
from ceteris_paribus.explainer import explain
from ceteris_paribus.plots.plots import plot
from ceteris_paribus.profiles import individual_variable_profile
from ceteris_paribus.select_data import select_neighbours
from ceteris_paribus.datasets import DATASETS_DIR
# Load the bundled insurance dataset and keep only the columns used below.
df = pd.read_csv(os.path.join(DATASETS_DIR, 'insurance.csv'))
df = df[['age', 'bmi', 'children', 'charges']]
# 'charges' is the regression target; the remaining columns are features.
x = df.drop(['charges'], inplace=False, axis=1)
y = df['charges']
var_names = list(x.columns)
# Convert to plain numpy arrays for the explainers.
x = x.values
y = y.values
# Iris data for the classification example at the bottom of the script.
iris = load_iris()
def random_forest_classifier():
    """Fit a random forest on the iris data.

    Returns (model, data, labels, variable_names), matching the other
    model factories in this module.
    """
    clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=42)
    data, target = iris['data'], iris['target']
    clf.fit(data, target)
    return clf, data, target, iris['feature_names']
def linear_regression_model():
    """Fit an ordinary least-squares model on the insurance features.

    Returns (model, data, labels, variable_names).
    """
    model = LinearRegression()
    model.fit(x, y)
    return model, x, y, var_names
def gradient_boosting_model():
    """Fit a gradient-boosting regressor on the insurance features.

    Returns (model, data, labels, variable_names).
    """
    booster = ensemble.GradientBoostingRegressor(n_estimators=1000, random_state=42)
    booster.fit(x, y)
    return booster, x, y, var_names
def supported_vector_machines_model():
    """Fit a support-vector regressor on the insurance features.

    Returns (model, data, labels, variable_names).
    """
    regressor = svm.SVR(C=0.01, gamma='scale', kernel='poly')
    regressor.fit(x, y)
    return regressor, x, y, var_names
if __name__ == "__main__":
    # Fit the three regression models on the insurance data loaded above.
    (linear_model, data, labels, variable_names) = linear_regression_model()
    (gb_model, _, _, _) = gradient_boosting_model()
    (svm_model, _, _, _) = supported_vector_machines_model()
    # Wrap each fitted model in a ceteris-paribus explainer.
    explainer_linear = explain(linear_model, variable_names, data, y)
    explainer_gb = explain(gb_model, variable_names, data, y)
    explainer_svm = explain(svm_model, variable_names, data, y)
    # single profile
    cp_1 = individual_variable_profile(explainer_gb, x[0], y[0])
    plot(cp_1, destination="notebook", selected_variables=["bmi"], print_observations=False)
    # local fit: profile the 10 nearest neighbours of observation 10
    neighbours_x, neighbours_y = select_neighbours(x, x[10], y=y, n=10)
    cp_2 = individual_variable_profile(explainer_gb,
                                       neighbours_x, neighbours_y)
    plot(cp_2, show_residuals=True, selected_variables=["age"], print_observations=False, color_residuals='red',
         plot_title='')
    # aggregate profiles (partial-dependence-like mean curve)
    plot(cp_2, aggregate_profiles="mean", selected_variables=["age"], color_pdps='black', size_pdps=6,
         alpha_pdps=0.7, print_observations=False,
         plot_title='')
    # many variables
    plot(cp_1, selected_variables=["bmi", "age", "children"], print_observations=False, plot_title='', width=950)
    # many models
    cp_svm = individual_variable_profile(explainer_svm, x[0], y[0])
    cp_linear = individual_variable_profile(explainer_linear, x[0], y[0])
    plot(cp_1, cp_svm, cp_linear, print_observations=False, plot_title='', width=1050)
    # color by feature
    plot(cp_2, color="bmi", print_observations=False, plot_title='', width=1050, selected_variables=["age"], size=3)
    # classification multiplot: one explainer per iris class probability
    rf_model, iris_x, iris_y, iris_var_names = random_forest_classifier()
    explainer_rf1 = explain(rf_model, iris_var_names, iris_x, iris_y,
                            predict_function= lambda X: rf_model.predict_proba(X)[::, 0], label=iris.target_names[0])
    explainer_rf2 = explain(rf_model, iris_var_names, iris_x, iris_y,
                            predict_function= lambda X: rf_model.predict_proba(X)[::, 1], label=iris.target_names[1])
    explainer_rf3 = explain(rf_model, iris_var_names, iris_x, iris_y,
                            predict_function= lambda X: rf_model.predict_proba(X)[::, 2], label=iris.target_names[2])
    cp_rf1 = individual_variable_profile(explainer_rf1, iris_x[0], iris_y[0])
    cp_rf2 = individual_variable_profile(explainer_rf2, iris_x[0], iris_y[0])
    cp_rf3 = individual_variable_profile(explainer_rf3, iris_x[0], iris_y[0])
    plot(cp_rf1, cp_rf2, cp_rf3, selected_variables=['petal length (cm)', 'petal width (cm)', 'sepal length (cm)'],
         plot_title='', print_observations=False, width=1050)
acea723616eabcd6a039cb68ada53587b2def850 | 10,403 | py | Python | tests/test_utils.py | gxdai/solt | 770e397884bcafe80a11723c229e275c1c1f8b5a | [
"MIT"
] | 263 | 2018-09-17T21:17:38.000Z | 2022-01-10T07:16:53.000Z | tests/test_utils.py | Oulu-IMEDS/solt | 485bfb0d471134f75e09d06aa5e9ce4b57c0e13c | [
"MIT"
] | 64 | 2018-09-21T19:30:05.000Z | 2021-08-15T13:29:45.000Z | tests/test_utils.py | Oulu-IMEDS/solt | 485bfb0d471134f75e09d06aa5e9ce4b57c0e13c | [
"MIT"
] | 25 | 2018-09-18T06:44:19.000Z | 2020-05-30T15:45:59.000Z | import solt.utils as slu
import solt.transforms as slt
import solt.core as slc
import pytest
import json
import pathlib
import yaml
def test_parameter_validation_range_default_value_not_tuple():
    """A numeric-range default that is not a tuple must raise TypeError."""
    non_tuple_default = 123
    with pytest.raises(TypeError):
        slu.validate_numeric_range_parameter(123, non_tuple_default)
def test_parameter_validation_raises_error_when_types_dont_match():
    """Unsupported parameter/type combinations raise NotImplementedError."""
    allowed = {1, 2}
    with pytest.raises(NotImplementedError):
        slu.validate_parameter(allowed, 10, int)
def test_parameter_validation_raises_error_when_default_type_is_wrong():
    """A malformed default tuple must be rejected with ValueError."""
    bad_default = (10, "12345")
    with pytest.raises(ValueError):
        slu.validate_parameter(None, {1, 2}, bad_default, int)
def test_parameter_validation_raises_error_when_default_value_is_wrong_type():
    """A default whose value has the wrong basic type raises TypeError."""
    bad_default = ("10", "inherit")
    with pytest.raises(TypeError):
        slu.validate_parameter(None, {1, 2}, bad_default, int)
@pytest.mark.parametrize(
    "parameter",
    [
        (1, 2, 3),
        (10, "inherit"),
        (1, "i"),
    ],
)
def test_validate_parameter_raises_value_errors(parameter):
    """Invalid parameter tuples must be rejected with ValueError."""
    with pytest.raises(ValueError):
        slu.validate_parameter(parameter, {1, 2}, 1, basic_type=int)
# Both payloads describe the same pipeline; the second spells out the
# optional keys explicitly.
@pytest.mark.parametrize(
    "serialized",
    [
        {
            "stream": {
                "transforms": [
                    {"pad": {"pad_to": 34}},
                    {"crop": {"crop_to": 32, "crop_mode": "r"}},
                    {"cutout": {"cutout_size": 2}},
                ]
            }
        },
        {
            "stream": {
                "interpolation": None,
                "padding": None,
                "transforms": [
                    {"pad": {"pad_to": 34, "padding": "z"}},
                    {"crop": {"crop_to": 32, "crop_mode": "r"}},
                    {"cutout": {"cutout_size": 2, "p": 0.5}},
                ],
            },
        },
    ],
)
def test_deserialize_from_dict(serialized):
    """A deserialized dict must serialize identically to the hand-built stream."""
    trfs = slc.Stream([slt.Pad(34), slt.Crop(32, "r"), slt.CutOut(2)])
    serialized_trfs = json.dumps(trfs.to_dict())
    serialized_from_deserialized = json.dumps(slu.from_dict(serialized).to_dict())
    assert serialized_trfs == serialized_from_deserialized
# Each case pairs a serialized payload with the equivalent hand-built
# stream, exercising nested Streams and a Projection transform.
@pytest.mark.parametrize(
    "serialized, stream",
    [
        [
            {
                "stream": {
                    "transforms": [
                        {"pad": {"pad_to": 34}},
                        {"crop": {"crop_to": 32, "crop_mode": "r"}},
                        {"cutout": {"cutout_size": 2}},
                        {
                            "stream": {
                                "interpolation": None,
                                "padding": None,
                                "transforms": [
                                    {"pad": {"pad_to": 34, "padding": "z"}},
                                    {"crop": {"crop_to": 32, "crop_mode": "r"}},
                                    {"cutout": {"cutout_size": 2, "p": 0.5}},
                                ],
                            },
                        },
                    ]
                }
            },
            slc.Stream(
                [
                    slt.Pad(34),
                    slt.Crop(32, "r"),
                    slt.CutOut(2),
                    slc.Stream([slt.Pad(34), slt.Crop(32, "r"), slt.CutOut(2)]),
                ]
            ),
        ],
        [
            {
                "stream": {
                    "transforms": [
                        {
                            "stream": {
                                "interpolation": None,
                                "padding": None,
                                "transforms": [
                                    {"pad": {"pad_to": 34, "padding": "z"}},
                                    {"crop": {"crop_to": 32, "crop_mode": "r"}},
                                    {"cutout": {"cutout_size": 2, "p": 0.5}},
                                ],
                            },
                        },
                        {"pad": {"pad_to": 34}},
                        {"crop": {"crop_to": 32, "crop_mode": "c"}},
                        {"cutout": {"cutout_size": 4}},
                        {
                            "projection": {
                                "v_range": (0, 1e-3),
                                "affine_transforms": {
                                    "stream": {
                                        "transforms": [
                                            {"rotate": {"angle_range": 30}},
                                            {"scale": {"range_x": 2, "same": True}},
                                        ],
                                    }
                                },
                            }
                        },
                    ]
                }
            },
            slc.Stream(
                [
                    slc.Stream([slt.Pad(34), slt.Crop(32, "r"), slt.CutOut(2)]),
                    slt.Pad(34),
                    slt.Crop(32, "c"),
                    slt.CutOut(4),
                    slt.Projection(slc.Stream([slt.Rotate(30), slt.Scale(2)]), v_range=(0, 1e-3)),
                ]
            ),
        ],
    ],
)
def test_deserialize_from_dict_nested(serialized: dict, stream: slc.Stream):
    """Nested payloads and hand-built streams must serialize identically."""
    serialized_trfs = json.dumps(stream.to_dict())
    serialized_from_deserialized = json.dumps(slu.from_dict(serialized).to_dict())
    assert serialized_trfs == serialized_from_deserialized
def test_stream_serializes_all_args_are_set():
    """Every expected key must be present in a serialized Stream.

    The serialized dict carries interpolation, padding, optimize_stack and
    transforms; each rotate entry records its (value, 'inherit') pairs.
    """
    ppl = slc.Stream(
        [
            slt.Rotate(angle_range=(-106, 90), p=0.7, interpolation="nearest"),
            slt.Rotate(angle_range=(-106, 90), p=0.7, interpolation="nearest"),
            slt.Rotate(angle_range=(-106, 90), p=0.7, interpolation="nearest"),
            slt.Projection(
                slc.Stream([slt.Rotate(angle_range=(-6, 90), p=0.2, padding="r", interpolation="nearest"),])
            ),
        ]
    )
    serialized = ppl.to_dict()
    assert "interpolation" in serialized
    assert "padding" in serialized
    assert "optimize_stack" in serialized
    assert "transforms" in serialized
    assert len(serialized) == 5
    trfs = serialized["transforms"]
    for i, el in enumerate(trfs):
        t = list(el.keys())[0]
        # The first three entries are rotates; the last is the projection.
        if i < len(trfs) - 1:
            assert list(el.keys())[0] == "rotate"
            assert trfs[i][t]["p"] == 0.7
            assert trfs[i][t]["interpolation"] == ("nearest", "inherit")
            assert trfs[i][t]["padding"] == ("z", "inherit")
            assert trfs[i][t]["angle_range"] == (-106, 90)
        else:
            assert t == "projection"
            assert trfs[i][t]["affine_transforms"]["stream"]["transforms"][0]["rotate"]["p"] == 0.2
            assert trfs[i][t]["affine_transforms"]["stream"]["transforms"][0]["rotate"]["interpolation"] == (
                "nearest",
                "inherit",
            )
            assert trfs[i][t]["affine_transforms"]["stream"]["transforms"][0]["rotate"]["padding"] == ("r", "inherit")
            assert trfs[i][t]["affine_transforms"]["stream"]["transforms"][0]["rotate"]["angle_range"] == (-6, 90)
def test_transforms_type_when_deserializing():
    """from_dict must reject non-dict input with TypeError."""
    not_a_dict = 2
    with pytest.raises(TypeError):
        slu.from_dict(not_a_dict)
def test_transforms_not_in_registry_and_in_when_initialized():
    """A transform name joins the registry only once its class is defined."""
    payload = {"stream": {"transforms": [{"mycrop": {"crop_to": 32, "crop_mode": "c"}}]}}
    with pytest.raises(ValueError):
        slu.from_dict(payload)
    assert "mycrop" not in slc.BaseTransform.registry

    class MyCrop(slc.BaseTransform):
        serializable_name = "mycrop"

    assert "mycrop" in slc.BaseTransform.registry
def test_to_from_yaml_json():
    """JSON/YAML round-trips of a stream must be lossless and cross-convertible."""
    ppl = slc.Stream(
        [
            slt.Rotate(angle_range=(-106, 90), p=0.7, interpolation="nearest"),
            slt.Rotate(angle_range=(-106, 91), p=0.8, interpolation="nearest"),
            slt.Rotate(angle_range=(-106, 92), p=0.2, interpolation="bilinear"),
            slt.Projection(
                slc.Stream([slt.Rotate(angle_range=(-6, 90), p=0.2, padding="r", interpolation="nearest"),])
            ),
        ]
    )
    serialized = {"stream": ppl.to_dict()}
    # Reference serializations produced directly with json/yaml.
    with open("/tmp/solt_tmp_json0.json", "w") as f:
        json_s = json.dumps(serialized, indent=4)
        f.write(json_s)
    with open("/tmp/solt_tmp_yaml0.yaml", "w") as f:
        yaml_s = yaml.safe_dump(serialized)
        f.write(yaml_s)
    # Serializations produced by the library itself.
    ppl.to_json("/tmp/solt_tmp_json1.json")
    ppl.to_yaml("/tmp/solt_tmp_yaml1.yaml")
    loaded_from_json = slu.from_json("/tmp/solt_tmp_json1.json")
    loaded_from_yaml = slu.from_yaml("/tmp/solt_tmp_yaml1.yaml")
    assert json_s == loaded_from_json.to_json()
    assert yaml_s == loaded_from_yaml.to_yaml()
    # Same checks with pathlib.Path inputs and cross-format conversion.
    loaded_from_json = slu.from_json(pathlib.Path("/tmp/solt_tmp_json1.json"))
    loaded_from_yaml = slu.from_yaml(pathlib.Path("/tmp/solt_tmp_yaml1.yaml"))
    assert json_s == slu.from_yaml(loaded_from_json.to_yaml()).to_json()
    assert yaml_s == slu.from_json(loaded_from_yaml.to_json()).to_yaml()
def test_complex_transform_serialization():
    """YAML serialization of a complex stream must be deterministic."""
    # Deliberately deep pipeline: selective streams, projection, empty streams.
    stream = slc.Stream(
        [
            slt.Flip(axis=1, p=0.5),
            slc.SelectiveStream(
                [
                    slt.Rotate(angle_range=(-45, -45), p=1, padding="r"),
                    slt.Rotate90(1, p=1),
                    slt.Rotate(angle_range=(45, 45), p=1, padding="r"),
                ]
            ),
            slt.Crop((350, 350)),
            slc.SelectiveStream(
                [slt.GammaCorrection(gamma_range=0.5, p=1), slt.Noise(gain_range=0.1, p=1), slt.Blur()], n=3
            ),
            slt.Projection(
                affine_transforms=slc.Stream(
                    [
                        slt.Rotate(angle_range=(-45, 45), p=1),
                        slt.Scale(range_x=(0.8, 1.5), range_y=(0.8, 1.5), p=1, same=False),
                    ]
                ),
                v_range=(1e-4, 1e-3),
                p=1,
            ),
            slc.SelectiveStream(
                [
                    slt.CutOut(40, p=1),
                    slt.CutOut(30, p=1),
                    slt.CutOut(20, p=1),
                    slt.CutOut(40, p=1),
                    slc.Stream(),
                    slc.Stream(),
                    slc.Stream(),
                ],
                n=3,
            ),
        ]
    )
    # Serializing twice must yield identical YAML.
    assert slu.from_yaml(stream.to_yaml()).to_yaml() == slu.from_yaml(stream.to_yaml()).to_yaml()
| 36.121528 | 118 | 0.465731 |
acea7250670f4591f02276ace94293e5f83fc34a | 4,225 | py | Python | benchmark/startQiskit3036.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit3036.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit3036.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=42
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings.

    NOTE: the result is returned in reversed character order relative to
    the inputs, matching the original implementation.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Inner product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Construct the oracle O_f on n control qubits plus one target qubit.

    For every n-bit string rep with f(rep) == "1", X gates temporarily map
    the '0'-valued controls to '1', a multi-controlled Toffoli ('noancilla'
    mode) flips the target, and the X gates are undone.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")

    def toggle_zero_controls(rep):
        # Sandwiching X gates makes the plain MCT fire exactly on `rep`.
        for j in range(n):
            if rep[j] == "0":
                oracle.x(controls[j])

    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) != "1":
            continue
        toggle_zero_controls(rep)
        oracle.mct(controls, target[0], None, mode='noancilla')
        toggle_zero_controls(rep)
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the benchmark circuit on n qubits.

    Applies an auto-generated preamble of gates, appends the oracle for f
    on the first n-1 qubits plus the last qubit, applies a second generated
    gate sequence, and finally measures every qubit.

    NOTE(review): the `# number=N` tags are generator bookkeeping; gate
    order in the code, not tag order, determines behavior.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3])  # number=16
    prog.cz(input_qubit[0],input_qubit[3])  # number=17
    prog.h(input_qubit[3])  # number=18
    prog.cx(input_qubit[0],input_qubit[3])  # number=39
    prog.x(input_qubit[3])  # number=40
    prog.cx(input_qubit[0],input_qubit[3])  # number=41
    prog.h(input_qubit[3])  # number=32
    prog.cz(input_qubit[0],input_qubit[3])  # number=33
    prog.h(input_qubit[3])  # number=34
    prog.rx(-1.928937889304133,input_qubit[1])  # number=35
    prog.h(input_qubit[1])  # number=2
    prog.h(input_qubit[2])  # number=3
    prog.h(input_qubit[3])  # number=4
    prog.y(input_qubit[3])  # number=12
    prog.h(input_qubit[3])  # number=36
    prog.cz(input_qubit[2],input_qubit[3])  # number=37
    prog.h(input_qubit[3])  # number=38
    prog.h(input_qubit[0])  # number=5

    # Oracle acts on the first n-1 qubits with the last qubit as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1])  # number=6
    prog.h(input_qubit[2])  # number=24
    prog.cz(input_qubit[3],input_qubit[2])  # number=25
    prog.h(input_qubit[2])  # number=26
    prog.h(input_qubit[2])  # number=7
    prog.h(input_qubit[3])  # number=8
    prog.cx(input_qubit[0],input_qubit[2])  # number=29
    prog.x(input_qubit[2])  # number=30
    prog.cx(input_qubit[0],input_qubit[2])  # number=31
    prog.h(input_qubit[0])  # number=9
    prog.y(input_qubit[2])  # number=10
    prog.y(input_qubit[2])  # number=11
    prog.x(input_qubit[1])  # number=20
    prog.x(input_qubit[1])  # number=21
    prog.x(input_qubit[3])  # number=27
    prog.x(input_qubit[3])  # number=28
    # circuit end

    # Measure every qubit into the matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Hidden bit string and bias defining f(rep) = (a . rep) xor b.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    backend = BasicAer.get_backend('qasm_simulator')

    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Re-transpile against a mock device to record circuit statistics.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Write counts and the transpiled circuit to the benchmark CSV.
    writefile = open("../data/startQiskit3036.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 34.631148 | 140 | 0.649941 |
acea72642b6df960302ad791f26a4dab779e8d4c | 2,267 | py | Python | neural_structural_optimization/topo_api.py | yjxkwp/neural-structural-optimization | 34b06e2c7c52f06a909c1a413f254ca2ed03bf44 | [
"Apache-2.0"
] | 103 | 2019-10-20T02:50:43.000Z | 2022-01-17T20:13:29.000Z | neural_structural_optimization/topo_api.py | vkirilenko/neural-structural-optimization | 1c11b8c6ef50274802a84cf1a244735c3ed9394d | [
"Apache-2.0"
] | 1 | 2022-01-16T13:57:09.000Z | 2022-01-16T13:57:09.000Z | neural_structural_optimization/topo_api.py | isabella232/neural-structural-optimization | 1c11b8c6ef50274802a84cf1a244735c3ed9394d | [
"Apache-2.0"
] | 26 | 2019-10-24T01:55:43.000Z | 2022-03-22T09:51:43.000Z | # lint as python3
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
import autograd.numpy as np
from neural_structural_optimization import topo_physics
def specified_task(problem):
    """Translate a problem definition into topology-optimization parameters.

    Degrees of freedom with a nonzero normal constraint are fixed; all
    remaining DOFs (2 per node on a (width+1) x (height+1) grid) are free.
    """
    fixed = np.flatnonzero(problem.normals.ravel())
    num_dofs = 2 * (problem.width + 1) * (problem.height + 1)
    free = np.sort(list(set(np.arange(num_dofs)) - set(fixed)))
    return {
        # material properties
        'young': 1,
        'young_min': 1e-9,
        'poisson': 0.3,
        'g': 0,
        # constraints
        'volfrac': problem.density,
        'xmin': 0.001,
        'xmax': 1.0,
        # input parameters
        'nelx': problem.width,
        'nely': problem.height,
        'mask': problem.mask,
        'freedofs': free,
        'fixdofs': fixed,
        'forces': problem.forces.ravel(),
        'penal': 3.0,
        'filter_width': 2,
    }
class Environment:
    """Thin wrapper around topo_physics bound to one parameter dictionary."""

    def __init__(self, args):
        self.args = args
        # The element stiffness matrix depends only on material properties.
        self.ke = topo_physics.get_stiffness_matrix(args['young'], args['poisson'])

    def reshape(self, params):
        """View flat design variables as a (nely, nelx) grid."""
        return params.reshape(self.args['nely'], self.args['nelx'])

    def render(self, params, volume_contraint=True):
        """Physical densities for the given design variables."""
        grid = self.reshape(params)
        return topo_physics.physical_density(grid, self.args,
                                             volume_contraint=volume_contraint)

    def objective(self, params, volume_contraint=False):
        """Objective value from topo_physics for the given design variables."""
        grid = self.reshape(params)
        return topo_physics.objective(grid, self.ke, self.args,
                                      volume_contraint=volume_contraint)

    def constraint(self, params):
        """Signed volume violation: mean density minus the target fraction."""
        mean = topo_physics.mean_density(self.reshape(params), self.args)
        return mean - self.args['volfrac']
| 30.226667 | 79 | 0.684605 |
acea72b2e7de082028641498cab5074d90c80541 | 538 | py | Python | address_book/user/views.py | battila7/address-book | 28eac578cd140bd11ff19e3bb1c4446a78c0e997 | [
"MIT"
] | 2 | 2022-02-14T08:39:28.000Z | 2022-02-14T23:20:06.000Z | address_book/user/views.py | battila7/address-book | 28eac578cd140bd11ff19e3bb1c4446a78c0e997 | [
"MIT"
] | null | null | null | address_book/user/views.py | battila7/address-book | 28eac578cd140bd11ff19e3bb1c4446a78c0e997 | [
"MIT"
] | null | null | null | from rest_framework import views as rest_views, response, permissions
from drf_yasg.utils import swagger_auto_schema
from .serializers import UserSerializer
class UserSelfDetail(rest_views.APIView):
    permission_classes = [permissions.IsAuthenticated]

    @swagger_auto_schema(responses={200: UserSerializer})
    def get(self, request):
        """Get Current User

        Retrieves the currently authenticated user.
        """
        payload = UserSerializer(request.user).data
        return response.Response(payload)
| 28.315789 | 69 | 0.747212 |
acea72deab81aa1546a108720840e372dcbb09b5 | 910 | py | Python | setup.py | M-Ahadi/yahoo_weather | f494c38c38fd664a8ffc6440180775db135ba25a | [
"Apache-2.0"
] | 7 | 2019-01-29T00:59:20.000Z | 2020-08-25T22:41:00.000Z | setup.py | M-Ahadi/yahoo_weather | f494c38c38fd664a8ffc6440180775db135ba25a | [
"Apache-2.0"
] | 3 | 2019-03-19T18:21:37.000Z | 2022-02-11T16:25:02.000Z | setup.py | M-Ahadi/yahoo_weather | f494c38c38fd664a8ffc6440180775db135ba25a | [
"Apache-2.0"
] | 2 | 2019-04-03T20:45:46.000Z | 2020-10-01T10:56:47.000Z | import os
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))

# Pinned runtime dependencies.
requires = [
    'oauth2 == 1.9.0.post1',
    'requests == 2.23.0',
]

# Load package metadata (title, version, author, ...) from
# yahoo_weather/__version__.py without importing the package itself.
about = {}
with open(os.path.join(here, 'yahoo_weather', '__version__.py'), mode='rt', encoding='utf-8') as f:
    exec(f.read(), about)

setup(
    name=about['__title__'],
    version=about['__version__'],
    description=about['__description__'],
    author=about['__author__'],
    author_email=about['__author_email__'],
    license=about['__license__'],
    # The README becomes the long description shown on PyPI.
    long_description=open("README.md").read(),
    long_description_content_type='text/markdown',
    url=about['__url__'],
    install_requires=requires,
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| 26 | 99 | 0.643956 |
acea72e18eab521c6c4f49a6f98dbf0c16ddb372 | 1,247 | py | Python | elasticdl/python/worker/prediction_outputs_processor.py | QiJune/elasticdl | 6b01f5b32fd757badff96ed652662bd94afe9263 | [
"MIT"
] | null | null | null | elasticdl/python/worker/prediction_outputs_processor.py | QiJune/elasticdl | 6b01f5b32fd757badff96ed652662bd94afe9263 | [
"MIT"
] | null | null | null | elasticdl/python/worker/prediction_outputs_processor.py | QiJune/elasticdl | 6b01f5b32fd757badff96ed652662bd94afe9263 | [
"MIT"
] | null | null | null | # Copyright 2020 The ElasticDL Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
class BasePredictionOutputsProcessor(ABC):
    """Abstract base class for prediction-output processors.

    Subclasses implement :meth:`process` to consume the prediction outputs
    produced by a single worker.
    """

    @abstractmethod
    def process(self, predictions, worker_id):
        """Process one batch of prediction outputs from a single worker.

        Arguments:
            predictions: The raw prediction outputs from the model.
            worker_id: The ID of the worker that produces this
                batch of predictions.
        """
| 34.638889 | 74 | 0.710505 |
acea72e47c14de119af94bf7966faccbb95852c7 | 605 | py | Python | python/test/misc/test_args.py | takashiharano/util | 0f730475386a77415545de3f9763e5bdeaab0e94 | [
"MIT"
] | null | null | null | python/test/misc/test_args.py | takashiharano/util | 0f730475386a77415545de3f9763e5bdeaab0e94 | [
"MIT"
] | null | null | null | python/test/misc/test_args.py | takashiharano/util | 0f730475386a77415545de3f9763e5bdeaab0e94 | [
"MIT"
] | null | null | null | #!python
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import util
def test():
    """Exercise the util argument helpers and return a report string."""
    parts = [
        'get_args() = ' + str(util.get_args()) + '\n',
        '\n',
        'get_args_len() = ' + str(util.get_args_len()) + '\n',
        '\n',
        'get_arg(0) = ' + util.get_arg(0) + '\n',
        'get_arg(1) = ' + util.get_arg(1) + '\n',
        'get_arg(2) = ' + util.get_arg(2) + '\n',
        "get_arg(3, '-') = " + util.get_arg(3, '-') + '\n',
    ]
    return ''.join(parts)
def main():
    """Run the smoke test and print either its report or the error message."""
    try:
        output = test()
    except Exception as e:
        output = str(e)
    print(output)
# Entry point: runs unconditionally on import (no __main__ guard here).
main()
| 20.862069 | 65 | 0.522314 |
acea747ba849006f2ceefe13efc4c755e7a2292d | 11,117 | py | Python | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ncs1k_mxp_cfg.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ncs1k_mxp_cfg.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ncs1k_mxp_cfg.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """ Cisco_IOS_XR_ncs1k_mxp_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ncs1k\-mxp package configuration.
This module contains definitions
for the following management objects\:
hardware\-module\: NCS1k HW module config
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class ClientDataRate(Enum):
    """
    ClientDataRate (Enum Class)

    Client data rate

    .. data:: ten_gig = 1

        TenGig

    .. data:: forty_gig = 2

        FortyGig

    .. data:: hundred_gig = 3

        HundredGig

    .. data:: ten_and_hundred_gig = 4

        TenAndHundredGig

    """

    # Auto-generated YANG enum members: Enum.YLeaf(<value>, "<yang-name>").
    ten_gig = Enum.YLeaf(1, "ten-gig")

    forty_gig = Enum.YLeaf(2, "forty-gig")

    hundred_gig = Enum.YLeaf(3, "hundred-gig")

    ten_and_hundred_gig = Enum.YLeaf(4, "ten-and-hundred-gig")
class Fec(Enum):
    """
    Fec (Enum Class)

    Fec (forward error correction) mode

    .. data:: sd7 = 1

        SoftDecision7

    .. data:: sd20 = 2

        SoftDecision20

    """

    # Auto-generated YANG enum members: Enum.YLeaf(<value>, "<yang-name>").
    sd7 = Enum.YLeaf(1, "sd7")

    sd20 = Enum.YLeaf(2, "sd20")
class TrunkDataRate(Enum):
    """
    TrunkDataRate (Enum Class)

    Trunk data rate

    .. data:: hundred_gig = 2

        HundredGig

    .. data:: two_hundred_gig = 3

        TwoHundredGig

    .. data:: two_hundred_fifty_gig = 4

        TwoHundredFiftyGig

    """

    # Auto-generated YANG enum members: Enum.YLeaf(<value>, "<yang-name>").
    hundred_gig = Enum.YLeaf(2, "hundred-gig")

    two_hundred_gig = Enum.YLeaf(3, "two-hundred-gig")

    two_hundred_fifty_gig = Enum.YLeaf(4, "two-hundred-fifty-gig")
class HardwareModule(Entity):
"""
NCS1k HW module config
.. attribute:: node
Node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.HardwareModule.Node>`
"""
_prefix = 'ncs1k-mxp-cfg'
_revision = '2015-11-09'
def __init__(self):
super(HardwareModule, self).__init__()
self._top_entity = None
self.yang_name = "hardware-module"
self.yang_parent_name = "Cisco-IOS-XR-ncs1k-mxp-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("node", ("node", HardwareModule.Node))])
self._leafs = OrderedDict()
self.node = YList(self)
self._segment_path = lambda: "Cisco-IOS-XR-ncs1k-mxp-cfg:hardware-module"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(HardwareModule, [], name, value)
class Node(Entity):
"""
Node
.. attribute:: location (key)
Fully qualified line card specification
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: slice
Slice to be Provisioned
**type**\: list of :py:class:`Slice <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.HardwareModule.Node.Slice>`
"""
_prefix = 'ncs1k-mxp-cfg'
_revision = '2015-11-09'
def __init__(self):
super(HardwareModule.Node, self).__init__()
self.yang_name = "node"
self.yang_parent_name = "hardware-module"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['location']
self._child_classes = OrderedDict([("slice", ("slice", HardwareModule.Node.Slice))])
self._leafs = OrderedDict([
('location', (YLeaf(YType.str, 'location'), ['str'])),
])
self.location = None
self.slice = YList(self)
self._segment_path = lambda: "node" + "[location='" + str(self.location) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-ncs1k-mxp-cfg:hardware-module/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(HardwareModule.Node, ['location'], name, value)
class Slice(Entity):
"""
Slice to be Provisioned
.. attribute:: slice_id (key)
Set Slice
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: values
Data rates & FEC
**type**\: :py:class:`Values <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.HardwareModule.Node.Slice.Values>`
.. attribute:: client_ains
AINS Soak Interval Value
**type**\: :py:class:`ClientAins <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.HardwareModule.Node.Slice.ClientAins>`
.. attribute:: lldp
Drop LLDP Packets
**type**\: bool
"""
_prefix = 'ncs1k-mxp-cfg'
_revision = '2015-11-09'
def __init__(self):
super(HardwareModule.Node.Slice, self).__init__()
self.yang_name = "slice"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['slice_id']
self._child_classes = OrderedDict([("values", ("values", HardwareModule.Node.Slice.Values)), ("client-ains", ("client_ains", HardwareModule.Node.Slice.ClientAins))])
self._leafs = OrderedDict([
('slice_id', (YLeaf(YType.str, 'slice-id'), ['str'])),
('lldp', (YLeaf(YType.boolean, 'lldp'), ['bool'])),
])
self.slice_id = None
self.lldp = None
self.values = HardwareModule.Node.Slice.Values()
self.values.parent = self
self._children_name_map["values"] = "values"
self.client_ains = HardwareModule.Node.Slice.ClientAins()
self.client_ains.parent = self
self._children_name_map["client_ains"] = "client-ains"
self._segment_path = lambda: "slice" + "[slice-id='" + str(self.slice_id) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(HardwareModule.Node.Slice, ['slice_id', 'lldp'], name, value)
class Values(Entity):
"""
Data rates & FEC
.. attribute:: client_rate
Client Rate
**type**\: :py:class:`ClientDataRate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.ClientDataRate>`
.. attribute:: trunk_rate
TrunkRate
**type**\: :py:class:`TrunkDataRate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.TrunkDataRate>`
.. attribute:: fec
FEC
**type**\: :py:class:`Fec <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.Fec>`
.. attribute:: encrypted
Encrypted
**type**\: bool
**default value**\: false
"""
_prefix = 'ncs1k-mxp-cfg'
_revision = '2015-11-09'
def __init__(self):
super(HardwareModule.Node.Slice.Values, self).__init__()
self.yang_name = "values"
self.yang_parent_name = "slice"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('client_rate', (YLeaf(YType.enumeration, 'client-rate'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg', 'ClientDataRate', '')])),
('trunk_rate', (YLeaf(YType.enumeration, 'trunk-rate'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg', 'TrunkDataRate', '')])),
('fec', (YLeaf(YType.enumeration, 'fec'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg', 'Fec', '')])),
('encrypted', (YLeaf(YType.boolean, 'encrypted'), ['bool'])),
])
self.client_rate = None
self.trunk_rate = None
self.fec = None
self.encrypted = None
self._segment_path = lambda: "values"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(HardwareModule.Node.Slice.Values, ['client_rate', 'trunk_rate', 'fec', 'encrypted'], name, value)
class ClientAins(Entity):
    """
    AINS Soak Interval Value

    .. attribute:: hours
    	Hours
    	**type**\: int
    	**range:** 0..48
    	**units**\: hour

    .. attribute:: minutes
    	Minutes
    	**type**\: int
    	**range:** 0..59
    	**units**\: minute
    """

    # Generated ydk model metadata: YANG module prefix and revision.
    _prefix = 'ncs1k-mxp-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        super(HardwareModule.Node.Slice.ClientAins, self).__init__()

        self.yang_name = "client-ains"
        self.yang_parent_name = "slice"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf descriptors: python attribute -> (YLeaf, [type lookup info]).
        self._leafs = OrderedDict([
            ('hours', (YLeaf(YType.uint32, 'hours'), ['int'])),
            ('minutes', (YLeaf(YType.uint32, 'minutes'), ['int'])),
        ])
        self.hours = None
        self.minutes = None
        self._segment_path = lambda: "client-ains"
        # Freeze the instance: later writes are validated by __setattr__ below.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(HardwareModule.Node.Slice.ClientAins, ['hours', 'minutes'], name, value)
def clone_ptr(self):
    # Return a fresh top-level HardwareModule entity and remember it as the
    # current top entity (standard ydk entity hook).
    self._top_entity = HardwareModule()
    return self._top_entity
| 30.625344 | 181 | 0.515427 |
acea75e8d9751d3ef098b12c8b4396aafc43f884 | 3,366 | py | Python | picon.py | langara/MyBlocks | 6b2207f0ef4e2b508c093463c0b735c3a558ea60 | [
"Apache-2.0"
] | null | null | null | picon.py | langara/MyBlocks | 6b2207f0ef4e2b508c093463c0b735c3a558ea60 | [
"Apache-2.0"
] | null | null | null | picon.py | langara/MyBlocks | 6b2207f0ef4e2b508c093463c0b735c3a558ea60 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Small tool for copying android icons from material-design-icons repo to specified android gradle module.
It copies all density versions of png files to appropriate res subdirectories.
Usage:
picon.py add <category> <name> [-i <path>] [-o <path>] [-c <color>] [-s <size>]
picon.py rem <name> [-o <path>] [-c <color>] [-s <size>]
picon.py (-h | --help)
picon.py (-v | --version)
Options:
-c, --color <color> Which color version to use (black or white or all) [default: all]
-s, --size <size> Which size to use (number in dp units or 'all') [default: all]
-i, --input <path> Path where local copy of material-design-icons repo is located [default: /media/data/android_big/material-design-icons]
-o, --output <path> Path of top android module directory where icons will be copied [default: /home/marek/code/android/MyBlocks/myres]
-h, --help Show help screen.
-v, --version Show version.
Commands:
add: copy new icon from material-design-icons repo to android module
rem: remove all versions of given icon from android module
"""
VERSION='0.1.0'

# docopt parses the usage text in the module docstring into an argument dict.
try:
    from docopt import docopt
except ImportError:
    print 'This script needs a "docopt" module (http://docopt.org)'
    raise

from shutil import copyfile
from os import remove
from os import mkdir
from os.path import join
from os.path import isdir

# Android density buckets; each icon exists once per drawable-<density> dir.
densities = [
    "mdpi",
    "hdpi",
    "xhdpi",
    "xxhdpi",
    "xxxhdpi",
]
def add(category, name, color, size, inp, outp):
    """Copy every density variant of one material icon into the module's res tree.

    color and size may be concrete values or "all", which expands to both
    colors resp. all four dp sizes, exactly as the command-line options allow.
    """
    colors = ["black", "white"] if color == "all" else [color]
    sizes = ["18", "24", "36", "48"] if size == "all" else [size]
    for cur_color in colors:
        for cur_size in sizes:
            file_name = name + "_" + cur_color + "_" + cur_size + "dp.png"
            for density in densities:
                src_dir = join(inp, category, "drawable-" + density)
                dst_dir = join(outp, "src", "main", "res", "drawable-" + density)
                if not isdir(dst_dir):
                    mkdir(dst_dir)
                copyfile(join(src_dir, file_name), join(dst_dir, file_name))
def rem(name, color, size, outp):
if color == "all":
rem(name, "black", size, outp)
rem(name, "white", size, outp)
return
if size == "all":
rem(name, color, "18", outp)
rem(name, color, "24", outp)
rem(name, color, "36", outp)
rem(name, color, "48", outp)
return
name = name + "_" + color + "_" + size + "dp.png"
for density in densities:
ofile = join(outp, "src", "main", "res", "drawable-" + density, name)
try:
remove(ofile)
except OSError:
print "Can not remove:", ofile
def main():
    # docopt builds the argument dict from the module docstring (see file top);
    # the dict keys mirror the usage patterns ("add", "<name>", "--color", ...).
    argdict = docopt(__doc__, version=VERSION)
    if argdict["add"]:
        add(argdict["<category>"], argdict["<name>"], argdict["--color"], argdict["--size"], argdict["--input"], argdict["--output"])
    elif argdict["rem"]:
        rem(argdict["<name>"], argdict["--color"], argdict["--size"], argdict["--output"])

if __name__ == '__main__':
    main()
| 30.053571 | 151 | 0.572193 |
acea7613a01d83366fc47099aa20f037a8a7d3d9 | 2,484 | py | Python | tools/corenlpnertaggerservice/corenlptaggerservice.py | bepnye/brat | 28acfb2d3cce20bd4d4ff1a67690e271675841f2 | [
"CC-BY-3.0"
] | 20 | 2015-01-26T01:39:44.000Z | 2020-05-30T19:04:14.000Z | tools/corenlpnertaggerservice/corenlptaggerservice.py | bepnye/brat | 28acfb2d3cce20bd4d4ff1a67690e271675841f2 | [
"CC-BY-3.0"
] | 7 | 2015-04-11T12:57:42.000Z | 2016-04-08T13:43:44.000Z | tools/corenlpnertaggerservice/corenlptaggerservice.py | bepnye/brat | 28acfb2d3cce20bd4d4ff1a67690e271675841f2 | [
"CC-BY-3.0"
] | 13 | 2015-01-26T01:39:45.000Z | 2022-03-09T16:45:09.000Z | #!/usr/bin/env python
'''
Simple tagger service using CoreNLP.
Author: Pontus Stenetorp <pontus stenetorp se>
Version: 2012-04-18
'''
from argparse import ArgumentParser
from cgi import FieldStorage
from os.path import dirname, join as path_join
from corenlp import CoreNLPTagger
try:
from json import dumps
except ImportError:
# likely old Python; try to fall back on ujson in brat distrib
from sys import path as sys_path
sys_path.append(path_join(dirname(__file__), '../../server/lib/ujson'))
from ujson import dumps
from sys import stderr
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
### Constants

ARGPARSER = ArgumentParser(description='XXX')#XXX: description still a placeholder
ARGPARSER.add_argument('-p', '--port', type=int, default=47111,
        help='port to run the HTTP service on (default: 47111)')
# Initialised lazily in main() because starting CoreNLP is slow.
TAGGER = None
#XXX: Hard-coded path to the CoreNLP distribution next to this script!
CORENLP_PATH = path_join(dirname(__file__), 'stanford-corenlp-2012-04-09')

###
class CoreNLPTaggerHandler(BaseHTTPRequestHandler):
    """HTTP handler: POSTed document text in, CoreNLP annotations out as JSON."""

    def do_POST(self):
        print >> stderr, 'Received request'
        # Parse the form-encoded POST body; field_storage.value is the raw text.
        field_storage = FieldStorage(
            headers=self.headers,
            environ={
                'REQUEST_METHOD':'POST',
                'CONTENT_TYPE':self.headers['Content-Type'],
                },
            fp=self.rfile)

        global TAGGER
        json_dic = TAGGER.tag(field_storage.value)

        # Write the response: status, headers, then the JSON body (this order
        # is required by BaseHTTPRequestHandler).
        self.send_response(200)
        self.send_header('Content-type', 'application/json; charset=utf-8')
        self.end_headers()
        self.wfile.write(dumps(json_dic))
        print >> stderr, ('Generated %d annotations' % len(json_dic))

    def log_message(self, format, *args):
        return # Too much noise from the default implementation
def main(args):
    # args is expected to be sys.argv; argv[0] is dropped for argparse.
    argp = ARGPARSER.parse_args(args[1:])

    print >> stderr, "WARNING: Don't use this in a production environment!"

    print >> stderr, 'Starting CoreNLP process (this takes a while)...',
    global TAGGER
    TAGGER = CoreNLPTagger(CORENLP_PATH)
    print >> stderr, 'Done!'

    server_class = HTTPServer
    httpd = server_class(('localhost', argp.port), CoreNLPTaggerHandler)
    print >> stderr, 'CoreNLP tagger service started'
    try:
        # Serve until interrupted; Ctrl-C shuts the server down cleanly.
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    print >> stderr, 'CoreNLP tagger service stopped'

if __name__ == '__main__':
    from sys import argv
    exit(main(argv))
| 29.223529 | 75 | 0.667069 |
acea76e712aa146f83708edf54485dfbf28d9f9d | 942 | py | Python | lista.py | isaberamos/Programinhas | d0bfa5099edaf05b9a5f055bf7ce5432588cdc3d | [
"MIT"
] | 1 | 2021-12-28T21:37:33.000Z | 2021-12-28T21:37:33.000Z | lista.py | isaberamos/Programinhas | d0bfa5099edaf05b9a5f055bf7ce5432588cdc3d | [
"MIT"
] | null | null | null | lista.py | isaberamos/Programinhas | d0bfa5099edaf05b9a5f055bf7ce5432588cdc3d | [
"MIT"
] | null | null | null | lista = []
# Read five integers from the user (prompts are in Portuguese), then report
# the largest and smallest value and the index of their first occurrence.
for cont in range(1,6):
    lista.append(int(input(f"Digite o {cont}º valor: ")))
print(f"O maior valor é {max(lista)} e está na posição {lista.index(max(lista))}")
print(f"O menor valor é {min(lista)} e está na posição {lista.index(min(lista))}")
'''Ou há um jeito mais sofisticado de resolver, porém mais longo:
listanum = []
mai = 0
men = 0
for c in range(0,5):
listanum.append(int(input("Digite um valor para a posição {c}: )))
if c == 0:
mai = men = listanum[c]
else:
if listanum[c] > mai:
mai = listanum[c]
if:
if listanum[c] < men:
men = listanum[c]
print(f"Você digitou os valores {listanum}")
print(f"O maior valor digitado foi {mai} e o menor foi {men} nas posições ")
for i, v in enumerate(listanum):
if v == mai:
print(f"{i}..., end="")
for i, v in enumerate(listanum):
if v == men:
print(f"{i}..., end="")
'''
| 28.545455 | 82 | 0.580679 |
acea77a75757972520007ddd0898ae6989b72e74 | 302 | py | Python | example_locust_request_file.py | brentgriffin/HTP-locust-request-generator | db7f2380f1159ad6dcb310bc936c1f04e7665359 | [
"MIT"
] | null | null | null | example_locust_request_file.py | brentgriffin/HTP-locust-request-generator | db7f2380f1159ad6dcb310bc936c1f04e7665359 | [
"MIT"
] | null | null | null | example_locust_request_file.py | brentgriffin/HTP-locust-request-generator | db7f2380f1159ad6dcb310bc936c1f04e7665359 | [
"MIT"
] | null | null | null | from locust import HttpLocust, TaskSet, task
class MyTaskSet(TaskSet):
    """Load-test tasks; the @task weights make index() run ~10x as often as about()."""

    @task(100)
    def index(self):
        self.client.get("/")

    @task(10)
    def about(self):
        self.client.get("/about/")


class MyLocust(HttpLocust):
    # Simulated user executing MyTaskSet, waiting 5-15 s (values in ms)
    # between consecutive tasks.
    task_set = MyTaskSet
    min_wait = 5000
    max_wait = 15000
acea77f68c9d402a048e294cab6e74e0bb48ea3d | 613 | py | Python | Validation/CSCRecHits/python/cscRecHitValidation_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 13 | 2015-11-30T15:49:45.000Z | 2022-02-08T16:11:30.000Z | Validation/CSCRecHits/python/cscRecHitValidation_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 640 | 2015-02-11T18:55:47.000Z | 2022-03-31T14:12:23.000Z | Validation/CSCRecHits/python/cscRecHitValidation_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 51 | 2015-08-11T21:01:40.000Z | 2022-03-30T07:31:34.000Z | import FWCore.ParameterSet.Config as cms
from Validation.CSCRecHits.cscRecHitPSet import *
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
cscRecHitValidation = DQMEDAnalyzer(
'CSCRecHitValidation',
cscRecHitPSet,
doSim = cms.bool(True),
useGEMs = cms.bool(False),
simHitsTag = cms.InputTag("mix","g4SimHitsMuonCSCHits")
)
from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM
run3_GEM.toModify(cscRecHitValidation, useGEMs = True)
from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toModify(cscRecHitValidation, simHitsTag = "mix:MuonSimHitsMuonCSCHits")
| 34.055556 | 80 | 0.810767 |
acea791980db2bef684dcd7cc23d1d3ddebc9a31 | 1,263 | py | Python | tensorflow/sagemakercv/utils/paths_catalog.py | aws-samples/amazon-sagemaker-cv | 0390660c6aae3880e18539af21047c078bfd0e48 | [
"Apache-2.0",
"BSD-2-Clause-FreeBSD"
] | 16 | 2021-12-14T01:35:16.000Z | 2022-03-10T19:31:48.000Z | tensorflow/sagemakercv/utils/paths_catalog.py | aws-samples/amazon-sagemaker-cv | 0390660c6aae3880e18539af21047c078bfd0e48 | [
"Apache-2.0",
"BSD-2-Clause-FreeBSD"
] | null | null | null | tensorflow/sagemakercv/utils/paths_catalog.py | aws-samples/amazon-sagemaker-cv | 0390660c6aae3880e18539af21047c078bfd0e48 | [
"Apache-2.0",
"BSD-2-Clause-FreeBSD"
] | 1 | 2022-03-14T18:45:55.000Z | 2022-03-14T18:45:55.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Facebook, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Centralized catalog of paths."""
import os
class DatasetCatalog(object):
DATA_DIR = os.path.expanduser("~/data")
DATASETS = {
"coco_2017_train": {
"img_file_pattern": "coco/train/train*",
"ann_file": "coco/annotations/instances_train2017.json"
},
"coco_2017_val": {
"img_file_pattern": "coco/val/val*",
"ann_file": "coco/annotations/instances_val2017.json"
},
}
WEIGHTS = os.path.join(DATA_DIR, 'weights/tf/resnet/resnet-nhwc-2018-02-07/model.ckpt-112603')
OUTPUT_DIR = "/home/ubuntu/models"
| 33.236842 | 98 | 0.673001 |
acea7934a942f308e7e98d6139d2aaeae4d37ab3 | 2,677 | py | Python | finalProject.py | Sanjai1112/Magnificent-Maniacs | 43633f1f0777030e0628ca6006187a58319aa44f | [
"MIT"
] | 1 | 2019-02-02T14:45:00.000Z | 2019-02-02T14:45:00.000Z | finalProject.py | Sanjai1112/Magnificent-Maniacs | 43633f1f0777030e0628ca6006187a58319aa44f | [
"MIT"
] | null | null | null | finalProject.py | Sanjai1112/Magnificent-Maniacs | 43633f1f0777030e0628ca6006187a58319aa44f | [
"MIT"
] | 1 | 2018-08-17T03:50:59.000Z | 2018-08-17T03:50:59.000Z | from gtts import gTTS
import speech_recognition as sr
import pygame
import time
import MySQLdb
# NOTE(review): database credentials are hard-coded; consider moving them to
# configuration or environment variables.
db=MySQLdb.connect('localhost','dinesh','Dinesh@1997','pythonproject')
a = db.cursor()
def function():
    # Ask (via synthesised speech) which problem the user has, listen on the
    # microphone, echo the recognised text back, and return it. Falls off the
    # end (returns None) when recognition fails.
    tts=gTTS(text="What problem do you have",lang="en")
    tts.save("question.mp3")
    pygame.mixer.music.load("question.mp3")
    pygame.mixer.music.play()
    time.sleep(4)
    print("Listening your disease")
    with m as source:
        audio=r.listen(source)
    try:
        disease=r.recognize_google(audio)
        # diseaseList.append(disease)
        value = "you have "+disease
        tts=gTTS(text=value,lang="en")
        tts.save("disease.mp3")
        pygame.mixer.music.load("disease.mp3")
        pygame.mixer.music.play()
        time.sleep(5)
        return disease
    except LookupError:
        print("Oops! Didn't catch that")
    except sr.RequestError as e:
        print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e))
    except sr.UnknownValueError:
        print("No data given")
        pass
# nameList=[]
# diseaseList=[]
# Maps a recognised disease name to the ward announcement played to the user.
myDiseaseDict={'fever':'you can go to ward 1','hepatitis':'you can go to ward 2','fracture':'you can go to ward 3','tuberclosis':'you can go to ward 4'}
pygame.init()
r=sr.Recognizer()
m=sr.Microphone()
# Greet the user and ask for their name before entering the main loop.
tts=gTTS(text="Hi Hello,i am your assistant,Tell me What is your name",lang="en")
tts.save("welcome.mp3")
pygame.mixer.music.load("welcome.mp3")
pygame.mixer.music.play()
time.sleep(4)
print("Listening your name")
# Main loop: recognise the user's name, ask for their disease, announce the
# matching ward and persist (name, disease) to MySQL. Recognition errors
# either retry (LookupError falls through to the next iteration) or exit.
while True:
    with m as source:
        audio=r.listen(source)
    try:
        name=r.recognize_google(audio)
        # nameList.append(name)
        value = "your name is "+name
        tts=gTTS(text=value,lang="en")
        tts.save("name.mp3")
        pygame.mixer.music.load("name.mp3")
        pygame.mixer.music.play()
        time.sleep(5)
        disease = function()
        # NOTE(review): raises KeyError if the recognised disease is not a key
        # of myDiseaseDict (or if function() returned None) — confirm intended.
        place=myDiseaseDict[disease]
        tts=gTTS(text=place,lang="en")
        tts.save("place.mp3")
        pygame.mixer.music.load("place.mp3")
        pygame.mixer.music.play()
        time.sleep(4)
        try:
            sql = "INSERT INTO mytable (name, disease) VALUES (%s, %s)"
            val=(name,disease)
            a.execute (sql,val)
            db.commit()
        except:
            db.rollback()
    except LookupError:
        print("Oops! Didn't catch that")
    except sr.RequestError as e:
        print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e))
        break
    except sr.UnknownValueError:
        print("No data given")
        break

# Dump the accumulated rows and close the connection on exit.
a.execute("""select * from mytable;""")
print(a.fetchall())
db.close()
| 30.420455 | 152 | 0.625327 |
acea79d0bf637e398a415ba45f360832d17fb7da | 559 | py | Python | leetcode/1414_find_the_minimum_number_of_fibonacci_numbers_whose_sum_is_k.py | jacquerie/leetcode | a05e6b832eb0e0740aaff7b2eb3109038ad404bf | [
"MIT"
] | 3 | 2018-05-10T09:56:49.000Z | 2020-11-07T18:09:42.000Z | leetcode/1414_find_the_minimum_number_of_fibonacci_numbers_whose_sum_is_k.py | jacquerie/leetcode | a05e6b832eb0e0740aaff7b2eb3109038ad404bf | [
"MIT"
] | null | null | null | leetcode/1414_find_the_minimum_number_of_fibonacci_numbers_whose_sum_is_k.py | jacquerie/leetcode | a05e6b832eb0e0740aaff7b2eb3109038ad404bf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
class Solution:
    def findMinFibonacciNumbers(self, k: int) -> int:
        """Return the minimum count of Fibonacci numbers summing to k.

        Greedy: repeatedly subtract the largest Fibonacci number <= k;
        Zeckendorf's theorem guarantees this is optimal. Rewritten
        iteratively — the original recursed once per term and special-cased
        k in {0, 1, 2}, which the loop already handles correctly.
        """
        count = 0
        while k > 0:
            # Find the largest Fibonacci number a with a <= k.
            a, b = 1, 1
            while b <= k:
                a, b = b, a + b
            k -= a
            count += 1
        return count
if __name__ == '__main__':
    # Smoke tests taken from the problem statement's examples.
    solution = Solution()
    assert 2 == solution.findMinFibonacciNumbers(7)
    assert 2 == solution.findMinFibonacciNumbers(10)
    assert 3 == solution.findMinFibonacciNumbers(19)
| 23.291667 | 54 | 0.54025 |
acea7ad85b0695e89284bebc3b8cd18dcb71f0e5 | 1,245 | py | Python | src/robotide/widgets/font.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-06-27T08:48:24.000Z | 2019-06-27T08:48:24.000Z | src/robotide/widgets/font.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robotide/widgets/font.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
class Font(object):
    """Factory for the fonts used by the application, all derived from the
    platform's default system font size."""

    @property
    def help(self):
        # Slightly smaller text for help content.
        return self._get_font(scale=-2)

    @property
    def fixed(self):
        return self._get_font(family=wx.FONTFAMILY_MODERN)

    @property
    def fixed_log(self):
        # Monospace, two points smaller than the system default.
        return self._get_font(scale=-2, family=wx.FONTFAMILY_MODERN)

    @property
    def underlined(self):
        return self._get_font(underlined=True)

    def _get_font(self, scale=0, family=wx.FONTFAMILY_DEFAULT, underlined=False):
        base_size = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT).GetPointSize()
        return wx.Font(base_size + scale, family, wx.FONTWEIGHT_NORMAL,
                       wx.FONTSTYLE_NORMAL, underline=underlined)
| 42.931034 | 81 | 0.73012 |
acea7c19dc60c27a7414d4a2c68f7a62cf0911e4 | 6,423 | py | Python | models/model.py | yuezih/graduation-design-yuezihao | 760b7f82debece11ef8353f86992339daa6ad1ad | [
"MIT"
] | null | null | null | models/model.py | yuezih/graduation-design-yuezihao | 760b7f82debece11ef8353f86992339daa6ad1ad | [
"MIT"
] | null | null | null | models/model.py | yuezih/graduation-design-yuezihao | 760b7f82debece11ef8353f86992339daa6ad1ad | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import division
import numpy as np
import json
import pdb
from tqdm import tqdm
import time
import io , sys
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import framework.configbase
import framework.modelbase
import modules.transformer
import utils.evaluation
import utils.criterion
# Key under which the transformer sub-config / sub-module is registered.
DECODER = 'transformer'

class TransModelConfig(framework.configbase.ModelConfig):
    """Model config that loads its own fields plus the transformer sub-config."""

    def __init__(self):
        super(TransModelConfig, self).__init__()

    def load(self, cfg_file):
        # Populate top-level fields from the JSON file, then construct and
        # load the declared sub-config objects.
        with open(cfg_file) as f:
            data = json.load(f)
        for key, value in data.items():
            if key != 'subcfgs':
                setattr(self, key, value)
        # initialize config objects
        for subname, subcfg_type in self.subcfg_types.items():
            if subname == DECODER:
                self.subcfgs[subname] = modules.transformer.__dict__[subcfg_type]()
                self.subcfgs[subname].load_from_dict(data['subcfgs'][subname])
class TransModel(framework.modelbase.ModelBase):
    """Multi-task transformer wrapper.

    Tasks seen below: 'mmt' (translation/generation), 'mlm' (masked-token
    prediction with image features zeroed out), 'itm' (binary alignment
    classification) and 'attp' (multi-label attribute prediction).
    Pad id 1 is assumed for both source and target sequences.
    """

    def build_submods(self):
        # Single sub-module: the transformer built from its sub-config.
        submods = {}
        submods[DECODER] = modules.transformer.Transformer(self.config.subcfgs[DECODER])
        return submods

    def build_loss(self):
        # Criterion tuple indexed in forward_loss:
        # [0] label-smoothed XE for generation/MLM, [1] CE for itm,
        # [2] multi-label categorical loss for attp.
        xe = utils.criterion.LabelSmoothingLoss(0.1,self.config.subcfgs[DECODER].vocab,1)
        classify = nn.CrossEntropyLoss()
        multilabel = utils.criterion.MultilabelCategoricalLoss()
        return (xe, classify, multilabel)

    def forward_loss(self, batch_data, task='mmt', step=None):
        """Compute the training loss for one batch of the given task."""
        src = batch_data['src_ids'].cuda()
        trg = batch_data['trg_ids'].cuda()
        src_mask, trg_mask = self.create_masks(src, trg, task)
        img_ft = batch_data['img_ft'].cuda()
        img_len = batch_data['ft_len'].cuda()
        if task == 'mlm':
            # Text-only MLM: zero the image lengths so the image mask hides
            # all visual positions.
            img_len = img_len.fill_(0)
        img_mask = self.img_mask(img_len, max_len=img_ft.size(1)).unsqueeze(1)
        outputs = self.submods[DECODER](src, trg, img_ft, src_mask, trg_mask, img_mask, task=task)
        if task == 'itm':
            loss = self.criterion[1](outputs, batch_data['align_label'].cuda())
        elif task == 'attp':
            loss = self.criterion[2](outputs, batch_data['attr_label'].float().cuda())
        else:
            # Generation tasks: drop the image positions prepended to the
            # output sequence, then score token log-probs against the labels.
            outputs = nn.LogSoftmax(dim=-1)(outputs[:,img_ft.size(1):])
            output_label = batch_data['output_label'].cuda()
            ys = output_label.contiguous().view(-1)
            # Normalise by the number of non-pad (!= 1) label tokens.
            norm = output_label.ne(1).sum().item()
            loss = self.criterion[0](outputs.view(-1, outputs.size(-1)), ys, norm)
        return loss

    def evaluate(self, tst_reader):
        """Run evaluation for every task in tst_reader.

        Returns (score_dict, pred_sents); pred_sents holds the 'mmt'
        translations (empty if that task is absent).
        """
        pred_sents, ref_sents = [], []
        attr_pred, attr_label = [], []
        score = {}
        # NOTE(review): n_correct/n_word are not reset between tasks, so with
        # more than one accuracy-style task the totals mix — confirm intended.
        n_correct, n_word = 0, 0
        for task in tst_reader:
            cur_reader = tst_reader[task]
            for batch_data in tqdm(cur_reader):
                src = batch_data['src_ids'].cuda()
                trg = batch_data['trg_ids'].cuda()
                src_mask, trg_mask = self.create_masks(src, trg, task)
                img_ft = batch_data['img_ft'].cuda()
                img_len = batch_data['ft_len'].cuda()
                if task == 'mlm':
                    img_len = img_len.fill_(0)
                img_mask = self.img_mask(img_len, max_len=img_ft.size(1)).unsqueeze(1)
                if task == 'mmt':
                    # Decode with greedy sampling or beam search per config.
                    if self.submods[DECODER].config.decoding == 'greedy':
                        output = self.submods[DECODER].sample(src, img_ft, src_mask, img_mask)
                    else:
                        output = self.submods[DECODER].beam_search(src, img_ft, src_mask, img_mask)
                    translations = cur_reader.dataset.int2sent(output.detach())
                    ref_sents.extend(batch_data['ref_sents'])
                    pred_sents.extend(translations)
                elif task == 'itm':
                    # Alignment accuracy: argmax over the two classes.
                    target = batch_data['align_label'].cuda()
                    output = self.submods[DECODER](src, trg, img_ft, src_mask, trg_mask, img_mask, task=task)
                    pred = output.max(1, keepdim=True)[1]
                    n_correct += float(pred.eq(target.view_as(pred)).cpu().float().sum())
                    n_word += output.size(0)
                elif task == 'attp':
                    # Collect raw scores/labels; metrics computed after the loop.
                    output = self.submods[DECODER](src, trg, img_ft, src_mask, trg_mask, img_mask, task=task)
                    attr_pred.extend(output.detach().cpu().numpy())
                    attr_label.extend(batch_data['attr_label'].detach().numpy())
                else:
                    # MLM accuracy over non-pad (!= 1) label positions.
                    output_label = batch_data['output_label'].cuda()
                    output = self.submods[DECODER](src, trg, img_ft, src_mask, trg_mask, img_mask, task=task)[:,img_ft.size(1):]
                    output = output[output_label != 1]
                    output_label = output_label[output_label != 1]
                    n_correct += (output.max(dim=-1)[1] == output_label).sum().item()
                    n_word += output_label.numel()
            if task == 'mmt':
                score.update(utils.evaluation.compute(pred_sents, ref_sents))
            elif task == 'attp':
                r_1, r_5, p_1, p_5 = utils.evaluation.compute_multilabel(np.array(attr_pred), np.array(attr_label))
                score.update({task+'_r@1':r_1, task+'_r@5':r_5, task+'_p@1':p_1, task+'_p@5':p_5})
            else:
                score.update({task+'_avg_acc':n_correct/n_word})
        return score, pred_sents

    def validate(self, val_reader):
        # Switch submodules to eval mode, then score the validation reader.
        self.eval_start()
        metrics, _ = self.evaluate(val_reader)
        return metrics

    def test(self, tst_reader, tst_pred_file, tst_model_file=None):
        """Evaluate on tst_reader, dumping predictions to tst_pred_file (JSON).

        Optionally restores weights from tst_model_file first.
        """
        if tst_model_file is not None:
            self.load_checkpoint(tst_model_file)
        self.eval_start()
        metrics, pred_data = self.evaluate(tst_reader)
        with open(tst_pred_file, 'w') as f:
            json.dump(pred_data, f)
        return metrics

    def nopeak_mask(self, size):
        # Causal (no-peek) mask: True below/on the diagonal so each target
        # position may only attend to itself and earlier positions.
        np_mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
        np_mask = Variable(torch.from_numpy(np_mask) == 0).cuda()
        return np_mask

    def create_masks(self, src, trg=None, task='mmt'):
        # Padding masks (pad id is 1); for 'mmt' the target mask is also
        # combined with the causal no-peek mask.
        src_mask = (src != 1).unsqueeze(-2) # 1 is src_pad, trg_pad
        if trg is not None:
            trg_mask = (trg != 1).unsqueeze(-2)
            if task == 'mmt':
                size = trg.size(1) # get seq_len for matrix
                np_mask = self.nopeak_mask(size)
                trg_mask = trg_mask & np_mask
        else:
            trg_mask = None
        return src_mask, trg_mask

    def img_mask(self, lengths, max_len=None):
        ''' Creates a boolean mask from sequence lengths.
        lengths: LongTensor, (batch, )

        Returns a (batch, max_len) mask that is True at valid positions
        (position index < length) and False at padded positions.
        '''
        batch_size = lengths.size(0)
        max_len = max_len or lengths.max()
        return ~(torch.arange(0, max_len)
                 .type_as(lengths)
                 .repeat(batch_size, 1)
                 .ge(lengths.unsqueeze(1)))
| 38.005917 | 118 | 0.653744 |
acea7c8cdf4698bbfebe18222500b5499dd77443 | 659 | py | Python | fp17/utils.py | openhealthcare/python-fp17 | 61a665d90b04dc5e94d433dc7fbb16b901c70e7b | [
"BSD-3-Clause"
] | 1 | 2021-02-23T05:48:01.000Z | 2021-02-23T05:48:01.000Z | fp17/utils.py | openhealthcare/python-fp17 | 61a665d90b04dc5e94d433dc7fbb16b901c70e7b | [
"BSD-3-Clause"
] | 9 | 2018-03-20T15:53:44.000Z | 2018-07-09T10:56:18.000Z | fp17/utils.py | openhealthcare/python-fp17 | 61a665d90b04dc5e94d433dc7fbb16b901c70e7b | [
"BSD-3-Clause"
] | 2 | 2018-03-22T14:24:41.000Z | 2021-02-23T05:48:02.000Z | def min_digits(x):
"""
Return the minimum integer that has at least ``x`` digits:
>>> min_digits(0)
0
>>> min_digits(4)
1000
"""
if x <= 0:
return 0
return 10 ** (x - 1)
def max_digits(x):
    """
    Return the maximum integer that has at most ``x`` digits:

    >>> max_digits(4)
    9999
    >>> max_digits(0)
    0
    """
    # Guard non-positive counts so the result is always an int, mirroring
    # min_digits; previously a negative x produced a nonsensical float
    # (e.g. max_digits(-1) == -0.9).
    if x <= 0:
        return 0
    return (10 ** x) - 1
def strbool(x):
    """
    Return an string representation of the specified boolean for an XML
    document.

    >>> strbool(False)
    '0'
    >>> strbool(True)
    '1'
    """
    # Truthiness of x decides the digit, matching XML boolean encoding.
    if x:
        return '1'
    return '0'
| 16.897436 | 71 | 0.479514 |
acea7e4e8fd68a801df60578dec9060ae3ce4e8e | 2,968 | bzl | Python | dotnet/private/merge_assemblies.bzl | whimboo/selenium | 59e38770204e37a1a421ff7e7e2151ac136f0002 | [
"Apache-2.0"
] | null | null | null | dotnet/private/merge_assemblies.bzl | whimboo/selenium | 59e38770204e37a1a421ff7e7e2151ac136f0002 | [
"Apache-2.0"
] | null | null | null | dotnet/private/merge_assemblies.bzl | whimboo/selenium | 59e38770204e37a1a421ff7e7e2151ac136f0002 | [
"Apache-2.0"
] | 1 | 2020-11-13T19:50:17.000Z | 2020-11-13T19:50:17.000Z | load(
"@d2l_rules_csharp//csharp/private:common.bzl",
"collect_transitive_info",
"fill_in_missing_frameworks",
)
load(
"@d2l_rules_csharp//csharp/private:providers.bzl",
"CSharpAssemblyInfo",
)
def _merged_assembly_impl(ctx):
    # Runs the ILMerge tool to merge src_assembly with its transitive
    # dependency assemblies into a single output assembly (plus .pdb), then
    # republishes the C# providers so downstream targets can consume it.
    # Fix: corrected the "assembiles" typo in the user-visible progress message.
    providers = {}
    name = ctx.label.name
    deps = ctx.attr.deps
    target_framework = ctx.attr.target_framework

    input_assembly = ctx.attr.src_assembly.files.to_list()[0]

    # Default the output name to the input assembly's basename.
    output_file_name = ctx.attr.out
    if (output_file_name == ""):
        output_file_name = input_assembly.basename

    output_assembly = ctx.actions.declare_file("merged/{}/{}/{}".format(name, target_framework, output_file_name))
    output_pdb = ctx.actions.declare_file("merged/{}/{}/{}".format(name, target_framework, input_assembly.basename.replace(input_assembly.extension, "pdb")))

    # ILMerge flags: verbose CLR v4 resolution, merge XML docs, and
    # internalize the types of the merged-in dependencies.
    args = [
        "-v4",
        "-xmldocs",
        "-internalize",
    ]

    # Optional strong-name key for signing the merged assembly.
    if ctx.attr.keyfile != None:
        key_path = ctx.expand_location(ctx.attr.keyfile.files.to_list()[0].path)
        args.append("-keyfile:{}".format(key_path))

    args.append("-out={}".format(output_assembly.path))
    args.append(input_assembly.path)

    # All transitive references are merged into the primary assembly.
    (refs, runfiles, native_dlls) = collect_transitive_info(deps, target_framework)
    for ref in refs.to_list():
        args.append(ref.path)

    ctx.actions.run(
        executable = ctx.executable.merge_tool,
        progress_message = "Merging assemblies into {}".format(output_assembly.path),
        arguments = args,
        inputs = ctx.attr.src_assembly.files,
        outputs = [output_assembly, output_pdb],
    )

    # Runfiles: the pdb plus the default runfiles of every dependency.
    runfiles = ctx.runfiles(
        files = [output_pdb]
    )
    for dep in ctx.files.deps:
        runfiles = runfiles.merge(dep[DefaultInfo].default_runfiles)

    providers[target_framework] = CSharpAssemblyInfo[target_framework](
        out = output_assembly,
        refout = None,
        pdb = output_pdb,
        native_dlls = native_dlls,
        deps = deps,
        transitive_refs = refs,
        transitive_runfiles = depset([]),
        actual_tfm = target_framework,
        runtimeconfig = None,
    )

    # Derive providers for the frameworks not explicitly built.
    fill_in_missing_frameworks(providers)

    returned_info = providers.values()
    returned_info.append(
        DefaultInfo(
            files = depset([output_assembly]),
            runfiles = runfiles,
        ),
    )
    return returned_info
# Rule wiring for _merged_assembly_impl:
#   src_assembly     - the primary assembly to merge into
#   deps             - assemblies merged (and internalized) into the output
#   out              - optional output file name (defaults to the input's)
#   keyfile          - optional strong-name key for signing
#   target_framework - TFM the providers are published under (required)
#   merge_tool       - the ILMerge executable (host-configured)
merged_assembly = rule(
    implementation = _merged_assembly_impl,
    attrs = {
        "src_assembly": attr.label(),
        "deps": attr.label_list(),
        "out": attr.string(default = ""),
        "keyfile": attr.label(allow_single_file = True),
        "target_framework": attr.string(mandatory = True),
        "merge_tool": attr.label(
            executable = True,
            cfg = "host",
            default = Label("//third_party/dotnet/ilmerge:ilmerge.exe"),
            allow_single_file = True,
        ),
    },
    toolchains = ["//third_party/dotnet/ilmerge:toolchain_type"],
)
| 31.242105 | 157 | 0.640162 |
acea7efe2b85bf5b99dcc5367aea53d661e22b0c | 18,433 | py | Python | model_zoo/official/cv/faster_rcnn/src/dataset.py | ATestGroup233/mindspore | 5d81221b5896cf7d7c6adb44daef28d92cb43352 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | model_zoo/official/cv/faster_rcnn/src/dataset.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | [
"Apache-2.0"
] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | model_zoo/official/cv/faster_rcnn/src/dataset.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FasterRcnn dataset"""
from __future__ import division
import os
import numpy as np
from numpy import random
import cv2
import mmcv
import mindspore.dataset as de
import mindspore.dataset.vision.c_transforms as C
from mindspore.mindrecord import FileWriter
from src.config import config
def bbox_overlaps(bboxes1, bboxes2, mode='iou'):
    """Calculate the ious between each bbox of bboxes1 and bboxes2.

    Args:
        bboxes1(ndarray): shape (n, 4), boxes as (x1, y1, x2, y2) with
            inclusive corner coordinates (legacy VOC "+1" convention).
        bboxes2(ndarray): shape (k, 4)
        mode(str): iou (intersection over union) or iof (intersection
            over foreground, i.e. normalised by the bboxes1 areas)

    Returns:
        ious(ndarray): shape (n, k), float32
    """
    assert mode in ['iou', 'iof']

    bboxes1 = bboxes1.astype(np.float32)
    bboxes2 = bboxes2.astype(np.float32)
    rows = bboxes1.shape[0]
    cols = bboxes2.shape[0]
    if rows * cols == 0:
        return np.zeros((rows, cols), dtype=np.float32)

    # +1 because corner coordinates are inclusive, matching the rest of
    # this file.
    area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (bboxes1[:, 3] - bboxes1[:, 1] + 1)
    area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (bboxes2[:, 3] - bboxes2[:, 1] + 1)

    # Broadcast to an (n, k) grid of pairwise intersections. This replaces
    # the former per-row Python loop (and its swap-the-smaller "exchange"
    # bookkeeping) with one vectorised computation; the arithmetic is
    # identical.
    lt = np.maximum(bboxes1[:, None, :2], bboxes2[None, :, :2])  # (n, k, 2)
    rb = np.minimum(bboxes1[:, None, 2:], bboxes2[None, :, 2:])  # (n, k, 2)
    wh = np.maximum(rb - lt + 1, 0)
    overlap = wh[..., 0] * wh[..., 1]

    if mode == 'iou':
        union = area1[:, None] + area2[None, :] - overlap
    else:
        # 'iof': normalise by the foreground (bboxes1) areas only.
        union = area1[:, None]
    return (overlap / union).astype(np.float32)
class PhotoMetricDistortion:
    """Photo Metric Distortion augmentation.

    Randomly (probability 1/2 each) jitters brightness, contrast,
    saturation and hue, and randomly permutes the colour channels.
    boxes and labels are passed through unchanged. The RNG call order is
    part of the behaviour (reproducibility) — do not reorder the steps.
    """

    def __init__(self,
                 brightness_delta=32,
                 contrast_range=(0.5, 1.5),
                 saturation_range=(0.5, 1.5),
                 hue_delta=18):
        self.brightness_delta = brightness_delta
        self.contrast_lower, self.contrast_upper = contrast_range
        self.saturation_lower, self.saturation_upper = saturation_range
        self.hue_delta = hue_delta

    def __call__(self, img, boxes, labels):
        # random brightness (additive shift on the float image)
        img = img.astype('float32')
        if random.randint(2):
            delta = random.uniform(-self.brightness_delta,
                                   self.brightness_delta)
            img += delta

        # mode == 0 --> do random contrast first
        # mode == 1 --> do random contrast last
        mode = random.randint(2)
        if mode == 1:
            if random.randint(2):
                alpha = random.uniform(self.contrast_lower,
                                       self.contrast_upper)
                img *= alpha

        # convert color from BGR to HSV
        img = mmcv.bgr2hsv(img)

        # random saturation (multiplicative on the S channel)
        if random.randint(2):
            img[..., 1] *= random.uniform(self.saturation_lower,
                                          self.saturation_upper)

        # random hue, wrapped back into [0, 360)
        if random.randint(2):
            img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
            img[..., 0][img[..., 0] > 360] -= 360
            img[..., 0][img[..., 0] < 0] += 360

        # convert color from HSV to BGR
        img = mmcv.hsv2bgr(img)

        # random contrast
        if mode == 0:
            if random.randint(2):
                alpha = random.uniform(self.contrast_lower,
                                       self.contrast_upper)
                img *= alpha

        # randomly swap channels
        if random.randint(2):
            img = img[..., random.permutation(3)]

        return img, boxes, labels
class Expand:
    """Randomly place the image on a larger, mean-filled canvas.

    With probability 1/2 the input is returned unchanged; otherwise the
    image is pasted at a random offset into a canvas whose side is a
    random ratio (from ``ratio_range``) times the original, filled with
    the per-channel mean. Boxes are shifted by the paste offset in place.
    """

    def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4)):
        # ``mean`` is given in BGR; flip channel order for RGB inputs.
        self.mean = mean[::-1] if to_rgb else mean
        self.min_ratio, self.max_ratio = ratio_range

    def __call__(self, img, boxes, labels):
        # Skip the expansion half the time.
        if random.randint(2):
            return img, boxes, labels

        h, w, c = img.shape
        ratio = random.uniform(self.min_ratio, self.max_ratio)

        # Mean-valued canvas, then paste the image at a random offset.
        canvas = np.full((int(h * ratio), int(w * ratio), c),
                         self.mean).astype(img.dtype)
        left = int(random.uniform(0, w * ratio - w))
        top = int(random.uniform(0, h * ratio - h))
        canvas[top:top + h, left:left + w] = img

        # Shift the boxes by the paste offset ((x1, y1, x2, y2) layout).
        boxes += np.tile((left, top), 2)
        return canvas, boxes, labels
def rescale_column(img, img_shape, gt_bboxes, gt_label, gt_num):
    """rescale operation for image (training variant).

    Rescales ``img`` toward the configured target size while keeping its
    aspect ratio (see ``mmcv.imrescale`` for the exact scale semantics),
    scales and clips the boxes by the same factor, then zero-pads the
    result bottom/right to the fixed (config.img_height, config.img_width)
    network input size. ``img_shape`` is replaced by the fixed output
    shape; ``gt_label`` and ``gt_num`` pass through unchanged.
    """
    img_data, scale_factor = mmcv.imrescale(img, (config.img_width, config.img_height), return_scale=True)
    if img_data.shape[0] > config.img_height:
        # Height still exceeds the target after the first pass: rescale
        # again against the height limit and accumulate the total factor.
        img_data, scale_factor2 = mmcv.imrescale(img_data, (config.img_height, config.img_height), return_scale=True)
        scale_factor = scale_factor*scale_factor2
    gt_bboxes = gt_bboxes * scale_factor
    # Clip boxes to the rescaled (pre-padding) image extent; columns are
    # (x1, y1, x2, y2), so 0::2 are x-coords and 1::2 are y-coords.
    gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_data.shape[1] - 1)
    gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_data.shape[0] - 1)
    pad_h = config.img_height - img_data.shape[0]
    pad_w = config.img_width - img_data.shape[1]
    assert ((pad_h >= 0) and (pad_w >= 0))
    pad_img_data = np.zeros((config.img_height, config.img_width, 3)).astype(img_data.dtype)
    pad_img_data[0:img_data.shape[0], 0:img_data.shape[1], :] = img_data
    # The trailing 1.0 fills the scale-factor slot used by the eval variant.
    img_shape = (config.img_height, config.img_width, 1.0)
    img_shape = np.asarray(img_shape, dtype=np.float32)
    return (pad_img_data, img_shape, gt_bboxes, gt_label, gt_num)
def rescale_column_test(img, img_shape, gt_bboxes, gt_label, gt_num):
    """rescale operation for image of eval.

    Same aspect-ratio-preserving rescale and zero-padding as
    ``rescale_column``, but the boxes are NOT scaled (eval keeps them in
    original-image coordinates) and the applied scale factor is appended
    to ``img_shape`` so it can be undone after inference.
    """
    img_data, scale_factor = mmcv.imrescale(img, (config.img_width, config.img_height), return_scale=True)
    if img_data.shape[0] > config.img_height:
        # Second pass if the height still exceeds the target; accumulate
        # the combined scale factor.
        img_data, scale_factor2 = mmcv.imrescale(img_data, (config.img_height, config.img_height), return_scale=True)
        scale_factor = scale_factor*scale_factor2
    pad_h = config.img_height - img_data.shape[0]
    pad_w = config.img_width - img_data.shape[1]
    assert ((pad_h >= 0) and (pad_w >= 0))
    pad_img_data = np.zeros((config.img_height, config.img_width, 3)).astype(img_data.dtype)
    pad_img_data[0:img_data.shape[0], 0:img_data.shape[1], :] = img_data
    # Append the (isotropic) scale factor twice: once per axis slot.
    img_shape = np.append(img_shape, (scale_factor, scale_factor))
    img_shape = np.asarray(img_shape, dtype=np.float32)
    return (pad_img_data, img_shape, gt_bboxes, gt_label, gt_num)
def resize_column(img, img_shape, gt_bboxes, gt_label, gt_num):
    """resize operation for image (training variant).

    Resizes ``img`` to exactly (config.img_width, config.img_height),
    ignoring aspect ratio, then scales the boxes by the per-axis factors
    and clips them to the output image bounds.
    """
    img_data = img
    img_data, w_scale, h_scale = mmcv.imresize(
        img_data, (config.img_width, config.img_height), return_scale=True)
    # Per-coordinate scale in (x1, y1, x2, y2) order.
    scale_factor = np.array(
        [w_scale, h_scale, w_scale, h_scale], dtype=np.float32)
    # Fixed output shape; trailing 1.0 fills the eval-only scale slot.
    img_shape = (config.img_height, config.img_width, 1.0)
    img_shape = np.asarray(img_shape, dtype=np.float32)
    gt_bboxes = gt_bboxes * scale_factor
    # img_shape is [H, W, ...]: index 1 is width (x limit), 0 is height.
    gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1)
    gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1)
    return (img_data, img_shape, gt_bboxes, gt_label, gt_num)
def resize_column_test(img, img_shape, gt_bboxes, gt_label, gt_num):
    """resize operation for image of eval.

    Same fixed-size resize and box scaling as ``resize_column``, but the
    per-axis scale factors (h_scale, w_scale) are appended to the original
    ``img_shape`` instead of replacing it, so predictions can be mapped
    back to original-image coordinates.
    """
    img_data = img
    img_data, w_scale, h_scale = mmcv.imresize(
        img_data, (config.img_width, config.img_height), return_scale=True)
    # Per-coordinate scale in (x1, y1, x2, y2) order.
    scale_factor = np.array(
        [w_scale, h_scale, w_scale, h_scale], dtype=np.float32)
    img_shape = np.append(img_shape, (h_scale, w_scale))
    img_shape = np.asarray(img_shape, dtype=np.float32)
    gt_bboxes = gt_bboxes * scale_factor
    # NOTE: after the append, img_shape[0]/[1] are still the ORIGINAL
    # height/width, so boxes are clipped against the original extent here.
    gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1)
    gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1)
    return (img_data, img_shape, gt_bboxes, gt_label, gt_num)
def impad_to_multiple_column(img, img_shape, gt_bboxes, gt_label, gt_num):
    """impad operation: pad the image up to the fixed network input size."""
    padded = mmcv.impad(img, (config.img_height, config.img_width)).astype(np.float32)
    return (padded, img_shape, gt_bboxes, gt_label, gt_num)
def imnormalize_column(img, img_shape, gt_bboxes, gt_label, gt_num):
    """imnormalize operation: standardize with fixed per-channel mean/std
    (the standard ImageNet statistics), with BGR->RGB conversion enabled."""
    mean = np.array([123.675, 116.28, 103.53])
    std = np.array([58.395, 57.12, 57.375])
    normalized = mmcv.imnormalize(img, mean, std, True).astype(np.float32)
    return (normalized, img_shape, gt_bboxes, gt_label, gt_num)
def flip_column(img, img_shape, gt_bboxes, gt_label, gt_num):
    """flip operation: horizontally flip the image and mirror the boxes."""
    flipped_img = mmcv.imflip(img)
    _, w, _ = flipped_img.shape
    mirrored = gt_bboxes.copy()
    # Mirror x-coordinates: new_x1 = w - old_x2 - 1, new_x2 = w - old_x1 - 1
    # (y-coordinates are unchanged by a horizontal flip).
    mirrored[..., 0::4] = w - gt_bboxes[..., 2::4] - 1
    mirrored[..., 2::4] = w - gt_bboxes[..., 0::4] - 1
    return (flipped_img, img_shape, mirrored, gt_label, gt_num)
def transpose_column(img, img_shape, gt_bboxes, gt_label, gt_num):
    """transpose operation for image.

    Converts the image from HWC to CHW layout and casts every column to
    its network dtype: float32 image/shape/boxes, int32 labels, and a
    boolean valid mask.
    """
    img_data = img.transpose(2, 0, 1).copy()
    img_data = img_data.astype(np.float32)
    img_shape = img_shape.astype(np.float32)
    gt_bboxes = gt_bboxes.astype(np.float32)
    gt_label = gt_label.astype(np.int32)
    # ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``bool`` is the equivalent alias (yields dtype np.bool_).
    gt_num = gt_num.astype(bool)
    return (img_data, img_shape, gt_bboxes, gt_label, gt_num)
def photo_crop_column(img, img_shape, gt_bboxes, gt_label, gt_num):
    """photo crop operation: apply a random photometric distortion."""
    distort = PhotoMetricDistortion()
    img_out, gt_bboxes, gt_label = distort(img, gt_bboxes, gt_label)
    return (img_out, img_shape, gt_bboxes, gt_label, gt_num)
def expand_column(img, img_shape, gt_bboxes, gt_label, gt_num):
    """expand operation: random mean-padded canvas expansion."""
    expander = Expand()
    img_out, gt_bboxes, gt_label = expander(img, gt_bboxes, gt_label)
    return (img_out, img_shape, gt_bboxes, gt_label, gt_num)
def preprocess_fn(image, box, is_training):
    """Preprocess function for dataset.

    Converts an RGB image to BGR, pads the annotations to a fixed count of
    128 rows, then runs either the training pipeline (optional expand,
    rescale-or-resize, normalize, optional flip) or the eval pipeline
    (rescale-or-resize, normalize), and finally transposes to CHW.

    Args:
        image: HWC RGB image array.
        box: (N, 6) array of [x1, y1, x2, y2, label, iscrowd] rows.
        is_training: selects the augmentation pipeline.

    Returns:
        (image, image_shape, boxes, labels, valid_mask) with network
        dtypes, as produced by ``transpose_column``.
    """
    def _infer_data(image_bgr, image_shape, gt_box_new, gt_label_new, gt_iscrowd_new_revert):
        # Eval pipeline: geometric transform + normalization + CHW transpose.
        image_shape = image_shape[:2]
        input_data = image_bgr, image_shape, gt_box_new, gt_label_new, gt_iscrowd_new_revert
        if config.keep_ratio:
            input_data = rescale_column_test(*input_data)
        else:
            input_data = resize_column_test(*input_data)
        input_data = imnormalize_column(*input_data)
        output_data = transpose_column(*input_data)
        return output_data
    def _data_aug(image, box, is_training):
        """Data augmentation function."""
        # RGB -> BGR channel swap.
        image_bgr = image.copy()
        image_bgr[:, :, 0] = image[:, :, 2]
        image_bgr[:, :, 1] = image[:, :, 1]
        image_bgr[:, :, 2] = image[:, :, 0]
        image_shape = image_bgr.shape[:2]
        gt_box = box[:, :4]
        gt_label = box[:, 4]
        gt_iscrowd = box[:, 5]
        # Pad annotations to a fixed length so batches have uniform shape;
        # label -1 and iscrowd 1 mark the padding rows as invalid.
        pad_max_number = 128
        gt_box_new = np.pad(gt_box, ((0, pad_max_number - box.shape[0]), (0, 0)), mode="constant", constant_values=0)
        gt_label_new = np.pad(gt_label, ((0, pad_max_number - box.shape[0])), mode="constant", constant_values=-1)
        gt_iscrowd_new = np.pad(gt_iscrowd, ((0, pad_max_number - box.shape[0])), mode="constant", constant_values=1)
        # Valid-box mask: 1 where NOT iscrowd. ``np.bool`` was removed in
        # NumPy 1.24; the builtin ``bool`` is the equivalent alias.
        gt_iscrowd_new_revert = (~(gt_iscrowd_new.astype(bool))).astype(np.int32)
        if not is_training:
            return _infer_data(image_bgr, image_shape, gt_box_new, gt_label_new, gt_iscrowd_new_revert)
        # Training pipeline: draw the random augmentation switches once.
        flip = (np.random.rand() < config.flip_ratio)
        expand = (np.random.rand() < config.expand_ratio)
        input_data = image_bgr, image_shape, gt_box_new, gt_label_new, gt_iscrowd_new_revert
        if expand:
            input_data = expand_column(*input_data)
        if config.keep_ratio:
            input_data = rescale_column(*input_data)
        else:
            input_data = resize_column(*input_data)
        input_data = imnormalize_column(*input_data)
        if flip:
            input_data = flip_column(*input_data)
        output_data = transpose_column(*input_data)
        return output_data
    return _data_aug(image, box, is_training)
def create_coco_label(is_training):
    """Get image path and annotation from COCO.

    Returns:
        image_files: list of image paths, in COCO image-id order.
        image_anno_dict: maps image path -> (N, 6) int array of
            [x1, y1, x2, y2, class_index, iscrowd] rows. Images with no
            annotation in the trained classes get the flat placeholder
            [0, 0, 0, 0, 0, 1] (zero box, background label, iscrowd set).
            NOTE(review): the placeholder is 1-D, unlike the (N, 6) case —
            downstream consumers are assumed to tolerate that; verify
            against the MindRecord [-1, 6] schema.
    """
    from pycocotools.coco import COCO
    coco_root = config.coco_root
    data_type = config.val_data_type
    if is_training:
        data_type = config.train_data_type
    # Classes need to train or test.
    train_cls = config.coco_classes
    train_cls_dict = {}
    for i, cls in enumerate(train_cls):
        train_cls_dict[cls] = i
    anno_json = os.path.join(coco_root, config.instance_set.format(data_type))
    coco = COCO(anno_json)
    # Map COCO category id -> category name.
    classs_dict = {}
    cat_ids = coco.loadCats(coco.getCatIds())
    for cat in cat_ids:
        classs_dict[cat["id"]] = cat["name"]
    image_ids = coco.getImgIds()
    image_files = []
    image_anno_dict = {}
    for img_id in image_ids:
        image_info = coco.loadImgs(img_id)
        file_name = image_info[0]["file_name"]
        anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)
        anno = coco.loadAnns(anno_ids)
        image_path = os.path.join(coco_root, data_type, file_name)
        annos = []
        for label in anno:
            bbox = label["bbox"]
            class_name = classs_dict[label["category_id"]]
            # Keep only categories that are part of the trained class set.
            if class_name in train_cls:
                # COCO bbox is (x, y, w, h); convert to (x1, y1, x2, y2).
                x1, x2 = bbox[0], bbox[0] + bbox[2]
                y1, y2 = bbox[1], bbox[1] + bbox[3]
                annos.append([x1, y1, x2, y2] + [train_cls_dict[class_name]] + [int(label["iscrowd"])])
        image_files.append(image_path)
        if annos:
            image_anno_dict[image_path] = np.array(annos)
        else:
            image_anno_dict[image_path] = np.array([0, 0, 0, 0, 0, 1])
    return image_files, image_anno_dict
def anno_parser(annos_str):
    """Parse annotation from string to list.

    Each element of ``annos_str`` is a comma-separated run of integers;
    the result is a list of int lists, one per input string.
    """
    return [[int(v) for v in line.strip().split(',')] for line in annos_str]
def filter_valid_data(image_dir, anno_path):
    """Filter valid image file, which both in image_dir and anno_path.

    Reads the annotation list file (one line per image: the file name
    followed by space-separated comma-joined box strings) and keeps only
    entries whose image file actually exists under ``image_dir``.
    """
    if not os.path.isdir(image_dir):
        raise RuntimeError("Path given is not valid.")
    if not os.path.isfile(anno_path):
        raise RuntimeError("Annotation file is not valid.")

    image_files = []
    image_anno_dict = {}
    with open(anno_path, "rb") as f:
        lines = f.readlines()
    for line in lines:
        parts = line.decode("utf-8").strip().split(' ')
        image_path = os.path.join(image_dir, parts[0])
        # Skip entries whose image is missing on disk.
        if os.path.isfile(image_path):
            image_anno_dict[image_path] = anno_parser(parts[1:])
            image_files.append(image_path)
    return image_files, image_anno_dict
def data_to_mindrecord_byte_image(dataset="coco", is_training=True, prefix="fasterrcnn.mindrecord", file_num=8):
    """Create MindRecord file.

    Serializes each image's raw (undecoded) bytes together with its
    (N, 6) integer annotation array [x1, y1, x2, y2, label, iscrowd]
    into ``file_num`` MindRecord shards under config.mindrecord_dir.
    """
    mindrecord_dir = config.mindrecord_dir
    mindrecord_path = os.path.join(mindrecord_dir, prefix)
    writer = FileWriter(mindrecord_path, file_num)
    # Annotations come either from COCO or from a plain-text list file.
    if dataset == "coco":
        image_files, image_anno_dict = create_coco_label(is_training)
    else:
        image_files, image_anno_dict = filter_valid_data(config.IMAGE_DIR, config.ANNO_PATH)
    # Record schema: raw image bytes + variable-row-count int32 boxes.
    fasterrcnn_json = {
        "image": {"type": "bytes"},
        "annotation": {"type": "int32", "shape": [-1, 6]},
    }
    writer.add_schema(fasterrcnn_json, "fasterrcnn_json")
    for image_name in image_files:
        with open(image_name, 'rb') as f:
            img = f.read()
        annos = np.array(image_anno_dict[image_name], dtype=np.int32)
        row = {"image": img, "annotation": annos}
        writer.write_raw_data([row])
    writer.commit()
def create_fasterrcnn_dataset(mindrecord_file, batch_size=2, device_num=1, rank_id=0, is_training=True,
                              num_parallel_workers=8, python_multiprocessing=False):
    """Create FasterRcnn dataset with MindDataset.

    Reads the MindRecord shards, decodes the raw image bytes, applies
    ``preprocess_fn`` (training or eval pipeline), and batches with
    drop_remainder. Sharding (device_num/rank_id) supports distributed
    training; shuffling is enabled only for training.
    """
    # Avoid OpenCV spawning its own thread pool inside dataset workers.
    cv2.setNumThreads(0)
    de.config.set_prefetch_size(8)
    ds = de.MindDataset(mindrecord_file, columns_list=["image", "annotation"], num_shards=device_num, shard_id=rank_id,
                        num_parallel_workers=4, shuffle=is_training)
    decode = C.Decode()
    ds = ds.map(input_columns=["image"], operations=decode)
    # Bind is_training into the per-sample preprocessing closure.
    compose_map_func = (lambda image, annotation: preprocess_fn(image, annotation, is_training))
    if is_training:
        ds = ds.map(input_columns=["image", "annotation"],
                    output_columns=["image", "image_shape", "box", "label", "valid_num"],
                    column_order=["image", "image_shape", "box", "label", "valid_num"],
                    operations=compose_map_func, python_multiprocessing=python_multiprocessing,
                    num_parallel_workers=num_parallel_workers)
        ds = ds.batch(batch_size, drop_remainder=True)
    else:
        ds = ds.map(input_columns=["image", "annotation"],
                    output_columns=["image", "image_shape", "box", "label", "valid_num"],
                    column_order=["image", "image_shape", "box", "label", "valid_num"],
                    operations=compose_map_func,
                    num_parallel_workers=num_parallel_workers)
        ds = ds.batch(batch_size, drop_remainder=True)
    return ds
| 38.006186 | 119 | 0.633429 |
acea7f3eaaf62a0eccc2d77e039bcee9482612d1 | 19,372 | py | Python | current_code/couplingISC.py | dharmatarha/hypercodes | 82f9709135a40671755e430a10b980280c7b2543 | [
"MIT"
] | null | null | null | current_code/couplingISC.py | dharmatarha/hypercodes | 82f9709135a40671755e430a10b980280c7b2543 | [
"MIT"
] | null | null | null | current_code/couplingISC.py | dharmatarha/hypercodes | 82f9709135a40671755e430a10b980280c7b2543 | [
"MIT"
] | null | null | null |
import numpy as np
from numpy.random import default_rng
def random_normal_data(tr_no, voxel_no):
    """
    Helper function generating random standard normal (mean 0, sd 1) data.
    Output is a 2D numpy ndarray with shape (tr_no, voxel_no).
    """
    return default_rng().normal(0, 1, (tr_no, voxel_no))
def timeshifted_data_1d(data, shift, padding_value='zero'):
    """
    Build a matrix whose columns are time-shifted copies of the input vector.

    Column k (k in 0..2*shift) holds ``data`` shifted by (k - shift)
    samples and padded so every column keeps the original length:
    column 0 is data[shift:] padded at the end, the middle column is the
    unshifted data, and the last column is padding followed by data[:-shift].

    Inputs
        data:           2D numpy array with shape (n, 1).
        shift:          positive integer; shifts span range(-shift, shift+1).
        padding_value:  'zero' or 'mean' (mean of the input vector).

    Output
        data_shifted:   2D numpy array with shape (n, shift*2+1).

    Example: data = np.arange(10).reshape(10, 1), shift = 2, zero padding:
        array([[2., 1., 0., 0., 0.],
               [3., 2., 1., 0., 0.],
               [4., 3., 2., 1., 0.],
               [5., 4., 3., 2., 1.],
               [6., 5., 4., 3., 2.],
               [7., 6., 5., 4., 3.],
               [8., 7., 6., 5., 4.],
               [9., 8., 7., 6., 5.],
               [0., 9., 8., 7., 6.],
               [0., 0., 9., 8., 7.]])
    """
    # validate input shape
    if data.shape[1] != 1:
        raise ValueError('Input arg ''data'' should have shape (n, 1)!')
    # resolve padding value
    if padding_value == 'zero':
        pad = 0
    elif padding_value == 'mean':
        pad = np.mean(data)
    else:
        raise ValueError('Input arg ''padding_value'' should be either ''zero'' or ''mean''!')

    n = data.shape[0]
    data_shifted = np.zeros((n, 2 * shift + 1))
    for offset in range(-shift, shift + 1):
        col = offset + shift
        if offset <= 0:
            # drop the first |offset| samples, pad at the end
            data_shifted[:, col] = np.concatenate((data[-offset:, 0],
                                                   np.tile(pad, -offset)))
        else:
            # pad at the start, drop the last |offset| samples
            data_shifted[:, col] = np.concatenate((np.tile(pad, offset),
                                                   data[0:-offset, 0]))
    return data_shifted
def timeshifted_data_2d(data, shift, padding_value='zero'):
    """
    Same as timeshifted_data_1d but for multiple variables at once
    (e.g. many voxel timeseries).

    Inputs
        data:           2D numpy array, shape (n, v): n samples, v variables.
        shift:          positive integer; shifts span range(-shift, shift+1).
        padding_value:  'zero' or 'mean' (per-variable mean).

    Output
        data_shifted:   3D numpy array, shape (v, n, shift*2+1); slice
                        [j] is the shifted matrix for variable j.
    """
    # validate input
    if np.ndim(data) != 2:
        raise ValueError('Input arg ''data'' should be 2D!')
    # one padding row (length v): zeros, or the per-variable means
    if padding_value == 'zero':
        pad_row = np.zeros(data.shape[1])
    elif padding_value == 'mean':
        pad_row = np.mean(data, axis=0)
    else:
        raise ValueError('Input arg ''padding_value'' should be either ''zero'' or ''mean''!')

    tr_no, vox_no = data.shape
    data_shifted = np.zeros((vox_no, tr_no, 2 * shift + 1))
    for offset in range(-shift, shift + 1):
        if offset <= 0:
            # drop leading samples, pad at the end
            block = np.concatenate((data[-offset:, :],
                                    np.tile(pad_row, (-offset, 1))))
        else:
            # pad at the start, drop trailing samples
            block = np.concatenate((np.tile(pad_row, (offset, 1)),
                                    data[0:-offset, :]))
        data_shifted[:, :, offset + shift] = block.T
    return data_shifted
def get_coupled_set_1d(tr_no, beta=None, shift=2, noise_level=0):
    """
    Helper function to generate an independent - dependent variable pair for testing coupling (OLS solution) methods.
    For given parameters, it generates a random independent data vector ("X", "speaker" data for our fMRI
    coupling use case) and a corresponding dependent data vector ("Y", "listener" data for our fMRI use case).
    "Y" is calculated by applying the "beta" linear coefficients to the time-shifted versions of "X".
    Additionally, (random standard normal) noise is added ("noise_level").
    Inputs
    tr_no:          Integer, length of data vectors (number of TRs in case of fMRI data).
    beta:           1D numpy array or list of values, with length "shift"*2+1.
                    Coefficients used for deriving the dependent variable from the independent.
                    Defaults to np.asarray([0.1, 0, 0.5, 2, 0]).
    shift:          Positive integer. Maximum number of data points to shift for the independent variable before
                    calculating the dependent variable.
    noise_level:    Positive number or zero. If not zero, a random standard normal vector ("noise") scaled by
                    "noise_level" is added to "Y" after it is calculated from "X" and "beta".
    Outputs:
    X:              2D numpy array. Independent variable data with shape (tr_no, 1)
    Y:              2D numpy array. Dependent variable data with shape (tr_no, 1)
    """
    # input checks
    if beta is None:
        beta = np.asarray([0.1, 0, 0.5, 2, 0])
    else:
        beta = np.asarray(beta)
        if np.ndim(beta) != 1:
            raise ValueError('Input arg ''beta'' should be a 1D array!')
    if shift % 1 != 0 or shift <= 0:
        raise ValueError('Input arg ''shift'' should be a positive integer!')
    if beta.shape[0] != shift*2+1:
        raise ValueError('Input arg ''beta'' should have length ''shift''*2+1!')
    if noise_level < 0:
        raise ValueError('Input arg ''noise_level'' should be a positive number or zero!')
    # generate data
    X = random_normal_data(tr_no, 1)
    X_shifted = timeshifted_data_1d(X, shift, padding_value='zero')
    # Keep Y as a column vector (tr_no, 1), matching the documented output.
    # The matrix product yields a 1D array, so the trailing axis is restored
    # explicitly; without it, adding the (tr_no, 1) noise term below would
    # broadcast Y to shape (tr_no, tr_no).
    Y = ((X_shifted @ beta) / np.sum(beta))[:, np.newaxis]
    # add noise if requested
    if noise_level != 0:
        n = random_normal_data(tr_no, 1) * noise_level
        Y = (Y + n) / (1 + noise_level)
    return X, Y
def get_coupled_set_2d(data_dims, beta=None, shift=2, noise_level=0):
    """
    Same as get_coupled_set_1d but generating multivariate sets (e.g. many voxels' worth of data
    at once for our fMRI use case). Outputs "X" and "Y" ("speaker" and "listener" data in our use case) are
    2D arrays with shape (tr_no, vox_no), otherwise it works the same way as the 1D version of the function.
    Inputs
    data_dims:      Tuple of integers, dimensions of data (number of TRs and voxels in case of fMRI data).
    beta:           1D numpy array or list of values, with length "shift"*2+1.
                    Coefficients used for deriving the dependent variable from the independent.
                    Defaults to np.asarray([0.1, 0, 0.5, 2, 0]).
    shift:          Positive integer. Maximum number of data points to shift for the independent variable before
                    calculating the dependent variable.
                    E.g., if shift = 2, the independent data is shifted with range(-shift, shift+1, 1), then the
                    independent variable will be calculated as ("shifted_independent_var" @ "beta") (+ normalization).
    noise_level:    Positive number or zero. If not zero, a random standard normal array ("noise") scaled by
                    "noise_level" is added to "Y" after it is calculated from "X" and "beta".
    Outputs:
    X:              2D numpy array. Independent variable data with shape (tr_no, vox_no)
    Y:              2D numpy array. Dependent variable data with shape (tr_no, vox_no)
    """
    # input checks
    if beta is None:
        beta = np.asarray([0.1, 0, 0.5, 2, 0])
    else:
        beta = np.asarray(beta)
        if np.ndim(beta) != 1:
            raise ValueError('Input arg ''beta'' should be a 1D array!')
    if shift % 1 != 0 or shift <= 0:
        raise ValueError('Input arg ''shift'' should be a positive integer!')
    if beta.shape[0] != shift*2+1:
        raise ValueError('Input arg ''beta'' should have length ''shift''*2+1!')
    if noise_level < 0:
        raise ValueError('Input arg ''noise_level'' should be a positive number or zero!')
    try:
        tr_no, vox_no = data_dims
    except Exception:
        raise ValueError('Input arg ''data_dims'' should be a tuple of integers!')
    # generate data
    X = random_normal_data(tr_no, vox_no)
    X_shifted = timeshifted_data_2d(X, shift, padding_value='zero')
    # X_shifted has shape (vox, tr, shifts); the product gives (vox, tr),
    # so transpose to the documented (tr, vox) layout.
    Y = (X_shifted @ beta) / np.sum(beta)
    Y = Y.T
    # add noise if requested
    if noise_level != 0:
        n = random_normal_data(tr_no, vox_no) * noise_level
        Y = (Y + n) / (1 + noise_level)
    return X, Y
def coupling_loop(speaker, listener, shift, padding_value='zero'):
    """
    Coupling ISC estimation. The idea is to model the listener's BOLD time series as a
    linear function of the speaker's time shifted time series.
    See Stephens et al., 2010 and Silbert et al., 2014 for details.
    Written with fMRI data in mind. Voxels are solved one at a time in a loop;
    see coupling_onestep for the vectorized equivalent.
    Inputs
    speaker:        2D numpy array, TRs X voxels (each column is a separate variable)
    listener:       2D numpy array, TRs X voxels (each column is a separate variable)
    shift:          Positive integer. Maximum number of data points to shift.
                    E.g., if shift = 2, the columns of the all speaker timeseries
                    will be shifted with the values in range(-shift, shift+1, 1).
    padding_value:  String, either "zero" or "mean". Value for padding speaker's data when it is shifted.
                    "mean" corresponds to the mean of each voxel timeseries.
    Outputs
    beta:           2D numpy array, voxels X shifts, the linear coefficients of shifted timeseries.
    residuals:      1D numpy array (column vector), voxels X 1. Sum of squared residuals
                    after solving the linear system in the least-squares sense.
    rsquared:       1D numpy array (column vector), voxels X 1. R squared values (goodness-of-fit measure)
                    for the coupling solution. Describes how well the speaker time series predict
                    the listener time series.
    """
    # input checks
    if speaker.shape != listener.shape or np.ndim(speaker) != 2 or np.ndim(listener) != 2:
        raise ValueError('Input args ''speaker'' and ''listener'' should have the same shape (as 2D arrays)!')
    if shift % 1 != 0 or shift <= 0:
        raise ValueError('Input arg ''shift'' should be a positive integer!')
    # get TR and voxel numbers
    tr_no, vox_no = speaker.shape
    # preallocate for coefficients (beta), sum of squared residuals,
    beta = np.zeros((vox_no, shift*2+1))
    residuals = np.zeros((vox_no, 1))
    rsquared = np.zeros((vox_no, 1))
    for i in range(vox_no):
        # for readability, define the data for given voxel
        speaker_vox = speaker[:, i]
        listener_vox = listener[:, i]
        # get time-shifted model (speaker) data for given voxel
        speaker_vox_shifted = timeshifted_data_1d(speaker_vox[:, np.newaxis], shift, padding_value)
        # Solve for coefficients, use standard least squares method.
        # An alternative way is to perform the calculation step-by-step ourselves:
        # X = speaker_vox_shifted
        # Y = listener_vox
        # b = (np.linalg.inv(X.T @ X) @ X.T) @ Y
        # We might be able to use this latter method on a stack of matrices,
        # that is, avoiding the for loop.
        # NOTE(review): np.linalg.lstsq returns an EMPTY residual array when
        # the design matrix is rank-deficient (or tr_no <= 2*shift+1), in
        # which case the assignments below would fail — assumes full rank;
        # TODO confirm for the intended data sizes.
        ls_results = np.linalg.lstsq(speaker_vox_shifted, listener_vox, rcond=None)
        beta[i, :] = ls_results[0]
        residuals[i] = ls_results[1]
        # get R squared: 1 - SS_residual / SS_total
        tmp_sstotal = np.sum(np.square((listener_vox - np.mean(listener_vox))))
        rsquared[i] = 1 - (ls_results[1]/tmp_sstotal)
    return beta, residuals, rsquared
def coupling_onestep(speaker, listener, shift, padding_value='zero'):
    """
    Same as the function 'coupling_loop' but with a hopefully faster implementation
    avoiding the for loop across voxels.
    The idea is to calculate the matrix multiplication and matrix inversion steps explicitly,
    as lower-level methods can be invoked on stacks of matrices directly.
    NOTE(review): the explicit normal-equation solve below requires the
    per-voxel design matrices to be invertible (full column rank) —
    assumes tr_no > 2*shift+1 and non-degenerate data; TODO confirm.
    Inputs
    speaker:        2D numpy array, TRs X voxels (each column is a separate variable)
    listener:       2D numpy array, TRs X voxels (each column is a separate variable)
    shift:          Positive integer. Maximum number of data points to shift.
                    E.g., if shift = 2, the columns of the all speaker timeseries
                    will be shifted with the values in range(-shift, shift+1, 1).
    padding_value:  String, either "zero" or "mean". Value for padding speaker's data when it is shifted.
                    "mean" corresponds to the mean of each voxel timeseries.
    Outputs
    beta:           2D numpy array, voxels X shifts, the linear coefficients of shifted timeseries.
    residuals:      2D numpy array (column vector), voxels X 1. Sum of squared residuals
                    after solving the linear system in the least-squares sense.
    rsquared:       2D numpy array (column vector), voxels X 1. R squared values (goodness-of-fit measure)
                    for the coupling solution. Describes how well the speaker time series predict
                    the listener time series.
    """
    # input checks
    if speaker.shape != listener.shape or np.ndim(speaker) != 2 or np.ndim(listener) != 2:
        raise ValueError('Input args ''speaker'' and ''listener'' should have the same shape (as 2D arrays)!')
    if shift % 1 != 0 or shift <= 0:
        raise ValueError('Input arg ''shift'' should be a positive integer!')
    # get shifted voxel-level time series for speaker data
    speaker_shifted = timeshifted_data_2d(speaker, shift, padding_value)  # returns 3D array, shape (v, tr, shifts)
    # get transposed matrices for each voxel
    speaker_shifted_T = np.transpose(speaker_shifted, axes=[0, 2, 1])  # 3D array, shape (v, shifts, tr)
    listener_T = np.transpose(listener, axes=[1, 0])[:, :, np.newaxis]  # 3D array, shape (v, tr, 1)
    # the least-squares solution is given by: b = (X.T @ X)^(-1) @ X.T @ Y
    # (np.linalg.inv and @ both operate on stacks of matrices at once)
    beta = (np.linalg.inv(speaker_shifted_T @ speaker_shifted) @ speaker_shifted_T) @ listener_T
    # get residuals
    listener_modelled = speaker_shifted @ beta  # matrix multiplication for each voxel, results in 3D array, shape (v, tr, 1)
    residuals = np.sum(np.square(listener_T - listener_modelled), 1)  # 1D array, sum of squared residuals for each voxel, shape (v, 1)
    # get R squared
    tmp_sstotal = np.sum(np.square((listener - np.mean(listener, 0))), 0)  # total sum of squares per voxel, shape (v)
    rsquared = 1 - (residuals / tmp_sstotal[:, np.newaxis])  # goodness-of-fit per voxel, shape (v, 1)
    return np.squeeze(beta), residuals, rsquared
def coupling_test(data_dims, shift=2, coupling_function='loop'):
    """
    Test the coupling method. Main steps:
    (1) Generate random coupled data set with known coefficients, using get_coupled_set_2d
    (2) Estimate coupling with 'coupling_loop' or 'coupling_onestep'
    (3) Compare the results to the known coefficients
    Inputs
    data_dims:          Tuple of integers, dimensions of both speaker and listener data
                        (number of TRs and voxels in case of fMRI data).
    shift:              Positive integer. Maximum number of data points to shift.
                        E.g., if shift = 2, the columns of the all speaker timeseries
                        will be shifted with the values in range(-shift, shift+1, 1).
    coupling_function:  String, either 'loop' or 'onestep'. Defines the coupling estimation function to use.
    Output
    beta_known:         1D numpy array, the known coefficients
    beta_est:           2D numpy array (voxels X shifts), estimated coefficients
    """
    # input checks
    if shift % 1 != 0 or shift <= 0:
        raise ValueError('Input arg ''shift'' should be a positive integer!')
    try:
        tr_no, vox_no = data_dims
    except Exception:
        raise ValueError('Input arg ''data_dims'' should be a tuple of integers!')
    # generate beta vector with values in range -1 : +1, normalized to sum to one
    beta_known = default_rng().random((shift*2+1, 1))*2-1
    beta_known = beta_known/np.sum(beta_known)
    beta_known = beta_known[:, 0]
    # generate random coupled set (noise-free, so the estimates should be exact)
    speaker, listener = get_coupled_set_2d(data_dims, beta=beta_known, shift=shift, noise_level=0)
    # estimate coupling — both estimators return (beta, residuals, rsquared);
    # unpack the tuple in each branch (previously the 'onestep' branch
    # assigned the whole tuple to beta_est, breaking the comparison below)
    if coupling_function == 'loop':
        beta_est, residuals, rsquared = coupling_loop(speaker, listener, shift)
    elif coupling_function == 'onestep':
        beta_est, residuals, rsquared = coupling_onestep(speaker, listener, shift)
    else:
        raise ValueError('Input arg ''coupling_function'' should be either ''loop'' or ''onestep''!')
    # compare estimated coefficients with the ground truth, voxel by voxel
    beta_equal = np.zeros((vox_no, 1))
    for i in range(vox_no):
        beta_equal[i, 0] = np.allclose(beta_known, beta_est[i, :])
    # report results
    if np.sum(beta_equal) == vox_no:
        print('COEFFS ARE EQUAL, TEST PASSED!')
    else:
        print('COEFFS ARE NOT EQUAL, TEST FAILED!')
    return beta_known, beta_est
| 46.455635 | 135 | 0.621516 |
acea80043678f69a92cea2bd1992b736973083a7 | 246 | py | Python | bats/__init__.py | nalzok/BATS.py | d2c6c0bbec547fe48f13a62e23d41ff040b48796 | [
"MIT"
] | 2 | 2020-10-29T19:40:54.000Z | 2021-04-13T22:00:55.000Z | bats/__init__.py | nalzok/BATS.py | d2c6c0bbec547fe48f13a62e23d41ff040b48796 | [
"MIT"
] | 3 | 2020-04-25T00:50:50.000Z | 2021-07-02T15:27:03.000Z | bats/__init__.py | nalzok/BATS.py | d2c6c0bbec547fe48f13a62e23d41ff040b48796 | [
"MIT"
] | 1 | 2021-06-28T16:23:37.000Z | 2021-06-28T16:23:37.000Z | from ._version import __version__
from .topology import *
from .dense import *
from .linalg import *
from .linalg_f2 import *
from .linalg_f3 import *
from .linalg_auto import *
from .visualization import persistence_diagram, persistence_barcode
| 27.333333 | 67 | 0.804878 |
acea8135f7f57163a438293fca1408e747495e81 | 7,000 | py | Python | classical_control_theory/dc_motor.py | andreamunafo/classical_control_theory | 5e1bef562e32fb9efcde83891cb19ce5825a6a7f | [
"Apache-2.0"
] | null | null | null | classical_control_theory/dc_motor.py | andreamunafo/classical_control_theory | 5e1bef562e32fb9efcde83891cb19ce5825a6a7f | [
"Apache-2.0"
] | null | null | null | classical_control_theory/dc_motor.py | andreamunafo/classical_control_theory | 5e1bef562e32fb9efcde83891cb19ce5825a6a7f | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 91_DC_motor.ipynb (unless otherwise specified).
__all__ = ['wrap', 'DCMotorParams', 'DCMotor', 'PID', 'Simulator', 'AnimateControlledPendulum']
# Cell
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import animation
from collections import defaultdict
import time
from .simple_pendulum import *
# Cell
def wrap(angle):
    """Normalize an angle in radians into the interval [-pi, pi)."""
    return np.mod(angle + np.pi, 2.0 * np.pi) - np.pi
# Cell
class DCMotorParams():
    """Physical parameters of a DC motor (SI units).

    J: rotor moment of inertia [kg.m^2]
    b: viscous friction constant [N.m.s]
    K: electromotive-force / torque constant [V/rad/s == N.m/A]
    R: armature resistance [Ohm]
    L: armature inductance [H]
    """

    def __init__(self,
                 J=0.01,
                 b=0.1,
                 K=0.01,
                 R=1,
                 L=0.5):
        # inertia, friction, EMF/torque constant, resistance, inductance
        self.J, self.b, self.K, self.R, self.L = J, b, K, R, L
# Cell
class DCMotor():
    """DCMotor implements a direct current motor.

    The state vector ``x`` is (3, 1): [theta (rad), omega (rad/s), i (A)],
    as defined by the rows of the state matrix ``A``. The input ``u`` is
    the armature voltage.
    """
    def __init__(self, x0, params):
        """x0: (3, 1) initial state; params: DCMotorParams-like object
        exposing J, b, K, R, L."""
        self._params = params
        self._x0 = x0
        self._x = x0
        self._J_load = 0       # extra inertia contributed by a load
        self._update_motor_matrix()
        self._load = False     # attached load object, set by connect_to()

    def _update_motor_matrix(self):
        # State-space matrices; state variables are: position (theta),
        # rotation speed (w, or theta_dot) and current (i).
        self._A = np.array([
            [0, 1, 0],
            [0, -self._params.b/(self._params.J+self._J_load), self._params.K/(self._params.J+self._J_load)],
            [0, -self._params.K/self._params.L, -self._params.R/self._params.L]
        ])
        self._B = np.array([[0],[0],[1/self._params.L]])
        self._C = np.array([
            [1, 0, 0], # position
            [0, 1, 0]  # velocity
        ])
        self._D = 0

    def step(self, dt, u):
        """Runs one Euler step of the motor model with input voltage ``u``,
        and outputs the resulting torque."""
        self._x = self._x + dt*(self._A@self._x + self._B*u)
        self._x[0] = wrap(self._x[0]) # wrap theta to stay between -pi and pi
        # Motor torque is K times the armature current, which is state
        # x[2] (x[1] is the rotation speed; using it here was a bug).
        torque = self._params.K*self._x[2,0] # motor torque
        return torque

    def reset(self):
        """Restore the initial state."""
        self._x = self._x0

    def set_load_parameters(self, J_load):
        """Add load inertia to the motor parameters and rebuild the model."""
        self._params.J += J_load
        # Fixed: the method is named _update_motor_matrix; the previous
        # call to self.update_motor_matrix() raised AttributeError.
        self._update_motor_matrix()

    def connect_to(self, load):
        """Attach a load object exposing moment_of_inertia(), position()
        and speed(); the motor inertia is replaced by the load's."""
        self._load = load
        self._params.J = load.moment_of_inertia()
        self._update_motor_matrix()

    def _output_equation(self):
        # y = C x: [position, speed] column vector.
        return self._C@self._x

    def measure(self):
        # We need to move this function out of the DCMotor class.
        # Returns the load's [position, speed] when a load is attached,
        # otherwise the motor's own output equation.
        return np.array([[self._load.position()], [self._load.speed()]]) \
            if self._load else self._output_equation()

    def get_motor_torque(self):
        """the motor torque is proportional to only the armature current `i` by a constant factor `K`: T=K*i"""
        # Current is state x[2] (x[1] is the speed; using it was a bug).
        return self._params.K*self._x[2,0]
# Cell
class PID():
    """Discrete PID controller (backward-difference derivative, rectangular
    integration)."""

    def __init__(self, Kp, Kd, Ki):
        self.Kp = Kp
        self.Kd = Kd
        self.Ki = Ki
        self._error_old = 0  # previous tracking error, for the D term
        self._error_I = 0    # accumulated integral of the error

    def control(self, dt, y_des, y):
        """Controls position to a desired (y_des) value.

        Inputs:
        - dt: seconds (sampling time)
        - y_des: m (desired value)
        - y: m (measured value)
        """
        tracking_error = y_des - y
        derivative = (tracking_error - self._error_old) / dt
        self._error_I += tracking_error * dt
        self._error_old = tracking_error
        p_term = self.Kp * tracking_error
        d_term = self.Kd * derivative
        i_term = self.Ki * self._error_I
        return p_term + d_term + i_term
# Cell
class Simulator():
    """Closed-loop simulator: controller -> motor -> pendulum, logged per step."""

    def __init__(self, pendulum, motor, controller):
        self._pendulum = pendulum
        self._motor = motor
        self._controller = controller
        self._data = defaultdict(list)

    def y_des(self, y):
        """Set the desired final position of the pendulum in degrees."""
        # Note: assigning to self.y_des shadows this method on the instance.
        self.y_des = y

    def run(self, t0, tf, dt):
        """Simulate from t0 to tf with step dt and return the logged data."""
        timeline = np.arange(t0, tf, dt)
        for _ in timeline:
            # Controller works in radians; self.y_des is stored in degrees.
            command = self._controller.control(
                dt, y_des=np.radians(self.y_des), y=self._pendulum.position())
            shaft_torque = self._motor.step(dt, command)
            self._pendulum.step(dt, u=shaft_torque)
            self._data['error (rad)'].append(
                np.radians(self.y_des) - self._pendulum.position())
            self._data['position (rad)'].append(self._pendulum.position())
            self._data['speed (rad/s)'].append(self._pendulum.speed())
            self._data['torque (N.m)'].append(shaft_torque)
        self._data['time (s)'] = timeline
        return self._data
# Cell
class AnimateControlledPendulum():
    """Matplotlib animation of a simulated, controlled pendulum.

    Replays the trajectory stored in ``sim._data`` by a previous
    ``Simulator.run`` call.
    See also: https://jakevdp.github.io/blog/2012/08/18/matplotlib-animation-tutorial/"""
    def __init__(self, sim):
        # sim: a Simulator that has already been run (its _data is replayed).
        self._sim = sim
        self._fig = plt.figure()
        self._ax = self._fig.add_subplot(111, aspect='equal', autoscale_on=False,
                                         xlim=(-.5, .5), ylim=(-.5, 0))
        self._ax.grid()
        # Single line artist representing the pendulum rod.
        self._line, = self._ax.plot([], [], 'o-', lw=2)
        self._title = self._ax.text(0.5, 1.05, "", #bbox={'facecolor':'w', 'alpha':0.5, 'pad':5},
                        transform=self._ax.transAxes, ha="center")
    def animate_init(self):
        """Initialize animation.
        Plot the background of each frame.

        NOTE(review): with blit=True, FuncAnimation expects an iterable of
        artists; this returns a bare Line2D (``animate`` returns a tuple) —
        confirm blitting works.
        """
        self._line.set_data([], [])
        return self._line
    def animate(self, i):
        """Animation function.
        This is called sequentially to perform animation step"""
        # Stored positions are radians; rod_position_at apparently expects
        # degrees (hence np.degrees) — TODO confirm against the pendulum API.
        rod_end_1, rod_end_2 = self._sim._pendulum.rod_position_at(np.degrees(self._sim._data['position (rad)'][i]))
        self._line.set_data([rod_end_1[0], rod_end_2[0]], [rod_end_1[1], rod_end_2[1]])
        self._title.set_text(u"t = {:.1f}s".format(self._sim._data['time (s)'][i]))
        return self._line, self._title
    def start_animation(self, t0, tf, dt):
        """Build and return a FuncAnimation replaying [t0, tf] with step dt."""
        # choose the interval based on dt and the time to animate one step
        t_start = time.time()
        self.animate(0)
        t_end = time.time()
        # NOTE(review): 1000*dt is in milliseconds but (t_end - t_start) is in
        # seconds — likely a unit mismatch; confirm the intended interval.
        interval = 1000 * dt - (t_end - t_start)
        n_frames = int((tf-t0)/dt)
        # call the animator. blit=True means only re-draw the parts that have changed.
        anim = animation.FuncAnimation(self._fig,
                                       self.animate,
                                       init_func=self.animate_init,
                                       frames=n_frames,
                                       interval=interval,
                                       blit=True,
                                       repeat_delay=10000,
                                       repeat=True);
        # Close the figure so only the returned animation object is displayed.
        plt.close()
        return anim
acea81862712f355e6e75bff824a22357118c7ea | 2,519 | py | Python | plugins/Reply/config.py | rostob/Limnoria | 068488c546612ee0198cecf1a4a46e2667551bcf | [
"BSD-3-Clause"
] | 22 | 2021-09-01T20:51:10.000Z | 2022-03-23T05:51:58.000Z | plugins/Reply/config.py | rostob/Limnoria | 068488c546612ee0198cecf1a4a46e2667551bcf | [
"BSD-3-Clause"
] | 16 | 2021-09-02T08:33:29.000Z | 2022-03-28T18:21:09.000Z | plugins/Reply/config.py | rostob/Limnoria | 068488c546612ee0198cecf1a4a46e2667551bcf | [
"BSD-3-Clause"
] | 9 | 2021-09-02T09:07:53.000Z | 2022-03-28T17:34:59.000Z | ###
# Copyright (c) 2005, Daniel DiPaolo
# Copyright (c) 2010-2021, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Reply')
def configure(advanced):
    """Interactive setup hook invoked by supybot's configuration wizard.

    ``advanced`` is True when the user identified themself as an advanced
    user. Configuration takes effect by manipulating the registry; this
    plugin has nothing to ask, so it only registers itself.
    """
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('Reply', True)
# Registry group for this plugin; any per-plugin settings hang off this node.
Reply = conf.registerPlugin('Reply')
# This is where your configuration variables (if any) should go.  For example:
# conf.registerGlobalValue(Reply, 'someConfigVariableName',
#     registry.Boolean(False, _("""Help for someConfigVariableName.""")))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 48.442308 | 79 | 0.772132 |
acea8225aeb1a6912929a14ac36ff14d51ecda7a | 356 | py | Python | sdk/python/pulumi_aws/codeartifact/__init__.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/codeartifact/__init__.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/codeartifact/__init__.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .domain import *
from .domain_permissions import *
from .repository import *
from ._inputs import *
from . import outputs
| 32.363636 | 87 | 0.727528 |
acea82816a2faacbba379b4748b115554c6fcae4 | 11,086 | py | Python | tests/test_tutorial/test_fastapi/test_update/test_tutorial001.py | DustinBracy/sqlmodel | 08dd5d31d702a54b00c14ea8d5fd1270587bb90b | [
"MIT"
] | null | null | null | tests/test_tutorial/test_fastapi/test_update/test_tutorial001.py | DustinBracy/sqlmodel | 08dd5d31d702a54b00c14ea8d5fd1270587bb90b | [
"MIT"
] | null | null | null | tests/test_tutorial/test_fastapi/test_update/test_tutorial001.py | DustinBracy/sqlmodel | 08dd5d31d702a54b00c14ea8d5fd1270587bb90b | [
"MIT"
] | null | null | null | from fastapi.testclient import TestClient
from sqlmodel import create_engine
from sqlmodel.pool import StaticPool
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/heroes/": {
"get": {
"summary": "Read Heroes",
"operationId": "read_heroes_heroes__get",
"parameters": [
{
"required": False,
"schema": {"title": "Offset", "type": "integer", "default": 0},
"name": "offset",
"in": "query",
},
{
"required": False,
"schema": {
"title": "Limit",
"type": "integer",
"default": 100,
"lte": 100,
},
"name": "limit",
"in": "query",
},
],
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Read Heroes Heroes Get",
"type": "array",
"items": {"$ref": "#/components/schemas/HeroRead"},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
},
"post": {
"summary": "Create Hero",
"operationId": "create_hero_heroes__post",
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/HeroCreate"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/HeroRead"}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
},
},
"/heroes/{hero_id}": {
"get": {
"summary": "Read Hero",
"operationId": "read_hero_heroes__hero_id__get",
"parameters": [
{
"required": True,
"schema": {"title": "Hero Id", "type": "integer"},
"name": "hero_id",
"in": "path",
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/HeroRead"}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
},
"patch": {
"summary": "Update Hero",
"operationId": "update_hero_heroes__hero_id__patch",
"parameters": [
{
"required": True,
"schema": {"title": "Hero Id", "type": "integer"},
"name": "hero_id",
"in": "path",
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/HeroUpdate"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/HeroRead"}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
},
},
},
"components": {
"schemas": {
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"HeroCreate": {
"title": "HeroCreate",
"required": ["name", "secret_name"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"secret_name": {"title": "Secret Name", "type": "string"},
"age": {"title": "Age", "type": "integer"},
},
},
"HeroRead": {
"title": "HeroRead",
"required": ["name", "secret_name", "id"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"secret_name": {"title": "Secret Name", "type": "string"},
"age": {"title": "Age", "type": "integer"},
"id": {"title": "Id", "type": "integer"},
},
},
"HeroUpdate": {
"title": "HeroUpdate",
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"secret_name": {"title": "Secret Name", "type": "string"},
"age": {"title": "Age", "type": "integer"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_tutorial(clear_sqlmodel):
    """End-to-end test of the FastAPI 'update' tutorial app from docs_src."""
    from docs_src.tutorial.fastapi.update import tutorial001 as mod
    # Swap in an in-memory SQLite database. StaticPool keeps one connection
    # alive so every session sees the same in-memory DB.
    mod.sqlite_url = "sqlite://"
    mod.engine = create_engine(
        mod.sqlite_url, connect_args=mod.connect_args, poolclass=StaticPool
    )
    with TestClient(mod.app) as client:
        hero1_data = {"name": "Deadpond", "secret_name": "Dive Wilson"}
        # hero2 supplies an explicit id; the assertions below show the create
        # endpoint ignores it and assigns its own.
        hero2_data = {
            "name": "Spider-Boy",
            "secret_name": "Pedro Parqueador",
            "id": 9000,
        }
        hero3_data = {
            "name": "Rusty-Man",
            "secret_name": "Tommy Sharp",
            "age": 48,
        }
        response = client.post("/heroes/", json=hero1_data)
        assert response.status_code == 200, response.text
        response = client.post("/heroes/", json=hero2_data)
        assert response.status_code == 200, response.text
        hero2 = response.json()
        hero2_id = hero2["id"]
        response = client.post("/heroes/", json=hero3_data)
        assert response.status_code == 200, response.text
        hero3 = response.json()
        hero3_id = hero3["id"]
        response = client.get(f"/heroes/{hero2_id}")
        assert response.status_code == 200, response.text
        # The client-supplied id 9000 was not honored, so this must 404.
        response = client.get("/heroes/9000")
        assert response.status_code == 404, response.text
        # The generated OpenAPI schema must match the pinned snapshot.
        response = client.get("/openapi.json")
        data = response.json()
        assert response.status_code == 200, response.text
        assert data == openapi_schema
        response = client.get("/heroes/")
        assert response.status_code == 200, response.text
        data = response.json()
        assert len(data) == 3
        # Partial update: fields omitted from the PATCH body must be kept.
        response = client.patch(
            f"/heroes/{hero2_id}", json={"secret_name": "Spider-Boy"}
        )
        data = response.json()
        assert response.status_code == 200, response.text
        assert data["name"] == hero2_data["name"], "The name should not be set to none"
        assert data["secret_name"] == "Spider-Boy", "The secret name should be updated"
        # Explicit None in the PATCH body must overwrite the stored value.
        response = client.patch(f"/heroes/{hero3_id}", json={"age": None})
        data = response.json()
        assert response.status_code == 200, response.text
        assert data["name"] == hero3_data["name"]
        assert data["age"] is None, (
            "A field should be updatable to None, even if " "that's the default"
        )
        # Updating a nonexistent hero must 404.
        response = client.patch("/heroes/9001", json={"name": "Dragon Cube X"})
        assert response.status_code == 404, response.text
| 38.359862 | 87 | 0.355042 |
acea834a9c1e2654235c474f6356a17b829bb11e | 196 | py | Python | apps.py | tored11/DRF-redis-cache-decorator | 8653f8afce35554e1bc00a5158871b152c5535b0 | [
"MIT"
] | null | null | null | apps.py | tored11/DRF-redis-cache-decorator | 8653f8afce35554e1bc00a5158871b152c5535b0 | [
"MIT"
] | null | null | null | apps.py | tored11/DRF-redis-cache-decorator | 8653f8afce35554e1bc00a5158871b152c5535b0 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class DRFRedisCacheDecorator(AppConfig):
    """Django app config for the DRF redis cache decorator package."""

    name = 'drf_redis_cache_decorator'

    def ready(self):
        """Import the signals module once the app registry is ready.

        The imported name is never used directly; the import itself is what
        matters (Django's usual connect-signals-on-import convention).
        """
        from .signals import invalidate_model_cache_signal
acea839ff33daa0095411f9b74ae92c2cab675f6 | 2,294 | py | Python | src/utils.py | ZeroBone/PresMonDec | 366a4684a45094be46d73f4ab4ac0d5425eb581c | [
"MIT"
] | null | null | null | src/utils.py | ZeroBone/PresMonDec | 366a4684a45094be46d73f4ab4ac0d5425eb581c | [
"MIT"
] | null | null | null | src/utils.py | ZeroBone/PresMonDec | 366a4684a45094be46d73f4ab4ac0d5425eb581c | [
"MIT"
] | 1 | 2021-11-17T12:46:57.000Z | 2021-11-17T12:46:57.000Z | import subprocess
import tempfile
from z3 import *
class TooDeepFormulaError(Exception):
    """Raised when a formula is too deeply nested to be traversed safely."""
class AstReferenceWrapper:
    """Hashable, comparable wrapper around a z3 AST node.

    Delegates hashing and equality to the node's own ``hash()``/``eq()``
    methods so wrapped nodes can live in sets and dict keys.
    """

    def __init__(self, node):
        self._node = node

    def __hash__(self):
        return self._node.hash()

    def __eq__(self, other):
        return self._node.eq(other.unwrap())

    def __repr__(self):
        return str(self._node)

    def unwrap(self):
        """Return the wrapped AST node."""
        return self._node
def wrap_ast_ref(node):
    """Wrap ``node`` (must be a z3 AstRef) for use in hashed collections."""
    assert isinstance(node, AstRef)
    return AstReferenceWrapper(node)
def is_uninterpreted_variable(node):
    """Return True iff ``node`` is a constant with an uninterpreted declaration."""
    if not is_const(node):
        return False
    return node.decl().kind() == Z3_OP_UNINTERPRETED
def get_formula_variables(f):
    """Collect all uninterpreted variables occurring in formula ``f``.

    Returns a set of AstReferenceWrapper objects (wrapped so they can live
    in a set). Raises TooDeepFormulaError if the formula is too deeply
    nested to be traversed recursively.
    """
    vars_set = set()
    # Wrapped nodes already visited, to avoid re-walking shared subterms.
    visited = set()
    def ast_visitor(node):
        if is_uninterpreted_variable(node):
            vars_set.add(wrap_ast_ref(node))
        else:
            for child in node.children():
                child_wrapped = wrap_ast_ref(child)
                if child_wrapped in visited:
                    continue
                visited.add(child_wrapped)
                ast_visitor(child)
    visited.add(wrap_ast_ref(f))
    try:
        ast_visitor(f)
    # ctypes.ArgumentError presumably surfaces from z3's native bindings on
    # very deep terms — TODO confirm; RecursionError is Python's own limit.
    except (RecursionError, ctypes.ArgumentError):
        raise TooDeepFormulaError()
    return vars_set
class Z3CliError(Exception):
    """Raised when invoking the z3 command-line binary fails or returns
    unrecognized output."""
def timeout_ms_to_s(timeout_ms: int) -> int:
    """Convert a timeout in milliseconds to whole seconds, rounding up."""
    # Ceiling division: -(-a // b) == ceil(a / b) for integers.
    return -(-timeout_ms // 1000)
def run_z3_cli(smt_string, timeout_ms):
    """Run the z3 binary on an SMT-LIB string and return sat/unsat/unknown.

    The string is written to a temporary file, z3 is invoked with both a
    hard wall-clock timeout (-T, seconds) and a soft timeout (-t, ms) —
    presumably per-query; TODO confirm against the z3 CLI docs. Raises
    Z3CliError on unparseable output.
    """
    timeout_s = timeout_ms_to_s(timeout_ms)
    # delete=False because z3 reopens the file by path; we unlink manually.
    tmp = tempfile.NamedTemporaryFile(delete=False, mode="w")
    try:
        smt_path = tmp.name
        tmp.write(smt_string)
        tmp.flush()
        result = subprocess.run(["z3", "-T:%d" % timeout_s,
                                 "-t:%d" % timeout_ms, "--", smt_path], capture_output=True)
    finally:
        tmp.close()
        os.unlink(tmp.name)
    # NOTE(review): subprocess.run never returns None (it raises on failure),
    # so this branch looks unreachable — confirm before relying on it.
    if result is None:
        raise Z3CliError("Failed to run z3 as subprocess")
    result = result.stdout.decode("utf-8").rstrip()
    # Order matters: "unknown"/"timeout" first, then "sat" before "unsat" is
    # safe because "unsat" does not start with "sat".
    if result.startswith("unknown") or result.startswith("timeout"):
        return unknown
    if result.startswith("sat"):
        return sat
    if result.startswith("unsat"):
        return unsat
    raise Z3CliError("unknown z3 output: %s" % result)
| 20.854545 | 92 | 0.620314 |
acea8417c819664363bc4221bd96ea374367ab82 | 25,796 | py | Python | tests/test_core.py | jacobtomlinson/ops-bot | 8b20dd634467097e2dc75af2371e7dec4bbb8960 | [
"Apache-2.0"
] | null | null | null | tests/test_core.py | jacobtomlinson/ops-bot | 8b20dd634467097e2dc75af2371e7dec4bbb8960 | [
"Apache-2.0"
] | 8 | 2022-03-01T13:43:05.000Z | 2022-03-05T22:51:43.000Z | tests/test_core.py | jacobtomlinson/ops-bot | 8b20dd634467097e2dc75af2371e7dec4bbb8960 | [
"Apache-2.0"
] | null | null | null | import os
import asyncio
import unittest
import unittest.mock as mock
import asynctest
import asynctest.mock as amock
import importlib
import time
from opsdroid.cli.start import configure_lang
from opsdroid.core import OpsDroid
from opsdroid.events import Message
from opsdroid.connector import Connector
from opsdroid.database import Database
from opsdroid.skill import Skill
from opsdroid.web import Web
from opsdroid.matchers import (
match_regex,
match_dialogflow_action,
match_luisai_intent,
match_sapcai,
match_rasanlu,
match_watson,
match_witai,
)
from opsdroid.testing import run_unit_test
class TestCore(unittest.TestCase):
    """Test the opsdroid core class (synchronous behavior, with mocks)."""
    def setUp(self):
        # Remember the current loop so tearDown can close it and install a
        # fresh one for the next test.
        self.previous_loop = asyncio.get_event_loop()
        configure_lang({})
    def tearDown(self):
        self.previous_loop.close()
        asyncio.set_event_loop(asyncio.new_event_loop())
    def test_core(self):
        with OpsDroid() as opsdroid:
            self.assertIsInstance(opsdroid, OpsDroid)
    def test_exit(self):
        with OpsDroid() as opsdroid, self.assertRaises(SystemExit):
            opsdroid.eventloop = mock.Mock()
            opsdroid.eventloop.is_running.return_value = True
            opsdroid.exit()
            # NOTE(review): this assertion is unreachable if exit() raises
            # SystemExit (which the surrounding assertRaises expects) —
            # confirm intent.
            self.assertTrue(opsdroid.eventloop.stop.called)
    def test_is_running(self):
        with OpsDroid() as opsdroid:
            self.assertFalse(opsdroid.is_running())
            opsdroid._running = True
            self.assertTrue(opsdroid.is_running())
    def test_critical(self):
        with OpsDroid() as opsdroid, self.assertRaises(SystemExit):
            opsdroid.critical("An error", 1)
    def test_load_modules(self):
        # Loading with an empty module config must abort with SystemExit.
        with OpsDroid() as opsdroid:
            opsdroid.loader.load_modules_from_config = mock.Mock()
            opsdroid.loader.load_modules_from_config.return_value = {
                "skills": [],
                "databases": [],
                "connectors": [],
            }
            with self.assertRaises(SystemExit):
                opsdroid.eventloop.run_until_complete(opsdroid.load())
            self.assertTrue(opsdroid.loader.load_modules_from_config.called)
    def test_run(self):
        with OpsDroid() as opsdroid:
            # is_running: False before start, True while running, False after.
            opsdroid.is_running = amock.Mock(side_effect=[False, True, False])
            opsdroid.eventloop = mock.MagicMock()
            opsdroid.eventloop.run_until_complete = mock.Mock()
            with mock.patch("sys.exit") as mock_sysexit:
                opsdroid.run()
            self.assertTrue(opsdroid.eventloop.run_until_complete.called)
            self.assertTrue(mock_sysexit.called)
    def test_run_cancelled(self):
        # A CancelledError from the loop must still lead to a clean exit.
        with OpsDroid() as opsdroid:
            opsdroid.is_running = amock.Mock(side_effect=[False, True, False])
            opsdroid.eventloop = mock.MagicMock()
            opsdroid.eventloop.run_until_complete = mock.Mock(
                side_effect=asyncio.CancelledError
            )
            opsdroid.sync_load = mock.MagicMock()
            with mock.patch("sys.exit") as mock_sysexit:
                opsdroid.run()
            self.assertTrue(opsdroid.eventloop.run_until_complete.called)
            self.assertTrue(mock_sysexit.called)
    def test_run_already_running(self):
        # run() must be a no-op when opsdroid is already running.
        with OpsDroid() as opsdroid:
            opsdroid._running = True
            opsdroid.eventloop = mock.MagicMock()
            opsdroid.eventloop.run_until_complete = mock.Mock(
                side_effect=asyncio.CancelledError
            )
            opsdroid.sync_load = mock.MagicMock()
            with mock.patch("sys.exit") as mock_sysexit:
                opsdroid.run()
            self.assertFalse(opsdroid.eventloop.run_until_complete.called)
            self.assertFalse(mock_sysexit.called)
    @asynctest.patch("opsdroid.core.parse_crontab")
    def test_load(self, mocked_parse_crontab):
        # A valid config must trigger database and connector setup.
        with OpsDroid() as opsdroid:
            mockconfig = {
                "skills": [],
                "databases": [{"name": "mockdb"}],
                "connectors": [{"name": "shell"}],
            }
            opsdroid.web_server = mock.Mock()
            opsdroid.loader = mock.Mock()
            opsdroid.loader.load_modules_from_config = mock.Mock(
                return_value=mockconfig
            )
            opsdroid.setup_databases = amock.CoroutineMock()
            opsdroid.setup_skills = mock.Mock()
            opsdroid.setup_connectors = amock.CoroutineMock()
            opsdroid.eventloop.run_until_complete(opsdroid.load())
            self.assertTrue(opsdroid.setup_databases.called)
            self.assertTrue(opsdroid.setup_connectors.called)
    def test_multiple_opsdroids(self):
        # Creating a second OpsDroid instance must hit the critical() path.
        with OpsDroid() as opsdroid:
            tmp = opsdroid.__class__.critical
            opsdroid.__class__.critical = mock.MagicMock()
            with OpsDroid() as opsdroid2, self.assertRaises(SystemExit):
                opsdroid2.exit()
            self.assertEqual(len(opsdroid.__class__.critical.mock_calls), 1)
            opsdroid.__class__.critical = tmp
    def test_setup_modules(self):
        with OpsDroid() as opsdroid:
            def mockskill(x):
                return x * 2
            # The `skill` attribute marks the function as a skill for setup.
            mockskill.skill = True
            mockmodule = mock.Mock(setup=mock.MagicMock(), mockskill=mockskill)
            example_modules = [{"module": mockmodule, "config": {}}]
            opsdroid.setup_skills(example_modules)
            self.assertEqual(len(mockmodule.setup.mock_calls), 1)
            self.assertEqual(mockmodule.method_calls[0][0], "setup")
            self.assertEqual(len(mockmodule.method_calls[0][1]), 2)
            self.assertEqual(mockmodule.method_calls[0][1][1], {})
            self.assertEqual(len(opsdroid.skills), 2)
            mockclassmodule = importlib.import_module(
                "opsdroid.testing.mockmodules.skills.skill.skilltest"
            )
            example_modules = [{"module": mockclassmodule, "config": {}}]
            opsdroid.setup_skills(example_modules)
            self.assertEqual(len(opsdroid.skills), 3)
    def test_default_connector(self):
        # Without an explicit default, the first connector is the default;
        # a connector configured with default=True wins.
        with OpsDroid() as opsdroid:
            mock_connector = Connector({}, opsdroid=opsdroid)
            opsdroid.connectors.append(mock_connector)
            self.assertEqual(opsdroid.default_connector, mock_connector)
            mock_default_connector = Connector({"default": True}, opsdroid=opsdroid)
            opsdroid.connectors.append(mock_default_connector)
            self.assertEqual(opsdroid.default_connector, mock_default_connector)
    def test_default_target(self):
        with OpsDroid() as opsdroid:
            mock_connector = Connector({}, opsdroid=opsdroid)
            self.assertEqual(None, mock_connector.default_target)
    def test_connector_names(self):
        with OpsDroid() as opsdroid:
            with self.assertRaises(ValueError):
                opsdroid._connector_names
            # Ensure names are always unique
            c1 = Connector({"name": "spam"}, opsdroid=opsdroid)
            c2 = Connector({"name": "spam"}, opsdroid=opsdroid)
            opsdroid.connectors = [c1, c2]
            names = opsdroid._connector_names
            assert "spam" in names
            assert "spam_1" in names
class TestCoreAsync(asynctest.TestCase):
"""Test the async methods of the opsdroid core class."""
async def setUp(self):
configure_lang({})
async def getMockSkill(self):
async def mockedskill(opsdroid, config, message):
await message.respond("Test")
mockedskill.config = {}
return mockedskill
async def getMockMethodSkill(self):
async def mockedskill(message):
await message.respond("Test")
mockedskill.config = {}
return mockedskill
async def test_handle_stop_signal(self):
with OpsDroid() as opsdroid:
opsdroid._running = True
self.assertTrue(opsdroid.is_running())
opsdroid.stop = amock.CoroutineMock()
opsdroid.unload = amock.CoroutineMock()
await opsdroid.handle_stop_signal()
self.assertFalse(opsdroid.is_running())
self.assertTrue(opsdroid.stop.called)
self.assertTrue(opsdroid.unload.called)
async def test_unload_and_stop(self):
with OpsDroid() as opsdroid:
mock_connector = Connector({}, opsdroid=opsdroid)
mock_connector.disconnect = amock.CoroutineMock()
opsdroid.connectors = [mock_connector]
mock_database = Database({})
mock_database.disconnect = amock.CoroutineMock()
opsdroid.memory.databases = [mock_database]
mock_skill = amock.Mock(config={"name": "mockskill"})
opsdroid.skills = [mock_skill]
opsdroid.web_server = Web(opsdroid)
opsdroid.web_server.stop = amock.CoroutineMock()
mock_web_server = opsdroid.web_server
async def task():
await asyncio.sleep(0.5)
t = asyncio.Task(task(), loop=self.loop)
await opsdroid.stop()
await opsdroid.unload()
self.assertTrue(t.cancel())
self.assertTrue(mock_connector.disconnect.called)
self.assertTrue(mock_database.disconnect.called)
self.assertTrue(mock_web_server.stop.called)
self.assertTrue(opsdroid.web_server is None)
self.assertFalse(opsdroid.connectors)
self.assertFalse(opsdroid.memory.databases)
self.assertFalse(opsdroid.skills)
async def test_reload(self):
with OpsDroid() as opsdroid:
opsdroid.start = amock.CoroutineMock()
opsdroid.stop = amock.CoroutineMock()
opsdroid.load = amock.CoroutineMock()
opsdroid.unload = amock.CoroutineMock()
await opsdroid.reload()
self.assertTrue(opsdroid.load.called)
self.assertTrue(opsdroid.unload.called)
self.assertTrue(opsdroid.start.called)
self.assertTrue(opsdroid.stop.called)
async def test_parse_regex(self):
with OpsDroid() as opsdroid:
regex = r"Hello .*"
mock_connector = Connector({}, opsdroid=opsdroid)
mock_connector.send = amock.CoroutineMock()
skill = await self.getMockSkill()
opsdroid.skills.append(match_regex(regex)(skill))
message = Message(
text="Hello World",
user="user",
target="default",
connector=mock_connector,
)
await opsdroid.parse(message)
self.assertTrue(mock_connector.send.called)
async def test_parse_regex_method_skill(self):
with OpsDroid() as opsdroid:
regex = r"Hello .*"
mock_connector = Connector({}, opsdroid=opsdroid)
mock_connector.send = amock.CoroutineMock()
skill = await self.getMockMethodSkill()
opsdroid.skills.append(match_regex(regex)(skill))
message = Message(
text="Hello world",
user="user",
target="default",
connector=mock_connector,
)
await opsdroid.parse(message)
self.assertTrue(mock_connector.send.called)
async def test_parse_regex_insensitive(self):
with OpsDroid() as opsdroid:
regex = r"Hello .*"
mock_connector = Connector({}, opsdroid=opsdroid)
mock_connector.send = amock.CoroutineMock()
skill = await self.getMockSkill()
opsdroid.skills.append(match_regex(regex, case_sensitive=False)(skill))
message = Message(
text="HELLO world",
user="user",
target="default",
connector=mock_connector,
)
await opsdroid.parse(message)
self.assertTrue(mock_connector.send.called)
async def test_parse_dialogflow(self):
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "path/test.json"
with OpsDroid() as opsdroid:
opsdroid.modules = {
"parsers": [
{
"config": {
"name": "dialogflow",
"project-id": "test",
"enabled": True,
}
}
]
}
dialogflow_action = "smalltalk.greetings.whatsup"
skill = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
match_dialogflow_action(dialogflow_action)(skill)
message = Message(
text="Hello world",
user="user",
target="default",
connector=mock_connector,
)
with amock.patch(
"opsdroid.parsers.dialogflow.parse_dialogflow"
), amock.patch("opsdroid.parsers.dialogflow.call_dialogflow"):
tasks = await opsdroid.parse(message)
self.assertEqual(len(tasks), 3)
tasks = await opsdroid.parse(message)
self.assertLogs("_LOGGER", "warning")
async def test_parse_luisai(self):
with OpsDroid() as opsdroid:
opsdroid.modules = {
"parsers": [{"config": {"name": "luisai", "enabled": True}}]
}
luisai_intent = ""
skill = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
match_luisai_intent(luisai_intent)(skill)
message = Message(
text="Hello world",
user="user",
target="default",
connector=mock_connector,
)
with amock.patch("opsdroid.parsers.luisai.parse_luisai"):
tasks = await opsdroid.parse(message)
self.assertEqual(len(tasks), 3)
async def test_parse_rasanlu(self):
with OpsDroid() as opsdroid:
opsdroid.modules = {
"parsers": [
{"config": {"name": "rasanlu", "module": "", "enabled": True}}
]
}
rasanlu_intent = ""
skill = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
match_rasanlu(rasanlu_intent)(skill)
message = Message(
text="Hello", user="user", target="default", connector=mock_connector
)
with amock.patch("opsdroid.parsers.rasanlu.parse_rasanlu"):
tasks = await opsdroid.parse(message)
self.assertEqual(len(tasks), 3)
async def test_parse_sapcai(self):
with OpsDroid() as opsdroid:
opsdroid.modules = {
"parsers": [{"config": {"name": "sapcai", "enabled": True}}]
}
sapcai_intent = ""
skill = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
match_sapcai(sapcai_intent)(skill)
message = Message(
text="Hello", user="user", target="default", connector=mock_connector
)
with amock.patch("opsdroid.parsers.sapcai.parse_sapcai"):
tasks = await opsdroid.parse(message)
self.assertEqual(len(tasks), 3)
async def test_parse_watson(self):
with OpsDroid() as opsdroid:
opsdroid.modules = {
"parsers": [{"config": {"name": "watson", "enabled": True}}]
}
watson_intent = ""
skill = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
match_watson(watson_intent)(skill)
message = Message("Hello world", "user", "default", mock_connector)
with amock.patch("opsdroid.parsers.watson.parse_watson"):
tasks = await opsdroid.parse(message)
self.assertEqual(len(tasks), 3)
async def test_parse_witai(self):
with OpsDroid() as opsdroid:
opsdroid.modules = {
"parsers": [{"config": {"name": "witai", "enabled": True}}]
}
witai_intent = ""
skill = amock.CoroutineMock()
mock_connector = Connector({}, opsdroid=opsdroid)
match_witai(witai_intent)(skill)
message = Message(
text="Hello world",
user="user",
target="default",
connector=mock_connector,
)
with amock.patch("opsdroid.parsers.witai.parse_witai"):
tasks = await opsdroid.parse(message)
self.assertEqual(len(tasks), 3)
async def test_send_default_one(self):
with OpsDroid() as opsdroid, amock.patch(
"opsdroid.connector.Connector.send"
) as patched_send:
connector = Connector({"name": "shell"})
patched_send.return_value = asyncio.Future()
patched_send.return_value.set_result("")
opsdroid.connectors = [connector]
input_message = Message("Test")
await opsdroid.send(input_message)
message = patched_send.call_args[0][0]
assert message is input_message
async def test_send_default_explicit(self):
with OpsDroid() as opsdroid, amock.patch(
"opsdroid.connector.Connector.send"
) as patched_send:
connector = Connector({"name": "shell", "default": True})
connector2 = Connector({"name": "matrix"})
patched_send.return_value = asyncio.Future()
patched_send.return_value.set_result("")
opsdroid.connectors = [connector, connector2]
input_message = Message("Test")
await opsdroid.send(input_message)
message = patched_send.call_args[0][0]
assert message is input_message
async def test_send_name(self):
with OpsDroid() as opsdroid, amock.patch(
"opsdroid.connector.Connector.send"
) as patched_send:
connector = Connector({"name": "shell"})
connector2 = Connector({"name": "matrix"})
patched_send.return_value = asyncio.Future()
patched_send.return_value.set_result("")
opsdroid.connectors = [connector, connector2]
input_message = Message(text="Test", connector="shell")
await opsdroid.send(input_message)
message = patched_send.call_args[0][0]
assert message is input_message
    async def test_start_connectors(self):
        """Connectors should set up and start without raising, and setting
        up again while the event loop is running should append another
        connector to the existing list.
        """
        with OpsDroid() as opsdroid:
            # Setting up with an empty connector list is fatal.
            with self.assertRaises(SystemExit):
                await opsdroid.setup_connectors([])
                await opsdroid.start_connectors()
            module = {}
            module["config"] = {}
            module["module"] = importlib.import_module(
                "opsdroid.testing.mockmodules.connectors.connector_mocked"
            )
            # The mocked connector implements the required methods, so
            # starting it must not bubble up NotImplementedError.
            try:
                await opsdroid.setup_connectors([module])
                await opsdroid.start_connectors()
            except NotImplementedError:
                self.fail("Connector raised NotImplementedError.")
            self.assertEqual(len(opsdroid.connectors), 1)
            # With the event loop reported as running, a second setup adds
            # to (rather than replaces) the connector list.
            with mock.patch.object(opsdroid.eventloop, "is_running", return_value=True):
                await opsdroid.setup_connectors([module])
                await opsdroid.start_connectors()
            self.assertEqual(len(opsdroid.connectors), 2)
    async def test_start_connectors_not_implemented(self):
        """Bare connector modules raise NotImplementedError when started,
        but are still appended to opsdroid.connectors.
        """
        with OpsDroid() as opsdroid:
            # An empty connector list is fatal.
            with self.assertRaises(SystemExit):
                await opsdroid.setup_connectors([])
                await opsdroid.start_connectors()
            module = {}
            module["config"] = {}
            module["module"] = importlib.import_module(
                "opsdroid.testing.mockmodules.connectors.connector_bare"
            )
            with self.assertRaises(NotImplementedError):
                await opsdroid.setup_connectors([module])
                await opsdroid.start_connectors()
            # The connector is registered even though starting it failed.
            self.assertEqual(1, len(opsdroid.connectors))
            with self.assertRaises(NotImplementedError):
                await opsdroid.setup_connectors([module, module])
                await opsdroid.start_connectors()
            # Both additional connectors registered: 1 + 2 = 3.
            self.assertEqual(3, len(opsdroid.connectors))
    async def test_start_databases(self):
        """A database module lacking the required methods raises
        NotImplementedError when started, but is still registered in
        opsdroid.memory.
        """
        with OpsDroid() as opsdroid:
            # No databases configured is a valid (no-op) setup.
            await opsdroid.setup_databases([])
            await opsdroid.start_databases()
            module = {}
            module["config"] = {}
            module["module"] = importlib.import_module(
                "opsdroid.testing.mockmodules.databases.database"
            )
            with self.assertRaises(NotImplementedError):
                await opsdroid.setup_databases([module])
                await opsdroid.start_databases()
            self.assertEqual(1, len(opsdroid.memory.databases))
    async def test_train_rasanlu(self):
        """Training proceeds on a compatible Rasa NLU version and exits
        opsdroid on an incompatible one.
        """
        with OpsDroid() as opsdroid, amock.patch(
            "opsdroid.parsers.rasanlu._get_rasa_nlu_version"
        ) as mock_crc:
            opsdroid.modules = {
                "parsers": [{"config": {"name": "rasanlu", "enabled": True}}]
            }
            # A supported (2.x) Rasa NLU version trains without error.
            mock_crc.return_value = {
                "version": "2.0.0",
                "minimum_compatible_version": "2.0.0",
            }
            await opsdroid.train_parsers({})
            # An unsupported (1.x) version aborts with SystemExit.
            mock_crc.return_value = {
                "version": "1.0.0",
                "minimum_compatible_version": "1.0.0",
            }
            with self.assertRaises(SystemExit):
                await opsdroid.train_parsers({})
    async def test_watchdog_works(self):
        """Sanity check that watchgod itself reports a change batch when a
        new Python file is created inside a watched directory.
        """
        from watchgod import awatch, PythonWatcher
        from tempfile import TemporaryDirectory
        import os.path
        import asyncio
        async def watch_dirs(directories):
            # Await the first change batch for each directory, assert it is
            # non-empty, then stop watching.
            async def watch_dir(directory):
                async for changes in awatch(directory, watcher_cls=PythonWatcher):
                    assert changes
                    break
            await asyncio.gather(*[watch_dir(directory) for directory in directories])
        async def modify_dir(directory):
            # Give the watcher a moment to start before touching the dir.
            await asyncio.sleep(0.1)
            with open(os.path.join(directory, "test.py"), "w") as fh:
                fh.write("")
        with TemporaryDirectory() as directory:
            await asyncio.gather(watch_dirs([directory]), modify_dir(directory))
    async def test_watchdog(self):
        """With autoreload enabled, creating a file inside a skill path
        should eventually trigger opsdroid.reload.
        """
        skill_path = "opsdroid/testing/mockmodules/skills/skill/skilltest"
        example_config = {
            "autoreload": True,
            "connectors": {"websocket": {}},
            "skills": {"test": {"path": skill_path}},
        }
        async def modify_dir(opsdroid, directory):
            # Give the watcher a moment to start before creating the file.
            await asyncio.sleep(0.1)
            mock_file_path = os.path.join(directory, "mock.py")
            with open(mock_file_path, "w") as fh:
                fh.write("")
                fh.flush()
            # let other tasks run so the watch_paths task can detect the new file
            await asyncio.sleep(0.5)
            # Find and cancel the watcher task so the test can finish.
            for task in opsdroid.tasks:
                try:
                    # py3.8+
                    task_name = task.get_coro().__name__
                except AttributeError:
                    # py3.7
                    task_name = task._coro.__name__
                if task_name == "watch_paths":
                    task.cancel()
                    break
            os.remove(mock_file_path)
            return True
        with OpsDroid(config=example_config) as opsdroid:
            opsdroid.reload = amock.CoroutineMock()
            await opsdroid.load()
            assert await run_unit_test(opsdroid, modify_dir, opsdroid, skill_path)
            # Poll up to `timeout` seconds for the reload triggered by the
            # file change to be observed.
            timeout = 5
            start = time.time()
            while not opsdroid.reload.called and start + timeout > time.time():
                await asyncio.sleep(0.5)
            assert opsdroid.reload.called
async def test_get_connector_database(self):
skill_path = "opsdroid/testing/mockmodules/skills/skill/skilltest"
example_config = {
"connectors": {"websocket": {}},
"skills": {"test": {"path": skill_path}},
}
with OpsDroid(config=example_config) as opsdroid:
await opsdroid.load()
assert opsdroid.get_connector("websocket") is not None
assert opsdroid.get_connector("slack") is None
assert opsdroid.get_database("inmem") is not None
assert opsdroid.get_database("redis") is None
async def test_no_skills(self):
with OpsDroid() as opsdroid:
with self.assertRaises(SystemExit):
await opsdroid.start()
    async def test_get_skill_instance(self):
        """get_skill_instance returns the owning Skill instance for a
        method-based skill, and None for a plain function skill.
        """
        class ClassSkill(Skill):
            @match_regex(r"hello")
            async def method_skill(self, message):
                pass
        @match_regex(r"hello")
        def function_skill(self, opsdroid, config, message):
            pass
        with OpsDroid() as opsdroid:
            opsdroid.register_skill(function_skill)
            # A free function has no owning instance to resolve.
            assert opsdroid.get_skill_instance(opsdroid.skills[0]) is None
        with OpsDroid() as opsdroid:
            inst = ClassSkill(opsdroid, {})
            opsdroid.register_skill(inst.method_skill)
            # A bound method resolves back to its Skill instance.
            assert opsdroid.get_skill_instance(opsdroid.skills[0]) is inst
| 37.768668 | 88 | 0.586448 |
acea853b14b63f33a0afab1bad54c4ddea1976ca | 155,806 | py | Python | core/tests/test_utils.py | Aarjav-Jain/oppia | c4225a9e3fc850786c7511f99d81ba43501a6899 | [
"Apache-2.0"
] | 4 | 2021-09-16T16:46:53.000Z | 2022-02-06T13:00:14.000Z | core/tests/test_utils.py | Aarjav-Jain/oppia | c4225a9e3fc850786c7511f99d81ba43501a6899 | [
"Apache-2.0"
] | null | null | null | core/tests/test_utils.py | Aarjav-Jain/oppia | c4225a9e3fc850786c7511f99d81ba43501a6899 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import contextlib
import copy
import datetime
import functools
import inspect
import itertools
import json
import logging
import os
import re
import unittest
from core import feconf
from core import python_utils
from core import schema_utils
from core import utils
from core.constants import constants
from core.controllers import base
from core.domain import auth_domain
from core.domain import caching_domain
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import fs_services
from core.domain import interaction_registry
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.search import elastic_search_services
from core.platform.taskqueue import cloud_tasks_emulator
import main
from proto_files import text_classifier_pb2
import elasticsearch
import requests_mock
import webtest
from typing import Any, Dict, Optional # isort: skip
# Storage model modules and platform services used throughout these test
# utilities, resolved via models.Registry so the platform-specific
# implementations are picked up.
(
    auth_models, base_models, exp_models,
    feedback_models, question_models, skill_models,
    story_models, suggestion_models, topic_models
) = models.Registry.import_models([
    models.NAMES.auth, models.NAMES.base_model, models.NAMES.exploration,
    models.NAMES.feedback, models.NAMES.question, models.NAMES.skill,
    models.NAMES.story, models.NAMES.suggestion, models.NAMES.topic
])

datastore_services = models.Registry.import_datastore_services()
storage_services = models.Registry.import_storage_services()
email_services = models.Registry.import_email_services()
memory_cache_services = models.Registry.import_cache_services()
platform_auth_services = models.Registry.import_auth_services()
platform_taskqueue_services = models.Registry.import_taskqueue_services()

# Prefix to append to all lines printed by tests to the console.
# We are using the b' prefix as all the stdouts are in bytes.
LOG_LINE_PREFIX = b'LOG_INFO_TEST: '

# List of model classes that don't have Wipeout- or Takeout-related class
# methods defined, because they're not used directly but only serve as
# base classes for the other models.
BASE_MODEL_CLASSES_WITHOUT_DATA_POLICIES = (
    'BaseCommitLogEntryModel',
    'BaseHumanMaintainedModel',
    'BaseMapReduceBatchResultsModel',
    'BaseModel',
    'BaseSnapshotContentModel',
    'BaseSnapshotMetadataModel',
    'VersionedModel',
)
def get_filepath_from_filename(filename, rootdir):
    """Recursively searches rootdir for a file with the given name.

    Different files live in different subdirectories of rootdir, so the
    whole tree is walked and every directory entry is compared against the
    requested filename. For example,
    exploration-editor-page.mainpage.html lives under
    core/templates/pages/exploration-editor-page while
    error-page.mainpage.html lives under core/templates/pages/error-pages;
    both are found by the same walk.

    Args:
        filename: str. The name of the file.
        rootdir: str. The directory to search the file in.

    Returns:
        str|None. The full path of the file if it is found, otherwise
        None.

    Raises:
        Exception. More than one file in rootdir has the given name.
    """
    # Error pages are compiled and stored as
    # error-page-{status_code}.mainpage.html, but they are all served from
    # the single source template error-page.mainpage.html, so any
    # error-page variant is mapped back to that source file.
    if filename.startswith('error-page'):
        filename = 'error-page.mainpage.html'
    matches = []
    for subdir, _, filenames in os.walk(rootdir):
        matches.extend(
            os.path.join(subdir, candidate)
            for candidate in filenames if candidate == filename)
    if len(matches) > 1:
        raise Exception('Multiple files found with name: %s' % filename)
    return matches[0] if matches else None
def mock_load_template(filename):
    """Mock for load_template used in backend tests.

    Backend tests run without a prior webpack compilation, so the
    webpack_bundles directory that the real load_template reads from does
    not exist and the real function would fail. This mock instead resolves
    the template from the source directory and returns its contents.

    Args:
        filename: str. The name of the file for which template is to be
            returned.

    Returns:
        str. The contents of the given file.
    """
    pages_dir = os.path.join('core', 'templates', 'pages')
    template_path = get_filepath_from_filename(filename, pages_dir)
    with python_utils.open_file(template_path, 'r') as template_file:
        return template_file.read()
def check_image_png_or_webp(image_string):
    """Checks whether the given data URL is for a PNG or WebP image.

    Args:
        image_string: str. Image url in base64 format.

    Returns:
        bool. True if the string is a PNG or WebP data URL, False
        otherwise.
    """
    for accepted_prefix in ('data:image/png', 'data:image/webp'):
        if image_string.startswith(accepted_prefix):
            return True
    return False
def get_storage_model_module_names():
    """Get all module names in storage.

    Yields:
        models.NAMES. Each member of the enum, identifying one storage
        model module that can be imported via models.Registry.
    """
    # models.NAMES is iterable, so its members are yielded directly.
    for name in models.NAMES:
        yield name
def get_storage_model_classes():
    """Yields every class in the storage layer that derives (directly or
    transitively) from a class named 'Model'.
    """
    for module_name in get_storage_model_module_names():
        (module,) = models.Registry.import_models([module_name])
        for attr_name, attr_value in inspect.getmembers(module):
            if not inspect.isclass(attr_value):
                continue
            candidate = getattr(module, attr_name)
            ancestor_names = [
                ancestor.__name__
                for ancestor in inspect.getmro(candidate)]
            if 'Model' in ancestor_names:
                yield candidate
class ElasticSearchStub:
    """This stub class mocks the functionality of ES in
    elastic_search_services.py.

    IMPORTANT NOTE TO DEVELOPERS: These mock functions are NOT guaranteed to
    be exact implementations of elasticsearch functionality. If the results of
    this mock and the local dev elasticsearch instance differ, the mock
    functions should be updated so that their behaviour matches what a local
    dev instance would return. (For example, this mock always has a 'version'
    of 1 in the return dict and an arbitrary '_seq_no', although the version
    number increments with every PUT in the elasticsearch Python client
    library and the '_seq_no' increments with every operation.)
    """

    # Class-level store mapping index name -> list of documents. Shared by
    # all instances of the stub; cleared via reset().
    _DB = {}

    def reset(self):
        """Helper method that clears the mock database."""
        self._DB.clear()

    def _generate_index_not_found_error(self, index_name):
        """Helper method that raises an elasticsearch 'index not found' 404
        error.

        Note that this helper raises (rather than returns) the error, so
        callers that write ``raise self._generate_index_not_found_error(...)``
        never actually reach their own ``raise``.

        Args:
            index_name: str. The index that was not found.

        Raises:
            elasticsearch.NotFoundError. A manually-constructed error
                indicating that the index was not found.
        """
        raise elasticsearch.NotFoundError(
            404, 'index_not_found_exception', {
                'status': 404,
                'error': {
                    'reason': 'no such index [%s]' % index_name,
                    'root_cause': [{
                        'reason': 'no such index [%s]' % index_name,
                        'index': index_name,
                        'index_uuid': '_na_',
                        'type': 'index_not_found_exception',
                        'resource.type': 'index_or_alias',
                        'resource.id': index_name
                    }],
                    'index': index_name,
                    'index_uuid': '_na_',
                    'type': 'index_not_found_exception',
                    'resource.type': 'index_or_alias',
                    'resource.id': index_name
                }
            }
        )

    def mock_create_index(self, index_name):
        """Creates an index with the given name.

        Args:
            index_name: str. The name of the index to create.

        Returns:
            dict. A dict representing the ElasticSearch API response.

        Raises:
            elasticsearch.RequestError. An index with the given name already
                exists.
        """
        if index_name in self._DB:
            raise elasticsearch.RequestError(
                400, 'resource_already_exists_exception',
                'index [%s/RaNdOmStRiNgOfAlPhAs] already exists' % index_name)
        self._DB[index_name] = []
        return {
            'index': index_name,
            'acknowledged': True,
            'shards_acknowledged': True
        }

    def mock_index(self, index_name, document, id=None): # pylint: disable=redefined-builtin
        """Adds a document with the given ID to the index.

        Note that, unfortunately, we have to keep the name of "id" for the
        last kwarg, although it conflicts with a Python builtin. This is
        because the name is an existing part of the API defined at
        https://elasticsearch-py.readthedocs.io/en/v7.10.1/api.html

        Args:
            index_name: str. The name of the index to create.
            document: dict. The document to store.
            id: str. The unique identifier of the document.

        Returns:
            dict. A dict representing the ElasticSearch API response.

        Raises:
            elasticsearch.NotFoundError. The given index name was not found.
        """
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        # Upsert semantics: any existing document with the same id is
        # removed before the new document is appended.
        self._DB[index_name] = [
            d for d in self._DB[index_name] if d['id'] != id]
        self._DB[index_name].append(document)
        return {
            '_index': index_name,
            '_shards': {
                'total': 2,
                'successful': 1,
                'failed': 0,
            },
            '_seq_no': 96,
            '_primary_term': 1,
            'result': 'created',
            '_id': id,
            '_version': 1,
            '_type': '_doc',
        }

    def mock_exists(self, index_name, doc_id):
        """Checks whether a document with the given ID exists in the mock
        database.

        Args:
            index_name: str. The name of the index to check.
            doc_id: str. The document id to check.

        Returns:
            bool. Whether the document exists in the index.

        Raises:
            elasticsearch.NotFoundError: The given index name was not found.
        """
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        return any(d['id'] == doc_id for d in self._DB[index_name])

    def mock_delete(self, index_name, doc_id):
        """Deletes a document from an index in the mock database. Raises
        NotFoundError if the document is not in the index.

        Args:
            index_name: str. The name of the index to delete the document from.
            doc_id: str. The document id to be deleted from the index.

        Returns:
            dict. A dict representing the ElasticSearch API response.

        Raises:
            elasticsearch.NotFoundError. The given index name was not found, or
                the given doc_id was not found in the given index.
        """
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        docs = [d for d in self._DB[index_name] if d['id'] != doc_id]
        # A shorter filtered list means the document existed and was removed.
        if len(self._DB[index_name]) != len(docs):
            self._DB[index_name] = docs
            return {
                '_type': '_doc',
                '_seq_no': 99,
                '_shards': {
                    'total': 2,
                    'successful': 1,
                    'failed': 0
                },
                'result': 'deleted',
                '_primary_term': 1,
                '_index': index_name,
                '_version': 4,
                '_id': '0'
            }
        raise elasticsearch.NotFoundError(
            404, {
                '_index': index_name,
                '_type': '_doc',
                '_id': doc_id,
                '_version': 1,
                'result': 'not_found',
                '_shards': {
                    'total': 2,
                    'successful': 1,
                    'failed': 0
                },
                '_seq_no': 103,
                '_primary_term': 1
            })

    def mock_delete_by_query(self, index_name, query):
        """Deletes documents from an index based on the given query.

        Note that this mock only supports a specific for the query, i.e. the
        one which clears the entire index. It asserts that all calls to this
        function use that query format.

        Args:
            index_name: str. The name of the index to delete the documents from.
            query: dict. The query that defines which documents to delete.

        Returns:
            dict. A dict representing the ElasticSearch response.

        Raises:
            AssertionError. The query is not in the correct form.
            elasticsearch.NotFoundError. The given index name was not found.
        """
        assert list(query.keys()) == ['query']
        assert query['query'] == {
            'match_all': {}
        }
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        index_size = len(self._DB[index_name])
        # Clear the list in place so other references to it stay valid.
        del self._DB[index_name][:]
        return {
            'took': 72,
            'version_conflicts': 0,
            'noops': 0,
            'throttled_until_millis': 0,
            'failures': [],
            'throttled_millis': 0,
            'total': index_size,
            'batches': 1,
            'requests_per_second': -1.0,
            'retries': {u'search': 0, u'bulk': 0},
            'timed_out': False,
            'deleted': index_size
        }

    def mock_search(self, body=None, index=None, params=None):
        """Searches and returns documents that match the given query.

        Args:
            body: dict. A dictionary search definition that uses Query DSL.
            index: str. The name of the index to search.
            params: dict. A dict with two keys: `size` and `from`. The
                corresponding values are ints which represent the number of
                results to fetch, and the offset from which to fetch them,
                respectively.

        Returns:
            dict. A dict representing the ElasticSearch response.

        Raises:
            AssertionError. The given arguments are not supported by this mock.
            elasticsearch.NotFoundError. The given index name was not found.
        """
        assert body is not None
        # "_all" and "" are special index names that are used to search across
        # all indexes. We do not allow their use.
        assert index not in ['_all', '', None]
        assert sorted(params.keys()) == ['from', 'size']
        if index not in self._DB:
            raise self._generate_index_not_found_error(index)
        # De-duplicate documents by id, keeping the first occurrence.
        result_docs = []
        result_doc_ids = set([])
        for doc in self._DB[index]:
            if not doc['id'] in result_doc_ids:
                result_docs.append(doc)
                result_doc_ids.add(doc['id'])
        filters = body['query']['bool']['filter']
        terms = body['query']['bool']['must']
        # Apply every "match" filter: the doc's field value must appear in
        # the filter value.
        for f in filters:
            for k, v in f['match'].items():
                result_docs = [doc for doc in result_docs if doc[k] in v]
        # Apply full-text "must" terms: every query word must appear among
        # the words of the doc's string-valued fields.
        if terms:
            filtered_docs = []
            for term in terms:
                for _, v in term.items():
                    values = v['query'].split(' ')
                    for doc in result_docs:
                        strs = [val for val in doc.values() if isinstance(
                            val, python_utils.BASESTRING)]
                        words = []
                        for s in strs:
                            words += s.split(' ')
                        if all(value in words for value in values):
                            filtered_docs.append(doc)
            result_docs = filtered_docs
        # Paginate using the 'from'/'size' params before formatting hits.
        formatted_result_docs = [{
            '_id': doc['id'],
            '_score': 0.0,
            '_type': '_doc',
            '_index': index,
            '_source': doc
        } for doc in result_docs[
            params['from']: params['from'] + params['size']
        ]]
        return {
            'timed_out': False,
            '_shards': {
                'failed': 0,
                'total': 1,
                'successful': 1,
                'skipped': 0
            },
            'took': 4,
            'hits': {
                'hits': formatted_result_docs
            },
            'total': {
                'value': len(formatted_result_docs),
                'relation': 'eq'
            },
            'max_score': max(
                [0.0] + [d['_score'] for d in formatted_result_docs]),
        }
class AuthServicesStub:
    """Test-only implementation of the public API in core.platform.auth."""

    def __init__(self):
        """Initializes a new instance that emulates an empty auth server."""
        # Maps auth IDs to the user IDs they are associated with.
        self._user_id_by_auth_id = {}
        # Set of user IDs that have an "external" (non-Oppia) association.
        self._external_user_id_associations = set()

    @classmethod
    def install_stub(cls, test):
        """Installs a new instance of the stub onto the given test instance.

        Args:
            test: GenericTestBase. The test instance to install the stub on.

        Returns:
            callable. A function that will uninstall the stub when called.
        """
        with python_utils.ExitStack() as stack:
            stub = cls()
            stack.enter_context(test.swap(
                platform_auth_services, 'establish_auth_session',
                stub.establish_auth_session))
            stack.enter_context(test.swap(
                platform_auth_services, 'destroy_auth_session',
                stub.destroy_auth_session))
            stack.enter_context(test.swap(
                platform_auth_services, 'get_auth_claims_from_request',
                stub.get_auth_claims_from_request))
            stack.enter_context(test.swap(
                platform_auth_services, 'mark_user_for_deletion',
                stub.mark_user_for_deletion))
            stack.enter_context(test.swap(
                platform_auth_services, 'delete_external_auth_associations',
                stub.delete_external_auth_associations))
            stack.enter_context(test.swap(
                platform_auth_services,
                'verify_external_auth_associations_are_deleted',
                stub.verify_external_auth_associations_are_deleted))
            stack.enter_context(test.swap(
                platform_auth_services, 'get_auth_id_from_user_id',
                stub.get_auth_id_from_user_id))
            stack.enter_context(test.swap(
                platform_auth_services, 'get_user_id_from_auth_id',
                stub.get_user_id_from_auth_id))
            stack.enter_context(test.swap(
                platform_auth_services, 'get_multi_user_ids_from_auth_ids',
                stub.get_multi_user_ids_from_auth_ids))
            stack.enter_context(test.swap(
                platform_auth_services, 'get_multi_auth_ids_from_user_ids',
                stub.get_multi_auth_ids_from_user_ids))
            stack.enter_context(test.swap(
                platform_auth_services, 'associate_auth_id_with_user_id',
                stub.associate_auth_id_with_user_id))
            stack.enter_context(test.swap(
                platform_auth_services,
                'associate_multi_auth_ids_with_user_ids',
                stub.associate_multi_auth_ids_with_user_ids))
            # Standard usage of ExitStack: enter a bunch of context managers
            # from the safety of an ExitStack's context. Once they've all been
            # opened, pop_all() of them off of the original context so they can
            # *stay* open. Calling the function returned will exit all of them
            # in reverse order.
            # https://docs.python.org/3/library/contextlib.html#cleaning-up-in-an-enter-implementation
            return stack.pop_all().close

    @classmethod
    def establish_auth_session(cls, unused_request, unused_response):
        """Sets login cookies to maintain a user's sign-in session.

        Args:
            unused_request: webapp2.Request. Unused because os.environ handles
                sessions.
            unused_response: webapp2.Response. Unused because os.environ handles
                sessions.
        """
        pass

    @classmethod
    def destroy_auth_session(cls, unused_response):
        """Clears login cookies from the given response headers.

        Args:
            unused_response: webapp2.Response. Unused because os.environ handles
                sessions.
        """
        pass

    @classmethod
    def get_auth_claims_from_request(cls, unused_request):
        """Authenticates the request and returns claims about its authorizer.

        This stub obtains authorization information from os.environ. To make the
        operation more authentic, this method also creates a new "external"
        association for the user to simulate a genuine "provided" value.

        Args:
            unused_request: webapp2.Request. The HTTP request to authenticate.
                Unused because auth-details are extracted from environment
                variables.

        Returns:
            AuthClaims|None. Claims about the currently signed in user. If no
            user is signed in, then returns None.
        """
        auth_id = os.environ.get('USER_ID', '')
        email = os.environ.get('USER_EMAIL', '')
        role_is_super_admin = os.environ.get('USER_IS_ADMIN', '0') == '1'
        if auth_id:
            return auth_domain.AuthClaims(auth_id, email, role_is_super_admin)
        return None

    def mark_user_for_deletion(self, user_id):
        """Marks the user, and all of their auth associations, as deleted.

        Since the stub does not use models, this operation actually deletes the
        user's association. The "external" associations, however, are not
        deleted yet.

        Args:
            user_id: str. The unique ID of the user whose associations should be
                deleted.
        """
        self._user_id_by_auth_id = {
            a: u for a, u in self._user_id_by_auth_id.items() if u != user_id
        }

    def delete_external_auth_associations(self, user_id):
        """Deletes all associations that refer to the user outside of Oppia.

        Args:
            user_id: str. The unique ID of the user whose associations should be
                deleted.
        """
        self._external_user_id_associations.discard(user_id)

    def verify_external_auth_associations_are_deleted(self, user_id):
        """Returns true if and only if we have successfully verified that all
        external associations have been deleted.

        Args:
            user_id: str. The unique ID of the user whose associations should be
                checked.

        Returns:
            bool. True if and only if we have successfully verified that all
            external associations have been deleted.
        """
        return user_id not in self._external_user_id_associations

    def get_auth_id_from_user_id(self, user_id):
        """Returns the auth ID associated with the given user ID.

        Args:
            user_id: str. The user ID.

        Returns:
            str|None. The auth ID associated with the given user ID, or None if
            no association exists.
        """
        return python_utils.NEXT(
            (a for a, u in self._user_id_by_auth_id.items() if u == user_id),
            None)

    def get_user_id_from_auth_id(self, auth_id):
        """Returns the user ID associated with the given auth ID.

        Args:
            auth_id: str. The auth ID.

        Returns:
            str|None. The user ID associated with the given auth ID, or None if
            no association exists.
        """
        return self._user_id_by_auth_id.get(auth_id, None)

    def get_multi_user_ids_from_auth_ids(self, auth_ids):
        """Returns the user IDs associated with the given auth IDs.

        Args:
            auth_ids: list(str). The auth IDs.

        Returns:
            list(str|None). The user IDs associated with each of the given auth
            IDs, or None for associations which don't exist.
        """
        return [self._user_id_by_auth_id.get(a, None) for a in auth_ids]

    def get_multi_auth_ids_from_user_ids(self, user_ids):
        """Returns the auth IDs associated with the given user IDs.

        Args:
            user_ids: list(str). The user IDs.

        Returns:
            list(str|None). The auth IDs associated with each of the given user
            IDs, or None for associations which don't exist.
        """
        auth_id_by_user_id = {u: a for a, u in self._user_id_by_auth_id.items()}
        return [auth_id_by_user_id.get(u, None) for u in user_ids]

    def associate_auth_id_with_user_id(self, auth_id_user_id_pair):
        """Commits the association between auth ID and user ID.

        This method also adds the user to the "external" set of associations.

        Args:
            auth_id_user_id_pair: auth_domain.AuthIdUserIdPair. The association
                to commit.

        Raises:
            Exception. The IDs are already associated with a value.
        """
        auth_id, user_id = auth_id_user_id_pair
        if auth_id in self._user_id_by_auth_id:
            raise Exception(
                'auth_id=%r is already associated with user_id=%r' % (
                    auth_id, self._user_id_by_auth_id[auth_id]))
        auth_models.UserAuthDetailsModel(
            id=user_id, firebase_auth_id=auth_id).put()
        self._external_user_id_associations.add(user_id)
        self._user_id_by_auth_id[auth_id] = user_id

    def associate_multi_auth_ids_with_user_ids(self, auth_id_user_id_pairs):
        """Commits the associations between auth IDs and user IDs.

        This method also adds the users to the "external" set of associations.

        Args:
            auth_id_user_id_pairs: list(auth_domain.AuthIdUserIdPair). The
                associations to commit.

        Raises:
            Exception. One or more auth associations already exist.
        """
        collisions = ', '.join(
            '{auth_id=%r: user_id=%r}' % (a, self._user_id_by_auth_id[a])
            for a, _ in auth_id_user_id_pairs if a in self._user_id_by_auth_id)
        if collisions:
            raise Exception('already associated: %s' % collisions)
        datastore_services.put_multi(
            [auth_models.UserAuthDetailsModel(
                id=user_id, firebase_auth_id=auth_id)
             for auth_id, user_id in auth_id_user_id_pairs])
        # NOTE: This must be update() (not add()); add() would insert the
        # generator object itself into the set instead of the user IDs,
        # breaking verify_external_auth_associations_are_deleted() for
        # users created through this method.
        self._external_user_id_associations.update(
            u for _, u in auth_id_user_id_pairs)
        self._user_id_by_auth_id.update(auth_id_user_id_pairs)
class TaskqueueServicesStub:
    """The stub class that mocks the API functionality offered by the platform
    layer, namely the platform.taskqueue taskqueue services API.
    """

    def __init__(self, test_base):
        """Initializes a taskqueue services stub that replaces the API
        functionality of core.platform.taskqueue.

        Args:
            test_base: GenericTestBase. The current test base.
        """
        self._test_base = test_base
        # Tasks are only executed when process_and_flush_tasks() is called,
        # because automatic_task_handling is disabled.
        self._client = cloud_tasks_emulator.Emulator(
            task_handler=self._task_handler, automatic_task_handling=False)

    def _task_handler(self, url, payload, queue_name, task_name=None):
        """Makes a POST request to the task URL in the test app.

        Args:
            url: str. URL of the handler function.
            payload: dict(str : *). Payload to pass to the request. Defaults
                to None if no payload is required.
            queue_name: str. The name of the queue to add the task to.
            task_name: str|None. Optional. The name of the task.
        """
        # Header values need to be bytes, thus we encode our strings to bytes.
        headers = {
            'X-AppEngine-Fake-Is-Admin': b'1',
            'X-Appengine-QueueName': queue_name.encode('utf-8'),
            # Maps empty strings to None so the output can become 'None'.
            'X-Appengine-TaskName': (
                task_name.encode('utf-8') if task_name else b'None')
        }
        # Task handlers are POST endpoints, so a fresh CSRF token is needed
        # for the request to be accepted.
        csrf_token = self._test_base.get_new_csrf_token()
        self._test_base.post_task(url, payload, headers, csrf_token=csrf_token)

    def create_http_task(
            self, queue_name, url, payload=None, scheduled_for=None,
            task_name=None):
        """Creates a Task in the corresponding queue that will be executed when
        the 'scheduled_for' countdown expires using the cloud tasks emulator.

        Args:
            queue_name: str. The name of the queue to add the task to.
            url: str. URL of the handler function.
            payload: dict(str : *). Payload to pass to the request. Defaults to
                None if no payload is required.
            scheduled_for: datetime|None. The naive datetime object for the time
                to execute the task. Ignored by this stub.
            task_name: str|None. Optional. The name of the task.
        """
        # Causes the task to execute immediately by setting the scheduled_for
        # time to 0. If we allow scheduled_for to be non-zero, then tests that
        # rely on the actions made by the task will become unreliable.
        scheduled_for = 0
        self._client.create_task(
            queue_name, url, payload, scheduled_for=scheduled_for,
            task_name=task_name)

    def count_jobs_in_taskqueue(self, queue_name=None):
        """Returns the total number of tasks in a single queue if a queue name
        is specified or the entire taskqueue if no queue name is specified.

        Args:
            queue_name: str|None. Name of the queue. Pass in None if no specific
                queue is designated.

        Returns:
            int. The total number of tasks in a single queue or in the entire
            taskqueue.
        """
        return self._client.get_number_of_tasks(queue_name=queue_name)

    def process_and_flush_tasks(self, queue_name=None):
        """Executes all of the tasks in a single queue if a queue name is
        specified or all of the tasks in the taskqueue if no queue name is
        specified.

        Args:
            queue_name: str|None. Name of the queue. Pass in None if no specific
                queue is designated.
        """
        self._client.process_and_flush_tasks(queue_name=queue_name)

    def get_pending_tasks(self, queue_name=None):
        """Returns a list of the tasks in a single queue if a queue name is
        specified or a list of all of the tasks in the taskqueue if no queue
        name is specified.

        Args:
            queue_name: str|None. Name of the queue. Pass in None if no specific
                queue is designated.

        Returns:
            list(Task). List of tasks in a single queue or in the entire
            taskqueue.
        """
        return self._client.get_tasks(queue_name=queue_name)
class MemoryCacheServicesStub:
    """The stub class that mocks the API functionality offered by the platform
    layer, namely the platform.cache cache services API.
    """

    # Class-level store: every instance of the stub shares this one dict, so
    # a single flush_caches() clears the cache for the whole test process.
    _CACHE_DICT = {}

    def get_memory_cache_stats(self):
        """Returns a mock profile of the cache dictionary.

        Peak memory usage and total memory usage are not tracked by this
        mock, so those two attributes are always reported as 0.

        Returns:
            MemoryCacheStats. MemoryCacheStats object containing the total
            number of keys in the cache dictionary.
        """
        key_count = len(self._CACHE_DICT)
        return caching_domain.MemoryCacheStats(0, 0, key_count)

    def flush_caches(self):
        """Wipes the cache dictionary clean."""
        self._CACHE_DICT.clear()

    def get_multi(self, keys):
        """Looks up each of the given keys in the cache dictionary.

        Args:
            keys: list(str). A list of keys (strings) to look up.

        Returns:
            list(str). The cached value for each key, in order, with None in
            place of keys that are absent.
        """
        assert isinstance(keys, list)
        values = []
        for key in keys:
            values.append(self._CACHE_DICT.get(key, None))
        return values

    def set_multi(self, key_value_mapping):
        """Sets multiple keys' values at once in the cache dictionary.

        Args:
            key_value_mapping: dict(str, str). Both the key and value are
                strings. The value can either be a primitive binary-safe
                string or the JSON-encoded string version of the object.

        Returns:
            bool. Whether the set action succeeded.
        """
        assert isinstance(key_value_mapping, dict)
        for key, value in key_value_mapping.items():
            self._CACHE_DICT[key] = value
        return True

    def delete_multi(self, keys):
        """Deletes multiple keys in the cache dictionary.

        Args:
            keys: list(str). The keys to delete.

        Returns:
            int. Number of successfully deleted keys.
        """
        for key in keys:
            assert isinstance(key, python_utils.BASESTRING)
        deleted_so_far = 0
        for key in keys:
            if key in self._CACHE_DICT:
                del self._CACHE_DICT[key]
                deleted_so_far += 1
        return deleted_so_far
class TestBase(unittest.TestCase):
    """Base class for all tests."""

    maxDiff = 2500

    # A test unicode string.
    UNICODE_TEST_STRING = 'unicode ¡马!'

    @property
    def namespace(self):
        """Returns a namespace for isolating the NDB operations of each test.

        Returns:
            str. The namespace.
        """
        return self.id()[-100:]

    def run(self, result=None):
        """Run the test, collecting the result into the specified TestResult.

        Reference URL:
        https://docs.python.org/3/library/unittest.html#unittest.TestCase.run

        TestBase's override of run() wraps super().run() in an NDB context so
        that each test's datastore operations are isolated in their own
        namespace.

        Args:
            result: TestResult | None. Holds onto the results of each test. If
                None, a temporary result object is created (by calling the
                defaultTestResult() method) and used instead.
        """
        with datastore_services.get_ndb_context(namespace=self.namespace):
            super(TestBase, self).run(result=result)

    def _get_unicode_test_string(self, suffix):
        """Returns a string that contains unicode characters and ends with the
        given suffix. This is used to test that functions behave correctly when
        handling strings with unicode characters.

        Args:
            suffix: str. The suffix to append to the UNICODE_TEST_STRING.

        Returns:
            str. A string that contains unicode characters and ends with the
            given suffix.
        """
        return '%s%s' % (self.UNICODE_TEST_STRING, suffix)

    def _assert_validation_error(self, item, error_substring):
        """Checks that the given item *fails* default validation with an error
        message matching error_substring.
        """
        with self.assertRaisesRegexp(utils.ValidationError, error_substring):
            item.validate()

    def log_line(self, line):
        """Print the line with a prefix that can be identified by the script
        that calls the test.
        """
        # We are using the b' prefix as all the stdouts are in bytes.
        python_utils.PRINT(
            b'%s%s' % (LOG_LINE_PREFIX, line.encode()))

    def shortDescription(self):
        """Additional information logged during unit test invocation."""
        # Suppress default logging of docstrings.
        return None

    def get_updated_param_dict(
            self, param_dict, param_changes, exp_param_specs):
        """Updates a param dict using the given list of param_changes.

        Note that the list of parameter changes is ordered. Parameter changes
        later in the list may depend on parameter changes that have been set
        earlier in the same list.

        Args:
            param_dict: dict(str, *). The parameter dict to update.
            param_changes: list(ParamChange). The ordered parameter changes to
                apply.
            exp_param_specs: dict(str, ParamSpec). Mapping from parameter name
                to its spec, used to look up each parameter's obj_type.

        Returns:
            dict(str, *). A new dict with all the parameter changes applied;
            the input param_dict is not mutated.

        Raises:
            Exception. A parameter in param_changes has no corresponding entry
                in exp_param_specs.
        """
        new_param_dict = copy.deepcopy(param_dict)
        for param_change in param_changes:
            try:
                obj_type = exp_param_specs[param_change.name].obj_type
            except KeyError as e:
                # Catch only the missing-spec case; a bare `except:` here
                # would also swallow KeyboardInterrupt/SystemExit.
                raise Exception(
                    'Parameter %s not found' % param_change.name) from e
            new_param_dict[param_change.name] = (
                param_change.get_normalized_value(obj_type, new_param_dict))
        return new_param_dict

    def get_static_asset_filepath(self):
        """Returns filepath to the static files on disk ('' or 'build/')."""
        return '' if constants.DEV_MODE else os.path.join('build')

    def get_static_asset_url(self, asset_suffix):
        """Returns the relative path for the asset, appending it to the
        corresponding cache slug. asset_suffix should have a leading slash.
        """
        return '/assets%s%s' % (utils.get_asset_dir_prefix(), asset_suffix)

    @contextlib.contextmanager
    def capture_logging(self, min_level=logging.NOTSET):
        """Context manager that captures logs into a list.

        Strips whitespace from messages for convenience.

        https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging

        Args:
            min_level: int. The minimum logging level captured by the context
                manager. By default, all logging levels are captured. Values
                should be one of the following values from the logging module:
                NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL.

        Yields:
            list(str). A live-feed of the logging messages captured so-far.
        """
        captured_logs = []

        class ListStream:
            """Stream-like object that appends writes to the captured logs."""

            def write(self, msg):
                """Appends stripped messages to captured logs."""
                captured_logs.append(msg.strip())

            def flush(self):
                """Does nothing."""
                pass

        list_stream_handler = logging.StreamHandler(stream=ListStream())

        logger = logging.getLogger()
        old_level = logger.level
        logger.addHandler(list_stream_handler)
        logger.setLevel(min_level)
        try:
            yield captured_logs
        finally:
            # Always restore the logger, even if the caller's code raised.
            logger.setLevel(old_level)
            logger.removeHandler(list_stream_handler)

    @contextlib.contextmanager
    def swap(self, obj, attr, newvalue):
        """Swap an object's attribute value within the context of a 'with'
        statement. The object can be anything that supports getattr and setattr,
        such as class instances, modules, etc.

        Example usage:

            import math
            with self.swap(math, 'sqrt', lambda x: 42):
                print math.sqrt(16.0)  # prints 42
            print math.sqrt(16.0)  # prints 4 as expected.

        To mock class methods, pass the function to the classmethod decorator
        first, for example:

            import types
            with self.swap(
                SomePythonClass, 'some_classmethod',
                classmethod(new_classmethod)):

        NOTE: self.swap and other context managers that are created using
        contextlib.contextmanager use generators that yield exactly once. This
        means that you can only use them once after construction, otherwise,
        the generator will immediately raise StopIteration, and contextlib will
        raise a RuntimeError.
        """
        original = getattr(obj, attr)
        setattr(obj, attr, newvalue)
        try:
            yield
        finally:
            setattr(obj, attr, original)

    @contextlib.contextmanager
    def swap_to_always_return(self, obj, attr, value=None):
        """Swap obj.attr with a function that always returns the given value."""
        def function_that_always_returns(*unused_args, **unused_kwargs):
            """Returns the input value."""
            return value
        with self.swap(obj, attr, function_that_always_returns):
            yield

    @contextlib.contextmanager
    def swap_to_always_raise(self, obj, attr, error=Exception):
        """Swap obj.attr with a function that always raises the given error."""
        def function_that_always_raises(*unused_args, **unused_kwargs):
            """Raises the input exception."""
            raise error
        with self.swap(obj, attr, function_that_always_raises):
            yield

    @contextlib.contextmanager
    def swap_with_call_counter(
            self, obj, attr, raises=None, returns=None, call_through=False):
        """Swap obj.attr with a CallCounter instance.

        Args:
            obj: *. The Python object whose attribute you want to swap.
            attr: str. The name of the function to be swapped.
            raises: Exception|None. The exception raised by the swapped
                function. If None, then no exception is raised.
            returns: *. The return value of the swapped function.
            call_through: bool. Whether to call through to the real function,
                rather than use a stub implementation. If True, the `raises` and
                `returns` arguments will be ignored.

        Yields:
            CallCounter. A CallCounter instance that's installed as obj.attr's
            implementation while within the context manager returned.
        """
        if call_through:
            # BUG FIX: `obj.attr` looked up a literal attribute named 'attr';
            # the dynamic name in the `attr` variable must be used instead.
            impl = getattr(obj, attr)
        else:
            def impl(*_, **__):
                """Behaves according to the given values."""
                if raises is not None:
                    # Pylint thinks we're trying to raise `None` even though
                    # we've explicitly checked for it above.
                    raise raises  # pylint: disable=raising-bad-type
                return returns
        call_counter = CallCounter(impl)
        with self.swap(obj, attr, call_counter):
            yield call_counter

    @contextlib.contextmanager
    def swap_with_checks(
            self, obj, attr, new_function, expected_args=None,
            expected_kwargs=None, called=True):
        """Swap an object's function value within the context of a 'with'
        statement. The object can be anything that supports getattr and setattr,
        such as class instances, modules, etc.

        Examples:
            If you want to check subprocess.Popen is invoked twice like
            `subprocess.Popen(['python'], shell=True)` and
            `subprocess.Popen(['python2], shell=False), you can first define the
            mock function, then the swap, and just run the target function in
            context, as follows:

                def mock_popen(command, shell):
                    return

                popen_swap = self.swap_with_checks(
                    subprocess, 'Popen', mock_popen,
                    expected_args=[(['python'],), (['python2'],)],
                    expected_kwargs=[{'shell': True}, {'shell': False}])
                with popen_swap:
                    function_that_invokes_popen()

        Args:
            obj: *. The Python object whose attribute you want to swap.
            attr: str. The name of the function to be swapped.
            new_function: function. The new function you want to use.
            expected_args: None|list(tuple). The expected args that you want
                this function to be invoked with. When its value is None, args
                will not be checked. If the value type is list, the function
                will check whether the called args is the first element in the
                list. If matched, this tuple will be removed from the list.
            expected_kwargs: None|list(dict). The expected keyword args you want
                this function to be invoked with. Similar to expected_args.
            called: bool. Whether the function is expected to be invoked. This
                will always be checked.

        Yields:
            context. The context with function replaced.
        """
        original_function = getattr(obj, attr)
        original_long_message_value = self.longMessage
        msg = '%s.%s() failed the expectations of swap_with_checks()' % (
            obj.__name__, attr)

        expected_args_iter = iter(expected_args or ())
        expected_kwargs_iter = iter(expected_kwargs or ())

        @functools.wraps(original_function)
        def new_function_with_checks(*args, **kwargs):
            """Wrapper function for the new value which keeps track of how many
            times this function is invoked.

            Args:
                *args: list(*). The args passed into `attr` function.
                **kwargs: dict. The key word args passed into `attr` function.

            Returns:
                *. Result of `new_function`.
            """
            new_function_with_checks.call_num += 1

            # Includes assertion error information in addition to the message.
            self.longMessage = True

            if expected_args:
                next_args = python_utils.NEXT(expected_args_iter, None)
                self.assertEqual(
                    args, next_args, msg='*args to call #%d of %s' % (
                        new_function_with_checks.call_num, msg))

            if expected_kwargs:
                next_kwargs = python_utils.NEXT(expected_kwargs_iter, None)
                self.assertEqual(
                    kwargs, next_kwargs, msg='**kwargs to call #%d of %s' % (
                        new_function_with_checks.call_num, msg))

            # Reset self.longMessage just in case `new_function()` raises.
            self.longMessage = original_long_message_value

            return new_function(*args, **kwargs)

        new_function_with_checks.call_num = 0
        setattr(obj, attr, new_function_with_checks)

        try:
            yield
            # Includes assertion error information in addition to the message.
            self.longMessage = True
            self.assertEqual(
                new_function_with_checks.call_num > 0, called, msg=msg)
            pretty_unused_args = [
                ', '.join(itertools.chain(
                    (repr(a) for a in args),
                    ('%s=%r' % kwarg for kwarg in kwargs.items())))
                for args, kwargs in python_utils.zip_longest(
                    expected_args_iter, expected_kwargs_iter, fillvalue={})
            ]
            if pretty_unused_args:
                num_expected_calls = (
                    new_function_with_checks.call_num + len(pretty_unused_args))
                missing_call_summary = '\n'.join(
                    '\tCall %d of %d: %s(%s)' % (
                        i, num_expected_calls, attr, call_args)
                    for i, call_args in enumerate(
                        pretty_unused_args,
                        start=new_function_with_checks.call_num + 1))
                self.fail(
                    msg='Only %d of the %d expected calls were made.\n'
                    '\n'
                    'Missing:\n'
                    '%s : %s' % (
                        new_function_with_checks.call_num, num_expected_calls,
                        missing_call_summary, msg))
        finally:
            self.longMessage = original_long_message_value
            setattr(obj, attr, original_function)

    def assertRaises(self, *args, **kwargs):
        raise NotImplementedError(
            'self.assertRaises should not be used in these tests. Please use '
            'self.assertRaisesRegexp instead.')

    def assertRaisesRegexp(  # pylint: disable=invalid-name
            self, expected_exception, expected_regex, *args, **kwargs):
        """Asserts that the message in a raised exception matches a regex.
        This is a wrapper around assertRaisesRegex in unittest that enforces
        strong regex.

        Args:
            expected_exception: Exception. Exception class expected
                to be raised.
            expected_regex: re.Pattern|str. Regex expected to be found in
                error message.
            *args: list(*). Function to be called and extra positional args.
            **kwargs: dict(str, Any). Extra kwargs.

        Returns:
            bool. Whether the code raised exception in the expected format.
        """
        if not expected_regex:
            raise Exception(
                'Please provide a sufficiently strong regexp string to '
                'validate that the correct error is being raised.')

        return super(TestBase, self).assertRaisesRegexp(
            expected_exception, expected_regex, *args, **kwargs)

    def assertItemsEqual(self, *args, **kwargs):  # pylint: disable=invalid-name
        """Compares unordered sequences if they contain the same elements,
        regardless of order. If the same element occurs more than once,
        it verifies that the elements occur the same number of times.

        Returns:
            bool. Whether the items are equal.
        """
        return super().assertCountEqual(*args, **kwargs)

    def assert_matches_regexps(self, items, regexps, full_match=False):
        """Asserts that each item matches the corresponding regexp.

        If there are any missing or extra items that do not correspond to a
        regexp element, then the assertion fails.

        Args:
            items: list(str). The string elements being matched.
            regexps: list(str|RegexObject). The patterns that each item is
                expected to match.
            full_match: bool. Whether to require items to match exactly with the
                corresponding pattern.

        Raises:
            AssertionError. At least one item does not match its corresponding
                pattern, or the number of items does not match the number of
                regexp patterns.
        """
        get_match = re.match if full_match else re.search
        differences = [
            '~ [i=%d]:\t%r does not match: %r' % (i, item, regexp)
            for i, (regexp, item) in enumerate(python_utils.ZIP(regexps, items))
            if get_match(regexp, item, flags=re.DOTALL) is None
        ]
        if len(items) < len(regexps):
            extra_regexps = regexps[len(items):]
            differences.extend(
                '- [i=%d]:\tmissing item expected to match: %r' % (i, regexp)
                for i, regexp in enumerate(extra_regexps, start=len(items)))
        if len(regexps) < len(items):
            extra_items = items[len(regexps):]
            differences.extend(
                '+ [i=%d]:\textra item %r' % (i, item)
                for i, item in enumerate(extra_items, start=len(regexps)))

        if differences:
            error_message = 'Lists differ:\n\t%s' % '\n\t'.join(differences)
            raise AssertionError(error_message)
class AppEngineTestBase(TestBase):
    """Minimal base class for tests that need Google App Engine functionality.

    This class is primarily designed for unit tests in core.platform, where we
    write adapters around Oppia's third-party dependencies. Generally, our unit
    tests depend on stub implementations of these adapters to protect them from
    platform-specific behavior. Such stubs are installed in the
    GenericTestBase.run() method.

    Most of the unit tests in our code base do, and should, inherit from
    `GenericTestBase` to stay platform-agnostic. The platform layer itself,
    however, can _not_ mock out platform-specific behavior. Those unit tests
    need to interact with a real implementation. This base class provides the
    bare-minimum functionality and stubs necessary to do so.
    """

    # Environment values that our tests depend on.
    AUTH_DOMAIN = 'example.com'
    HTTP_HOST = 'localhost'
    SERVER_NAME = 'localhost'
    SERVER_PORT = '8080'
    DEFAULT_VERSION_HOSTNAME = '%s:%s' % (HTTP_HOST, SERVER_PORT)

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super(AppEngineTestBase, self).__init__(*args, **kwargs)
        # Defined outside of setUp() because we access it from methods, but can
        # only install it during the run() method. Defining it in __init__
        # satisfies pylint's attribute-defined-outside-init warning.
        self._platform_taskqueue_services_stub = TaskqueueServicesStub(self)

    def setUp(self) -> None:
        super(AppEngineTestBase, self).setUp()
        # Initialize namespace for the storage emulator.
        storage_services.CLIENT.namespace = self.id()
        # Set up apps for testing.
        self.testapp = webtest.TestApp(main.app_without_context)

    def tearDown(self) -> None:
        # Wipe the (emulated) datastore and storage so the next test starts
        # from a clean slate.
        datastore_services.delete_multi(
            datastore_services.query_everything().iter(keys_only=True))
        storage_services.CLIENT.reset()
        super(AppEngineTestBase, self).tearDown()

    def run(self, result=None):
        """Run the test, collecting the result into the specified TestResult.

        Reference URL:
        https://docs.python.org/3/library/unittest.html#unittest.TestCase.run

        AppEngineTestBase's override of run() wraps super().run() in "swap"
        contexts which stub out the platform taskqueue services.

        Args:
            result: TestResult | None. Holds onto the results of each test. If
                None, a temporary result object is created (by calling the
                defaultTestResult() method) and used instead.
        """
        platform_taskqueue_services_swap = self.swap(
            platform_taskqueue_services, 'create_http_task',
            self._platform_taskqueue_services_stub.create_http_task)
        with platform_taskqueue_services_swap:
            super(AppEngineTestBase, self).run(result=result)

    def count_jobs_in_taskqueue(self, queue_name=None):
        """Returns the total number of tasks in a single queue if a queue name
        is specified or the entire taskqueue if no queue name is specified.

        Args:
            queue_name: str|None. Name of the queue. Pass in None if no specific
                queue is designated.

        Returns:
            int. The total number of tasks in a single queue or in the entire
            taskqueue.
        """
        # NOTE: queue_name now defaults to None, matching this docstring and
        # the signature of TaskqueueServicesStub.count_jobs_in_taskqueue;
        # existing positional callers are unaffected.
        return self._platform_taskqueue_services_stub.count_jobs_in_taskqueue(
            queue_name=queue_name)

    def process_and_flush_pending_tasks(self, queue_name=None):
        """Executes all of the tasks in a single queue if a queue name is
        specified or all of the tasks in the taskqueue if no queue name is
        specified.

        Args:
            queue_name: str|None. Name of the queue. Pass in None if no specific
                queue is designated.
        """
        self._platform_taskqueue_services_stub.process_and_flush_tasks(
            queue_name=queue_name)

    def get_pending_tasks(self, queue_name=None):
        """Returns a list of the tasks in a single queue if a queue name is
        specified or a list of all of the tasks in the taskqueue if no queue
        name is specified.

        Args:
            queue_name: str|None. Name of the queue. Pass in None if no specific
                queue is designated.

        Returns:
            list(Task). List of tasks in a single queue or in the entire
            taskqueue.
        """
        return self._platform_taskqueue_services_stub.get_pending_tasks(
            queue_name=queue_name)
class GenericTestBase(AppEngineTestBase):
    """Base test class with common/generic helper methods.

    Unless a class is testing for "platform"-specific behavior (e.g., testing
    third-party library code or database model implementations), always inherit
    from this base class. Otherwise, inherit from unittest.TestCase (preferred)
    or AppEngineTestBase if Google App Engine services/behavior is needed.

    TODO(#12135): Split this enormous test base into smaller, focused pieces.
    """

    # NOTE: For tests that do not/can not use the default super admin, authors
    # can override the following class-level constant.
    AUTO_CREATE_DEFAULT_SUPERADMIN_USER = True

    # Credentials for the super admin that setUp() signs up by default (see
    # signup_superadmin_user()).
    SUPER_ADMIN_EMAIL = 'tmpsuperadmin@example.com'
    SUPER_ADMIN_USERNAME = 'tmpsuperadm1n'

    # Dummy strings representing user attributes. Note that it is up to the
    # individual test to actually register these users as editors, admins, etc.
    CURRICULUM_ADMIN_EMAIL = 'admin@example.com'
    # Usernames containing the string 'admin' are reserved, so we use 'adm'
    # instead.
    CURRICULUM_ADMIN_USERNAME = 'adm'
    BLOG_ADMIN_EMAIL = 'blogadmin@example.com'
    BLOG_ADMIN_USERNAME = 'blogadm'
    BLOG_EDITOR_EMAIL = 'blogeditor@example.com'
    BLOG_EDITOR_USERNAME = 'blogeditor'
    MODERATOR_EMAIL = 'moderator@example.com'
    MODERATOR_USERNAME = 'moderator'
    RELEASE_COORDINATOR_EMAIL = 'releasecoordinator@example.com'
    RELEASE_COORDINATOR_USERNAME = 'releasecoordinator'
    OWNER_EMAIL = 'owner@example.com'
    OWNER_USERNAME = 'owner'
    EDITOR_EMAIL = 'editor@example.com'
    EDITOR_USERNAME = 'editor'
    TOPIC_MANAGER_EMAIL = 'topicmanager@example.com'
    TOPIC_MANAGER_USERNAME = 'topicmanager'
    VOICE_ARTIST_EMAIL = 'voiceartist@example.com'
    VOICE_ARTIST_USERNAME = 'voiceartist'
    VOICEOVER_ADMIN_EMAIL = 'voiceoveradm@example.com'
    VOICEOVER_ADMIN_USERNAME = 'voiceoveradm'
    VIEWER_EMAIL = 'viewer@example.com'
    VIEWER_USERNAME = 'viewer'
    NEW_USER_EMAIL = 'new.user@example.com'
    NEW_USER_USERNAME = 'newuser'
    DEFAULT_END_STATE_NAME = 'End'

    # A fixed pseudonymous user id: the 'pid_' prefix followed by 32 'a's.
    PSEUDONYMOUS_ID = 'pid_%s' % ('a' * 32)
VERSION_0_STATES_DICT = {
feconf.DEFAULT_INIT_STATE_NAME: {
'content': [{'type': 'text', 'value': ''}],
'param_changes': [],
'interaction': {
'customization_args': {},
'id': 'Continue',
'handlers': [{
'name': 'submit',
'rule_specs': [{
'dest': 'END',
'feedback': [],
'param_changes': [],
'definition': {'rule_type': 'default'},
}],
}],
},
},
}
VERSION_27_STATE_DICT = {
'content': {'content_id': 'content', 'html': ''},
'param_changes': [],
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
},
'interaction': {
'solution': {
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>Solution explanation</p>',
},
'answer_is_exclusive': False,
},
'answer_groups': [],
'default_outcome': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': '',
},
'dest': None,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': True,
},
'customization_args': {
'rows': {'value': 1},
'placeholder': {'value': 'Enter text here'},
},
'confirmed_unclassified_answers': [],
'id': 'TextInput',
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hint 1</p>',
},
}],
},
'classifier_model_id': None,
}
VERSION_1_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_2_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_3_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_4_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math math_content-with-value="{'
'&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, '
'&quot;svg_filename&quot;: &quot;&quot;'
'}">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_5_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math math_content-with-value="{'
'&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, '
'&quot;svg_filename&quot;: &quot;&quot;'
'}">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
'thumbnail_size_in_bytes': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_1_SUBTOPIC_DICT = {
'skill_ids': ['skill_1'],
'id': 1,
'title': 'A subtitle',
}
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with YAML generation tests. The
# indentations are also important, since it is used to define nesting (just
# like Python).
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
card_is_checkpoint: true
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
linked_skill_id: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
New state:
card_is_checkpoint: false
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
linked_skill_id: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
def run(self, result=None):
"""Run the test, collecting the result into the specified TestResult.
Reference URL:
https://docs.python.org/3/library/unittest.html#unittest.TestCase.run
GenericTestBase's override of run() wraps super().run() in swap
contexts to mock out the cache and taskqueue services.
Args:
result: TestResult | None. Holds onto the results of each test. If
None, a temporary result object is created (by calling the
defaultTestResult() method) and used instead.
"""
memory_cache_services_stub = MemoryCacheServicesStub()
memory_cache_services_stub.flush_caches()
es_stub = ElasticSearchStub()
es_stub.reset()
with python_utils.ExitStack() as stack:
stack.callback(AuthServicesStub.install_stub(self))
stack.enter_context(self.swap(
elastic_search_services.ES.indices, 'create',
es_stub.mock_create_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'index',
es_stub.mock_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'exists',
es_stub.mock_exists))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete',
es_stub.mock_delete))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete_by_query',
es_stub.mock_delete_by_query))
stack.enter_context(self.swap(
elastic_search_services.ES, 'search',
es_stub.mock_search))
stack.enter_context(self.swap(
memory_cache_services, 'flush_caches',
memory_cache_services_stub.flush_caches))
stack.enter_context(self.swap(
memory_cache_services, 'get_multi',
memory_cache_services_stub.get_multi))
stack.enter_context(self.swap(
memory_cache_services, 'set_multi',
memory_cache_services_stub.set_multi))
stack.enter_context(self.swap(
memory_cache_services, 'get_memory_cache_stats',
memory_cache_services_stub.get_memory_cache_stats))
stack.enter_context(self.swap(
memory_cache_services, 'delete_multi',
memory_cache_services_stub.delete_multi))
super(GenericTestBase, self).run(result=result)
def setUp(self) -> None:
super(GenericTestBase, self).setUp()
if self.AUTO_CREATE_DEFAULT_SUPERADMIN_USER:
self.signup_superadmin_user()
def login(self, email: str, is_super_admin: Optional[bool] = False) -> None:
"""Sets the environment variables to simulate a login.
Args:
email: str. The email of the user who is to be logged in.
is_super_admin: bool. Whether the user is a super admin.
"""
os.environ['USER_ID'] = self.get_auth_id_from_email(email)
os.environ['USER_EMAIL'] = email
os.environ['USER_IS_ADMIN'] = ('1' if is_super_admin else '0')
def logout(self) -> None:
"""Simulates a logout by resetting the environment variables."""
os.environ['USER_ID'] = ''
os.environ['USER_EMAIL'] = ''
os.environ['USER_IS_ADMIN'] = '0'
    @contextlib.contextmanager
    def mock_datetime_utcnow(self, mocked_now):
        """Mocks parts of the datastore to accept a fake datetime type that
        always returns the same value for utcnow.

        Example:
            import datetime
            mocked_now = datetime.datetime.utcnow() - datetime.timedelta(days=1)
            with mock_datetime_utcnow(mocked_now):
                self.assertEqual(datetime.datetime.utcnow(), mocked_now)
            actual_now = datetime.datetime.utcnow()  # Returns actual time.

        Args:
            mocked_now: datetime.datetime. The datetime which will be used
                instead of the current UTC datetime.

        Yields:
            None. Empty yield statement.
        """
        if not isinstance(mocked_now, datetime.datetime):
            raise Exception('mocked_now must be datetime, got: %r' % mocked_now)

        # Keep a reference to the real class so it can be restored afterwards.
        old_datetime = datetime.datetime

        class MockDatetimeType(type):
            """Overrides isinstance() behavior."""

            # The custom metaclass makes isinstance(x, datetime.datetime)
            # keep succeeding for instances of the *real* datetime class
            # while the mock is installed.
            @classmethod
            def __instancecheck__(cls, instance):
                return isinstance(instance, old_datetime)

        class MockDatetime(old_datetime, metaclass=MockDatetimeType):
            """Always returns mocked_now as the current UTC time."""

            @classmethod
            def utcnow(cls) -> datetime.datetime:
                """Returns the mocked datetime."""
                return mocked_now

        setattr(datetime, 'datetime', MockDatetime)
        try:
            yield
        finally:
            # Restore the genuine class even if the caller's code raised.
            setattr(datetime, 'datetime', old_datetime)
@contextlib.contextmanager
def login_context(self, email, is_super_admin=False):
"""Log in with the given email under the context of a 'with' statement.
Args:
email: str. An email associated with a user account.
is_super_admin: bool. Whether the user is a super admin.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
"""
self.login(email, is_super_admin=is_super_admin)
try:
yield self.get_user_id_from_email(email)
finally:
self.logout()
@contextlib.contextmanager
def super_admin_context(self):
"""Log in as a global admin under the context of a 'with' statement.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
"""
email = self.SUPER_ADMIN_EMAIL
with self.login_context(email, is_super_admin=True) as user_id:
yield user_id
    def signup(
        self,
        email: str,
        username: str,
        is_super_admin: Optional[bool] = False
    ) -> None:
        """Complete the signup process for the user with the given username.

        Args:
            email: str. Email of the given user.
            username: str. Username of the given user.
            is_super_admin: bool. Whether the user is a super admin.
        """
        user_services.create_new_user(self.get_auth_id_from_email(email), email)

        login_context = self.login_context(email, is_super_admin=is_super_admin)

        with login_context, requests_mock.Mocker() as m:
            # We mock out all HTTP requests while trying to signup to avoid
            # calling out to real backend services.
            m.request(requests_mock.ANY, requests_mock.ANY)

            # Load the signup page and verify it is reachable before posting
            # the form data.
            response = self.get_html_response(feconf.SIGNUP_URL)
            self.assertEqual(response.status_int, 200)

            # Submit the signup form with the chosen username and the
            # terms-of-service agreement.
            response = self.testapp.post(feconf.SIGNUP_DATA_URL, params={
                'csrf_token': self.get_new_csrf_token(),
                'payload': json.dumps(
                    {'username': username, 'agreed_to_terms': True}),
            })
            self.assertEqual(response.status_int, 200)
def signup_superadmin_user(self):
"""Signs up a superadmin user. Must be called at the end of setUp()."""
self.signup(self.SUPER_ADMIN_EMAIL, self.SUPER_ADMIN_USERNAME)
def set_config_property(self, config_obj, new_config_value):
"""Sets a given configuration object's value to the new value specified
using a POST request.
"""
with self.super_admin_context():
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_obj.name: new_config_value,
},
}, csrf_token=self.get_new_csrf_token())
def add_user_role(self, username: str, user_role: str) -> None:
"""Adds the given role to the user account with the given username.
Args:
username: str. Username of the given user.
user_role: str. Role of the given user.
"""
with self.super_admin_context():
self.put_json('/adminrolehandler', {
'username': username,
'role': user_role
}, csrf_token=self.get_new_csrf_token())
def set_curriculum_admins(self, curriculum_admin_usernames):
"""Sets role of given users as CURRICULUM_ADMIN.
Args:
curriculum_admin_usernames: list(str). List of usernames.
"""
for name in curriculum_admin_usernames:
self.add_user_role(name, feconf.ROLE_ID_CURRICULUM_ADMIN)
def set_topic_managers(self, topic_manager_usernames, topic_id):
"""Sets role of given users as TOPIC_MANAGER.
Args:
topic_manager_usernames: list(str). List of usernames.
topic_id: str. The topic Id.
"""
with self.super_admin_context():
for username in topic_manager_usernames:
self.put_json('/topicmanagerrolehandler', {
'username': username,
'action': 'assign',
'topic_id': topic_id
}, csrf_token=self.get_new_csrf_token())
def set_moderators(self, moderator_usernames):
"""Sets role of given users as MODERATOR.
Args:
moderator_usernames: list(str). List of usernames.
"""
for name in moderator_usernames:
self.add_user_role(name, feconf.ROLE_ID_MODERATOR)
def set_voiceover_admin(self, voiceover_admin_username):
"""Sets role of given users as VOICEOVER ADMIN.
Args:
voiceover_admin_username: list(str). List of usernames.
"""
for name in voiceover_admin_username:
self.add_user_role(name, feconf.ROLE_ID_VOICEOVER_ADMIN)
def mark_user_banned(self, username):
"""Marks a user banned.
Args:
username: str. The username of the user to ban.
"""
with self.super_admin_context():
self.put_json('/bannedusershandler', {
'username': username
}, csrf_token=self.get_new_csrf_token())
def set_collection_editors(self, collection_editor_usernames):
"""Sets role of given users as COLLECTION_EDITOR.
Args:
collection_editor_usernames: list(str). List of usernames.
"""
for name in collection_editor_usernames:
self.add_user_role(name, feconf.ROLE_ID_COLLECTION_EDITOR)
def get_user_id_from_email(self, email):
"""Gets the user ID corresponding to the given email.
Args:
email: str. A valid email stored in the App Engine database.
Returns:
str|None. ID of the user possessing the given email, or None if
the user does not exist.
"""
user_settings = user_services.get_user_settings_by_auth_id(
self.get_auth_id_from_email(email))
return user_settings and user_settings.user_id
@classmethod
def get_auth_id_from_email(cls, email):
"""Returns a mock auth ID corresponding to the given email.
This method can use any algorithm to produce results as long as, during
the runtime of each test case/method, it is:
1. Pure (same input always returns the same output).
2. One-to-one (no two distinct inputs return the same output).
3. An integer byte-string (integers are always valid in auth IDs).
Args:
email: str. The email address of the user.
Returns:
bytes. The mock auth ID of a user possessing the given email.
"""
# Although the hash function doesn't guarantee a one-to-one mapping, in
# practice it is sufficient for our tests. We make it a positive integer
# because those are always valid auth IDs.
return python_utils.UNICODE(abs(hash(email)))
def get_all_python_files(self):
"""Recursively collects all Python files in the core/ and extensions/
directory.
Returns:
list(str). A list of Python files.
"""
current_dir = os.getcwd()
files_in_directory = []
for _dir, _, files in os.walk(current_dir):
for file_name in files:
filepath = os.path.relpath(
os.path.join(_dir, file_name), start=current_dir)
if (
filepath.endswith('.py') and (
filepath.startswith('core/') or
filepath.startswith('extensions/')
)
):
module = filepath[:-3].replace('/', '.')
files_in_directory.append(module)
return files_in_directory
    def _get_response(
            self, url, expected_content_type, params=None,
            expected_status_int=200):
        """Get a response, transformed to a Python object.

        Args:
            url: str. The URL to fetch the response.
            expected_content_type: str. The content type to expect.
            params: dict. A dictionary that will be encoded into a query string.
            expected_status_int: int. The integer status code to expect. Will be
                200 if not specified.

        Returns:
            webtest.TestResponse. The test response.
        """
        if params is not None:
            self.assertIsInstance(params, dict)

        # Any expected status >= 400 means errors are expected.
        expect_errors = expected_status_int >= 400

        # This swap is required to ensure that the templates are fetched from
        # source directory instead of webpack_bundles since webpack_bundles is
        # only produced after webpack compilation which is not performed during
        # backend tests.
        with self.swap(base, 'load_template', mock_load_template):
            response = self.testapp.get(
                url,
                params=params,
                expect_errors=expect_errors,
                status=expected_status_int
            )

        # Sanity-check that the response's status band matches expectations.
        if expect_errors:
            self.assertTrue(response.status_int >= 400)
        else:
            self.assertTrue(200 <= response.status_int < 400)

        # Testapp takes in a status parameter which is the expected status of
        # the response. However this expected status is verified only when
        # expect_errors=False. For other situations we need to explicitly check
        # the status.
        #
        # Reference URL:
        # https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
        self.assertEqual(response.status_int, expected_status_int)

        self.assertEqual(response.content_type, expected_content_type)

        return response
def get_html_response(self, url, params=None, expected_status_int=200):
"""Get a HTML response, transformed to a Python object.
Args:
url: str. The URL to fetch the response.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will
be 200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
return self._get_response(
url, 'text/html', params=params,
expected_status_int=expected_status_int)
def get_custom_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
"""Get a response other than HTML or JSON as a Python object.
Args:
url: str. The URL to fetch the response.
expected_content_type: str. The content type to expect.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will be
200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
self.assertNotIn(
expected_content_type, ['text/html', 'application/json'])
return self._get_response(
url, expected_content_type, params=params,
expected_status_int=expected_status_int)
    def get_response_without_checking_for_errors(
            self, url, expected_status_int_list, params=None):
        """Get a response, transformed to a Python object and checks for a list
        of status codes.

        Args:
            url: str. The URL to fetch the response.
            expected_status_int_list: list(int). A list of integer status code
                to expect.
            params: dict. A dictionary that will be encoded into a query string.

        Returns:
            webtest.TestResponse. The test response.
        """
        if params is not None:
            self.assertIsInstance(
                params, dict,
                msg='Expected params to be a dict, received %s' % params)

        # This swap is required to ensure that the templates are fetched from
        # source directory instead of webpack_bundles since webpack_bundles is
        # only produced after webpack compilation which is not performed during
        # backend tests.
        with self.swap(base, 'load_template', mock_load_template):
            # expect_errors=True because any status in the expected list
            # (including error codes) is acceptable here.
            response = self.testapp.get(url, params=params, expect_errors=True)

        self.assertIn(response.status_int, expected_status_int_list)

        return response
def _parse_json_response(self, json_response, expect_errors):
"""Convert a JSON server response to an object (such as a dict)."""
if expect_errors:
self.assertTrue(json_response.status_int >= 400)
else:
self.assertTrue(200 <= json_response.status_int < 400)
self.assertEqual(json_response.content_type, 'application/json')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(
self,
url: str,
params: Optional[Dict[str, str]] = None,
expected_status_int: int = 200,
headers: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
"""Get a JSON response, transformed to a Python object."""
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
json_response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int, headers=headers
)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
    def post_json(
            self, url, data, headers=None, csrf_token=None,
            expected_status_int=200, upload_files=None, use_payload=True,
            source=None):
        """Post an object to the server by JSON; return the received object.

        Args:
            url: str. The URL to send the POST request to.
            data: dict. The dictionary that acts as the body of the request.
            headers: dict. The headers set in the request.
            csrf_token: str. The csrf token to identify the user.
            expected_status_int: int. Expected return status of the POST
                request.
            upload_files: list(tuple). List of
                (fieldname, filename, file_content) tuples. Can also provide
                just (fieldname, filename) to have the file contents be
                read from disk.
            use_payload: bool. If true, a new dict is created (which is sent as
                the body of the POST request) with one key - 'payload' - and the
                dict passed in 'data' is used as the value for that key. If
                false, the dict in 'data' is directly passed as the body of the
                request. For all requests called from the frontend, this should
                be set to 'true'.
            source: unicode. The url from which the post call is requested.

        Returns:
            dict. The JSON response for the request in dict form.
        """
        if use_payload:
            data = {'payload': json.dumps(data)}
        if csrf_token:
            # NOTE(review): when use_payload is False this mutates the
            # caller's 'data' dict in place (as does the 'source' branch
            # below) — confirm callers don't rely on their dict staying
            # unchanged.
            data['csrf_token'] = csrf_token
        if source:
            data['source'] = source

        expect_errors = expected_status_int >= 400

        json_response = self._send_post_request(
            self.testapp, url, data, expect_errors,
            expected_status_int=expected_status_int, upload_files=upload_files,
            headers=headers)

        # Testapp takes in a status parameter which is the expected status of
        # the response. However this expected status is verified only when
        # expect_errors=False. For other situations we need to explicitly check
        # the status.
        #
        # Reference URL:
        # https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
        self.assertEqual(json_response.status_int, expected_status_int)

        return self._parse_json_response(json_response, expect_errors)
    def delete_json(self, url, params='', expected_status_int=200):
        """Delete object on the server using a JSON call.

        Args:
            url: str. The URL to send the DELETE request to.
            params: dict|str. Query parameters; the empty-string default means
                'no parameters'. If truthy, must be a dict.
            expected_status_int: int. The integer status code to expect.

        Returns:
            dict. The parsed JSON response.
        """
        if params:
            self.assertIsInstance(
                params, dict,
                msg='Expected params to be a dict, received %s' % params)

        expect_errors = expected_status_int >= 400
        json_response = self.testapp.delete(
            url, params=params, expect_errors=expect_errors,
            status=expected_status_int)

        # Testapp takes in a status parameter which is the expected status of
        # the response. However this expected status is verified only when
        # expect_errors=False. For other situations we need to explicitly check
        # the status.
        #
        # Reference URL:
        # https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
        self.assertEqual(json_response.status_int, expected_status_int)
        return self._parse_json_response(json_response, expect_errors)
def _send_post_request(
self, app, url, data, expect_errors, expected_status_int=200,
upload_files=None, headers=None):
"""Sends a post request with the data provided to the url specified.
Args:
app: TestApp. The WSGI application which receives the request and
produces response.
url: str. The URL to send the POST request to.
data: *. To be put in the body of the request. If params is an
iterator, it will be urlencoded. If it is a string, it will not
be encoded, but placed in the body directly. Can be a
collections.OrderedDict with webtest.forms.Upload fields
included.
expect_errors: bool. Whether errors are expected.
expected_status_int: int. The expected status code.
upload_files: list(tuple). List of
(fieldname, filename, file_content) tuples. Can also provide
just (fieldname, filename) to have the file contents will be
read from disk.
headers: dict(str, *). Extra headers to send.
Returns:
webtest.TestResponse. The response of the POST request.
"""
# Convert the files to bytes.
if upload_files is not None:
upload_files = tuple(
tuple(
f.encode('utf-8') if isinstance(f, str) else f
for f in upload_file
) for upload_file in upload_files
)
return app.post(
url, params=data, headers=headers, status=expected_status_int,
upload_files=upload_files, expect_errors=expect_errors)
def post_task(
self, url, payload, headers, csrf_token=None, expect_errors=False,
expected_status_int=200):
"""Posts an object to the server by JSON with the specific headers
specified; return the received object.
"""
if csrf_token:
payload['csrf_token'] = csrf_token
return self.testapp.post(
url, params=json.dumps(payload), headers=headers,
status=expected_status_int, expect_errors=expect_errors,
content_type='application/json')
def put_json(self, url, payload, csrf_token=None, expected_status_int=200):
"""PUT an object to the server with JSON and return the response."""
params = {'payload': json.dumps(payload)}
if csrf_token:
params['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self.testapp.put(
url, params=params, expect_errors=expect_errors)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def get_new_csrf_token(self):
"""Generates CSRF token for test."""
response = self.get_json('/csrfhandler')
return response['token']
def save_new_default_exploration(
self, exploration_id, owner_id, title='A title'):
"""Saves a new default exploration written by owner_id.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
title: str. The title of the exploration.
Returns:
Exploration. The exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category='Algebra')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
    def set_interaction_for_state(self, state, interaction_id):
        """Sets the interaction_id, sets the fully populated default interaction
        customization arguments, and increments next_content_id_index as needed.

        Args:
            state: State. The state domain object to set the interaction for.
            interaction_id: str. The interaction id to set. Also sets the
                default customization args for the given interaction id.
        """
        # We wrap next_content_id_index in a dict so that modifying it in the
        # inner function modifies the value.
        next_content_id_index_dict = {'value': state.next_content_id_index}

        def traverse_schema_and_assign_content_ids(value, schema, contentId):
            """Generates content_id from recursively traversing the schema, and
            assigning to the current value.

            Args:
                value: *. The current traversed value in customization
                    arguments.
                schema: dict. The current traversed schema.
                contentId: str. The content_id generated so far.
            """
            is_subtitled_html_spec = (
                schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
                schema['obj_type'] ==
                schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML)
            is_subtitled_unicode_spec = (
                schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
                schema['obj_type'] ==
                schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE)

            if is_subtitled_html_spec or is_subtitled_unicode_spec:
                # Leaf case: assign a fresh content id and bump the shared
                # counter.
                value['content_id'] = '%s_%i' % (
                    contentId, next_content_id_index_dict['value'])
                next_content_id_index_dict['value'] += 1
            elif schema['type'] == schema_utils.SCHEMA_TYPE_LIST:
                for x in value:
                    traverse_schema_and_assign_content_ids(
                        x, schema['items'], contentId)
            elif schema['type'] == schema_utils.SCHEMA_TYPE_DICT:
                for schema_property in schema['properties']:
                    # NOTE(review): this branch recurses on an entry of
                    # schema['properties'] rather than the corresponding entry
                    # of 'value', and mixes attribute access
                    # (schema_property.name) with subscripting
                    # (schema_property['schema']) — verify this against an
                    # interaction that actually has a dict-typed
                    # customization arg.
                    traverse_schema_and_assign_content_ids(
                        schema['properties'][schema_property.name],
                        schema_property['schema'],
                        '%s_%s' % (contentId, schema_property.name))

        interaction = (
            interaction_registry.Registry.get_interaction_by_id(interaction_id))
        ca_specs = interaction.customization_arg_specs
        customization_args = {}

        # Populate each customization arg with its default value and assign
        # content ids throughout its (possibly nested) structure.
        for ca_spec in ca_specs:
            ca_name = ca_spec.name
            ca_value = ca_spec.default_value
            traverse_schema_and_assign_content_ids(
                ca_value, ca_spec.schema, 'ca_%s' % ca_name)
            customization_args[ca_name] = {'value': ca_value}

        state.update_interaction_id(interaction_id)
        state.update_interaction_customization_args(customization_args)
        state.update_next_content_id_index(next_content_id_index_dict['value'])
    def save_new_valid_exploration(
            self, exploration_id, owner_id, title='A title',
            category='A category', objective='An objective',
            language_code=constants.DEFAULT_LANGUAGE_CODE, end_state_name=None,
            interaction_id='TextInput', correctness_feedback_enabled=False):
        """Saves a new strictly-validated exploration.

        Args:
            exploration_id: str. The id of the new validated exploration.
            owner_id: str. The user_id of the creator of the exploration.
            title: str. The title of the exploration.
            category: str. The category this exploration belongs to.
            objective: str. The objective of this exploration.
            language_code: str. The language_code of this exploration.
            end_state_name: str. The name of the end state for the exploration.
            interaction_id: str. The id of the interaction.
            correctness_feedback_enabled: bool. Whether correctness feedback is
                enabled for the exploration.

        Returns:
            Exploration. The exploration domain object.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            exploration_id, title=title, category=category,
            language_code=language_code)
        self.set_interaction_for_state(
            exploration.states[exploration.init_state_name], interaction_id)

        exploration.objective = objective
        exploration.correctness_feedback_enabled = correctness_feedback_enabled

        # If an end state name is provided, add terminal node with that name.
        if end_state_name is not None:
            exploration.add_states([end_state_name])
            end_state = exploration.states[end_state_name]
            # Terminal states use the EndExploration interaction and carry no
            # default outcome.
            self.set_interaction_for_state(end_state, 'EndExploration')
            end_state.update_interaction_default_outcome(None)

            # Link first state to ending state (to maintain validity).
            init_state = exploration.states[exploration.init_state_name]
            init_interaction = init_state.interaction
            init_interaction.default_outcome.dest = end_state_name
            if correctness_feedback_enabled:
                init_interaction.default_outcome.labelled_as_correct = True

        exp_services.save_new_exploration(owner_id, exploration)
        return exploration
def save_new_linear_exp_with_state_names_and_interactions(
self, exploration_id, owner_id, state_names, interaction_ids,
title='A title', category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE,
correctness_feedback_enabled=False):
"""Saves a new strictly-validated exploration with a sequence of states.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
state_names: list(str). The names of states to be linked
sequentially in the exploration. Must be a non-empty list and
contain no duplicates.
interaction_ids: list(str). The names of the interaction ids to be
assigned to each state. Values will be cycled, so it doesn't
need to be the same size as state_names, but it must be
non-empty.
title: str. The title of the exploration.
category: str. The category this exploration belongs to.
objective: str. The objective of this exploration.
language_code: str. The language_code of this exploration.
correctness_feedback_enabled: bool. Whether the correctness feedback
is enabled or not for the exploration.
Returns:
Exploration. The exploration domain object.
"""
if not state_names:
raise ValueError('must provide at least one state name')
if not interaction_ids:
raise ValueError('must provide at least one interaction type')
interaction_ids = itertools.cycle(interaction_ids)
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, init_state_name=state_names[0],
category=category, objective=objective, language_code=language_code)
exploration.correctness_feedback_enabled = correctness_feedback_enabled
exploration.add_states(state_names[1:])
for from_state_name, dest_state_name in (
python_utils.ZIP(state_names[:-1], state_names[1:])):
from_state = exploration.states[from_state_name]
self.set_interaction_for_state(
from_state, python_utils.NEXT(interaction_ids))
from_state.interaction.default_outcome.dest = dest_state_name
if correctness_feedback_enabled:
from_state.interaction.default_outcome.labelled_as_correct = (
True)
end_state = exploration.states[state_names[-1]]
self.set_interaction_for_state(end_state, 'EndExploration')
end_state.update_interaction_default_outcome(None)
exp_services.save_new_exploration(owner_id, exploration)
return exploration
    def save_new_exp_with_custom_states_schema_version(
            self, exp_id, user_id, states_dict, version):
        """Saves a new default exploration with the given version of state dict.

        This function should only be used for creating explorations in tests
        involving migration of datastore explorations that use an old states
        schema version.

        Note that it makes an explicit commit to the datastore instead of using
        the usual functions for updating and creating explorations. This is
        because the latter approach would result in an exploration with the
        *current* states schema version.

        Args:
            exp_id: str. The exploration ID.
            user_id: str. The user_id of the creator.
            states_dict: dict. The dict representation of all the states.
            version: int. Custom states schema version.
        """
        exp_model = exp_models.ExplorationModel(
            id=exp_id, category='category', title='title',
            objective='Old objective', language_code='en', tags=[], blurb='',
            author_notes='', states_schema_version=version,
            init_state_name=feconf.DEFAULT_INIT_STATE_NAME, states=states_dict,
            param_specs={}, param_changes=[])
        rights_manager.create_new_exploration_rights(exp_id, user_id)

        commit_message = 'New exploration created with title \'title\'.'
        exp_model.commit(user_id, commit_message, [{
            'cmd': 'create_new',
            'title': 'title',
            'category': 'category',
        }])

        # A summary model is also created explicitly, mirroring the rights
        # model just committed, so lookups that rely on ExpSummaryModel work.
        exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
        exp_summary_model = exp_models.ExpSummaryModel(
            id=exp_id, title='title', category='category',
            objective='Old objective', language_code='en', tags=[],
            ratings=feconf.get_empty_ratings(),
            scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
            status=exp_rights.status,
            community_owned=exp_rights.community_owned,
            owner_ids=exp_rights.owner_ids, contributor_ids=[],
            contributors_summary={})
        exp_summary_model.update_timestamps()
        exp_summary_model.put()
def publish_exploration(self, owner_id, exploration_id):
"""Publish the exploration with the given exploration_id.
Args:
owner_id: str. The user_id of the owner of the exploration.
exploration_id: str. The ID of the new exploration.
"""
committer = user_services.get_user_actions_info(owner_id)
rights_manager.publish_exploration(committer, exploration_id)
def save_new_default_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default collection written by owner_id.
Args:
collection_id: str. The id of the new default collection.
owner_id: str. The user_id of the creator of the collection.
title: str. The title of the collection.
category: str. The category this collection belongs to.
objective: str. The objective of this collection.
language_code: str. The language_code of this collection.
Returns:
Collection. The collection domain object.
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
collection_services.save_new_collection(owner_id, collection)
return collection
def save_new_valid_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE,
exploration_id='an_exploration_id',
end_state_name=DEFAULT_END_STATE_NAME):
"""Creates an Oppia collection and adds a node saving the exploration
details.
Args:
collection_id: str. ID for the collection to be created.
owner_id: str. The user_id of the creator of the collection.
title: str. Title for the collection.
category: str. The category of the exploration.
objective: str. Objective for the exploration.
language_code: str. The language code for the exploration.
exploration_id: str. The exploration_id for the Oppia exploration.
end_state_name: str. The name of the end state for the exploration.
Returns:
Collection. A newly-created collection containing the corresponding
exploration details.
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
# Check whether exploration with given exploration_id exists or not.
exploration = (
exp_fetchers.get_exploration_by_id(exploration_id, strict=False))
if exploration is None:
exploration = self.save_new_valid_exploration(
exploration_id, owner_id, title=title, category=category,
objective=objective, end_state_name=end_state_name)
collection.add_node(exploration.id)
collection_services.save_new_collection(owner_id, collection)
return collection
def publish_collection(self, owner_id, collection_id):
"""Publish the collection with the given collection_id.
Args:
owner_id: str. The user_id of the owner of the collection.
collection_id: str. ID of the collection to be published.
"""
committer = user_services.get_user_actions_info(owner_id)
rights_manager.publish_collection(committer, collection_id)
def save_new_story(
self, story_id, owner_id, corresponding_topic_id,
title='Title', description='Description', notes='Notes',
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='title', meta_tag_content='story meta tag content'):
"""Creates an Oppia Story and saves it.
NOTE: Callers are responsible for ensuring that the
'corresponding_topic_id' provided is valid, unless a test explicitly
requires it to be invalid.
Args:
story_id: str. ID for the story to be created.
owner_id: str. The user_id of the creator of the story.
title: str. The title of the story.
description: str. The high level description of the story.
notes: str. A set of notes, that describe the characters,
main storyline, and setting.
corresponding_topic_id: str. The id of the topic to which the story
belongs.
language_code: str. The ISO 639-1 code for the language this story
is written in.
url_fragment: str. The url fragment of the story.
meta_tag_content: str. The meta tag content of the story.
Returns:
Story. A newly-created story.
"""
story = story_domain.Story.create_default_story(
story_id, title, description, corresponding_topic_id, url_fragment)
story.title = title
story.description = description
story.notes = notes
story.language_code = language_code
story.url_fragment = url_fragment
story.meta_tag_content = meta_tag_content
story_services.save_new_story(owner_id, story)
return story
    def save_new_story_with_story_contents_schema_v1(
            self, story_id, thumbnail_filename, thumbnail_bg_color,
            thumbnail_size_in_bytes, owner_id, title, description,
            notes, corresponding_topic_id,
            language_code=constants.DEFAULT_LANGUAGE_CODE,
            url_fragment='story-frag',
            meta_tag_content='story meta tag content'):
        """Saves a new story with a default version 1 story contents data dict.

        This function should only be used for creating stories in tests
        involving migration of datastore stories that use an old story contents
        schema version.

        Note that it makes an explicit commit to the datastore instead of using
        the usual functions for updating and creating stories. This is because
        the latter approach would result in a story with the *current* story
        contents schema version.

        Args:
            story_id: str. ID for the story to be created.
            thumbnail_filename: str|None. Thumbnail filename for the story.
            thumbnail_bg_color: str|None. Thumbnail background color for the
                story.
            thumbnail_size_in_bytes: int|None. The thumbnail size in bytes of
                the story.
            owner_id: str. The user_id of the creator of the story.
            title: str. The title of the story.
            description: str. The high level description of the story.
            notes: str. A set of notes, that describe the characters, main
                storyline, and setting.
            corresponding_topic_id: str. The id of the topic to which the story
                belongs.
            language_code: str. The ISO 639-1 code for the language this story
                is written in.
            url_fragment: str. The URL fragment for the story.
            meta_tag_content: str. The meta tag content of the story.
        """
        # The model is constructed directly (bypassing the domain layer) so
        # that story_contents_schema_version stays pinned at 1. The contents
        # come from the class-level VERSION_1_STORY_CONTENTS_DICT fixture.
        story_model = story_models.StoryModel(
            id=story_id, thumbnail_filename=thumbnail_filename,
            thumbnail_bg_color=thumbnail_bg_color,
            thumbnail_size_in_bytes=thumbnail_size_in_bytes,
            description=description, title=title,
            language_code=language_code,
            story_contents_schema_version=1, notes=notes,
            corresponding_topic_id=corresponding_topic_id,
            story_contents=self.VERSION_1_STORY_CONTENTS_DICT,
            url_fragment=url_fragment, meta_tag_content=meta_tag_content)
        commit_message = 'New story created with title \'%s\'.' % title
        story_model.commit(
            owner_id, commit_message,
            [{'cmd': story_domain.CMD_CREATE_NEW, 'title': title}])
    def save_new_story_with_story_contents_schema_v5(
            self, story_id, thumbnail_filename, thumbnail_bg_color,
            thumbnail_size_in_bytes, owner_id, title, description,
            notes, corresponding_topic_id,
            language_code=constants.DEFAULT_LANGUAGE_CODE,
            url_fragment='story-frag',
            meta_tag_content='story meta tag content'):
        """Saves a new story with a default version 5 story contents data
        dict.

        This function should only be used for creating stories in tests
        involving migration of datastore stories that use an old story
        contents schema version.

        Note that it makes an explicit commit to the datastore instead of
        using the usual functions for updating and creating stories. This is
        because the latter approach would result in a story with the
        *current* story contents schema version.

        Args:
            story_id: str. ID for the story to be created.
            thumbnail_filename: str|None. Thumbnail filename for the story.
            thumbnail_bg_color: str|None. Thumbnail background color for the
                story.
            thumbnail_size_in_bytes: int|None. The thumbnail size in bytes of
                the story.
            owner_id: str. The user_id of the creator of the story.
            title: str. The title of the story.
            description: str. The high level description of the story.
            notes: str. A set of notes, that describe the characters, main
                storyline, and setting.
            corresponding_topic_id: str. The id of the topic to which the
                story belongs.
            language_code: str. The ISO 639-1 code for the language this
                story is written in.
            url_fragment: str. The URL fragment for the story.
            meta_tag_content: str. The meta tag content of the story.
        """
        # Hand-written story_contents dict at schema version 5. The math RTE
        # tag below carries an empty svg_filename (presumably to be filled in
        # by the migration under test — confirm against the migration job).
        story_content_v5 = {
            'nodes': [{
                'outline': (
                    '<p>Value</p>'
                    '<oppia-noninteractive-math math_content-with-value="{'
                    '&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, '  # pylint: disable=line-too-long
                    '&quot;svg_filename&quot;: &quot;&quot;'
                    '}">'
                    '</oppia-noninteractive-math>'),
                'exploration_id': None,
                'destination_node_ids': [],
                'outline_is_finalized': False,
                'acquired_skill_ids': [],
                'id': 'node_1',
                'title': 'Chapter 1',
                'description': '',
                'prerequisite_skill_ids': [],
                'thumbnail_filename': 'image.svg',
                'thumbnail_bg_color': None,
                'thumbnail_size_in_bytes': 21131,
            }],
            'initial_node_id': 'node_1',
            'next_node_id': 'node_2',
        }
        # Commit the model directly so that the stored
        # story_contents_schema_version stays at 5 rather than being bumped
        # to the current schema version by the story services layer.
        story_model = story_models.StoryModel(
            id=story_id, thumbnail_filename=thumbnail_filename,
            thumbnail_bg_color=thumbnail_bg_color,
            thumbnail_size_in_bytes=thumbnail_size_in_bytes,
            description=description, title=title,
            language_code=language_code,
            story_contents_schema_version=5, notes=notes,
            corresponding_topic_id=corresponding_topic_id,
            story_contents=story_content_v5,
            url_fragment=url_fragment, meta_tag_content=meta_tag_content)
        commit_message = 'New story created with title \'%s\'.' % title
        story_model.commit(
            owner_id, commit_message,
            [{'cmd': story_domain.CMD_CREATE_NEW, 'title': title}])
        story_services.create_story_summary(story_id)
def save_new_subtopic(self, subtopic_id, owner_id, topic_id):
"""Creates an Oppia subtopic and saves it.
Args:
subtopic_id: str. ID for the subtopic to be created.
owner_id: str. The user_id of the creator of the topic.
topic_id: str. ID for the topic that the subtopic belongs to.
Returns:
SubtopicPage. A newly-created subtopic.
"""
subtopic_page = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
subtopic_id, topic_id))
subtopic_changes = [
subtopic_page_domain.SubtopicPageChange({
'cmd': subtopic_page_domain.CMD_CREATE_NEW,
'topic_id': topic_id,
'subtopic_id': subtopic_id,
})
]
subtopic_page_services.save_subtopic_page(
owner_id, subtopic_page, 'Create new subtopic', subtopic_changes)
return subtopic_page
def save_new_topic(
self, topic_id, owner_id, name='topic', abbreviated_name='topic',
url_fragment='topic',
thumbnail_filename='topic.svg',
thumbnail_bg_color=(
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0]),
thumbnail_size_in_bytes=21131,
description='description', canonical_story_ids=None,
additional_story_ids=None, uncategorized_skill_ids=None,
subtopics=None, next_subtopic_id=0,
language_code=constants.DEFAULT_LANGUAGE_CODE,
meta_tag_content='topic meta tag content',
practice_tab_is_displayed=False,
page_title_fragment_for_web='topic page title'):
"""Creates an Oppia Topic and saves it.
Args:
topic_id: str. ID for the topic to be created.
owner_id: str. The user_id of the creator of the topic.
name: str. The name of the topic.
abbreviated_name: str. The abbreviated name of the topic.
url_fragment: str. The url fragment of the topic.
thumbnail_filename: str|None. The thumbnail filename of the topic.
thumbnail_bg_color: str|None. The thumbnail background color of the
topic.
thumbnail_size_in_bytes: int|None. The thumbnail size in bytes of
the topic.
description: str. The description of the topic.
canonical_story_ids: list(str). The list of ids of canonical stories
that are part of the topic.
additional_story_ids: list(str). The list of ids of additional
stories that are part of the topic.
uncategorized_skill_ids: list(str). The list of ids of skills that
are not part of any subtopic.
subtopics: list(Subtopic). The different subtopics that are part of
this topic.
next_subtopic_id: int. The id for the next subtopic.
language_code: str. The ISO 639-1 code for the language this topic
is written in.
meta_tag_content: str. The meta tag content for the topic.
practice_tab_is_displayed: bool. Whether the practice tab should be
displayed.
page_title_fragment_for_web: str. The page title fragment for the
topic.
Returns:
Topic. A newly-created topic.
"""
canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (canonical_story_ids or [])
]
additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (additional_story_ids or [])
]
uncategorized_skill_ids = uncategorized_skill_ids or []
subtopics = subtopics or []
topic = topic_domain.Topic(
topic_id, name, abbreviated_name, url_fragment, thumbnail_filename,
thumbnail_bg_color, thumbnail_size_in_bytes, description,
canonical_story_references, additional_story_references,
uncategorized_skill_ids, subtopics,
feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION, next_subtopic_id,
language_code, 0, feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION,
meta_tag_content, practice_tab_is_displayed,
page_title_fragment_for_web)
topic_services.save_new_topic(owner_id, topic)
return topic
    def save_new_topic_with_subtopic_schema_v1(
            self, topic_id, owner_id, name, abbreviated_name, url_fragment,
            canonical_name, description, thumbnail_filename, thumbnail_bg_color,
            canonical_story_references, additional_story_references,
            uncategorized_skill_ids, next_subtopic_id,
            language_code=constants.DEFAULT_LANGUAGE_CODE,
            meta_tag_content='topic meta tag content',
            practice_tab_is_displayed=False,
            page_title_fragment_for_web='topic page title'):
        """Saves a new topic with a default version 1 subtopic data dict.

        This function should only be used for creating topics in tests
        involving migration of datastore topics that use an old subtopic
        schema version.

        Note that it makes an explicit commit to the datastore instead of
        using the usual functions for updating and creating topics. This is
        because the latter approach would result in a topic with the
        *current* subtopic schema version.

        Args:
            topic_id: str. ID for the topic to be created.
            owner_id: str. The user_id of the creator of the topic.
            name: str. The name of the topic.
            abbreviated_name: str. The abbreviated name of the topic.
            url_fragment: str. The url fragment of the topic.
            canonical_name: str. The canonical name (lowercase) of the topic.
            description: str. The description of the topic.
            thumbnail_filename: str. The thumbnail file name of the topic.
            thumbnail_bg_color: str. The thumbnail background color of the
                topic.
            canonical_story_references: list(StoryReference). A set of story
                reference objects representing the canonical stories that are
                part of this topic.
            additional_story_references: list(StoryReference). A set of story
                reference object representing the additional stories that are
                part of this topic.
            uncategorized_skill_ids: list(str). The list of ids of skills that
                are not part of any subtopic.
            next_subtopic_id: int. The id for the next subtopic.
            language_code: str. The ISO 639-1 code for the language this topic
                is written in.
            meta_tag_content: str. The meta tag content for the topic.
            practice_tab_is_displayed: bool. Whether the practice tab should be
                displayed.
            page_title_fragment_for_web: str. The page title fragment for the
                topic.
        """
        # A rights model is created alongside the topic so that the topic is
        # published and usable in tests without a separate publication step.
        topic_rights_model = topic_models.TopicRightsModel(
            id=topic_id, manager_ids=[], topic_is_published=True)
        # The subtopic schema is pinned at version 1 (with a canned v1
        # subtopic dict); the story reference schema uses the current version.
        topic_model = topic_models.TopicModel(
            id=topic_id, name=name, abbreviated_name=abbreviated_name,
            url_fragment=url_fragment, thumbnail_filename=thumbnail_filename,
            thumbnail_bg_color=thumbnail_bg_color,
            canonical_name=canonical_name, description=description,
            language_code=language_code,
            canonical_story_references=canonical_story_references,
            additional_story_references=additional_story_references,
            uncategorized_skill_ids=uncategorized_skill_ids,
            subtopic_schema_version=1,
            story_reference_schema_version=(
                feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
            next_subtopic_id=next_subtopic_id,
            subtopics=[self.VERSION_1_SUBTOPIC_DICT],
            meta_tag_content=meta_tag_content,
            practice_tab_is_displayed=practice_tab_is_displayed,
            page_title_fragment_for_web=page_title_fragment_for_web)
        commit_message = 'New topic created with name \'%s\'.' % name
        # Both models are committed directly to bypass the services layer,
        # which would otherwise migrate the subtopic dict to the current
        # schema version.
        topic_rights_model.commit(
            committer_id=owner_id,
            commit_message='Created new topic rights',
            commit_cmds=[{'cmd': topic_domain.CMD_CREATE_NEW}])
        topic_model.commit(
            owner_id, commit_message,
            [{'cmd': topic_domain.CMD_CREATE_NEW, 'name': name}])
def save_new_question(
self, question_id, owner_id, question_state_data,
linked_skill_ids, inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Creates an Oppia Question and saves it.
Args:
question_id: str. ID for the question to be created.
owner_id: str. The id of the user creating the question.
question_state_data: State. The state data for the question.
linked_skill_ids: list(str). List of skill IDs linked to the
question.
inapplicable_skill_misconception_ids: list(str). List of skill
misconceptions ids that are not applicable to the question.
language_code: str. The ISO 639-1 code for the language this
question is written in.
Returns:
Question. A newly-created question.
"""
# This needs to be done because default arguments can not be of list
# type.
question = question_domain.Question(
question_id, question_state_data,
feconf.CURRENT_STATE_SCHEMA_VERSION, language_code, 0,
linked_skill_ids, inapplicable_skill_misconception_ids or [])
question_services.add_question(owner_id, question)
return question
    def save_new_question_with_state_data_schema_v27(
            self, question_id, owner_id, linked_skill_ids,
            inapplicable_skill_misconception_ids=None,
            language_code=constants.DEFAULT_LANGUAGE_CODE):
        """Saves a new default question with a default version 27 state data
        dict.

        This function should only be used for creating questions in tests
        involving migration of datastore questions that use an old state data
        schema version.

        Note that it makes an explicit commit to the datastore instead of
        using the usual functions for updating and creating questions. This is
        because the latter approach would result in a question with the
        *current* state data schema version.

        Args:
            question_id: str. ID for the question to be created.
            owner_id: str. The id of the user creating the question.
            linked_skill_ids: list(str). The skill IDs linked to the question.
            inapplicable_skill_misconception_ids: list(str)|None. List of
                skill misconception ids that are not applicable to the
                question. Treated as an empty list when omitted.
            language_code: str. The ISO 639-1 code for the language this
                question is written in.
        """
        # This needs to be done because default arguments can not be of list
        # type.
        question_model = question_models.QuestionModel(
            id=question_id, question_state_data=self.VERSION_27_STATE_DICT,
            language_code=language_code, version=1,
            question_state_data_schema_version=27,
            linked_skill_ids=linked_skill_ids,
            inapplicable_skill_misconception_ids=(
                inapplicable_skill_misconception_ids or []))
        # Committing the model directly keeps the stored
        # question_state_data_schema_version pinned at 27.
        question_model.commit(
            owner_id, 'New question created',
            [{'cmd': question_domain.CMD_CREATE_NEW}])
    def save_new_question_suggestion_with_state_data_schema_v27(
            self, author_id, skill_id, suggestion_id=None,
            language_code=constants.DEFAULT_LANGUAGE_CODE):
        """Saves a new question suggestion with a default version 27 state
        data dict.

        This function should only be used for creating question suggestions in
        tests involving migration of datastore question suggestions that use
        an old state data schema version.

        Note that it makes an explicit commit to the datastore instead of
        using the usual functions for updating and creating questions. This is
        because the latter approach would result in a question with the
        *current* state data schema version.

        Args:
            author_id: str. The id of the user authoring the suggestion.
            skill_id: str. The skill the suggested question is linked to.
            suggestion_id: str|None. ID for the suggestion. If None, a fresh
                feedback thread id is generated and used instead.
            language_code: str. The ISO 639-1 code for the language this
                question is written in.

        Returns:
            str. The ID of the saved suggestion.
        """
        score_category = (
            suggestion_models.SCORE_TYPE_QUESTION +
            suggestion_models.SCORE_CATEGORY_DELIMITER + skill_id)
        # The change payload embeds a question dict pinned at state data
        # schema version 27.
        change = {
            'cmd': (
                question_domain
                .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
            'question_dict': {
                'question_state_data': self.VERSION_27_STATE_DICT,
                'question_state_data_schema_version': 27,
                'language_code': language_code,
                'linked_skill_ids': [skill_id],
                'inapplicable_skill_misconception_ids': []
            },
            'skill_id': skill_id,
            'skill_difficulty': 0.3
        }
        if suggestion_id is None:
            suggestion_id = (
                feedback_models.GeneralFeedbackThreadModel.
                generate_new_thread_id(
                    feconf.ENTITY_TYPE_SKILL, skill_id))
        suggestion_models.GeneralSuggestionModel.create(
            feconf.SUGGESTION_TYPE_ADD_QUESTION,
            feconf.ENTITY_TYPE_SKILL, skill_id, 1,
            suggestion_models.STATUS_IN_REVIEW, author_id, None, change,
            score_category, suggestion_id, language_code)
        return suggestion_id
def save_new_skill(
self, skill_id, owner_id, description='description',
misconceptions=None, rubrics=None, skill_contents=None,
language_code=constants.DEFAULT_LANGUAGE_CODE,
prerequisite_skill_ids=None):
"""Creates an Oppia Skill and saves it.
Args:
skill_id: str. ID for the skill to be created.
owner_id: str. The user_id of the creator of the skill.
description: str. The description of the skill.
misconceptions: list(Misconception)|None. A list of Misconception
objects that contains the various misconceptions of the skill.
rubrics: list(Rubric)|None. A list of Rubric objects that contain
the rubric for each difficulty of the skill.
skill_contents: SkillContents|None. A SkillContents object
containing the explanation and examples of the skill.
language_code: str. The ISO 639-1 code for the language this skill
is written in.
prerequisite_skill_ids: list(str)|None. The prerequisite skill IDs
for the skill.
Returns:
Skill. A newly-created skill.
"""
skill = (
skill_domain.Skill.create_default_skill(skill_id, description, []))
if misconceptions is not None:
skill.misconceptions = misconceptions
skill.next_misconception_id = len(misconceptions) + 1
if skill_contents is not None:
skill.skill_contents = skill_contents
if prerequisite_skill_ids is not None:
skill.prerequisite_skill_ids = prerequisite_skill_ids
if rubrics is not None:
skill.rubrics = rubrics
else:
skill.rubrics = [
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[2], ['Explanation 3']),
]
skill.language_code = language_code
skill.version = 0
skill_services.save_new_skill(owner_id, skill)
return skill
    def save_new_skill_with_defined_schema_versions(
            self, skill_id, owner_id, description, next_misconception_id,
            misconceptions=None, rubrics=None, skill_contents=None,
            misconceptions_schema_version=1, rubric_schema_version=1,
            skill_contents_schema_version=1,
            language_code=constants.DEFAULT_LANGUAGE_CODE):
        """Saves a new default skill with the given versions for
        misconceptions and skill contents.

        This function should only be used for creating skills in tests
        involving migration of datastore skills that use an old schema
        version.

        Note that it makes an explicit commit to the datastore instead of
        using the usual functions for updating and creating skills. This is
        because the latter approach would result in a skill with the
        *current* schema version.

        Args:
            skill_id: str. ID for the skill to be created.
            owner_id: str. The user_id of the creator of the skill.
            description: str. The description of the skill.
            next_misconception_id: int. The misconception id to be used by the
                next misconception added.
            misconceptions: list(Misconception.to_dict()). The list of
                misconception dicts associated with the skill.
            rubrics: list(Rubric.to_dict()). The list of rubric dicts
                associated with the skill.
            skill_contents: SkillContents.to_dict(). A SkillContents dict
                containing the explanation and examples of the skill.
            misconceptions_schema_version: int. The schema version for the
                misconceptions object.
            rubric_schema_version: int. The schema version for the rubric
                object.
            skill_contents_schema_version: int. The schema version for the
                skill_contents object.
            language_code: str. The ISO 639-1 code for the language this skill
                is written in.
        """
        skill_model = skill_models.SkillModel(
            id=skill_id, description=description, language_code=language_code,
            misconceptions=misconceptions, rubrics=rubrics,
            skill_contents=skill_contents,
            next_misconception_id=next_misconception_id,
            misconceptions_schema_version=misconceptions_schema_version,
            rubric_schema_version=rubric_schema_version,
            skill_contents_schema_version=skill_contents_schema_version,
            superseding_skill_id=None, all_questions_merged=False)
        # Commit directly so the caller-specified schema versions are stored
        # verbatim rather than being migrated by the services layer.
        skill_model.commit(
            owner_id, 'New skill created.',
            [{'cmd': skill_domain.CMD_CREATE_NEW}])
    def _create_valid_question_data(self, default_dest_state_name):
        """Creates a valid question_data state.

        Args:
            default_dest_state_name: str. The default destination state.

        Returns:
            State. A fully-populated TextInput state suitable for use as
            question data.
        """
        state = state_domain.State.create_default_state(
            default_dest_state_name, is_initial_state=True)
        # The interaction id must be set before the solution is built, since
        # Solution.from_dict is given the interaction id below.
        state.update_interaction_id('TextInput')
        solution_dict = {
            'answer_is_exclusive': False,
            'correct_answer': 'Solution',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>This is a solution.</p>',
            },
        }
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')),
        ]
        solution = state_domain.Solution.from_dict(
            state.interaction.id, solution_dict)
        state.update_interaction_solution(solution)
        state.update_interaction_hints(hints_list)
        state.update_interaction_customization_args({
            'placeholder': {
                'value': {
                    'content_id': 'ca_placeholder',
                    'unicode_str': 'Enter text here',
                },
            },
            'rows': {'value': 1},
        })
        # Two content ids ('ca_placeholder' and 'hint_1'/'solution' numbering
        # aside) have been consumed; the next index is therefore 2.
        state.update_next_content_id_index(2)
        # Mark the default outcome as correct and terminal (dest=None), as
        # expected for question states.
        state.interaction.default_outcome.labelled_as_correct = True
        state.interaction.default_outcome.dest = None
        return state
class LinterTestBase(GenericTestBase):
    """Base class for linter tests."""

    def setUp(self):
        super(LinterTestBase, self).setUp()
        self.linter_stdout = []

        def mock_print(*args):
            """Replacement for python_utils.PRINT that records each printed
            line in self.linter_stdout instead of writing to stdout.

            Args:
                *args: list(*). Variable length argument list of values to
                    print in the same line of output.
            """
            self.linter_stdout.append(
                ' '.join(map(python_utils.UNICODE, args)))

        self.print_swap = self.swap(python_utils, 'PRINT', mock_print)

    def assert_same_list_elements(self, phrases, stdout):
        """Checks that at least one captured output line contains every one
        of the given phrases.

        Args:
            phrases: list(str). Phrases that must all occur together in a
                single stdout entry. For example, python linting outputs a
                success string that includes data we don't have easy access
                to, like how long the test took, so we may want to search for
                a substring of that success string in stdout.
            stdout: list(str). A list of the output results from the method's
                execution.
        """
        matches = (
            all(phrase in output for phrase in phrases) for output in stdout)
        self.assertTrue(any(matches))

    def assert_failed_messages_count(self, stdout, expected_failed_count):
        """Asserts that the number of messages starting with 'FAILED' equals
        the expected count.

        Args:
            stdout: list(str). A list of linter output messages.
            expected_failed_count: int. Expected number of failed messages.
        """
        failed_messages = [
            msg for msg in stdout if msg.startswith('FAILED')]
        self.assertEqual(len(failed_messages), expected_failed_count)
class EmailMessageMock:
    """Mock for core.platform.models email services messages."""

    def __init__(
            self, sender_email, recipient_email, subject, plaintext_body,
            html_body, bcc=None, reply_to=None, recipient_variables=None):
        """Initializes a mock email message with all the necessary data.

        Args:
            sender_email: str. The sender's address, either
                'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or a bare
                'SENDER_EMAIL_ADDRESS'. Must be utf-8.
            recipient_email: str. The recipient's email address. Must be
                utf-8.
            subject: str. The subject line of the email. Must be utf-8.
            plaintext_body: str. The plaintext body of the email. Must be
                utf-8.
            html_body: str. The HTML body of the email. Must fit in a
                datastore entity. Must be utf-8.
            bcc: list(str)|None. Optional list of bcc emails; each must be
                utf-8.
            reply_to: str|None. Optional reply address formatted like
                “reply+<reply_id>@<incoming_email_domain_name> where reply_id
                is the unique id of the sender.
            recipient_variables: dict|None. Optional per-recipient
                substitution data for batch sending, keyed by recipient
                address, e.g.:
                    recipient_variables = {
                        'bob@example.com': {'first': 'Bob', 'id': 1},
                        'alice@example.com': {'first': 'Alice', 'id': 2},
                    }
                    subject = 'Hey, %recipient.first%'
                For more information about this format, see:
                https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
        """
        self.sender = sender_email
        self.to = recipient_email
        self.subject = subject
        self.body = plaintext_body
        self.html = html_body
        self.bcc = bcc
        self.reply_to = reply_to
        self.recipient_variables = recipient_variables
class GenericEmailTestBase(GenericTestBase):
    """Base class for tests requiring email services."""

    # Maps each recipient address to the list of EmailMessageMock objects
    # "sent" to it. Reset per-test in setUp().
    emails_dict = collections.defaultdict(list)

    def run(self, result=None):
        """Adds a context swap on top of the test_utils.run() method so that
        test classes extending GenericEmailTestBase will automatically have a
        mailgun api key, mailgun domain name and mocked version of
        send_email_to_recipients().
        """
        # Refer to this class by its own name rather than the module-level
        # alias EmailTestBase: the alias is only assigned after the class
        # body executes, so depending on it here is fragile (it would break
        # if the alias were ever removed or reordered). Behavior is
        # unchanged since EmailTestBase is the same class object.
        with self.swap(
            email_services, 'send_email_to_recipients',
            self._send_email_to_recipients):
            super(GenericEmailTestBase, self).run(result=result)

    def setUp(self):
        super(GenericEmailTestBase, self).setUp()
        self._wipe_emails_dict()

    def _wipe_emails_dict(self):
        """Reset email dictionary for a new test."""
        self.emails_dict = collections.defaultdict(list)

    def _send_email_to_recipients(
            self, sender_email, recipient_emails, subject, plaintext_body,
            html_body, bcc=None, reply_to=None, recipient_variables=None):
        """Mocks sending an email to each email in recipient_emails.

        Args:
            sender_email: str. The email address of the sender. This should be
                in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
                'SENDER_EMAIL_ADDRESS'. Must be utf-8.
            recipient_emails: list(str). The email addresses of the
                recipients. Must be utf-8.
            subject: str. The subject line of the email. Must be utf-8.
            plaintext_body: str. The plaintext body of the email. Must be
                utf-8.
            html_body: str. The HTML body of the email. Must fit in a
                datastore entity. Must be utf-8.
            bcc: list(str)|None. Optional argument. List of bcc emails. Must
                be utf-8.
            reply_to: str|None. Optional argument. Reply address formatted
                like “reply+<reply_id>@<incoming_email_domain_name> reply_id
                is the unique id of the sender.
            recipient_variables: dict|None. Optional argument. If batch
                sending requires differentiating each email based on the
                recipient, we assign a unique id to each recipient, including
                info relevant to that recipient so that we can reference it
                when composing the email like so:
                    recipient_variables = {
                        'bob@example.com': {'first': 'Bob', 'id': 1},
                        'alice@example.com': {'first': 'Alice', 'id': 2},
                    }
                    subject = 'Hey, %recipient.first%'
                For more information about this format, see:
                https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending

        Returns:
            bool. Whether the emails are sent successfully.
        """
        bcc_emails = None
        if bcc:
            # A single bcc address is stored as a bare string; multiple
            # addresses are stored as the list itself.
            bcc_emails = bcc[0] if len(bcc) == 1 else bcc
        new_email = EmailMessageMock(
            sender_email, recipient_emails, subject, plaintext_body, html_body,
            bcc=bcc_emails, reply_to=(reply_to if reply_to else None),
            recipient_variables=(
                recipient_variables if recipient_variables else None))
        for recipient_email in recipient_emails:
            self.emails_dict[recipient_email].append(new_email)
        return True

    def _get_sent_email_messages(self, to):
        """Gets messages to a single recipient email.

        Args:
            to: str. The recipient email address.

        Returns:
            list(EmailMessageMock). The list of email messages corresponding
            to that recipient email; empty if nothing was sent to them.
        """
        return self.emails_dict[to] if to in self.emails_dict else []

    def _get_all_sent_email_messages(self):
        """Gets the entire messages dictionary.

        Returns:
            dict(str, list(EmailMessageMock)). The dict keyed by recipient
            email. Each value contains a list of EmailMessageMock objects
            corresponding to that recipient email; in other words, all
            individual emails sent to that specific recipient email.
        """
        return self.emails_dict
# Backward-compatible alias; existing tests refer to EmailTestBase directly.
EmailTestBase = GenericEmailTestBase
class ClassifierTestBase(GenericEmailTestBase):
    """Base class for classifier test classes that need common functions
    related to reading classifier data and mocking the flow of storing the
    trained models through a post request.

    This class is derived from GenericEmailTestBase because the
    TrainedClassifierHandlerTests test suite requires email services test
    functions in addition to the classifier functions defined below.
    """

    def post_blob(self, url, payload, expected_status_int=200):
        """Post a BLOB object to the server; return the received object.

        Note that this method should only be used for
        classifier.TrainedClassifierHandler handler and for no one else. The
        reason being, we don't have any general mechanism for security for
        transferring binary data. TrainedClassifierHandler implements a
        specific mechanism which is restricted to the handler.

        Args:
            url: str. The URL to which BLOB object in payload should be sent
                through a post request.
            payload: bytes. Binary data which needs to be sent.
            expected_status_int: int. The status expected as a response of post
                request.

        Returns:
            dict. Parsed JSON response received upon invoking the post
            request.
        """
        data = payload
        # 4xx/5xx responses must be flagged so the test app does not raise
        # on the error status.
        expect_errors = False
        if expected_status_int >= 400:
            expect_errors = True
        response = self._send_post_request(
            self.testapp, url, data,
            expect_errors, expected_status_int=expected_status_int,
            headers={'content-type': 'application/octet-stream'})
        # Testapp takes in a status parameter which is the expected status of
        # the response. However this expected status is verified only when
        # expect_errors=False. For other situations we need to explicitly check
        # the status.
        # Reference URL:
        # https://github.com/Pylons/webtest/blob/
        # bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119 .
        self.assertEqual(response.status_int, expected_status_int)
        return self._parse_json_response(response, expect_errors)

    def _get_classifier_data_from_classifier_training_job(
            self, classifier_training_job):
        """Retrieves classifier training job from GCS using metadata stored in
        classifier_training_job.

        Args:
            classifier_training_job: ClassifierTrainingJob. Domain object
                containing metadata of the training job which is used to
                retrieve the trained model.

        Returns:
            FrozenModel. Protobuf object containing classifier data.
        """
        filename = classifier_training_job.classifier_data_filename
        file_system_class = fs_services.get_entity_file_system_class()
        fs = fs_domain.AbstractFileSystem(file_system_class(
            feconf.ENTITY_TYPE_EXPLORATION, classifier_training_job.exp_id))
        # The stored model is zlib-compressed; decompress before parsing the
        # protobuf bytes.
        classifier_data = utils.decompress_from_zlib(fs.get(filename))
        classifier_data_proto = text_classifier_pb2.TextClassifierFrozenModel()
        classifier_data_proto.ParseFromString(classifier_data)
        return classifier_data_proto
class FunctionWrapper:
    """A utility for building function wrappers. Create a subclass and
    override pre_call_hook, post_call_hook, or both, to observe calls to the
    wrapped callable. See those methods for more info.
    """

    def __init__(self, func):
        """Creates a new FunctionWrapper instance.

        Args:
            func: a callable, or data descriptor. If it's a descriptor, then
                __get__ should return a bound method. For example, func can
                be a function, a method, a static or class method, but not a
                @property.
        """
        self._func = func
        self._instance = None

    def __call__(self, *args, **kwargs):
        """Invokes the wrapped callable, running pre_call_hook before the
        call and post_call_hook after it, and returns the call's result.
        """
        call_args = list(args)
        if self._instance is not None:
            call_args.insert(0, self._instance)
        args_dict = inspect.getcallargs(self._func, *call_args, **kwargs)
        self.pre_call_hook(args_dict)
        result = self._func(*call_args, **kwargs)
        self.post_call_hook(args_dict, result)
        return result

    def __get__(self, instance, owner):
        # __get__ is implemented so the wrapper can remember the instance
        # self._func was bound to. See the following SO answer:
        # https://stackoverflow.com/a/22555978/675311
        # NOTE(review): the instance is cached on the wrapper object itself,
        # so one wrapper shared by several instances only remembers the most
        # recently accessed one.
        self._instance = instance
        return self

    def pre_call_hook(self, args):
        """Override this to do tasks that should be executed before the
        actual function call.

        Args:
            args: list(*). Set of arguments that the function accepts.
        """
        pass

    def post_call_hook(self, args, result):
        """Override this to do tasks that should be executed after the actual
        function call.

        Args:
            args: list(*). Set of arguments that the function accepts.
            result: *. Result returned from the function.
        """
        pass
class CallCounter(FunctionWrapper):
    """A function wrapper that keeps track of how often the function is
    called. Note that the counter is incremented before each call, so it is
    also increased when the function raises an exception.
    """

    def __init__(self, f):
        """Counts the number of times the given function has been called. See
        FunctionWrapper for arguments.
        """
        super(CallCounter, self).__init__(f)
        self._num_calls = 0

    @property
    def times_called(self):
        """The number of invocations of the wrapped function so far.

        Returns:
            int. The number of times the wrapped function has been called.
        """
        return self._num_calls

    def pre_call_hook(self, args):
        """Bumps the call counter. Runs before every wrapped call, including
        calls that go on to raise an exception.

        Args:
            args: list(*). Set of arguments that the function accepts.
        """
        self._num_calls += 1
class FailingFunction(FunctionWrapper):
    """A function wrapper that makes a function fail, raising a given
    exception. It can be set to succeed after a given number of calls.
    """

    # Sentinel value meaning "never succeed".
    INFINITY = 'infinity'

    def __init__(self, f, exception, num_tries_before_success):
        """Create a new Failing function.

        Args:
            f: func. See FunctionWrapper.
            exception: Exception. The exception to be raised.
            num_tries_before_success: int. The number of times to raise an
                exception, before a call succeeds. If this is 0, all calls
                will succeed; if it is FailingFunction.INFINITY, all calls
                will fail.
        """
        super(FailingFunction, self).__init__(f)
        self._exception = exception
        self._num_tries_before_success = num_tries_before_success
        self._always_fail = (
            num_tries_before_success == FailingFunction.INFINITY)
        self._times_called = 0
        if not self._always_fail and num_tries_before_success < 0:
            raise ValueError(
                'num_tries_before_success should either be an '
                'integer greater than or equal to 0, '
                'or FailingFunction.INFINITY')

    def pre_call_hook(self, args):
        """Runs before each wrapped call and raises the configured exception
        until enough attempts have been made.

        Args:
            args: list(*). Set of arguments this function accepts.
        """
        self._times_called += 1
        if self._always_fail:
            raise self._exception
        if self._times_called <= self._num_tries_before_success:
            raise self._exception
| 41.360765 | 114 | 0.625457 |
acea85d97e653e643be3c475704748ceb20a43d3 | 3,827 | py | Python | tests/test_simulated_module_offline.py | robertopreste/HmtNote | 0f2c0f684a45c0087cabc3cb15f61803fac7daf1 | [
"MIT"
] | 11 | 2019-04-11T07:06:41.000Z | 2021-03-22T09:13:40.000Z | tests/test_simulated_module_offline.py | robertopreste/HmtNote | 0f2c0f684a45c0087cabc3cb15f61803fac7daf1 | [
"MIT"
] | 64 | 2019-03-04T11:18:25.000Z | 2022-03-31T23:03:01.000Z | tests/test_simulated_module_offline.py | robertopreste/HmtNote | 0f2c0f684a45c0087cabc3cb15f61803fac7daf1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by Roberto Preste
import os
import pytest
from hmtnote import annotate, dump
# Fixture locations: DATADIR holds the input VCF and the files the tests
# write; BASE_DIR is the repository root (two levels above this file),
# where hmtnote drops its dump file.
DATADIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
SIMULATED_VCF = os.path.join(DATADIR, "simulated.vcf")  # annotation input
TEST_VCF = os.path.join(DATADIR, "test.vcf")  # annotated VCF output
TEST_CSV = os.path.join(DATADIR, "test.csv")  # annotated CSV output
BASE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestSimulatedModuleOffline:
    """Offline-annotation tests for the hmtnote module API.

    Each test annotates the simulated VCF with a different option set and
    compares the produced VCF and CSV against the expected outputs supplied
    by pytest fixtures (file objects opened on the expected files).
    """

    def _annotate_and_compare(self, expected_vcf, expected_csv,
                              **annotate_kwargs):
        """Run hmtnote.annotate offline (with CSV output) plus any extra
        keyword options, then assert that both output files match the
        expected fixture file objects."""
        annotate(SIMULATED_VCF, TEST_VCF, offline=True, csv=True,
                 **annotate_kwargs)
        with open(TEST_VCF) as out:
            assert expected_vcf.read() == out.read()
        with open(TEST_CSV) as out:
            assert expected_csv.read() == out.read()

    def test_simulated_module_dump(self):
        """Test the dump command of the module."""
        dump()
        assert os.path.isfile(os.path.join(BASE_DIR, "hmtnote_dump.pkl"))

    def test_simulated_module_annotation_offline(self,
                                                 simulated_ann_offline_vcf,
                                                 simulated_ann_offline_csv):
        """Test the full offline annotation of the simulated VCF file using
        the module."""
        self._annotate_and_compare(simulated_ann_offline_vcf,
                                   simulated_ann_offline_csv)

    def test_simulated_module_annotation_offline_basic(self,
                                                       simulated_ann_offline_basic_vcf,
                                                       simulated_ann_offline_basic_csv):
        """Test the basic offline annotation of the simulated VCF file using
        the module."""
        self._annotate_and_compare(simulated_ann_offline_basic_vcf,
                                   simulated_ann_offline_basic_csv,
                                   basic=True)

    def test_simulated_module_annotation_offline_crossref(self,
                                                          simulated_ann_offline_crossref_vcf,
                                                          simulated_ann_offline_crossref_csv):
        """Test the cross-reference offline annotation of the simulated VCF
        file using the module."""
        self._annotate_and_compare(simulated_ann_offline_crossref_vcf,
                                   simulated_ann_offline_crossref_csv,
                                   crossref=True)

    def test_simulated_module_annotation_offline_variab(self,
                                                        simulated_ann_offline_variab_vcf,
                                                        simulated_ann_offline_variab_csv):
        """Test the variability offline annotation of the simulated VCF file
        using the module."""
        self._annotate_and_compare(simulated_ann_offline_variab_vcf,
                                   simulated_ann_offline_variab_csv,
                                   variab=True)

    def test_simulated_module_annotation_offline_predict(self,
                                                         simulated_ann_offline_predict_vcf,
                                                         simulated_ann_offline_predict_csv):
        """Test the predictions offline annotation of the simulated VCF file
        using the module."""
        self._annotate_and_compare(simulated_ann_offline_predict_vcf,
                                   simulated_ann_offline_predict_csv,
                                   predict=True)
| 49.701299 | 94 | 0.61249 |
acea860d10e3974ea219fab7461a76dade4c5cd1 | 2,242 | py | Python | pycode/pre_alpha.py | micovey/Quantify | c502480105b29259a638af252f9b35e5957ed16a | [
"MIT"
] | 8 | 2019-11-19T07:19:11.000Z | 2021-07-26T03:50:39.000Z | pycode/pre_alpha.py | micovey/Quantify | c502480105b29259a638af252f9b35e5957ed16a | [
"MIT"
] | 1 | 2019-11-19T06:27:50.000Z | 2019-11-19T06:28:52.000Z | pycode/pre_alpha.py | micovey/Quantify | c502480105b29259a638af252f9b35e5957ed16a | [
"MIT"
] | 5 | 2020-01-07T01:50:56.000Z | 2021-11-27T12:06:32.000Z | import pandas as pd
# Rolling AR(2) screen of per-stock alpha factors: for each 30-day window
# whose regression p-values are significant on average, fit an ARMA(2,0)
# model on 'alpha_c', keep windows with a positive smoothed fit, then keep
# the top two stocks per date and write the result to CSV.
from statsmodels.tsa.arima_model import ARMA
import warnings
warnings.filterwarnings("ignore")
import datetime
from arch import arch_model  # GARCH model (used only by the commented-out branch below)
file_place='D:\\'
df_store=pd.read_csv(file_place+'Quantify\\factor\\factor_alpha_2019.csv')
code=[]
p=[]
pre_a=[]
cal_len2=30  # rolling window length (rows per fit)
date1=[]
coeff=[]
for group in df_store.groupby(['code']):
    code_store=group[0]
    # print(group[0])  # print the stock code to show progress
    g=group[1]  # rows belonging to a single stock
    roll_len2 = len(g) - cal_len2  # number of rolling windows for this stock
    for i in range(0, roll_len2+1):
        g1 = g.iloc[0 + i:cal_len2 + i, :]
        # Only fit when the factor regression is significant on average.
        if g1["p"].mean()<0.05:
            model = ARMA(g1['alpha_c'], order=(2,0,0))
            result_arma = model.fit(disp=-1, method='css')
            co=result_arma.params.iloc[1]
            predict = result_arma.predict()
            # Average of the first three in-sample fitted values.
            # NOTE(review): this is not an out-of-sample forecast — confirm
            # the intent before relying on 'pre_a'.
            predict = (predict.iloc[0]+predict.iloc[1]+predict.iloc[2])/3
            # am = arch_model(g1['alpha_c'], p=1, o=0, q=1, dist='StudentsT')
            # res = am.fit(update_freq=0)
            # forecasts = res.forecast()
            # predict = forecasts.mean.iloc[-1,:]
            # predict = predict.iloc[0]
            # print(predict)
            if predict>0:
                # if predict<0:
                print(group[0])
                code.append(group[0])
                # coeff.append(co)
                p.append(g1['p'].mean())
                pre_a.append(predict)
                # Date of the last row in the window (column index 1).
                date1.append(g1.iloc[cal_len2-1, 1])
            else:
                continue
df_alpha = pd.DataFrame({'code': code,'date':date1,'p': p, 'pre_a': pre_a})#,'coeff': coeff})
# Round-trip through datetime to normalise the date strings to %Y/%m/%d.
df_alpha['date'] = [datetime.datetime.strptime(x, '%Y/%m/%d') for x in df_alpha['date']]
df_alpha['date'] = [datetime.datetime.strftime(x, '%Y/%m/%d') for x in df_alpha['date']]
df_alpha=df_alpha.sort_values(by=['date','pre_a'],ascending=(True,False))
#df_alpha=df_alpha.sort_values(by=['date','pre_a'],ascending=(True,True))
alpha = pd.DataFrame(columns=['code','date','p', 'pre_a'])#,'coeff'])
for group in df_alpha.groupby(['date']):
    g=group[1]
    # Keep at most the two rows with the highest 'pre_a' per date.
    g=[ g.iloc[0:2,:] if len(g)>=2 else g ]
    alpha =alpha.append(g)
print(alpha)
alpha.to_csv(file_place + 'Quantify\\factor\\factor_alpha_predict.csv', header=True, index=False)
acea8639b18c278a2469a736b8558848822041e8 | 1,117 | py | Python | tranzact/types/blockchain_format/classgroup.py | Tranzact-Network/tranzact-blockchain | 692362155e46563aa70559123b93bc9379cac111 | [
"Apache-2.0"
] | 8 | 2021-09-19T18:57:49.000Z | 2022-02-09T04:32:50.000Z | tranzact/types/blockchain_format/classgroup.py | Tranzact-Network/tranzact-blockchain | 692362155e46563aa70559123b93bc9379cac111 | [
"Apache-2.0"
] | 3 | 2021-09-29T10:56:48.000Z | 2021-11-19T00:09:28.000Z | tranzact/types/blockchain_format/classgroup.py | Tranzact-Network/tranzact-blockchain | 692362155e46563aa70559123b93bc9379cac111 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from tranzact.consensus.constants import ConsensusConstants
from tranzact.types.blockchain_format.sized_bytes import bytes100
from tranzact.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class ClassgroupElement(Streamable):
    """
    Represents a classgroup element (a,b,c) where a, b, and c are 512 bit signed integers. However this is using
    a compressed representation. VDF outputs are a single classgroup element. VDF proofs can also be one classgroup
    element (or multiple).
    """

    # Fixed 100-byte compressed serialization of the element.
    data: bytes100

    @staticmethod
    def from_bytes(data) -> "ClassgroupElement":
        # Right-pad short inputs with zero bytes up to the fixed 100-byte
        # size expected by bytes100.
        if len(data) < 100:
            data += b"\x00" * (100 - len(data))
        return ClassgroupElement(bytes100(data))

    @staticmethod
    def get_default_element() -> "ClassgroupElement":
        # Bit 3 in the first byte of serialized compressed form indicates if
        # it's the default generator element.
        return ClassgroupElement.from_bytes(b"\x08")

    @staticmethod
    def get_size(constants: ConsensusConstants):
        # Size in bytes of the compressed representation (constant;
        # `constants` is accepted for interface uniformity but unused).
        return 100
| 32.852941 | 115 | 0.71889 |
acea864732605b6d5e6b20d4ca685d5cfc98557e | 1,803 | py | Python | codes/show_result.py | baiyuang/Tensorflow-Computer-Vision-Tutorial | 09db553dc23f0edeb8a9b0d51c13d0a27290016a | [
"MIT"
] | 1 | 2021-06-02T01:09:32.000Z | 2021-06-02T01:09:32.000Z | codes/show_result.py | Rookie-Eric/Tensorflow-Computer-Vision-Tutorial | 09db553dc23f0edeb8a9b0d51c13d0a27290016a | [
"MIT"
] | null | null | null | codes/show_result.py | Rookie-Eric/Tensorflow-Computer-Vision-Tutorial | 09db553dc23f0edeb8a9b0d51c13d0a27290016a | [
"MIT"
] | 1 | 2020-07-23T08:17:20.000Z | 2020-07-23T08:17:20.000Z | import matplotlib.pyplot as plt
from PIL import Image
import os
def deep_dream():
    """Tile the saved DeepDream renders (2 source images x 6 channels of
    layer mixed4d_3x3_bottleneck_pre_relu) into one 6x2 summary figure and
    save it as '../results/sum_deepdream.png'."""
    contents = ['morvan1', 'sky']
    channels = [60, 61, 62, 121, 122, 123]
    plt.figure(1, figsize=(4, 12))
    for i in range(6):
        for j in range(2):
            # Row i = channel, column j = source image.
            plt.subplot(6, 2, 2*i+j+1)
            path = '../results/%s_mixed4d_3x3_bottleneck_pre_relu_%i.jpeg' % (contents[j], channels[i])
            image = Image.open(path)
            plt.imshow(image)
            plt.axis('off')
    # Remove all margins so the tiles touch.
    plt.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
    plt.savefig('../results/sum_deepdream.png', dpi=500)
def style_transfer():
    """Build a 5x3 grid — content image | style image | styled result — for
    the five style-transfer examples and save it as
    '../results/sum_style_transfer.png'."""
    # plotting
    content = Image.open('../example_images/morvan2.jpg').resize((400, 400))
    plt.figure(1, figsize=(4, 7))
    for i in range(5):
        for j in range(3):
            plt.subplot(5, 3, 3*i+j+1)
            if j == 0:
                # First column: the shared content image.
                plt.imshow(content)
            elif j == 1:
                # Second column: the i-th style image.
                style = Image.open('../example_images/style%i.jpg' % (i+1)).resize((400, 400))
                plt.imshow(style)
            else:
                # Third column: the stylised output for this style.
                styled = Image.open('../results/morvan2_style%i.jpeg' % (i+1)).resize((400, 400))
                plt.imshow(styled)
            plt.axis('off')
    # Remove all margins so the tiles touch.
    plt.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
    plt.savefig('../results/sum_style_transfer.png', dpi=500)
    # plt.show()
def dcgan():
    """Assemble the saved DCGAN training snapshots into an animated GIF.

    Reads every '../results/dcgan*.png' frame and writes
    '../results/dcgan.gif' at 5 frames per second.
    """
    import imageio
    # os.listdir returns entries in arbitrary filesystem order; sort the
    # filenames so the animation frames play in the order the snapshots
    # were generated.
    files = sorted(
        file for file in os.listdir('../results')
        if file.startswith('dcgan') and file.endswith('.png'))
    images = [imageio.imread(os.path.join('../results', file))
              for file in files]
    imageio.mimsave('../results/dcgan.gif', images, fps=5)
if __name__ == '__main__':
    # Only the GIF assembly runs by default; uncomment the calls below to
    # regenerate the summary figures as well.
    # deep_dream()
    # style_transfer()
    dcgan()
acea876930ba9fdddd344b3bdfe82cbbae510d24 | 2,309 | py | Python | hhs_oauth_server/settings/themes.py | dtisza1/bluebutton-web-server | 6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb | [
"Apache-2.0"
] | 25 | 2017-12-10T00:48:31.000Z | 2022-03-25T01:29:13.000Z | hhs_oauth_server/settings/themes.py | dtisza1/bluebutton-web-server | 6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb | [
"Apache-2.0"
] | 298 | 2017-12-05T05:53:32.000Z | 2022-03-21T19:29:03.000Z | hhs_oauth_server/settings/themes.py | dtisza1/bluebutton-web-server | 6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb | [
"Apache-2.0"
] | 31 | 2017-12-04T16:01:12.000Z | 2021-09-26T22:34:55.000Z | from getenv import env
""" theme selection """
DEFAULT_THEME = 0
THEME_SELECTED = DEFAULT_THEME
THEMES = {
0: {
'NAME': 'Default-Readable',
'PATH': 'theme/default/',
'INFO': 'Readable san-serif base theme',
},
3: {
'NAME': 'Readable',
'PATH': 'theme/readable/',
'INFO': 'Easy to read Bootswatch Theme',
},
4: {
'NAME': 'Cerulean',
'PATH': 'theme/cerulean/',
'INFO': 'Blue Bootswatch theme theme',
},
5: {
'NAME': 'Cosmo',
'PATH': 'theme/cosmo/',
'INFO': 'Cosmo bootswatch theme',
},
6: {
'NAME': 'Cyborg',
'PATH': 'theme/cyborg/',
'INFO': 'Cyborg bootswatch theme',
},
7: {
'NAME': 'Darkly',
'PATH': 'theme/darkly/',
'INFO': 'Darkly bootswatch theme',
},
8: {
'NAME': 'Flatly',
'PATH': 'theme/flatly/',
'INFO': 'Flatly bootswatch theme',
},
9: {
'NAME': 'Journal',
'PATH': 'theme/journal/',
'INFO': 'Journal bootswatch theme',
},
10: {
'NAME': 'Lumen',
'PATH': 'theme/lumen/',
'INFO': 'Lumen bootswatch theme',
},
11: {
'NAME': 'Paper',
'PATH': 'theme/paper/',
'INFO': 'Paper bootswatch theme',
},
12: {
'NAME': 'Sandstone',
'PATH': 'theme/sandstone/',
'INFO': 'Sandstone bootswatch theme',
},
13: {
'NAME': 'Simplex',
'PATH': 'theme/simplex/',
'INFO': 'Simplex bootswatch theme',
},
14: {
'NAME': 'Slate',
'PATH': 'theme/slate/',
'INFO': 'Slate bootswatch theme',
},
15: {
'NAME': 'Spacelab',
'PATH': 'theme/spacelab/',
'INFO': 'Spacelab bootswatch theme',
},
16: {
'NAME': 'Superhero',
'PATH': 'theme/superhero/',
'INFO': 'Superhero bootswatch theme',
},
17: {
'NAME': 'United',
'PATH': 'theme/united/',
'INFO': 'United bootswatch theme',
},
18: {
'NAME': 'Yeti',
'PATH': 'theme/yeti/',
'INFO': 'Yeti bootswatch theme',
},
'SELECTED': env('DJANGO_SELECTED_THEME', DEFAULT_THEME),
}
# Apply the configured theme if it names a known entry.  When the value
# comes from the DJANGO_SELECTED_THEME environment variable it arrives as a
# *string*, while THEMES is keyed by int — coerce before the membership
# test (previously a string value silently fell back to the default theme).
_selected = THEMES['SELECTED']
try:
    _selected = int(_selected)
except (TypeError, ValueError):
    pass
if _selected in THEMES:
    THEME_SELECTED = _selected
acea8829111393a7451709522ca499a8e7e19eb1 | 6,447 | py | Python | Course_2/Week_02/binary_tree.py | KnightZhang625/Stanford_Algorithm | 7dacbbfa50e7b0e8380cf500df24af60cb9f42df | [
"Apache-2.0"
] | null | null | null | Course_2/Week_02/binary_tree.py | KnightZhang625/Stanford_Algorithm | 7dacbbfa50e7b0e8380cf500df24af60cb9f42df | [
"Apache-2.0"
] | 1 | 2020-07-16T08:03:22.000Z | 2020-07-16T08:09:34.000Z | Course_2/Week_02/binary_tree.py | KnightZhang625/Stanford_Algorithm | 7dacbbfa50e7b0e8380cf500df24af60cb9f42df | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
class Node(object):
    """A binary-search-tree node holding a comparable value.

    Nodes compare by their stored value.  `size` counts the nodes in the
    subtree rooted here and is maintained by the tree operations.
    """

    def __init__(self, value):
        self.value = value
        self.p_parent = None  # parent node; None for the root
        self.p_left = None    # left child
        self.p_right = None   # right child
        self.size = 1         # nodes in the subtree rooted at this node

    def __str__(self):
        return str(self.value)

    def __eq__(self, other):
        # Defer to the other operand for non-Node types instead of raising
        # AttributeError on `other.value`.
        if not isinstance(other, Node):
            return NotImplemented
        return self.value == other.value

    def __lt__(self, other):
        if not isinstance(other, Node):
            return NotImplemented
        return self.value < other.value
class BinaryTree(object):
    """Binary search tree of Node objects, rejecting duplicate values.

    Every node tracks its subtree size, which `select` uses for 1-based
    order statistics; `updateSize` recomputes all sizes after a delete.
    """

    def __init__(self, root):
        # The tree must be seeded with an existing Node instance.
        if not isinstance(root, Node):
            raise ValueError
        self.root = root

    def insert(self, value):
        """Insert `value`; raises ValueError if it is already present."""
        self._insert(self.root, value)

    def _insert(self, node, value):
        # Recursive insertion helper; bumps `size` on every node along the
        # descent so subtree sizes stay correct.
        if node.value == value:
            raise ValueError('\'{}\' exists.'.format(value))
        if value < node.value:
            if node.p_left is None:
                new_node = Node(value)
                new_node.p_parent = node
                node.p_left = new_node
                node.size += 1
            else:
                node.size += 1
                self._insert(node.p_left, value)
        else:
            if node.p_right is None:
                new_node = Node(value)
                new_node.p_parent = node
                node.p_right = new_node
                node.size += 1
            else:
                node.size += 1
                self._insert(node.p_right, value)

    def search(self, value):
        """Return a (found, node) tuple; node is None when not found."""
        return self._search(value, self.root)

    def _search(self, value, node):
        if node is None:
            return (False, None)
        elif node.value == value:
            return (True, node)
        elif value < node.value:
            return self._search(value, node.p_left)
        else:
            return self._search(value, node.p_right)

    def minimum(self):
        """Return the node holding the smallest value."""
        return self._minimum(self.root)

    def _minimum(self, node):
        # Leftmost descendant.
        if node.p_left is None:
            return node
        return self._minimum(node.p_left)

    def maximum(self):
        """Return the node holding the largest value."""
        return self._maximum(self.root)

    def _maximum(self, node):
        # Rightmost descendant.
        if node.p_right is None:
            return node
        return self._maximum(node.p_right)

    def next_node(self, value):
        """Return the in-order successor of the node holding `value`.

        Raises ValueError when `value` is not in the tree.
        """
        is_exist, node = self.search(value)
        if is_exist:
            if node.p_right is not None:
                return self._minimum(node.p_right)
            else:
                return self._next_node(node, node.p_parent)
        else:
            raise ValueError('\'{}\' does not exist.'.format(value))

    def _next_node(self, base_node, node):
        # Walk up until an ancestor larger than base_node is found.
        # NOTE(review): returns the string 'Not Exist.' (not None) when
        # there is no successor — inconsistent with the Node return type;
        # verify callers.
        if node is None:
            return 'Not Exist.'
        if node > base_node:
            return node
        return self._next_node(base_node, node.p_parent)

    def previous_node(self, value):
        """Return the in-order predecessor of the node holding `value`.

        Raises ValueError when `value` is not in the tree.
        """
        is_exist, node = self.search(value)
        if is_exist:
            if node.p_left is not None:
                return self._maximum(node.p_left)
            else:
                return self._previous_node(node, node.p_parent)
        else:
            raise ValueError('\'{}\' does not exist.'.format(value))

    def _previous_node(self, base_node, node):
        # Mirror of _next_node: first smaller ancestor, or 'Not Exist.'.
        if node is None:
            return 'Not Exist.'
        if node < base_node:
            return node
        return self._previous_node(base_node, node.p_parent)

    def delete(self, value):
        """Remove the node holding `value`, then rebuild subtree sizes.

        NOTE(review): every unlink helper below dereferences
        node.p_parent, so deleting the root (whose p_parent is None)
        fails — confirm whether that is intended.
        """
        is_exist, node = self.search(value)
        if is_exist:
            if node.p_left is None and node.p_right is None:
                self._deleteNoChild(node)
            elif node.p_left is None or node.p_right is None:
                self._deleteOneChild(node)
            else:
                self._deleteTwoChildren(node)
        else:
            raise ValueError('\'{}\' not exist.'.format(value))
        # Sizes are recomputed for the whole tree (O(n)) after the unlink.
        self.updateSize(self.root)

    def _deleteNoChild(self, node):
        # Leaf: unhook it from whichever side of the parent it occupies.
        parent_node = node.p_parent
        if node < parent_node:
            parent_node.p_left = None
        else:
            parent_node.p_right = None
        node.p_parent = None

    def _deleteOneChild(self, node):
        # Splice the single child into the deleted node's place.
        # NOTE(review): swap_node.p_parent is never re-pointed at
        # parent_node here — looks like a bug; verify.
        parent_node = node.p_parent
        swap_node = node.p_right if node.p_right is not None else node.p_left
        if node > parent_node:
            parent_node.p_right = swap_node
        else:
            parent_node.p_left = swap_node
        node.p_parent, node.p_left, node.p_right = None, None, None

    def _deleteTwoChildren(self, node):
        # Replace the node with its in-order predecessor (the maximum of
        # its left subtree).
        prev_node = self.previous_node(node.value)
        parent_node, left_node, right_node = node.p_parent, node.p_left, node.p_right
        if node < parent_node:
            parent_node.p_left = prev_node
        else:
            parent_node.p_right = prev_node
        # Re-parent the children under the predecessor (unless the
        # predecessor *is* the left child).
        if prev_node != left_node:
            left_node.p_parent = prev_node
        right_node.p_parent = prev_node
        prev_node_parent, prev_node_left = prev_node.p_parent, prev_node.p_left
        if prev_node != left_node:
            prev_node.p_left = left_node
        prev_node.p_right = right_node
        # Move the predecessor's own left subtree into the slot it vacated.
        if prev_node_left is not None:
            if prev_node != left_node:
                prev_node_parent.p_right = prev_node_left
            else:
                prev_node.p_left = prev_node_left
        else:
            prev_node_parent.p_right = None
        # NOTE(review): prev_node.p_parent is never re-pointed at
        # parent_node in this method — verify.
        node.p_parent, node.p_left, node.p_right = None, None, None

    def updateSize(self, node):
        """Recursively recompute and return the subtree size of `node`."""
        if node.p_left is None and node.p_right is None:
            node.size = 1
            return 1
        elif node.p_left is None:
            node.size = self.updateSize(node.p_right) + 1
            return node.size
        elif node.p_right is None:
            node.size = self.updateSize(node.p_left) + 1
            return node.size
        else:
            node.size = self.updateSize(node.p_left) + self.updateSize(node.p_right) + 1
            return node.size

    def select(self, i):
        """Return the i-th smallest node (1-based) via subtree sizes."""
        return self._select(self.root, i)

    def _select(self, node, i):
        left_node = node.p_left
        right_node = node.p_right
        # a = number of values smaller than this node in its subtree.
        a = 0 if left_node is None else left_node.size
        if a == i - 1:
            return node
        elif a >= i:
            return self._select(left_node, i)
        else:
            return self._select(right_node, i - a - 1)

    def display(self):
        """Print values in ascending (in-order) order with subtree sizes."""
        self._display(self.root)

    def _display(self, node):
        if node is None:
            return
        self._display(node.p_left)
        print(node, ' Size: ', node.size)
        self._display(node.p_right)
if __name__ == '__main__':
    # Manual smoke test: build the tree sketched below, then exercise
    # search/min/max/predecessor/successor, delete, and select.
    """
    10
    3 20
    1 12 30
    """
    print('*** Insert ###')
    binary_tree = BinaryTree(Node(10))
    binary_tree.insert(3)
    binary_tree.insert(20)
    binary_tree.insert(1)
    binary_tree.insert(30)
    binary_tree.insert(12)
    binary_tree.insert(11)
    binary_tree.insert(15)
    binary_tree.insert(25)
    binary_tree.insert(35)
    binary_tree.insert(14)
    binary_tree.display()
    print('*** Search ###')
    print(binary_tree.search(30))
    print('Minimum: ', binary_tree.minimum())
    print('Maximum: ', binary_tree.maximum())
    print(binary_tree.previous_node(12))
    print(binary_tree.next_node(10))
    print('*** Delete ###')
    binary_tree.delete(12)
    binary_tree.display()
    print('*** Select ***')
    print(binary_tree.select(5))
acea88b43b96648d2a71cb9b3b2d4ca899543799 | 390 | py | Python | amquery/core/biom/_biom.py | nromashchenko/nir | 4b0c91d670462ca33a9b224740a2977e99546440 | [
"MIT"
] | 3 | 2016-09-13T16:31:05.000Z | 2016-09-14T06:36:44.000Z | amquery/core/biom/_biom.py | nromashchenko/nir | 4b0c91d670462ca33a9b224740a2977e99546440 | [
"MIT"
] | 36 | 2016-09-14T06:26:20.000Z | 2017-05-04T19:11:30.000Z | amquery/core/biom/_biom.py | nromashchenko/amquery | 4b0c91d670462ca33a9b224740a2977e99546440 | [
"MIT"
] | null | null | null | from biom import load_table
from biom.util import biom_open
def merge_biom_tables(master_fp, additional_fp):
"""
:param master_fp: str
:param additional_fp: str
:return: None
"""
master = load_table(master_fp)
master = master.merge(load_table(additional_fp))
with biom_open(master_fp, 'w') as biom_file:
master.to_hdf5(biom_file, "amquery", True)
| 24.375 | 52 | 0.7 |
acea89289063ebe27cdacaaee4c59a4ea28d5024 | 28,408 | py | Python | test/functional/rpc_fundrawtransaction.py | mrhappy2018/test-1 | f9bb4f8d9b796ef9acf0e6f5f6c0aad6e999c57b | [
"MIT"
] | null | null | null | test/functional/rpc_fundrawtransaction.py | mrhappy2018/test-1 | f9bb4f8d9b796ef9acf0e6f5f6c0aad6e999c57b | [
"MIT"
] | null | null | null | test/functional/rpc_fundrawtransaction.py | mrhappy2018/test-1 | f9bb4f8d9b796ef9acf0e6f5f6c0aad6e999c57b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
    """Return the first entry of *listunspent* whose 'amount' equals *amount*.

    Raises AssertionError when no matching unspent output exists.
    """
    match = next(
        (utx for utx in listunspent if utx['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(BitcoinTestFramework):
    def __init__(self):
        # Run on a fresh chain (no cached blocks) with 4 nodes.
        # NOTE(review): setup_clean_chain/num_nodes semantics come from
        # BitcoinTestFramework — confirm against the framework version.
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4
    def setup_network(self, split=False):
        """Start all 4 nodes and connect them 0-1, 1-2, 0-2 and 0-3
        (no split topology; `split` is accepted but ignored)."""
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        connect_nodes_bi(self.nodes,0,3)
        self.is_network_split=False
        self.sync_all()
def run_test(self):
print("Mining blocks...")
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enought inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'foo': 'bar'})
raise AssertionError("Accepted invalid option foo")
except JSONRPCException as e:
assert("Unexpected key foo" in e.error['message'])
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': 'foobar'})
raise AssertionError("Accepted invalid cotton address")
except JSONRPCException as e:
assert("changeAddress must be a valid cotton address" in e.error['message'])
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 2})
except JSONRPCException as e:
assert('changePosition out of bounds' == e.error['message'])
else:
assert(False)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0];
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.2)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
# Entry point: run this fundrawtransaction functional test directly
# from the command line (the framework's main() handles node setup,
# argument parsing and teardown).
if __name__ == '__main__':
    RawTransactionsTest().main()
| 42.718797 | 223 | 0.562377 |
acea8a0cadd68ece1d0d8efede2003201e2e0ec9 | 2,193 | py | Python | python/orca/src/bigdl/orca/ray/utils.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | [
"Apache-2.0"
] | null | null | null | python/orca/src/bigdl/orca/ray/utils.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | [
"Apache-2.0"
] | null | null | null | python/orca/src/bigdl/orca/ray/utils.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from bigdl.dllib.utils.log4Error import invalidInputError
def to_list(input):
    """Normalize *input* into a list.

    A list or tuple is copied into a new list; any other value is
    wrapped as a single-element list.
    """
    is_sequence = isinstance(input, (list, tuple))
    return list(input) if is_sequence else [input]
def resource_to_bytes(resource_str):
    """Convert a resource size string to an integer number of bytes.

    The string must be a whole number followed by a unit suffix:
    'b' (bytes), 'k' (kilobytes, x1000), 'm' (megabytes, x1000**2) or
    'g' (gigabytes, x1000**3). Matching is case-insensitive, e.g.
    '50b', '100K', '250m', '30g'. Decimal multiples (powers of 1000)
    are used, not binary (powers of 1024).

    :param resource_str: size string to parse; falsy values (None, '')
           are returned unchanged.
    :return: the size in bytes as an int, or the falsy input itself.

    Calls invalidInputError (which raises) for fractional values,
    unsupported suffixes, or strings with no leading integer.
    """
    if not resource_str:
        # Preserve falsy inputs (None / empty string) unchanged for
        # callers that treat "not configured" specially.
        return resource_str
    lowered = resource_str.lower()
    # Reject fractional sizes such as '1.5g' up front; the integer
    # parse below would otherwise silently truncate to '1'.
    if re.match("([0-9]+\\.[0-9]+)([a-z]+)?", lowered):
        invalidInputError(False,
                          "Fractional values are not supported. Input"
                          " was: {}".format(resource_str))
    matched = re.match("([0-9]+)([a-z]+)?", lowered)
    try:
        value = int(matched.group(1))
        postfix = matched.group(2)
    except Exception:
        # matched is None (no leading digits): report the accepted
        # format instead of leaking an AttributeError.
        invalidInputError(False,
                          "Size must be specified as bytes(b),"
                          "kilobytes(k), megabytes(m), gigabytes(g). "
                          "E.g. 50b, 100k, 250m, 30g")
    # BUGFIX: validate the suffix OUTSIDE the try block. Previously the
    # "Not supported type" error was raised inside it, so the broad
    # `except Exception` swallowed it and re-raised the misleading
    # generic format message instead.
    multipliers = {'b': 1, 'k': 1000, 'm': 1000 ** 2, 'g': 1000 ** 3}
    if postfix not in multipliers:
        invalidInputError(False,
                          "Not supported type: {}".format(resource_str))
    return value * multipliers[postfix]
def is_local(sc):
    """Return True if the given SparkContext is running in local mode.

    Local mode is indicated by a "spark.master" value of exactly
    "local" or of the form "local[...]" (e.g. "local[4]").
    """
    master_url = sc.getConf().get("spark.master")
    if master_url == "local":
        return True
    return master_url.startswith("local[")
def get_parent_pid(pid):
    """Return the parent process id of the process identified by *pid*.

    Uses psutil (imported lazily so the module loads even when psutil
    is absent and this helper is never called).
    """
    import psutil
    return psutil.Process(pid).ppid()
| 32.25 | 91 | 0.603739 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.