hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf1ae4ad92032dbea861b733506dec7943bd9c0 | 4,083 | py | Python | core/domain/takeout_service.py | IMADILKHAN/oppia | 454bf732dfd0087bcc0b8b7cd65d80ba386f4929 | [
"Apache-2.0"
] | 1 | 2022-02-22T09:27:22.000Z | 2022-02-22T09:27:22.000Z | core/domain/takeout_service.py | IMADILKHAN/oppia | 454bf732dfd0087bcc0b8b7cd65d80ba386f4929 | [
"Apache-2.0"
] | null | null | null | core/domain/takeout_service.py | IMADILKHAN/oppia | 454bf732dfd0087bcc0b8b7cd65d80ba386f4929 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to export the data of all user related models from a given
user_id.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import re
from core.domain import takeout_domain
from core.platform import models
(
base_models, collection_models, email_models,
exploration_models, feedback_models, topic_models,
suggestion_models, user_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.collection, models.NAMES.email,
models.NAMES.exploration, models.NAMES.feedback, models.NAMES.topic,
models.NAMES.suggestion, models.NAMES.user])
def get_models_which_should_be_exported():
    """Returns list of models to export.

    Returns:
        list(ndb.Model). List of models whose data should be
        exported.
    """
    exportable_models = []
    for storage_model_class in (
            models.Registry.get_all_storage_model_classes()):
        # Only models whose export policy says they hold user data are
        # included in the takeout payload.
        export_policy = storage_model_class.get_export_policy()
        if export_policy == base_models.EXPORT_POLICY.CONTAINS_USER_DATA:
            exportable_models.append(storage_model_class)
    return exportable_models
def export_data_for_user(user_id):
    """Exports selected models according to model defined export_data functions.
    Args:
        user_id: str. The user_id of the user whose data is being exported.
    Returns:
        dict. Dictionary containing all user data in the following format:
            {
                <MODEL_NAME>_data: <dict of data in format as specified by
                model export policy>
            }
    """
    exported_data = dict()
    models_to_export = get_models_which_should_be_exported()
    for model in models_to_export:
        # Split CamelCase model name into words and drop the trailing
        # 'Model' token (the [:-1]) to build the dict key, e.g.
        # 'UserSettingsModel' -> 'user_settings'.
        split_name = re.findall('[A-Z][^A-Z]*', model.__name__)[:-1]
        # Join the split name with underscores and add _data for final name.
        final_name = ('_').join([x.lower() for x in split_name])
        exported_data[final_name] = model.export_data(user_id)
    # Separate out images. We store the images that need to be separated here
    # as a dictionary mapping tuples to strings. The tuple value indicates the
    # "path" to take to the image in the user's data dictionary, and the string
    # indicates the filename that the exported image will be saved to.
    replacement_instructions = [
        takeout_domain.TakeoutImageReplacementInstruction(
            ('user_settings', 'profile_picture_data_url'),
            'user_settings_profile_picture.png',
            'profile_picture_filename'
        )
    ]
    takeout_image_files = []
    for replacement_instruction in replacement_instructions:
        dictionary_path = replacement_instruction.dictionary_path
        replacement_filename = replacement_instruction.export_filename
        replacement_key = replacement_instruction.new_key
        # Move pointer to the position indicated by the tuple.
        pointer = exported_data
        for key in dictionary_path[:-1]:
            pointer = pointer[key]
        # Swap out data with replacement filename.
        image_key = dictionary_path[-1]
        image_data = pointer[image_key]
        if image_data is not None:
            takeout_image_files.append(
                takeout_domain.TakeoutImage(image_data, replacement_filename))
            pointer[image_key] = replacement_filename
        # Rename the key. NOTE: this also runs when image_data is None, so the
        # key is always renamed even if no image file is emitted.
        pointer[replacement_key] = pointer.pop(image_key)
    return takeout_domain.TakeoutData(exported_data, takeout_image_files)
| 39.259615 | 80 | 0.708058 |
acf1af70bddc353dbab1fa7feb65b316c7a555d8 | 556 | py | Python | ringo/config.py | GemHQ/slackbots | 5966870f302f8e08c801df7f9065e5c62f8f942e | [
"MIT"
] | null | null | null | ringo/config.py | GemHQ/slackbots | 5966870f302f8e08c801df7f9065e5c62f8f942e | [
"MIT"
] | null | null | null | ringo/config.py | GemHQ/slackbots | 5966870f302f8e08c801df7f9065e5c62f8f942e | [
"MIT"
] | null | null | null |
# Slack API token placeholder; replace with a real token before deploying.
API_KEY='FILLMEIN'
# Slack display name -> Slack user ID.
USERS = {'memmaniac': 'U03RJDUUT',
         'James': 'U035KQ55W',
         'elitistbot': 'U03R6U9J7'}
# Canned confirmation replies, ordered roughly friendly -> reluctant;
# ATTITUDES below slices this list, so keep the ordering stable.
CONFS = ['You got it.',
         'Sure thing, broheim.',
         'Totally agree - on it.',
         'Yeah, okay.',
         'If you say so.',
         'Fiiiiiiine.',
         'Really? If you\'re sure.']
# Attitude label -> slice of CONFS used for that tone of voice.
ATTITUDES = {'amicable': CONFS[0:3],
             'indifferent': CONFS[3:5],
             'sassy': CONFS[5:]}
# Help text shown when the bot cannot parse a command.
USAGE = "Usage: adhocbot [info, watch, unwatch] [btc,tbtc:]txhash -- or did you want me to say something clever?"
| 24.173913 | 113 | 0.530576 |
acf1af92e66ea64a605d50c40f8627227e789280 | 8,897 | py | Python | User.py | KeJunMao/CUSX-course-report-query | 2917a4831ab96fc0baf5844db7494f88fef5e475 | [
"MIT"
] | 1 | 2021-08-13T05:42:53.000Z | 2021-08-13T05:42:53.000Z | User.py | KeJunMao/CUSX-course-report-query | 2917a4831ab96fc0baf5844db7494f88fef5e475 | [
"MIT"
] | null | null | null | User.py | KeJunMao/CUSX-course-report-query | 2917a4831ab96fc0baf5844db7494f88fef5e475 | [
"MIT"
] | null | null | null | import threading
import requests
import hashlib
from bs4 import BeautifulSoup
import re
import time
from dotenv import load_dotenv
import os
from sqlalchemy import create_engine, Column, Integer, String, TEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import json
load_dotenv()
Base = declarative_base()
engine = create_engine(os.environ.get("DB_PATH"))
baseUrl = os.getenv('BASE_URL')
Session = sessionmaker(bind=engine)
session = Session()
class Course(Base):
    """Cached course-report row for one student, keyed by student number."""
    __tablename__ = 'course'
    username = Column(String(10), primary_key=True)  # student number
    password = Column(String(16))  # stored in plaintext -- NOTE(review): consider hashing
    realyname = Column(String(16))  # student's real name (field name sic)
    semester = Column(Integer)  # semester id used by the remote site
    course_report = Column(TEXT)  # JSON-encoded list of {"名称", "成绩"} dicts
    def check_existing(self):
        """Return the already-persisted row for this username, or self if none.

        NOTE(review): when a row exists, its course_report is NOT refreshed
        from this instance, so stale cached scores may be kept -- confirm
        whether that is intended.
        """
        existing = session.query(Course).filter_by(
            username=self.username).first()
        if not existing:
            course = self
        else:
            course = existing
        session.close()
        return course
Base.metadata.create_all(engine)
class User:
    """A student account on the university course-report site.

    Handles login (SHA1 salted password), grade scraping, automatic teacher
    evaluation (required by the site before grades are visible), and caching
    of parsed results in the local database via the Course model.
    """
    def __init__(self, username, password, evaluate=True, force_update=False):
        self.username = username
        self.password = password
        self.evaluate = evaluate  # whether to auto-complete teacher evaluation
        self.force_update = force_update  # whether to bypass the DB cache
        self.isLogin = False
        self.name = ""
        self.hashPassword = ""
        self.cookies = None
        self.semester = os.getenv('SEMESTER')
        # Browser-like headers; the site appears to reject bare requests.
        self.headers = {
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Referer': 'http://172-18-2-55.vpn.arft.net:8118/',
            'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
            'Content-Type': 'application/x-www-form-urlencoded',
        }
    def getTotal(self, source_list):
        """Sum all course scores; print a warning for each failing (<60) course."""
        count = 0.0
        for s in source_list:
            if (s['成绩'] < 60):
                print(f"{self.name} - {s['名称']}挂科: {s['成绩']}")
            count += s['成绩']
        return count
    def getSource(self):
        """Scrape and persist this semester's grades; returns list of dicts.

        If the site demands teacher evaluation first, runs stdEvaluate() and
        retries recursively. Returns [] when not logged in.
        """
        if self.isLogin:
            source = requests.get(
                f"{baseUrl}/eams/teach/grade/course/person!search.action?semesterId={self.semester}", cookies=self.cookies, headers=self.headers)
            soup = BeautifulSoup(source.text, 'html.parser')
            # Page says "finish evaluation before viewing grades" -> evaluate.
            if soup.text.find('完成本学期评教后再查看成绩') != -1 and self.evaluate:
                self.stdEvaluate()
                return self.getSource()
            data_list = []
            # Skip the header row (idx 0); column 3 is the course name,
            # column 7 the score.
            for idx, tr in enumerate(soup.find_all('tr')):
                if idx != 0:
                    tds = tr.find_all('td')
                    if len(tds) >= 3 and tds[3].text:
                        name = tds[3].text
                        data_list.append({
                            "名称": ''.join(name.split()),
                            "成绩": float(tds[7].string.strip())
                        })
            if len(data_list) > 0:
                # NOTE(review): check_existing() returns the stored row
                # unchanged when it exists, so new scores are not re-saved.
                course = Course(username=self.username, password=self.password,
                                realyname=self.name, semester=self.semester, course_report=json.dumps(data_list))
                course = course.check_existing()
                session.add(course)
                session.commit()
            return data_list
        else:
            print(f"{self.username}: 账号或密码错误")
            return []
    def stdEvaluate(self):
        """Auto-submit every pending teacher evaluation with all-'A' answers."""
        source = requests.get(
            f"{baseUrl}/eams/quality/stdEvaluate.action", cookies=self.cookies, headers=self.headers)
        soup = BeautifulSoup(source.text, 'html.parser')
        for idx, tr in enumerate(soup.find_all('tr')):
            if idx != 0:
                td = tr.find_all('td')[-1]
                alinks = td.find_all('a')
                for a in alinks:
                    # Link query string carries evaluationLesson and teacher ids.
                    param = a['href'].split('?')[-1].split('&')
                    evaluationLesson = param[0].split('=')[-1]
                    teacher = param[1].split('=')[-1]
                    # Fixed questionnaire payload expected by the site;
                    # question texts must stay exactly as served.
                    formData = {
                        'teacher.id': teacher,
                        'semester.id': self.semester,
                        'evaluationLesson.id': evaluationLesson,
                        'result1_0.questionName': '作业(理论课、实践课)批改认真、注重讲评',
                        'result1_0.content': 'A',
                        'result1_0.score': 0.05500000000000001,
                        'result1_1.questionName': '能针对学生特点,因材施教',
                        'result1_1.content': 'A',
                        'result1_1.score': 0.05500000000000001,
                        'result1_2.questionName': '仪表端正,教态大方,为人师表',
                        'result1_2.content': 'A',
                        'result1_2.score': 0.05500000000000001,
                        'result1_3.questionName': '教学目的明确,符合大纲要求',
                        'result1_3.content': 'A',
                        'result1_3.score': 0.05500000000000001,
                        'result1_4.questionName': '教材掌握熟练,观点正确,概念准确内容充实,有一定的深度和广度',
                        'result1_4.content': 'A',
                        'result1_4.score': 0.05500000000000001,
                        'result1_5.questionName': '重点、难点突出能反映本学科发展趋势,将最新理论与研究成果融于教学',
                        'result1_5.content': 'A',
                        'result1_5.score': 0.05500000000000001,
                        'result1_6.questionName': '课堂内容衔接紧密,用正确的方法指导学生领会;注重理论联系实际',
                        'result1_6.content': 'A',
                        'result1_6.score': 0.05500000000000001,
                        'result1_7.questionName': '理论教学板书规范合理;实践教学示范准确;讲课语言表达简洁生动准确',
                        'result1_7.content': 'A',
                        'result1_7.score': 0.05500000000000001,
                        'result1_8.questionName': '在完成教学任务的前提下教学效果好',
                        'result1_8.content': 'A',
                        'result1_8.score': 0.06,
                        'result1Num': 9,
                        'result2Num': 0,
                    }
                    # Throttle submissions to avoid hammering the server.
                    time.sleep(float(os.getenv('SLEEP')))
                    requests.post(f"{baseUrl}/eams/quality/stdEvaluate!finishAnswer.action",
                                  cookies=self.cookies, data=formData)
    def login(self):
        """Log in; on success sets isLogin=True and self.name."""
        r = self.getHtml()
        self.cookies = {
            'JSESSIONID': r.cookies.get('JSESSIONID'),
            'TWFID': os.getenv('TWFID'),
            'GSESSIONID': r.cookies.get('GSESSIONID'),
        }
        self.hashPassword = self.getShaPassword(r)
        formData = {
            "username": self.username,
            "password": self.hashPassword,
            "encodedPassword": "",
            "session_locale": "zh_CN"
        }
        time.sleep(float(os.getenv('SLEEP')))
        user = requests.post(f"{baseUrl}/eams/login.action",
                             cookies=self.cookies, data=formData, headers=self.headers)
        soup = BeautifulSoup(user.text, 'html.parser')
        # The "my account" link only appears when authenticated.
        c = soup.find('a', {"href": '/eams/security/my.action'})
        if c:
            self.isLogin = True
            self.name = c.string
    def getHtml(self):
        """GET the login page (also used to refresh session cookies)."""
        r = requests.get(f"{baseUrl}/eams/login.action",
                         headers=self.headers, cookies=self.cookies)
        return r
    def getCookies(self, r):
        """Return the cookie jar of a response."""
        return r.cookies
    def getShaPassword(self, r):
        """Extract the per-session salt from the login page's inline script
        and return SHA1(salt + password) as hex, as the site's JS does."""
        soup = BeautifulSoup(r.text, 'html.parser')
        pattern = re.compile(r"CryptoJS.SHA1\(\'(.*?)\'")
        script = soup.find("script", text=pattern)
        shabasetext = pattern.search(str(script)).group(1)
        hash_object = hashlib.sha1(str.encode(shabasetext + self.password))
        return hash_object.hexdigest()
    def do(self):
        """Entry point: return grades, from cache unless force_update is set."""
        if self.force_update:
            self.login()
            return self.getSource()
        else:
            course = session.query(Course).filter_by(
                username=self.username, semester=self.semester).first()
            if course is None:
                self.login()
                return self.getSource()
            else:
                # Cache hit: skip the network round-trip entirely.
                self.isLogin = True
                self.name = course.realyname
                return json.loads(course.course_report)
def keeplive():
    """Keep the VPN/session alive by fetching the login page with a
    throwaway account (no actual login is performed)."""
    test_user = User(
        username='2020202020',
        password='2020202020', force_update=True
    )
    test_user.getHtml()
def set_interval(func, sec):
    """Call ``func`` every ``sec`` seconds using chained threading.Timers.

    Returns the first started Timer; cancel it before its tick fires to
    stop the whole chain.
    """
    def _tick():
        # Re-arm the next timer first, then run the callback -- same
        # ordering as scheduling-before-invoking.
        set_interval(func, sec)
        func()
    timer = threading.Timer(sec, _tick)
    timer.start()
    return timer
# Opt-in keep-alive: ping the site every 20 seconds when KEEPLIVE=1.
if os.getenv('KEEPLIVE') == '1':
    set_interval(keeplive, 20)
| 38.349138 | 160 | 0.53445 |
acf1afe268bc428b0fc572a02129aee043397e30 | 9,067 | py | Python | api_services/models.py | pwelagedara/django_nba_starter | 40bca3ebc3c2a67e1d875c12d83e4ccd94b2e030 | [
"MIT"
] | null | null | null | api_services/models.py | pwelagedara/django_nba_starter | 40bca3ebc3c2a67e1d875c12d83e4ccd94b2e030 | [
"MIT"
] | null | null | null | api_services/models.py | pwelagedara/django_nba_starter | 40bca3ebc3c2a67e1d875c12d83e4ccd94b2e030 | [
"MIT"
] | null | null | null | from django.db import models
from django.conf import settings
from enumfields import EnumField
from django_db_views.db_view import DBView
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from api_services import enums
class UserManager(BaseUserManager):
    """User management"""
    def create_user(self, email, name, role, password=None):
        """Creates a new User in the system.

        Email is normalized and the password stored hashed via set_password.
        """
        email = self.normalize_email(email)
        user = self.model(email=email, name=name, role=role)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, email, name, password):
        """Creates Super User (SUPER_ADMIN role, staff + superuser flags)."""
        user = self.create_user(email, name, enums.RoleChoice.SUPER_ADMIN, password)
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Database model for a User in the system"""
    class Meta:
        ordering = ['id']
    email = models.EmailField(max_length=255, unique=True)  # login identifier
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)  # grants Django admin access
    role = EnumField(enums.RoleChoice, max_length=20)  # PLAYER/COACH/ADMIN/...
    login_count = models.IntegerField(default=0)
    is_online = models.BooleanField(default=False)
    # Accumulated online time; presumably seconds -- TODO confirm unit.
    total_time_online = models.IntegerField(default=0)
    objects = UserManager()
    # Authenticate with email instead of a separate username field.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']
    def __str__(self):
        return self.email
class Team(models.Model):
    """Database model for a Team."""
    name = models.CharField(max_length=255)
    arena_name = models.CharField(max_length=255)
    class Meta:
        ordering = ['id']
    def __str__(self):
        return self.name
    def get_team_players(self):
        """Return all Player rows linked to this team as a list."""
        return list(self.player_set.all())
    def get_average_team_score(self):
        """Average points scored by this team per game played.

        Returns:
            float. Total points over all players divided by the number of
            games (home + away). Returns 0 when the team has played no
            games (the original raised ZeroDivisionError in that case).
        """
        # TODO: Optimize the performance using Database Views
        players = self.get_team_players()
        team_total_score = 0
        for player in players:
            for player_score in player.playerscore_set.all():
                team_total_score += player_score.points
        # count() lets the DB count rows instead of materializing them.
        total_games = self.away_team.count() + self.home_team.count()
        if total_games == 0:
            return 0
        return team_total_score / total_games
    def get_team_players_as_users(self):
        """Return the User objects behind this team's players."""
        return [player.user for player in self.get_team_players()]
class Player(models.Model):
    """Database model for a Player which extends User"""
    # One-to-one with the auth user; the user id doubles as the PK.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        primary_key=True
    )
    height = models.IntegerField(default=0)  # presumably centimeters -- TODO confirm unit
    # Nullable: a player may be unassigned to any team.
    team = models.ForeignKey(
        Team,
        on_delete=models.DO_NOTHING,
        null=True
    )
class Admin(models.Model):
    """Database model for a Admin which extends User"""
    # One-to-one with the auth user; the user id doubles as the PK.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        primary_key=True
    )
    # TODO: Add more model attributes
class Coach(models.Model):
    """Database model for a Coach which extends User"""
    # One-to-one with the auth user; the user id doubles as the PK.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        primary_key=True
    )
    # Each team has at most one coach (and vice versa); nullable while
    # the coach is unassigned.
    team = models.OneToOneField(
        Team,
        on_delete=models.DO_NOTHING,
        null=True
    )
class Tournament(models.Model):
    """Database model for a Tournament"""
    class Meta:
        ordering = ['id']
    name = models.CharField(max_length=255, default="NBA")
class TournamentRound(models.Model):
    """Database model for a Tournament Round (e.g. quarterfinal, final)."""
    class Meta:
        ordering = ['id']
    tournament = models.ForeignKey(
        Tournament,
        on_delete=models.DO_NOTHING
    )
    # Round stage, constrained to the TournamentRoundChoice enum.
    name = EnumField(enums.TournamentRoundChoice, max_length=20)
class Game(models.Model):
    """Database model for a Game"""
    tournament_round = models.ForeignKey(
        TournamentRound,
        on_delete=models.DO_NOTHING
    )
    home_team = models.ForeignKey(
        Team,
        on_delete=models.DO_NOTHING,
        related_name='home_team'
    )
    away_team = models.ForeignKey(
        Team,
        on_delete=models.DO_NOTHING,
        related_name='away_team'
    )
    def get_total_points(self):
        """Calculates the home/away team point totals for this game.

        Returns:
            dict. {"home_team_total": int, "away_team_total": int}.
        """
        # Using database view for faster processing
        game_scores = list(self.gamescoresdbview_set.all())
        home_team_total = 0
        away_team_total = 0
        for game_score in game_scores:
            if game_score.team == game_score.game.home_team:
                home_team_total = game_score.team_score
            else:
                away_team_total = game_score.team_score
        return {
            "home_team_total": int(home_team_total),
            "away_team_total": int(away_team_total)
        }
    def get_winning_team(self):
        """Finds the winning team.

        NOTE(review): on a tie the away team is returned -- confirm that
        ties are impossible or that this tiebreak is intended.
        """
        team_totals = self.get_total_points()
        if team_totals["home_team_total"] > team_totals["away_team_total"]:
            return self.home_team
        return self.away_team
class PlayerScore(models.Model):
    """Database model to store Player Scores (points per player per game).

    Multiple rows may exist per (game, player); downstream SQL sums them.
    """
    game = models.ForeignKey(
        Game,
        on_delete=models.DO_NOTHING
    )
    player = models.ForeignKey(
        Player,
        on_delete=models.DO_NOTHING
    )
    points = models.IntegerField(default=0)
class GameScoresDBView(DBView):
    """Database view to store Game scores to aid team total calculation and winner calculation"""
    class Meta:
        # Unmanaged: the table is a SQL view created from db_script below.
        managed = False
        db_table = 'api_services_gamescoresdbview'
    game = models.ForeignKey(
        Game,
        on_delete=models.DO_NOTHING
    )
    team = models.ForeignKey(
        Team,
        on_delete=models.DO_NOTHING
    )
    team_score = models.IntegerField(default=0)
    # Aggregates PlayerScore rows per (game, team): inner query sums each
    # player's points per game, joined to Player for the team id.
    db_script = """
        SELECT
            row_number() over () AS id, game_id, team_id, SUM(player_score) AS team_score
        FROM
            (
                SELECT
                    asps.game_id, asps.player_score, asp.team_id
                FROM
                    (
                        SELECT game_id, player_id, SUM(points) AS player_score
                        FROM api_services_playerscore
                        GROUP BY player_id, game_id
                    ) asps INNER JOIN api_services_player AS asp ON asps.player_id=asp.user_id
            ) tps GROUP BY game_id, team_id
    """
    # Same SQL works on both supported backends.
    view_definition = {
        "django.db.backends.sqlite3": db_script,
        "django.db.backends.postgresql": db_script
    }
class PlayerAverageDBView(DBView):
    """Database view to store Player averages (points per game, per player)."""
    class Meta:
        # Unmanaged: the table is a SQL view created from db_script below.
        managed = False
        db_table = 'api_services_playeraveragedbview'
        ordering = ['player_id']
    player = models.OneToOneField(
        Player,
        on_delete=models.CASCADE,
        primary_key=True
    )
    team = models.ForeignKey(
        Team,
        on_delete=models.DO_NOTHING
    )
    player_average = models.FloatField(default=0.0)
    # LEFT JOIN keeps players with no recorded scores (COALESCE -> 0.0).
    db_script = """
       SELECT
	        apl.player_id, COALESCE(apl.player_average, 0.0) AS player_average, apisp.team_id
        FROM
	        (
	        SELECT
		        asp.user_id AS player_id, a.player_average AS player_average
	        FROM api_services_player AS asp
	        LEFT JOIN
		        (
		        SELECT
			        pid AS player_id, ROUND(AVG(player_score),2) AS player_average
		        FROM
			        (
			        SELECT player_id AS pid, SUM(points) AS player_score
			        FROM api_services_playerscore
			        GROUP BY player_id, game_id
			        ) player_totals GROUP BY pid
		        ) a ON asp.user_id=a.player_id
	        ) apl INNER JOIN api_services_player AS apisp ON apl.player_id=apisp.user_id
    """
    # Same SQL works on both supported backends.
    view_definition = {
        "django.db.backends.sqlite3": db_script,
        "django.db.backends.postgresql": db_script
    }
class TeamPlayerScoresDBView(DBView):
    """Database view to store Team scores to aid 90th percentile calculation"""
    class Meta:
        # Unmanaged: the table is a SQL view created from db_script below.
        managed = False
        db_table = 'api_services_teamplayerscoresdbview'
    team = models.ForeignKey(
        Team,
        on_delete=models.DO_NOTHING
    )
    player = models.ForeignKey(
        Player,
        on_delete=models.DO_NOTHING
    )
    player_score = models.IntegerField(default=0)
    # One row per (player, game) total, tagged with the player's team.
    db_script = """
        SELECT
	        row_number() over () AS id, player_totals.pid AS player_id, pl.team_id, player_totals.player_score
        FROM
	        (
	        SELECT player_id AS pid, SUM(points) AS player_score
	        FROM api_services_playerscore
	        GROUP BY player_id, game_id
	        ) player_totals INNER JOIN api_services_player AS pl ON player_totals.pid=pl.user_id
    """
    # Same SQL works on both supported backends.
    view_definition = {
        "django.db.backends.sqlite3": db_script,
        "django.db.backends.postgresql": db_script
    }
| 25.116343 | 108 | 0.641668 |
acf1b031ff522c9992c0ffc3e4ac1506656e4425 | 2,490 | py | Python | portfolio_app/views.py | madelinepet/portfolio | c9b1b6318a4ff0018cb55d722d4705057acfe1ee | [
"MIT"
] | null | null | null | portfolio_app/views.py | madelinepet/portfolio | c9b1b6318a4ff0018cb55d722d4705057acfe1ee | [
"MIT"
] | null | null | null | portfolio_app/views.py | madelinepet/portfolio | c9b1b6318a4ff0018cb55d722d4705057acfe1ee | [
"MIT"
] | null | null | null | from django.shortcuts import render
import os
import requests
from .models import Image
from .forms import ImageForm
from django.views.generic import CreateView
from django.urls import reverse_lazy
import geocoder
g = geocoder.ip('me')
def maps_view(request):
    """Render the Google Maps embed page.

    POST: builds a search URL from the submitted ``search-map`` and
    ``search-loc`` fields, URL-encoding spaces as ``+``.
    GET: falls back to a world view centered on the client's IP location.
    """
    if request.method == 'POST':
        # The original split/joined request.POST and discarded the results
        # (and QueryDict has no .split, so that branch would crash);
        # actually encode spaces in the submitted values instead.
        search_map = request.POST['search-map'].replace(' ', '+')
        search_loc = request.POST['search-loc'].replace(' ', '+')
        map_manip = os.environ.get('MAPS_URL') + search_map + search_loc + '&zoom=11'
    else:
        # '&center=' was mojibake'd to '¢er=' in the original
        # ('&cent' rendered as the cent sign), breaking the default map URL.
        map_manip = (
            os.environ.get('MAPS_DEFAULT') + '&center=' +
            str(g.latlng[0]) + ',' + str(g.latlng[1]) + '&zoom=3'
        )
    context = {
        'maps': map_manip
    }
    return render(request, 'maps/maps.html', context)
def game_view(request):
    """Render the static game page (no context needed)."""
    return render(request, 'game/game.html')
class nasa_view(CreateView):
    """View for NASA's Astronomy Picture of the Day.

    GET shows today's image plus the user's saved images; the CreateView
    form lets the user save the current image to their collection.
    (Name kept lowercase for URLconf compatibility.)
    """
    template_name = 'nasa/nasa.html'
    context_object_name = 'images'
    model = Image
    form_class = ImageForm
    success_url = reverse_lazy('nasa')
    @staticmethod
    def _fetch_image_otd():
        """Fetch the URL of NASA's image of the day from the APOD API.

        Shared by get() and form_valid(), which previously duplicated
        this request/parse logic.
        """
        response = requests.get(os.environ.get('NASA_URL'))
        return response.json()['url']
    def get_form_kwargs(self):
        """Pass the requesting user's username into the form."""
        kwargs = super().get_form_kwargs()
        kwargs.update({'username': self.request.user.username})
        return kwargs
    def get(self, request):
        """Render today's image alongside the user's saved images."""
        image_otd = self._fetch_image_otd()
        username = self.request.user.get_username()
        user_images = Image.objects.filter(user__username=username)
        context = {
            'nasa': image_otd,
            'user_images': user_images,
        }
        return render(request, 'nasa/nasa.html', context)
    def form_valid(self, form):
        """Attach the user and the current image URL before saving."""
        form.instance.user = self.request.user
        form.instance.url = self._fetch_image_otd()
        return super().form_valid(form)
# add users field to Image model and have an array in there. If the user is in there for that image, display the image to them
# have url be unique and if IntegrityError, just add user to array in users
| 30.365854 | 126 | 0.625703 |
acf1b0bdb8bfc2bd3d3590dbb2011a2b0a30441c | 6,646 | py | Python | mailgun/examples/suppressions_examples.py | diskovod/mailgun_beta | 5a595704c1eb887f87ad72b900c1e6fb613412a1 | [
"MIT"
] | null | null | null | mailgun/examples/suppressions_examples.py | diskovod/mailgun_beta | 5a595704c1eb887f87ad72b900c1e6fb613412a1 | [
"MIT"
] | null | null | null | mailgun/examples/suppressions_examples.py | diskovod/mailgun_beta | 5a595704c1eb887f87ad72b900c1e6fb613412a1 | [
"MIT"
] | null | null | null | import os
from mailgun.client import Client
key = os.environ["APIKEY"]
domain = os.environ["DOMAIN"]
client = Client(auth=("api", key))
# Bounces
def get_bounces():
    """List all bounced addresses for the domain.
    GET /<domain>/bounces
    :return: None; prints the JSON response.
    """
    req = client.bounces.get(domain=domain)
    print(req.json())
def post_bounces():
    """Add a single address to the bounce list.
    POST /<domain>/bounces
    :return: None; prints the JSON response.
    """
    data = {
        "address": "test120@gmail.com",
        "code": 550,
        "error": "Test error"
    }
    req = client.bounces.create(data=data, domain=domain)
    print(req.json())
def get_single_bounce():
    """Fetch the bounce record for one address.
    GET /<domain>/bounces/<address>
    :return: None; prints the JSON response.
    """
    req = client.bounces.get(domain=domain, bounce_address="test120@gmail.com")
    print(req.json())
def add_multiple_bounces():
    """Add several bounce records in one JSON request.
    POST /<domain>/bounces, Content-Type: application/json
    :return: None; prints the JSON response.
    """
    data = [{
        "address": "test121@i.ua",
        "code": "550",
        "error": "Test error2312"
    },
        {
            "address": "test122@gmail.com",
            "code": "550",
            "error": "Test error"
        }]
    # NOTE(review): headers here is a bare content-type string; confirm the
    # client wrapper expects that rather than a dict.
    req = client.bounces.create(data=data, domain=domain, headers='application/json')
    print(req.json())
def import_bounce_list():
    """Bulk-import bounces from a CSV file.
    POST /<domain>/bounces/import, Content-Type: multipart/form-data
    :return: None; prints the JSON response.
    """
    files = {"bounce_csv": open("../doc_tests/files/mailgun_bounces_test.csv", "rb")}
    req = client.bounces_import.create(domain=domain, files=files)
    print(req.json())
def delete_single_bounce():
    """Remove one address from the bounce list.
    DELETE /<domain>/bounces/<address>
    :return: None; prints the JSON response.
    """
    req = client.bounces.delete(domain=domain, bounce_address="test122@gmail.com")
    print(req.json())
def delete_bounce_list():
    """Clear the entire bounce list for the domain.
    DELETE /<domain>/bounces
    :return: None; prints the JSON response.
    """
    req = client.bounces.delete(domain=domain)
    print(req.json())
# Unsubscribes
def get_unsubs():
    """List all unsubscribed addresses for the domain.
    GET /<domain>/unsubscribes
    :return: None; prints the JSON response.
    """
    req = client.unsubscribes.get(domain=domain)
    print(req.json())
def get_single_unsub():
    """Fetch the unsubscribe record for one address.
    GET /<domain>/unsubscribes/<address>
    :return: None; prints the JSON response.
    """
    req = client.unsubscribes.get(domain=domain, unsubscribe_address="test1@gmail.com")
    print(req.json())
def create_single_unsub():
    """Unsubscribe one address from all mail (tag '*').
    POST /<domain>/unsubscribes
    :return: None; prints the JSON response.
    """
    data = {'address':'bob@example.com',
            'tag': '*'}
    req = client.unsubscribes.create(data=data, domain=domain)
    print(req.json())
def create_multiple_unsub():
    """Create several unsubscribe records in one JSON request.
    POST /<domain>/unsubscribes, Content-Type: application/json
    :return: None; prints the JSON response.
    """
    data = [
        {
            "address": "alice@example.com",
            "tags": ["some tag"],
            "created_at": "Thu, 13 Oct 2011 18:02:00 UTC"
        },
        {
            "address": "bob@example.com",
            "tags": ["*"],
        },
        {
            "address": "carol@example.com"
        }
    ]
    req = client.unsubscribes.create(data=data, domain=domain,
                                     headers='application/json')
    print(req.json())
def import_list_unsubs():
    """Bulk-import unsubscribes from a CSV file.
    POST /<domain>/unsubscribes/import, Content-Type: multipart/form-data
    :return: None; prints the JSON response.
    """
    files = {"unsubscribe2_csv": open("../doc_tests/files/mailgun_unsubscribes.csv", "rb")}
    req = client.unsubscribes_import.create(domain=domain, files=files)
    print(req.json())
def delete_single_unsub():
    """Remove one address from the unsubscribe list.
    DELETE /<domain>/unsubscribes/<address>
    :return: None; prints the JSON response.
    """
    req = client.unsubscribes.delete(domain=domain, unsubscribe_address="alice@example.com")
    print(req.json())
def delete_all_unsubs():
    """Clear the entire unsubscribe list for the domain.
    DELETE /<domain>/unsubscribes/
    :return: None; prints the JSON response.
    """
    req = client.unsubscribes.delete(domain=domain)
    print(req.json())
# Complaints
def get_complaints():
    """List all complaint records for the domain.
    GET /<domain>/complaints
    :return: None; prints the JSON response.
    """
    req = client.complaints.get(domain=domain)
    print(req.json())
def add_complaints():
    """Add a single complaint record.
    POST /<domain>/complaints
    :return: None; prints the JSON response.
    """
    data = {
        "address": "bob@gmail.com",
        "tag": "compl_test_tag"
    }
    req = client.complaints.create(data=data, domain=domain)
    print(req.json())
def add_multiple_complaints():
    """Create several complaint records in one JSON request.
    POST /<domain>/complaints, Content-Type: application/json
    :return: None; prints the JSON response.
    """
    data = [
        {
            "address": "alice1@example.com",
            "tags": ["some tag"],
            "created_at": "Thu, 13 Oct 2011 18:02:00 UTC"
        },
        {
            "address": "carol1@example.com"
        }
    ]
    req = client.complaints.create(data=data, domain=domain, headers='application/json')
    print(req.json())
def import_complaint_list():
    """Bulk-import complaints from a CSV file.
    POST /<domain>/complaints/import, Content-Type: multipart/form-data
    :return: None; prints the JSON response.
    """
    files = {"complaints_csv": open("../doc_tests/files/mailgun_complaints.csv", "rb")}
    req = client.complaints_import.create(domain=domain, files=files)
    print(req.json())
def delete_single_complaint():
    """Remove one address from the complaint list.
    DELETE /<domain>/complaints/<address>
    :return: None; prints the JSON response.
    """
    req = client.complaints.delete(domain=domain, complaint_address="carol1@example.com")
    print(req.json())
def delete_all_complaints():
    """Clear the entire complaint list for the domain.
    DELETE /<domain>/complaints/
    :return: None; prints the JSON response.
    """
    req = client.complaints.delete(domain=domain)
    print(req.json())
# Whitelists
def get_whitelists():
    """List all whitelist records for the domain.
    GET /<domain>/whitelists
    :return: None; prints the JSON response.
    """
    req = client.whitelists.get(domain=domain)
    print(req.json())
def create_whitelist():
    """Add an address to the whitelist.
    POST /<domain>/whitelists
    :return: None; prints the JSON response.
    """
    data = {
        "address": "bob@gmail.com",
        "tag": "whitel_test"
    }
    req = client.whitelists.create(data=data, domain=domain)
    print(req.json())
def get_single_whitelist():
    """Fetch one whitelist record by address or domain.
    GET /<domain>/whitelists/<address or domain>
    :return: None; prints the JSON response.
    """
    # You can set domain name or address for whitelist_address option
    req = client.whitelists.get(domain=domain, whitelist_address="bob@gmail.com")
    print(req.json())
def import_list_whitelists():
    """Bulk-import whitelist entries from a CSV file.
    POST /<domain>/whitelists/import, Content-Type: multipart/form-data
    :return: None; prints the JSON response.
    """
    files = {"whitelist_csv": open("../doc_tests/files/mailgun_whitelists.csv", "rb")}
    req = client.whitelists_import.create(domain=domain, files=files)
    print(req.json())
def delete_single_whitelist():
    """Remove one whitelist record by address or domain.
    DELETE /<domain>/whitelists/<address or domain>
    :return: None; prints the JSON response.
    """
    req = client.whitelists.delete(domain=domain, whitelist_address="bob@gmail.com")
    print(req.json())
# Demo entry point: runs a single example against the live API.
if __name__ == '__main__':
    delete_single_whitelist()
| 24.07971 | 92 | 0.597051 |
acf1b27b1e03ed9bcebedc62276654a22febb140 | 10,050 | py | Python | hyperion/io/audio_reader.py | jsalt2019-diadet/hyperion | 14a11436d62f3c15cd9b1f70bcce3eafbea2f753 | [
"Apache-2.0"
] | 9 | 2019-09-22T05:19:59.000Z | 2022-03-05T18:03:37.000Z | hyperion/io/audio_reader.py | jsalt2019-diadet/hyperion | 14a11436d62f3c15cd9b1f70bcce3eafbea2f753 | [
"Apache-2.0"
] | null | null | null | hyperion/io/audio_reader.py | jsalt2019-diadet/hyperion | 14a11436d62f3c15cd9b1f70bcce3eafbea2f753 | [
"Apache-2.0"
] | 4 | 2019-10-10T06:34:05.000Z | 2022-03-05T18:03:56.000Z | """
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
from six import string_types
import os
import logging
import io
import subprocess
import soundfile as sf
import numpy as np
from ..hyp_defs import float_cpu
from ..utils import SCPList, SegmentList
valid_ext = ['.wav', '.flac', '.ogg' , '.au', '.avr', '.caf', '.htk', '.iff', '.mat', '.mpc', '.oga', '.pvf', '.rf64', '.sd2', '.sds', '.sf', '.voc', 'w64', '.wve', '.xi']
class AudioReader(object):
"""Class to read audio files from wav, flac or pipe
Attributes:
file_path: scp file with formant file_key wavspecifier (audio_file/pipe).
segments_path: segments file with format: segment_id file_id tbeg tend
scale: Multiplies signal by scale factor
"""
def __init__(self, file_path, segments_path=None, wav_scale=2**15):
self.file_path = file_path
if isinstance(file_path, SCPList):
self.scp = file_path
else:
self.scp = SCPList.load(file_path, sep=' ')
self.segments_path = segments_path
if segments_path is None:
self.segments = None
self.with_segments = False
else:
self.with_segments = True
if isinstance(file_path, SegmentList):
self.segments = segments_path
else:
self.segments = SegmentList.load(segments_path, sep=' ', index_by_file=False)
self.scale = wav_scale
    def __enter__(self):
        """Function required when entering contructions of type

           with AudioReader('file.h5') as f:
              keys, data = f.read()
        """
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Function required when exiting from contructions of type

           with DataReader('file.h5') as f:
              keys, data = f.read()

        No resources are held between calls, so nothing to release here.
        """
        pass
@staticmethod
def read_wavspecifier(wavspecifier, scale=2**15):
"""Reads an audiospecifier (audio_file/pipe)
It reads from pipe or from all the files that can be read
by `libsndfile <http://www.mega-nerd.com/libsndfile/#Features>`
Args:
wavspecifier: A pipe, wav, flac, ogg file etc.
scale: Multiplies signal by scale factor
"""
wavspecifier = wavspecifier.strip()
if wavspecifier[-1] == '|':
wavspecifier = wavspecifier[:-1]
return AudioReader.read_pipe(wavspecifier, scale)
else:
ext = os.path.splitext(wavspecifier)[1]
if ext in valid_ext:
x, fs = sf.read(wavspecifier, dtype=float_cpu())
x *= scale
else:
raise Exception('Unknown format for %s' % (wavspecifier))
return x, fs
@staticmethod
def read_pipe(wavspecifier, scale=2**15):
"""Reads wave file from a pipe
Args:
wavspecifier: Shell command with pipe output
scale: Multiplies signal by scale factor
"""
#proc = subprocess.Popen(wavspecifier, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
proc = subprocess.Popen(wavspecifier, shell=True, stdout=subprocess.PIPE)
pipe = proc.communicate()[0]
if proc.returncode !=0:
raise Exception('Wave read pipe command %s returned code %d' % (wavspecifier, proc.returncode))
x, fs = sf.read(io.BytesIO(pipe), dtype=float_cpu())
x *= scale
return x, fs
def _read_segment(self, segment):
"""Reads a wave segment
Args:
segment: pandas DataFrame (segment_id , file_id, tbeg, tend)
Returns:
Wave, sampling frequency
"""
file_id = segment['file_id']
t_beg = segment['tbeg']
t_end = segment['tend']
file_path, _, _ = self.scp[file_id]
x_i, fs_i = self.read_wavspecifier(file_path, self.scale)
num_samples_i = len(x_i)
s_beg = int(t_beg * fs_i)
if s_beg >= num_samples_i:
raise Exception('segment %s tbeg=%.2f (num_sample=%d) longer that wav file %s (num_samples=%d)' % (
key, tbeg, sbeg, file_id, num_samples_i))
s_end = int(t_end * fs_i)
if s_end > num_samples_i or t_end < 0:
s_end = num_samples_i
x_i = x_i[s_beg:s_end]
return x_i, fs_i
def read(self):
pass
class SequentialAudioReader(AudioReader):
    """Reads recordings/segments sequentially, optionally processing only one
    partition of the scp/segments list (for parallel jobs).

    Attributes:
      file_path: scp file or SCPList object.
      segments_path: segments file or SegmentList object (optional).
      wav_scale: multiplicative factor applied to the waveform samples.
      part_idx: 1-based index of the partition this reader processes.
      num_parts: total number of partitions the list is split into.
    """

    def __init__(self, file_path, segments_path=None, wav_scale=2**15,
                 part_idx=1, num_parts=1):
        super(SequentialAudioReader, self).__init__(
            file_path, segments_path, wav_scale=wav_scale)
        self.cur_item = 0
        self.part_idx = part_idx
        self.num_parts = num_parts
        if self.num_parts > 1:
            # Keep only the slice of the list this worker is responsible for.
            if self.with_segments:
                self.segments = self.segments.split(self.part_idx, self.num_parts)
            else:
                self.scp = self.scp.split(self.part_idx, self.num_parts)

    def __iter__(self):
        """Needed to build an iterator, e.g.:
               r = SequentialAudioReader(...)
               for key, s, fs in r:
                   process(s)
        """
        return self

    def __next__(self):
        """Returns the next (key, waveform, sampling frequency) triplet,
        raising StopIteration at the end of the list.
        """
        key, x, fs = self.read(1)
        if len(key) == 0:
            raise StopIteration
        return key[0], x[0], fs[0]

    def next(self):
        """__next__ for Python 2"""
        return self.__next__()

    def reset(self):
        """Rewinds to the beginning of the dataset so reading can restart."""
        self.cur_item = 0

    def eof(self):
        """End of file.

        Returns:
          True when all recordings/segments in the dataset have been read.
        """
        if self.with_segments:
            return self.cur_item == len(self.segments)
        return self.cur_item == len(self.scp)

    def read(self, num_records=0):
        """Reads the next num_records audio files.

        Args:
          num_records: number of audio files to read; 0 reads everything left.

        Returns:
          keys: list of recording/segment names.
          data: list of waveforms.
          fs: list of sampling frequencies.
        """
        if num_records == 0:
            # Bug fix: when reading segments, the remaining count must come
            # from the segments list, not the scp; otherwise read(0) could
            # stop before all segments were returned.
            if self.with_segments:
                num_records = len(self.segments) - self.cur_item
            else:
                num_records = len(self.scp) - self.cur_item

        keys = []
        data = []
        fs = []
        for _ in xrange(num_records):
            if self.eof():
                break
            if self.with_segments:
                segment = self.segments[self.cur_item]
                key = segment['segment_id']
                x_i, fs_i = self._read_segment(segment)
            else:
                key, file_path, _, _ = self.scp[self.cur_item]
                x_i, fs_i = self.read_wavspecifier(file_path, self.scale)

            keys.append(key)
            data.append(x_i)
            fs.append(fs_i)
            self.cur_item += 1

        return keys, data, fs

    @staticmethod
    def filter_args(prefix=None, **kwargs):
        """Extracts the (part_idx, num_parts) options from kwargs, honoring
        an optional argument-name prefix."""
        p = '' if prefix is None else prefix + '_'
        valid_args = ('part_idx', 'num_parts')
        return dict((k, kwargs[p + k]) for k in valid_args if p + k in kwargs)

    @staticmethod
    def add_argparse_args(parser, prefix=None):
        """Adds this reader's command-line options to an argparse parser."""
        if prefix is None:
            p1 = '--'
            p2 = ''
        else:
            p1 = '--' + prefix + '-'
            p2 = prefix + '_'
        parser.add_argument(p1+'wav-scale', dest=(p2+'wav_scale'), default=2**15, type=float,
                            help=('multiplicative factor for waveform'))
        parser.add_argument(p1+'part-idx', dest=(p2+'part_idx'), type=int, default=1,
                            help=('splits the list of files in num-parts and process part_idx'))
        parser.add_argument(p1+'num-parts', dest=(p2+'num_parts'), type=int, default=1,
                            help=('splits the list of files in num-parts and process part_idx'))
class RandomAccessAudioReader(AudioReader):
    """Reads recordings/segments by key, in arbitrary order."""

    def __init__(self, file_path, segments_path=None, wav_scale=2**15):
        super(RandomAccessAudioReader, self).__init__(
            file_path, segments_path, wav_scale)

    def read(self, keys):
        """Reads the waveforms for the recordings in keys.

        Args:
          keys: recording/segment_id name, or list of names.

        Returns:
          data: list of waveforms.
          fs: list of sampling frequencies.

        Raises:
          Exception: if a key is not present in the scp/segments list.
        """
        if isinstance(keys, string_types):
            keys = [keys]

        data = []
        fs = []
        # Idiom fix: the original enumerate() index was never used.
        for key in keys:
            if self.with_segments:
                if key not in self.segments:
                    raise Exception('Key %s not found' % key)
                segment = self.segments[key]
                x_i, fs_i = self._read_segment(segment)
            else:
                if key not in self.scp:
                    raise Exception('Key %s not found' % key)
                file_path, _, _ = self.scp[key]
                x_i, fs_i = self.read_wavspecifier(file_path, self.scale)

            data.append(x_i)
            fs.append(fs_i)
        return data, fs

    @staticmethod
    def add_argparse_args(parser, prefix=None):
        """Adds this reader's command-line options to an argparse parser."""
        if prefix is None:
            p1 = '--'
            p2 = ''
        else:
            p1 = '--' + prefix + '-'
            p2 = prefix + '_'
        parser.add_argument(p1+'wav-scale', dest=(p2+'wav_scale'), default=2**15, type=float,
                            help=('multiplicative factor for waveform'))
| 30.547112 | 171 | 0.55204 |
acf1b2ad71721cf4562a69130c661925c36108dc | 45,019 | py | Python | python_modules/dagster-graphql/dagster_graphql_tests/graphql/snapshots/snap_test_solids.py | camvogel/dagster | b4df94bf34906e7f81c973a7fdad5429ae3697ba | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql_tests/graphql/snapshots/snap_test_solids.py | camvogel/dagster | b4df94bf34906e7f81c973a7fdad5429ae3697ba | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql_tests/graphql/snapshots/snap_test_solids.py | camvogel/dagster | b4df94bf34906e7f81c973a7fdad5429ae3697ba | [
"Apache-2.0"
] | 1 | 2021-12-08T18:13:19.000Z | 2021-12-08T18:13:19.000Z | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
# Registry that the auto-generated snapshottest entries below are stored into;
# the generated data should be regenerated by the snapshot tool, not hand-edited.
snapshots = Snapshot()
snapshots['test_query_all_solids 1'] = {
'repositoryOrError': {
'usedSolids': [
{
'__typename': 'UsedSolid',
'definition': {
'name': 'a_solid_with_config'
},
'invocations': [
{
'pipeline': {
'name': 'job_with_default_config'
},
'solidHandle': {
'handleID': 'a_solid_with_config'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'a_solid_with_multilayered_config'
},
'invocations': [
{
'pipeline': {
'name': 'more_complicated_nested_config'
},
'solidHandle': {
'handleID': 'a_solid_with_multilayered_config'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'a_solid_with_three_field_config'
},
'invocations': [
{
'pipeline': {
'name': 'more_complicated_config'
},
'solidHandle': {
'handleID': 'a_solid_with_three_field_config'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'add_four'
},
'invocations': [
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'add_one'
},
'invocations': [
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_1.adder_1'
}
},
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_1.adder_2'
}
},
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_2.adder_1'
}
},
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_2.adder_2'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'add_two'
},
'invocations': [
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_1'
}
},
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'add_four.adder_2'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'after_failure'
},
'invocations': [
{
'pipeline': {
'name': 'chained_failure_pipeline'
},
'solidHandle': {
'handleID': 'after_failure'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'alp_a'
},
'invocations': [
{
'pipeline': {
'name': 'asset_lineage_pipeline'
},
'solidHandle': {
'handleID': 'alp_a'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'alp_b'
},
'invocations': [
{
'pipeline': {
'name': 'asset_lineage_pipeline'
},
'solidHandle': {
'handleID': 'alp_b'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'always_succeed'
},
'invocations': [
{
'pipeline': {
'name': 'chained_failure_pipeline'
},
'solidHandle': {
'handleID': 'always_succeed'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'apply_to_three'
},
'invocations': [
{
'pipeline': {
'name': 'multi_mode_with_resources'
},
'solidHandle': {
'handleID': 'apply_to_three'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'backcompat_materialize'
},
'invocations': [
{
'pipeline': {
'name': 'backcompat_materialization_pipeline'
},
'solidHandle': {
'handleID': 'backcompat_materialize'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'can_fail'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'can_fail'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'collect'
},
'invocations': [
{
'pipeline': {
'name': 'eventually_successful'
},
'solidHandle': {
'handleID': 'collect'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'conditionally_fail'
},
'invocations': [
{
'pipeline': {
'name': 'chained_failure_pipeline'
},
'solidHandle': {
'handleID': 'conditionally_fail'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'df_expectations_solid'
},
'invocations': [
{
'pipeline': {
'name': 'csv_hello_world_with_expectations'
},
'solidHandle': {
'handleID': 'df_expectations_solid'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'div_four'
},
'invocations': [
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'div_four'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'div_two'
},
'invocations': [
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'div_four.div_1'
}
},
{
'pipeline': {
'name': 'composites_pipeline'
},
'solidHandle': {
'handleID': 'div_four.div_2'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'emit'
},
'invocations': [
{
'pipeline': {
'name': 'dynamic_pipeline'
},
'solidHandle': {
'handleID': 'emit'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'emit_failed_expectation'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_expectations'
},
'solidHandle': {
'handleID': 'emit_failed_expectation'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'emit_successful_expectation'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_expectations'
},
'solidHandle': {
'handleID': 'emit_successful_expectation'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'emit_successful_expectation_no_metadata'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_expectations'
},
'solidHandle': {
'handleID': 'emit_successful_expectation_no_metadata'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'emit_ten'
},
'invocations': [
{
'pipeline': {
'name': 'dynamic_pipeline'
},
'solidHandle': {
'handleID': 'emit_ten'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'fail'
},
'invocations': [
{
'pipeline': {
'name': 'eventually_successful'
},
'solidHandle': {
'handleID': 'fail'
}
},
{
'pipeline': {
'name': 'eventually_successful'
},
'solidHandle': {
'handleID': 'fail_2'
}
},
{
'pipeline': {
'name': 'eventually_successful'
},
'solidHandle': {
'handleID': 'fail_3'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'fail_subset'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_invalid_definition_error'
},
'solidHandle': {
'handleID': 'fail_subset'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'first_asset'
},
'invocations': [
{
'pipeline': {
'name': 'hanging_job'
},
'solidHandle': {
'handleID': 'first_asset'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'get_input_one'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_input_early_terminate_pipeline'
},
'solidHandle': {
'handleID': 'get_input_one'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'get_input_two'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_input_early_terminate_pipeline'
},
'solidHandle': {
'handleID': 'get_input_two'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'hanging_asset'
},
'invocations': [
{
'pipeline': {
'name': 'hanging_job'
},
'solidHandle': {
'handleID': 'hanging_asset'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'hard_fail_or_0'
},
'invocations': [
{
'pipeline': {
'name': 'hard_failer'
},
'solidHandle': {
'handleID': 'hard_fail_or_0'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'increment'
},
'invocations': [
{
'pipeline': {
'name': 'hard_failer'
},
'solidHandle': {
'handleID': 'increment'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'loop'
},
'invocations': [
{
'pipeline': {
'name': 'infinite_loop_pipeline'
},
'solidHandle': {
'handleID': 'loop'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'materialize'
},
'invocations': [
{
'pipeline': {
'name': 'materialization_pipeline'
},
'solidHandle': {
'handleID': 'materialize'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'multi'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'multi'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'multiply_by_two'
},
'invocations': [
{
'pipeline': {
'name': 'dynamic_pipeline'
},
'solidHandle': {
'handleID': 'double_total'
}
},
{
'pipeline': {
'name': 'dynamic_pipeline'
},
'solidHandle': {
'handleID': 'multiply_by_two'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'multiply_inputs'
},
'invocations': [
{
'pipeline': {
'name': 'dynamic_pipeline'
},
'solidHandle': {
'handleID': 'multiply_inputs'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'never_runs_asset'
},
'invocations': [
{
'pipeline': {
'name': 'hanging_job'
},
'solidHandle': {
'handleID': 'never_runs_asset'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'no_output'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'child_multi_skip'
}
},
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'child_skip'
}
},
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'grandchild_fail'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'noop_solid'
},
'invocations': [
{
'pipeline': {
'name': 'asset_lineage_pipeline'
},
'solidHandle': {
'handleID': 'noop_solid'
}
},
{
'pipeline': {
'name': 'more_complicated_config'
},
'solidHandle': {
'handleID': 'noop_solid'
}
},
{
'pipeline': {
'name': 'noop_pipeline'
},
'solidHandle': {
'handleID': 'noop_solid'
}
},
{
'pipeline': {
'name': 'partitioned_asset_lineage_pipeline'
},
'solidHandle': {
'handleID': 'noop_solid'
}
},
{
'pipeline': {
'name': 'simple_job_a'
},
'solidHandle': {
'handleID': 'noop_solid'
}
},
{
'pipeline': {
'name': 'simple_job_b'
},
'solidHandle': {
'handleID': 'noop_solid'
}
},
{
'pipeline': {
'name': 'composed_graph'
},
'solidHandle': {
'handleID': 'simple_graph.noop_solid'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'one'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_invalid_definition_error'
},
'solidHandle': {
'handleID': 'one'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'op_1'
},
'invocations': [
{
'pipeline': {
'name': 'two_ins_job'
},
'solidHandle': {
'handleID': 'op_1'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'op_2'
},
'invocations': [
{
'pipeline': {
'name': 'two_ins_job'
},
'solidHandle': {
'handleID': 'op_2'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'op_with_2_ins'
},
'invocations': [
{
'pipeline': {
'name': 'two_ins_job'
},
'solidHandle': {
'handleID': 'op_with_2_ins'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'palp_a'
},
'invocations': [
{
'pipeline': {
'name': 'partitioned_asset_lineage_pipeline'
},
'solidHandle': {
'handleID': 'palp_a'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'palp_b'
},
'invocations': [
{
'pipeline': {
'name': 'partitioned_asset_lineage_pipeline'
},
'solidHandle': {
'handleID': 'palp_b'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'passthrough'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_output_pipeline'
},
'solidHandle': {
'handleID': 'child_fail'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'reset'
},
'invocations': [
{
'pipeline': {
'name': 'eventually_successful'
},
'solidHandle': {
'handleID': 'reset'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_any'
},
'invocations': [
{
'pipeline': {
'name': 'scalar_output_pipeline'
},
'solidHandle': {
'handleID': 'return_any'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_bool'
},
'invocations': [
{
'pipeline': {
'name': 'scalar_output_pipeline'
},
'solidHandle': {
'handleID': 'return_bool'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_foo'
},
'invocations': [
{
'pipeline': {
'name': 'no_config_chain_pipeline'
},
'solidHandle': {
'handleID': 'return_foo'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_hello'
},
'invocations': [
{
'pipeline': {
'name': 'no_config_pipeline'
},
'solidHandle': {
'handleID': 'return_hello'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_hello_world'
},
'invocations': [
{
'pipeline': {
'name': 'no_config_chain_pipeline'
},
'solidHandle': {
'handleID': 'return_hello_world'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_int'
},
'invocations': [
{
'pipeline': {
'name': 'scalar_output_pipeline'
},
'solidHandle': {
'handleID': 'return_int'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_one'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_input_early_terminate_pipeline'
},
'solidHandle': {
'handleID': 'return_one'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_six'
},
'invocations': [
{
'pipeline': {
'name': 'multi_mode_with_loggers'
},
'solidHandle': {
'handleID': 'return_six'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'return_str'
},
'invocations': [
{
'pipeline': {
'name': 'scalar_output_pipeline'
},
'solidHandle': {
'handleID': 'return_str'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'simple_graph'
},
'invocations': [
{
'pipeline': {
'name': 'composed_graph'
},
'solidHandle': {
'handleID': 'simple_graph'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'simple_solid'
},
'invocations': [
{
'pipeline': {
'name': 'tagged_pipeline'
},
'solidHandle': {
'handleID': 'simple_solid'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'solid_asset_a'
},
'invocations': [
{
'pipeline': {
'name': 'multi_asset_pipeline'
},
'solidHandle': {
'handleID': 'solid_asset_a'
}
},
{
'pipeline': {
'name': 'single_asset_pipeline'
},
'solidHandle': {
'handleID': 'solid_asset_a'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'solid_asset_b'
},
'invocations': [
{
'pipeline': {
'name': 'multi_asset_pipeline'
},
'solidHandle': {
'handleID': 'solid_asset_b'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'solid_partitioned_asset'
},
'invocations': [
{
'pipeline': {
'name': 'partitioned_asset_pipeline'
},
'solidHandle': {
'handleID': 'solid_partitioned_asset'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'solid_that_gets_tags'
},
'invocations': [
{
'pipeline': {
'name': 'hello_world_with_tags'
},
'solidHandle': {
'handleID': 'solid_that_gets_tags'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'solid_with_list'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_list'
},
'solidHandle': {
'handleID': 'solid_with_list'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'solid_with_required_resource'
},
'invocations': [
{
'pipeline': {
'name': 'required_resource_pipeline'
},
'solidHandle': {
'handleID': 'solid_with_required_resource'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'spawn'
},
'invocations': [
{
'pipeline': {
'name': 'eventually_successful'
},
'solidHandle': {
'handleID': 'spawn'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'spew'
},
'invocations': [
{
'pipeline': {
'name': 'spew_pipeline'
},
'solidHandle': {
'handleID': 'spew'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'start'
},
'invocations': [
{
'pipeline': {
'name': 'retry_resource_pipeline'
},
'solidHandle': {
'handleID': 'start'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'sum_inputs'
},
'invocations': [
{
'pipeline': {
'name': 'retry_multi_input_early_terminate_pipeline'
},
'solidHandle': {
'handleID': 'sum_inputs'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'sum_numbers'
},
'invocations': [
{
'pipeline': {
'name': 'dynamic_pipeline'
},
'solidHandle': {
'handleID': 'sum_numbers'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'sum_solid'
},
'invocations': [
{
'pipeline': {
'name': 'csv_hello_world'
},
'solidHandle': {
'handleID': 'sum_solid'
}
},
{
'pipeline': {
'name': 'csv_hello_world_df_input'
},
'solidHandle': {
'handleID': 'sum_solid'
}
},
{
'pipeline': {
'name': 'csv_hello_world_two'
},
'solidHandle': {
'handleID': 'sum_solid'
}
},
{
'pipeline': {
'name': 'csv_hello_world_with_expectations'
},
'solidHandle': {
'handleID': 'sum_solid'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'sum_sq_solid'
},
'invocations': [
{
'pipeline': {
'name': 'csv_hello_world'
},
'solidHandle': {
'handleID': 'sum_sq_solid'
}
},
{
'pipeline': {
'name': 'csv_hello_world_df_input'
},
'solidHandle': {
'handleID': 'sum_sq_solid'
}
},
{
'pipeline': {
'name': 'csv_hello_world_with_expectations'
},
'solidHandle': {
'handleID': 'sum_sq_solid'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'tag_asset_solid'
},
'invocations': [
{
'pipeline': {
'name': 'asset_tag_pipeline'
},
'solidHandle': {
'handleID': 'tag_asset_solid'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'takes_an_enum'
},
'invocations': [
{
'pipeline': {
'name': 'pipeline_with_enum_config'
},
'solidHandle': {
'handleID': 'takes_an_enum'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'throw_a_thing'
},
'invocations': [
{
'pipeline': {
'name': 'naughty_programmer_pipeline'
},
'solidHandle': {
'handleID': 'throw_a_thing'
}
}
]
},
{
'__typename': 'UsedSolid',
'definition': {
'name': 'will_fail'
},
'invocations': [
{
'pipeline': {
'name': 'retry_resource_pipeline'
},
'solidHandle': {
'handleID': 'will_fail'
}
}
]
}
]
}
}
| 32.364486 | 81 | 0.239788 |
acf1b2c4108f0ae9abfe8cc7ebb525fdad12a054 | 9,261 | py | Python | scripts/atcutils.py | zinint/atc-react | 57c2fbdfc72ead89f12f42f5416777a397c5216a | [
"Apache-2.0"
] | null | null | null | scripts/atcutils.py | zinint/atc-react | 57c2fbdfc72ead89f12f42f5416777a397c5216a | [
"Apache-2.0"
] | null | null | null | scripts/atcutils.py | zinint/atc-react | 57c2fbdfc72ead89f12f42f5416777a397c5216a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import yaml
import re
import warnings
from os import listdir
from os.path import isfile, join
from yaml.scanner import ScannerError
# ########################################################################### #
# ############################ ATCutils ##################################### #
# ########################################################################### #
# Default configuration file paths (relative to the repository root).
DEFAULT_PROJECT_CONFIG_PATH = 'scripts/config.default.yml'  # project-wide defaults
DEFAULT_CONFIG_PATH = 'scripts/config.yml'  # local overrides/additions
# Show warnings only once:
# NOTE(review): warnings.catch_warnings() restores the previous filter state
# when the with-block exits, so this "once" filter does not persist for the
# rest of the module -- confirm whether a plain warnings.simplefilter("once")
# was intended here.
with warnings.catch_warnings():
    warnings.simplefilter("once")
class ATCConfig(object):
    """Holds the ATC project configuration.

    The effective configuration is the project-wide default configuration
    with any locally defined overrides/additions layered on top of it.
    """

    def __init__(self, path='scripts/config.yml'):
        """Build an ATCConfig from the project defaults plus a local file.

        Keyword Arguments:
            path {str} -- location of the local configuration file
                (default: {'scripts/config.yml'})
        """
        # Both assignments go through the property setters defined below.
        self.config_local = path
        self.config_project = DEFAULT_PROJECT_CONFIG_PATH

    def get_config_project(self):
        """Return the project-wide (default) configuration dictionary."""
        return self.__config_project

    def get_config_local(self):
        """Return only the locally defined overrides and additions."""
        return self.__config_local

    @property
    def config(self):
        """Return the effective configuration used by the application:
        project defaults overridden by any local settings."""
        merged = dict(self.config_project)
        merged.update(self.config_local)
        return merged

    def set_config_project(self, path):
        """Load the project configuration from the yaml file at *path*.

        Arguments:
            path {str} -- file location of the config (yaml)
        """
        self.__config_project = dict(self.__read_yaml_file(path))

    def set_config_local(self, path):
        """Load the local configuration from *path*.

        If no file exists at *path*, a warning is emitted and an empty
        local configuration is used, leaving only the project defaults.

        Arguments:
            path {str} -- local config file location
        """
        try:
            self.__config_local = dict(self.__read_yaml_file(path))
        except FileNotFoundError:
            wrn = "Local config '{path}' not found, using project default"
            # Warning fires from within the except block.
            warnings.warn(wrn.format(path=path))
            self.__config_local = {}

    def __read_yaml_file(self, path):
        """Parse a (possibly multi-document) yaml file.

        Extra documents, when present, are attached to the first document
        under its 'additions' key.
        """
        with open(path) as f:
            documents = list(yaml.load_all(f.read(), Loader=yaml.FullLoader))
        result = documents[0]
        if len(documents) > 1:
            result['additions'] = documents[1:]
        return result

    def get(self, key):
        """Delegate to dict.get on the effective configuration."""
        return self.config.get(key)

    config_local = property(get_config_local, set_config_local)
    config_project = property(get_config_project, set_config_project)
# Initialize global config
# Module-level ATCConfig instance built with the default local path
# ('scripts/config.yml'); constructing it reads both configuration files.
ATCconfig = ATCConfig()
class ATCutils:
    """Collection of static helper methods used throughout the project."""

    def __init__(self):
        """Init method (the class only exposes static helpers)."""
        pass

    @staticmethod
    def read_rule_file(path):
        """Open the file at *path* and return its text content."""
        with open(path) as f:
            rule_text = f.read()
        return rule_text

    @staticmethod
    def _find_yaml_paths(path):
        """Return paths of all .yaml/.yml files directly inside *path*."""
        return [
            join(path, f) for f in listdir(path)
            if isfile(join(path, f))
            and (f.endswith('.yaml') or f.endswith('.yml'))
        ]

    @staticmethod
    def _load_yaml_docs(yaml_paths):
        """Parse every file in *yaml_paths*, re-raising ScannerError with the
        offending file path in the message."""
        result = []
        # Loop variable renamed from 'yaml' -- the original shadowed the
        # imported yaml module inside this function.
        for yaml_path in yaml_paths:
            try:
                result.append(ATCutils.read_yaml_file(yaml_path))
            except ScannerError:
                raise ScannerError('yaml is bad! %s' % yaml_path)
        return result

    @staticmethod
    def load_yamls_with_paths(path):
        """Load every yaml file under *path*.

        Returns:
            tuple -- (list of parsed documents, list of file paths)
        """
        yaml_paths = ATCutils._find_yaml_paths(path)
        return ATCutils._load_yaml_docs(yaml_paths), yaml_paths

    @staticmethod
    def read_yaml_file(path):
        """Parse a (possibly multi-document) yaml file into a dictionary.

        Extra documents are stored under the 'additions' key of the first
        document. Passing the config path directly is deprecated; use
        load_config or ATCConfig instead.
        """
        if path == 'scripts/config.yml':
            wrn = "Use 'load_config' or 'ATCConfig' instead for config"
            # Warning will not show,
            # unless captured by logging facility or python called with -Wd
            warnings.warn(message=wrn, category=DeprecationWarning)
            return ATCConfig(path).config

        with open(path) as f:
            yaml_fields = yaml.load_all(f.read(), Loader=yaml.FullLoader)
            buff_results = [x for x in yaml_fields]
        result = buff_results[0]
        if len(buff_results) > 1:
            result['additions'] = buff_results[1:]
        return result

    @staticmethod
    def load_config(path):
        """Load the configuration YAML files used for ATC into a dictionary.

        Arguments:
            path {str} -- file path of the local configuration file

        Returns:
            dict -- configuration for ATC in dictionary format
        """
        return ATCConfig(path).config

    @staticmethod
    def load_yamls(path):
        """Load every yaml file under *path* into a list of documents."""
        docs, _ = ATCutils.load_yamls_with_paths(path)
        return docs

    @staticmethod
    def write_file(path, content, options="w+"):
        """Write *content* to the file at *path*; returns True on success."""
        with open(path, options) as file:
            file.write(content)
        return True

    @staticmethod
    def normalize_react_title(title):
        """Normalize a RA/RP title of the form
        RP_0003_identification_make_sure_email_is_a_phishing
        into readable text. Titles not matching that pattern are returned
        unchanged.
        """
        react_id_re = re.compile(r'R[AP]_\d{4}.*$')
        if not react_id_re.match(title):
            return title

        # Drop the "XX_0000_" prefix, turn underscores into spaces and
        # capitalize the first word (the original's .split('_', 0)[-1] was
        # a no-op and has been removed).
        title = title[8:].replace('_', ' ').capitalize()
        words = []
        for word in title.split():
            lower = word.lower()
            if lower in ("ip", "dns", "ms", "ngfw", "ips", "url", "pe", "pdf",
                         "elf", "dhcp", "vpn", "smb", "ftp", "http"):
                words.append(word.upper())      # acronyms go fully uppercase
            elif lower in ("unix", "windows", "proxy", "firewall", "mach-o"):
                words.append(word.capitalize())  # proper nouns get a capital
            else:
                words.append(word)
        return ' '.join(words)

    @staticmethod
    def get_ra_category(ra_id):
        """Return the Response Action category (Network, Email, File, ...)
        encoded in the second digit of the RA identifier; "N/A" when the id
        does not match any category pattern.
        """
        categories = {
            "General": 0,
            "Network": 1,
            "Email": 2,
            "File": 3,
            "Process": 4,
            "Configuration": 5,
            "Identity": 6,
        }
        for name, number in categories.items():
            if re.match(r'RA\d{1}' + str(number) + '.*$', ra_id):
                return name
        return "N/A"

    @staticmethod
    def normalize_rs_name(rs_name):
        """Map a Response Stage key (e.g. 'lessons_learned') to its display
        name; returns "N/A" for unknown stages.
        """
        stages = {
            "preparation": "Preparation",
            "identification": "Identification",
            "containment": "Containment",
            "eradication": "Eradication",
            "recovery": "Recovery",
            "lessons_learned": "Lessons Learned"
        }
        # Direct dict lookup replaces the original linear scan; same result.
        return stages.get(rs_name, "N/A")
| 32.044983 | 115 | 0.549509 |
acf1b2ec52710d3e7a6a0950b9e8fb7a8a53b92b | 2,358 | py | Python | 20200416_Socialmail/Data/socket/server.py | karta1782310/python-docx-automated-report-generation | f0e02a50a9e9547d131e583be0711aad72f08b51 | [
"MIT"
] | null | null | null | 20200416_Socialmail/Data/socket/server.py | karta1782310/python-docx-automated-report-generation | f0e02a50a9e9547d131e583be0711aad72f08b51 | [
"MIT"
] | null | null | null | 20200416_Socialmail/Data/socket/server.py | karta1782310/python-docx-automated-report-generation | f0e02a50a9e9547d131e583be0711aad72f08b51 | [
"MIT"
] | null | null | null | # coding: utf-8
import os
import sys
import socket
import threading
import buffer
from time import sleep
from scheduler import scheduler
class Receiver:
    """TCP server that receives scheduled-mail jobs.

    Binds host:port, then accepts connections forever, spawning one thread
    per client. Each connection delivers job metadata plus three payload
    files (eml, excel, annex) which are written under the server's data
    directory and handed to the scheduler.
    """

    def __init__(self, host='0.0.0.0', port=6666):
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.bind((host, port))
            s.listen(5)
        except socket.error as msg:
            print(msg)
            sys.exit(1)

        print('Server is ready...')
        print('Waiting connection...')

        # NOTE: the accept loop runs forever inside the constructor, so
        # Receiver() never returns. Kept as-is to preserve the existing
        # interface (constructing the object starts the server).
        while True:
            conn, addr = s.accept()
            connbuf = buffer.Buffer(conn)
            recv_thread = threading.Thread(target=self.deal_data, args=(connbuf, addr))
            recv_thread.start()

    def deal_data(self, connbuf, addr):
        """Handle one client connection: read job metadata, receive the three
        payload files and register the job with the scheduler.

        Args:
          connbuf: buffer.Buffer wrapping the client socket.
          addr: client (host, port) tuple, used for logging only.
        """
        print()
        print("Got a connection from ", addr)
        # Base data directory (the original assigned this twice; the
        # duplicate dead store has been removed).
        absolute_path = '/var/www/socialmails/schedule_server/'

        connbuf.put_utf8('Hi, Welcome to the server!')

        eml_type = connbuf.get_utf8()
        # Security: the file names come from an untrusted network peer;
        # os.path.basename strips any directory components so a malicious
        # name like '../../etc/cron.d/x' cannot escape the data folders.
        eml_name = absolute_path + 'eml/' + os.path.basename(connbuf.get_utf8())
        user_group = connbuf.get_utf8()
        mail_excel = absolute_path + 'excel/' + os.path.basename(connbuf.get_utf8())
        annex = absolute_path + 'annex/' + os.path.basename(connbuf.get_utf8())
        schedule_time = connbuf.get_utf8()

        # The client sends each file as "<size>\n<bytes>" in this fixed order.
        for file_name in [eml_name, mail_excel, annex]:
            file_size = int(connbuf.get_utf8())
            print('file size: ', file_size)

            with open(file_name, 'wb') as f:
                remaining = file_size
                while remaining:
                    chunk_size = 4096 if remaining >= 4096 else remaining
                    chunk = connbuf.get_bytes(chunk_size)
                    if not chunk:
                        # Peer closed early; leave the loop with remaining > 0.
                        break
                    f.write(chunk)
                    remaining -= len(chunk)
            if remaining:
                print(file_name, ' incomplete. Missing', remaining, 'bytes.')
            else:
                print(file_name, ' received successfully.')
        print()
        scheduler(schedule_time, [eml_type, eml_name, user_group, mail_excel, annex])
if __name__ == "__main__":
    # Starts the blocking accept loop (Receiver.__init__ never returns).
    receiver = Receiver()
| 32.75 | 89 | 0.568278 |
acf1b30044287b953f3de350a51cb43289b476d0 | 2,035 | py | Python | app/iclass/views/baseuser_view.py | edisonlz/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | app/iclass/views/baseuser_view.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | null | null | null | app/iclass/views/baseuser_view.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 9 | 2019-12-23T12:59:25.000Z | 2022-03-15T05:12:11.000Z | # coding=utf-8
import json
from django.contrib import messages
from django.shortcuts import render, get_object_or_404
from wi_model_util.imodel import get_object_or_none
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from app.iclass.utils import redefine_item_pos
from app.iclass.utils.common import get_paged_dict
from app.iclass.models import *
@login_required
def baseuser_list(request):
    """Render a paginated list of BaseUser records, newest first.

    GET params: ``search_key`` (optional substring filter) and ``page``.
    """
    qd = request.GET
    datas = BaseUser.objects.filter().order_by("-pk")
    search_key = qd.get('search_key', '')
    if search_key:
        # NOTE(review): filters on ``title`` while baseuser_new only handles
        # username/nickname/etc — confirm BaseUser actually has a ``title`` field.
        datas = datas.filter(title__contains=search_key)
    context = {}
    # get_paged_dict adds the page object and the 'datas' slice to the context.
    context.update(get_paged_dict(datas, request.GET.get('page'), 20, 'datas'))
    return render(request, 'baseuser/list.html', context)
@login_required
def baseuser_new(request):
    """Create or edit a BaseUser.

    POST saves the submitted fields (editing when ``itemid`` is given,
    creating otherwise) and redirects to the list view; GET renders the
    form, pre-filled when ``itemid`` is present.
    """
    if request.method == 'POST':
        qd = request.POST
        _id = qd.get("itemid", "")
        if _id:
            item = BaseUser.objects.get(pk=_id)
        else:
            item = BaseUser()
        # Fields are taken verbatim from the form; no validation here.
        item.user_id = qd.get("user_id", '')
        item.username = qd.get("username", '')
        item.nickname = qd.get("nickname", '')
        # NOTE(review): password is stored as submitted — confirm hashing
        # happens elsewhere (e.g. inside BaseUser.save).
        item.password = qd.get("password", '')
        item.image_url = qd.get("image_url", '')
        item.sex = qd.get("sex", '')
        item.email = qd.get("email", '')
        item.status = qd.get("status", '')
        item.register_from = qd.get("register_from", '')
        item.last_login_time = qd.get("last_login_time", '')
        item.create_time = qd.get("create_time", '')
        item.save()
        return HttpResponseRedirect(reverse("baseuser_list"))
    else:
        qd = request.GET
        _id = qd.get("itemid", "")
        if _id:
            item = BaseUser.objects.get(pk=_id)
        else:
            item = BaseUser()
        context = {
            "item": item,
        }
        return render(request, 'baseuser/new.html', context)
| 32.301587 | 79 | 0.623587 |
acf1b340af73969ef23d643004bf362a6c3bdea6 | 2,461 | py | Python | lineid_plot/utils.py | phn/lineid_plot | 7c7a1af53fe439b3a7c5a57f01680575837fb978 | [
"BSD-2-Clause"
] | 18 | 2015-12-20T19:47:51.000Z | 2021-07-25T06:26:59.000Z | lineid_plot/utils.py | phn/lineid_plot | 7c7a1af53fe439b3a7c5a57f01680575837fb978 | [
"BSD-2-Clause"
] | 9 | 2015-11-26T13:52:51.000Z | 2020-01-29T06:19:21.000Z | lineid_plot/utils.py | phn/lineid_plot | 7c7a1af53fe439b3a7c5a57f01680575837fb978 | [
"BSD-2-Clause"
] | 9 | 2015-02-12T17:00:56.000Z | 2021-07-08T14:19:33.000Z | """Some utility functions."""
import matplotlib as mpl
import lineid_plot
from lineid_plot import unique_labels
def get_labels(labels):
    """Create unique labels and their matching "<label>_line" ids."""
    label_u = unique_labels(labels)
    return label_u, ["{}_line".format(lbl) for lbl in label_u]
def get_boxes_and_lines(ax, labels):
    """Get boxes and lines using labels as id.

    Annotation boxes and Line2D markers found on *ax* are kept only when
    their artist label matches one of the unique labels derived from
    *labels*; everything else on the axes is ignored.
    """
    labels_u, labels_u_line = get_labels(labels)
    boxes = ax.findobj(mpl.text.Annotation)
    lines = ax.findobj(mpl.lines.Line2D)
    # Membership tests replace the old try/except ValueError around
    # list.index(), whose result was assigned but never used.
    lineid_boxes = [box for box in boxes if box.get_label() in labels_u]
    lineid_lines = [line for line in lines if line.get_label() in labels_u_line]
    return lineid_boxes, lineid_lines
def color_text_boxes(ax, labels, colors, color_arrow=True):
    """Color text boxes.

    Instead of this function, one can pass annotate_kwargs and plot_kwargs to
    plot_line_ids function.
    """
    assert len(labels) == len(colors), \
        "Equal no. of colors and lables must be given"
    box_labels = lineid_plot.unique_labels(labels)
    for box in ax.findobj(mpl.text.Annotation):
        try:
            idx = box_labels.index(box.get_label())
        except ValueError:
            continue  # No changes for this box
        box.set_color(colors[idx])
        if color_arrow:
            box.arrow_patch.set_color(colors[idx])
    ax.figure.canvas.draw()
def color_lines(ax, labels, colors):
    """Color lines.

    Instead of this function, one can pass annotate_kwargs and plot_kwargs to
    plot_line_ids function.
    """
    assert len(labels) == len(colors), \
        "Equal no. of colors and lables must be given"
    line_labels = ["{}_line".format(lbl) for lbl in lineid_plot.unique_labels(labels)]
    for line in ax.findobj(mpl.lines.Line2D):
        try:
            idx = line_labels.index(line.get_label())
        except ValueError:
            continue  # No changes for this line
        line.set_color(colors[idx])
    ax.figure.canvas.draw()
| 28.952941 | 78 | 0.633482 |
acf1b3cf3b88aa573ccfb1227a4622aaf5356975 | 4,893 | py | Python | shinrl/envs/mountaincar/calc.py | omron-sinicx/ShinRL | 09f4ae274a33d1fc1d9d542f816aef40014af6b5 | [
"MIT"
] | 34 | 2021-12-09T07:12:57.000Z | 2022-03-11T08:17:20.000Z | shinrl/envs/mountaincar/calc.py | omron-sinicx/ShinRL | 09f4ae274a33d1fc1d9d542f816aef40014af6b5 | [
"MIT"
] | null | null | null | shinrl/envs/mountaincar/calc.py | omron-sinicx/ShinRL | 09f4ae274a33d1fc1d9d542f816aef40014af6b5 | [
"MIT"
] | 4 | 2021-12-11T07:48:01.000Z | 2022-03-01T23:50:33.000Z | """
Author: Toshinori Kitamura
Affiliation: NAIST & OSX
"""
from typing import Tuple
import chex
import jax
import jax.numpy as jnp
from chex import Array
import shinrl as srl
from .config import MountainCarConfig
@jax.jit
def to_discrete_act(config: MountainCarConfig, c_act: float) -> int:
    """Convert a continuous action to a discrete action.

    Args:
        config (MountainCarConfig)
        c_act (float): Continuous action in range [-1, 1].

    Returns:
        A discretized action id.
    """
    chex.assert_type(c_act, float)
    num_actions = config.dA
    clipped = jnp.clip(c_act, -1.0, 1.0)
    bin_width = 2 / num_actions
    # Epsilon guards against floor() dropping a bin on exact boundaries.
    act_id = jnp.floor((clipped + 1.0) / bin_width + 1e-5).astype(jnp.uint32)
    return jnp.clip(act_id, 0, num_actions - 1)
@jax.jit
def to_continuous_act(config: MountainCarConfig, act: int) -> float:
    """Convert a discrete action to a continuous action.

    Args:
        config (MountainCarConfig)
        act (int): Discrete action in [0, ..., dA-1].

    Returns:
        A continuous action in range [-1.0, 1.0]
    """
    chex.assert_type(act, int)
    bin_width = 2 / config.dA
    # Map the bin index back to the left edge of its continuous bin.
    return jnp.clip(act * bin_width - 1.0, -1.0, 1.0)
@jax.jit
def state_to_pos_vel(config: MountainCarConfig, state: int) -> Tuple[float, float]:
    """Convert a state id to position and velocity.

    Args:
        config (MountainCarConfig)
        state (int)

    Returns:
        position and velocity
    """
    pos_res, vel_res = config.pos_res, config.vel_res
    pos_max, vel_max = config.pos_max, config.vel_max
    pos_min, vel_min = config.pos_min, config.vel_min
    pos_idx = state % pos_res
    # Bug fix: pos_vel_to_state encodes state = pos_idx + pos_res * vel_idx,
    # so the velocity index must be recovered by dividing by pos_res. The
    # previous ``state // vel_res`` was only correct when pos_res == vel_res.
    vel_idx = state // pos_res
    pos = pos_min + (pos_max - pos_min) / (pos_res - 1) * pos_idx
    pos = jnp.clip(pos, pos_min, pos_max)
    vel = vel_min + (vel_max - vel_min) / (vel_res - 1) * vel_idx
    vel = jnp.clip(vel, vel_min, vel_max)
    return pos, vel
@jax.jit
def pos_vel_to_state(config: MountainCarConfig, pos: float, vel: float) -> float:
    """Convert position and velocity to state id

    Args:
        config (MountainCarConfig)
        pos (float): pos value
        vel (float): velocity value

    Returns:
        state id (int)
    """
    n_pos, n_vel = config.pos_res, config.vel_res
    pos_span = config.pos_max - config.pos_min
    vel_span = config.vel_max - config.vel_min
    # Index of the grid cell each value falls into (epsilon for boundaries).
    pos_idx = jnp.floor((pos - config.pos_min) / (pos_span / (n_pos - 1)) + 1e-5)
    vel_idx = jnp.floor((vel - config.vel_min) / (vel_span / (n_vel - 1)) + 1e-5)
    flat_id = (pos_idx + n_pos * vel_idx).astype(jnp.uint32)
    return jnp.clip(flat_id, 0, n_pos * n_vel - 1)
@jax.jit
def transition(
    config: MountainCarConfig, state: int, action: int
) -> Tuple[Array, Array]:
    """Deterministic dynamics: return (next_states, probs) for (state, action).

    Both returned arrays have length 1 because the environment is
    deterministic (prob is always 1.0).
    """
    chex.assert_type([state, action], int)
    c_act = to_continuous_act(config, action)
    force = jnp.squeeze(c_act) * config.force_mag
    def body_fn(_, pos_vel):
        pos, vel = pos_vel
        # Classic mountain-car update: the gravity component is cos(3 * pos).
        vel = vel + force + jnp.cos(3 * pos) * (-0.0025)
        vel = jnp.clip(vel, config.vel_min, config.vel_max)
        pos = pos + vel
        pos = jnp.clip(pos, config.pos_min, config.pos_max)
        return (pos, vel)
    pos, vel = state_to_pos_vel(config, state)
    # one step is not enough when state is discretized
    pos, vel = jax.lax.fori_loop(0, 8, body_fn, (pos, vel))
    # Hitting the left wall kills the velocity.
    vel = jax.lax.cond(pos == config.pos_min, lambda _: 0.0, lambda _: vel, None)
    next_state = pos_vel_to_state(config, pos, vel).reshape((1,))
    prob = jnp.array((1.0,), dtype=float)
    return next_state, prob
@jax.jit
def reward(config: MountainCarConfig, state: int, action: int) -> float:
    """Return 0.0 once the cart has reached the goal position, else -1.0."""
    pos, _vel = state_to_pos_vel(config, state)
    reached_goal = pos >= config.goal_pos
    return jax.lax.cond(reached_goal, lambda _: 0.0, lambda _: -1.0, None)
@jax.jit
def observation_tuple(config: MountainCarConfig, state: int) -> Array:
    """Make the tuple observation (position, velocity)."""
    position, velocity = state_to_pos_vel(config, state)
    return jnp.array([position, velocity], dtype=float)
@jax.jit
def observation_image(config: MountainCarConfig, state: int) -> Array:
    """Make the image observation."""
    pos, vel = state_to_pos_vel(config, state)
    image = jnp.zeros((28, 28), dtype=float)
    # Scale from position units to pixels across the 28x28 canvas.
    pos2pxl = 28 / (config.pos_max - config.pos_min)
    # Height of the mountain-car track at a given (shifted) position.
    to_hight = lambda _x: jnp.sin(3 * _x) * 0.45 + 0.75
    x = ((pos - config.pos_min) * pos2pxl).astype(jnp.uint32)
    y = (to_hight(pos - config.pos_min) * pos2pxl).astype(jnp.uint32)
    pos_circle = srl.draw_circle(image, x, y, 4)
    # Bright circle marks the current position.
    image = image + pos_circle * 0.8
    x = ((pos - vel * 5.0 - config.pos_min) * pos2pxl).astype(jnp.uint32)
    y = (to_hight(pos - vel * 5.0 - config.pos_min) * pos2pxl).astype(jnp.uint32)
    vel_circle = srl.draw_circle(image, x, y, 4)
    # Dim circle trails behind the cart proportionally to velocity.
    image = image + vel_circle * 0.2
    return jnp.expand_dims(image, axis=-1)  # 28x28x1
| 31.365385 | 83 | 0.645207 |
acf1b474fe8733e97c558186b8cb3eff038a001b | 622 | py | Python | fme_server_lib/FMEAPI/CallAPIPost.py | bcgov/dss-fme | 0564363c59ccd06fe79459ec08ff3b7c5bf7e061 | [
"Apache-2.0"
] | null | null | null | fme_server_lib/FMEAPI/CallAPIPost.py | bcgov/dss-fme | 0564363c59ccd06fe79459ec08ff3b7c5bf7e061 | [
"Apache-2.0"
] | 2 | 2021-07-29T07:19:49.000Z | 2021-07-29T07:21:46.000Z | fme_server_lib/FMEAPI/CallAPIPost.py | bcgov/fme | 0564363c59ccd06fe79459ec08ff3b7c5bf7e061 | [
"Apache-2.0"
] | null | null | null | import requests
from FMEAPI.CallAPI import CallAPI
class CallAPIPOST(CallAPI):
    """CallAPI specialization that issues HTTP POST requests."""
    def __init__(self, server, token):
        super().__init__(server, token)
        self.http_method = "POST"
    def execute_api(self, url, headers, body):
        """POST *body* to *url* and return the raw requests response."""
        response = requests.post(url=url, data=body, headers=headers)
        return response
    def call_api_upload(self, method, files, url_params=None, return_codes=None, headers=None):
        """Upload *files* (multipart) to the *method* endpoint and validate the response."""
        url = self.populate_url(method, url_params)
        response = requests.post(url=url, headers=headers, files=files)
        return self.check_response(response, url, return_codes)
| 32.736842 | 95 | 0.700965 |
acf1b5e199b3091192a00c3d0ed02c4fe3693231 | 1,852 | py | Python | DataScience-Practice/code.py | singhpratapaayush07/ga-learner-dsb-repo | ce5a518c2bb806076410e46f8b4d248e70c92e58 | [
"MIT"
] | null | null | null | DataScience-Practice/code.py | singhpratapaayush07/ga-learner-dsb-repo | ce5a518c2bb806076410e46f8b4d248e70c92e58 | [
"MIT"
] | null | null | null | DataScience-Practice/code.py | singhpratapaayush07/ga-learner-dsb-repo | ce5a518c2bb806076410e46f8b4d248e70c92e58 | [
"MIT"
] | null | null | null | # --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
# (presumably injected by the exercise platform — not defined in this file)
# New record to append; same column order as the csv.
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
data=np.genfromtxt(path,delimiter=",", skip_header=1)
# Stack the new record below the loaded data.
census=np.concatenate((data,new_record),axis=0)
# --------------
# Basic statistics of the age column (column 0).
age=census[:,0]
max_age=np.max(age)
min_age=np.min(age)
age_mean=np.mean(age)
age_std=np.std(age)
#print(age,max_age,min_age,age_mean,age_std, sep='\n')
# --------------
# Split by race code (column 2, values 0-4) and find the smallest group.
race=census[:,2]
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
minority=min(len_0,len_1,len_2,len_3,len_4)
# 10 is a sentinel meaning "not found"; overwritten by the chain below.
minority_race=10
if(len_0==minority):
    minority_race=0
elif(len_1==minority):
    minority_race=1
elif(len_2==minority):
    minority_race=2
elif(len_3==minority):
    minority_race=3
elif(len_4==minority):
    minority_race=4
#print(minority_race,len_0,len_1,len_2,len_3,len_4,sep='\n')
# --------------
#Subsetting the array based on the age
senior_citizens=census[census[:,0]>60]
#Summing working hours (column 6) over the senior subset
working_hours_sum=senior_citizens.sum(axis=0)[6]
#Finding the length of the array
senior_citizens_len=len(senior_citizens)
#Finding the average working hours
avg_working_hours=working_hours_sum/senior_citizens_len
#Printing the average working hours
print((avg_working_hours))
#Code ends here
# --------------
# Average pay (column 7) for high (>10 yrs education, column 1) vs low groups.
high=census[census[:,1]>10]
low=census[census[:,1]<=10]
avg_pay_high=(np.mean(high,axis=0)[7])
avg_pay_low=(low.sum(axis=0)[7])/len(low)
| 19.092784 | 61 | 0.673326 |
acf1b60d17d0d57fd916dc7de819d3ce134e3690 | 13,118 | py | Python | beet/core/file.py | mcbeet/beet | e79e3d37ecdbd9b411ba9a912a3e5b8c6892da0d | [
"MIT"
] | 46 | 2021-03-09T23:34:52.000Z | 2022-03-08T01:30:04.000Z | beet/core/file.py | mcbeet/beet | e79e3d37ecdbd9b411ba9a912a3e5b8c6892da0d | [
"MIT"
] | 127 | 2021-02-24T00:41:44.000Z | 2022-03-31T05:14:31.000Z | beet/core/file.py | mcbeet/beet | e79e3d37ecdbd9b411ba9a912a3e5b8c6892da0d | [
"MIT"
] | 9 | 2021-03-11T18:18:28.000Z | 2022-03-11T20:32:01.000Z | __all__ = [
"File",
"FileOrigin",
"FileSerialize",
"FileDeserialize",
"TextFileBase",
"TextFileContent",
"TextFile",
"BinaryFileBase",
"BinaryFileContent",
"BinaryFile",
"JsonFileBase",
"JsonFile",
"YamlFileBase",
"YamlFile",
"PngFile",
]
import io
import json
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable, ClassVar, Generic, Optional, Type, TypeVar, Union
from zipfile import ZipFile
import yaml
from pydantic import BaseModel
try:
    from PIL.Image import Image
    from PIL.Image import new as new_image
    from PIL.Image import open as open_image
except ImportError:
    # Pillow is an optional dependency: type-check against Any and fail
    # lazily with a helpful message only when image features are used.
    Image = Any
    def new_image(*args: Any, **kwargs: Any) -> Any:
        raise RuntimeError("Please install Pillow to create images programmatically")
    def open_image(*args: Any, **kwargs: Any) -> Any:
        raise RuntimeError("Please install Pillow to edit images programmatically")
from .utils import FileSystemPath, JsonDict, dump_json, extra_field
ValueType = TypeVar("ValueType", bound=Any)
SerializeType = TypeVar("SerializeType", bound=Any)
FileType = TypeVar("FileType", bound="File[Any, Any]")
FileOrigin = Union[FileSystemPath, ZipFile]
TextFileContent = Union[ValueType, str, None]
BinaryFileContent = Union[ValueType, bytes, None]
@dataclass(eq=False)
class File(Generic[ValueType, SerializeType]):
    """Base file class.

    Content is held either deserialized (ValueType), serialized
    (SerializeType), or not at all — in which case ``source_path`` points
    at the on-disk bytes and loading is deferred until first access.
    """

    content: Union[ValueType, SerializeType, None] = None
    source_path: Optional[FileSystemPath] = None
    # Optional callback invoked when the file is attached to a pack.
    on_bind: Optional[Callable[[Any, Any, str], Any]] = extra_field(default=None)
    # Conversion hooks; subclasses install them in __post_init__.
    serializer: Callable[[ValueType], SerializeType] = extra_field(init=False)
    deserializer: Callable[[SerializeType], ValueType] = extra_field(init=False)
    def __post_init__(self):
        # With neither content nor a source path, fall back to the default value.
        if self.content is self.source_path is None:
            self.content = self.default()
    def merge(self: FileType, other: FileType) -> bool:
        """Merge the given file or return False to indicate no special handling."""
        return False
    def bind(self, pack: Any, path: str) -> Any:
        """Handle file binding."""
        if self.on_bind:
            self.on_bind(self, pack, path)
    def set_content(self, content: Union[ValueType, SerializeType]):
        """Update the internal content."""
        self.content = content
        self.source_path = None
    def get_content(self) -> Union[ValueType, SerializeType]:
        """Return the internal content, reading lazily from source_path if needed."""
        return (
            self.decode(Path(self.ensure_source_path()).read_bytes())
            if self.content is None
            else self.content
        )
    def ensure_source_path(self) -> FileSystemPath:
        """Make sure that the file has a source path and return it."""
        if self.source_path:
            return self.source_path
        raise ValueError(
            f"Expected {self.__class__.__name__} object to be initialized with "
            "a source path."
        )
    def ensure_serialized(
        self,
        serializer: Optional[Callable[[ValueType], SerializeType]] = None,
    ) -> SerializeType:
        """Make sure that the content of the file is serialized.

        A temporary *serializer* may be supplied; the previous one is
        restored afterwards even if serialization raises.
        """
        backup = self.serializer
        if serializer:
            self.serializer = serializer
        try:
            content = self.serialize(self.get_content())
        finally:
            self.serializer = backup
        self.set_content(content)
        return content  # type: ignore
    def ensure_deserialized(
        self,
        deserializer: Optional[Callable[[SerializeType], ValueType]] = None,
    ) -> ValueType:
        """Make sure that the content of the file is deserialized.

        Mirrors ensure_serialized with an optional temporary *deserializer*.
        """
        backup = self.deserializer
        if deserializer:
            self.deserializer = deserializer
        try:
            content = self.deserialize(self.get_content())
        finally:
            self.deserializer = backup
        self.set_content(content)
        return content  # type: ignore
    def __eq__(self, other: Any) -> bool:
        # Equality requires the exact same class; then compare source paths
        # first (cheap), else the serialized, else the deserialized forms.
        if type(self) != type(other):
            return NotImplemented
        return (
            (self.source_path is not None and self.source_path == other.source_path)
            or self.ensure_serialized() == other.ensure_serialized()
            or self.ensure_deserialized() == other.ensure_deserialized()
        )
    def __hash__(self) -> int:
        # Identity hash: files are mutable, so content-based hashing is unsafe.
        return id(self)
    @classmethod
    def default(cls) -> ValueType:
        """Return the file's default value."""
        raise ValueError(
            f"{cls.__name__} object must be initialized with "
            "either a value, serialized data, or a source path."
        )
    def serialize(self, content: Union[ValueType, SerializeType]) -> SerializeType:
        """Serialize file content."""
        raise NotImplementedError()
    def deserialize(self, content: Union[ValueType, SerializeType]) -> ValueType:
        """Deserialize file content."""
        raise NotImplementedError()
    @classmethod
    def decode(cls, raw: bytes) -> SerializeType:
        """Convert bytes to serialized representation."""
        raise NotImplementedError()
    @classmethod
    def encode(cls, raw: SerializeType) -> bytes:
        """Convert serialized representation to bytes."""
        raise NotImplementedError()
    @classmethod
    def load(cls: Type[FileType], origin: FileOrigin, path: FileSystemPath) -> FileType:
        """Load a file from a zipfile or from the filesystem.

        Raises FileNotFoundError when the entry does not exist.
        """
        instance = cls.try_load(origin, path)
        if instance is None:
            raise FileNotFoundError(path)
        return instance
    @classmethod
    def try_load(
        cls: Type[FileType], origin: FileOrigin, path: FileSystemPath
    ) -> Optional[FileType]:
        """Try to load a file from a zipfile or from the filesystem."""
        if isinstance(origin, ZipFile):
            try:
                # Zip entries are read eagerly into serialized content.
                return cls(cls.decode(origin.read(str(path))))
            except KeyError:
                return None
        # Filesystem files stay lazy: only the source path is recorded.
        path = Path(origin, path)
        return cls(source_path=path) if path.is_file() else None
    def dump(self, origin: FileOrigin, path: FileSystemPath):
        """Write the file to a zipfile or to the filesystem.

        Lazy files (content still None) are copied straight from their
        source path without ever being decoded.
        """
        if self.content is None:
            if isinstance(origin, ZipFile):
                origin.write(self.ensure_source_path(), str(path))
            else:
                shutil.copyfile(self.ensure_source_path(), str(Path(origin, path)))
        else:
            raw = self.encode(self.ensure_serialized())
            if isinstance(origin, ZipFile):
                origin.writestr(str(path), raw)
            else:
                Path(origin, path).write_bytes(raw)
class FileSerialize(Generic[SerializeType]):
    """Descriptor that makes sure that content of the file is serialized."""
    def __get__(
        self,
        obj: File[Any, SerializeType],
        objtype: Optional[Type[Any]] = None,
    ) -> SerializeType:
        # Reading the attribute forces serialization of the owner's content.
        return obj.ensure_serialized()
    def __set__(self, obj: File[Any, SerializeType], value: SerializeType):
        # Writing replaces the owner's content with the serialized value.
        obj.set_content(value)
class FileDeserialize(Generic[ValueType]):
    """Descriptor that makes sure that content of the file is deserialized."""
    def __get__(
        self,
        obj: File[ValueType, Any],
        objtype: Optional[Type[Any]] = None,
    ) -> ValueType:
        # Reading the attribute forces deserialization of the owner's content.
        return obj.ensure_deserialized()
    def __set__(self, obj: File[ValueType, Any], value: ValueType):
        # Writing replaces the owner's content with the deserialized value.
        obj.set_content(value)
class TextFileBase(File[ValueType, str]):
    """Base class for files that get serialized to strings."""
    # Descriptor exposing the serialized (string) form of the content.
    text: FileSerialize[str] = FileSerialize()
    def __post_init__(self):
        super().__post_init__()
        # Route the generic serializer hooks through the str conversions.
        self.serializer = self.to_str
        self.deserializer = self.from_str
    def serialize(self, content: Union[ValueType, str]) -> str:
        # Strings are already serialized; anything else goes through to_str.
        return content if isinstance(content, str) else self.serializer(content)
    def deserialize(self, content: Union[ValueType, str]) -> ValueType:
        return self.deserializer(content) if isinstance(content, str) else content
    @classmethod
    def decode(cls, raw: bytes) -> str:
        return raw.decode()
    @classmethod
    def encode(cls, raw: str) -> bytes:
        return raw.encode()
    @classmethod
    def to_str(cls, content: ValueType) -> str:
        """Convert content to string."""
        raise NotImplementedError()
    @classmethod
    def from_str(cls, content: str) -> ValueType:
        """Convert string to content."""
        raise NotImplementedError()
class TextFile(TextFileBase[str]):
    """Class representing a text file.

    Value and serialized representations are both plain strings, so the
    conversions are identities and the default value is the empty string.
    """
    @classmethod
    def to_str(cls, content: str) -> str:
        return content
    @classmethod
    def from_str(cls, content: str) -> str:
        return content
    @classmethod
    def default(cls) -> str:
        return ""
class BinaryFileBase(File[ValueType, bytes]):
    """Base class for files that get serialized to bytes."""
    # Descriptor exposing the serialized (bytes) form of the content.
    blob: FileSerialize[bytes] = FileSerialize()
    def __post_init__(self):
        super().__post_init__()
        # Route the generic serializer hooks through the bytes conversions.
        self.serializer = self.to_bytes
        self.deserializer = self.from_bytes
    def serialize(self, content: Union[ValueType, bytes]) -> bytes:
        # Bytes are already serialized; anything else goes through to_bytes.
        return content if isinstance(content, bytes) else self.serializer(content)
    def deserialize(self, content: Union[ValueType, bytes]) -> ValueType:
        return self.deserializer(content) if isinstance(content, bytes) else content
    @classmethod
    def decode(cls, raw: bytes) -> bytes:
        return raw
    @classmethod
    def encode(cls, raw: bytes) -> bytes:
        return raw
    @classmethod
    def to_bytes(cls, content: ValueType) -> bytes:
        """Convert content to bytes."""
        raise NotImplementedError()
    @classmethod
    def from_bytes(cls, content: bytes) -> ValueType:
        """Convert bytes to content."""
        raise NotImplementedError()
class BinaryFile(BinaryFileBase[bytes]):
    """Class representing a binary file.

    Value and serialized representations are both bytes, so the conversions
    are identities and the default value is the empty bytes object.
    """
    @classmethod
    def to_bytes(cls, content: bytes) -> bytes:
        return content
    @classmethod
    def from_bytes(cls, content: bytes) -> bytes:
        return content
    @classmethod
    def default(cls) -> bytes:
        return b""
class JsonFileBase(TextFileBase[ValueType]):
    """Base class for json files."""
    # Descriptor exposing the deserialized (parsed) form of the content.
    data: FileDeserialize[ValueType] = FileDeserialize()
    # Optional pydantic model used to validate/structure the parsed data.
    model: ClassVar[Optional[Type[Any]]] = None
    @classmethod
    def to_str(cls, content: ValueType) -> str:
        # Pydantic model instances are dumped via their dict() form.
        return dump_json(
            content.dict()
            if (
                cls.model
                and issubclass(cls.model, BaseModel)
                and isinstance(content, cls.model)
            )
            else content
        )
    @classmethod
    def from_str(cls, content: str) -> ValueType:
        value = json.loads(content)
        # Parsed dicts are promoted to the pydantic model when one is set.
        if cls.model and issubclass(cls.model, BaseModel):
            value = cls.model(**value)
        return value  # type: ignore
    @classmethod
    def default(cls) -> ValueType:
        return cls.model() if cls.model and issubclass(cls.model, BaseModel) else {}  # type: ignore
class JsonFile(JsonFileBase[JsonDict]):
    """Class representing a json file holding a plain dict (no model)."""
    data: FileDeserialize[JsonDict] = FileDeserialize()
    @classmethod
    def default(cls) -> JsonDict:
        return {}
class YamlFileBase(TextFileBase[ValueType]):
    """Base class for yaml files."""
    # Descriptor exposing the deserialized (parsed) form of the content.
    data: FileDeserialize[ValueType] = FileDeserialize()
    # Optional pydantic model used to validate/structure the parsed data.
    model: ClassVar[Optional[Type[Any]]] = None
    @classmethod
    def to_str(cls, content: ValueType) -> str:
        # Pydantic model instances are dumped via their dict() form.
        return yaml.dump(  # type: ignore
            content.dict()
            if (
                cls.model
                and issubclass(cls.model, BaseModel)
                and isinstance(content, cls.model)
            )
            else content
        )
    @classmethod
    def from_str(cls, content: str) -> ValueType:
        # safe_load avoids constructing arbitrary python objects.
        value = yaml.safe_load(content)
        if cls.model and issubclass(cls.model, BaseModel):
            value = cls.model(**value)
        return value  # type: ignore
    @classmethod
    def default(cls) -> ValueType:
        return cls.model() if cls.model and issubclass(cls.model, BaseModel) else {}  # type: ignore
class YamlFile(YamlFileBase[JsonDict]):
    """Class representing a yaml file holding a plain dict (no model)."""
    data: FileDeserialize[JsonDict] = FileDeserialize()
    @classmethod
    def default(cls) -> JsonDict:
        return {}
class PngFile(BinaryFileBase[Image]):
    """Class representing a png file backed by a Pillow image."""
    # Descriptor exposing the deserialized (PIL Image) form of the content.
    image: FileDeserialize[Image] = FileDeserialize()
    @classmethod
    def to_bytes(cls, content: Image) -> bytes:
        # Render the image to an in-memory PNG buffer.
        dst = io.BytesIO()
        content.save(dst, format="png")
        return dst.getvalue()
    @classmethod
    def from_bytes(cls, content: bytes) -> Image:
        return open_image(io.BytesIO(content))
    @classmethod
    def default(cls) -> Image:
        # 16x16 black square (requires Pillow; see the import fallback above).
        return new_image("RGB", (16, 16), "black")
| 29.746032 | 100 | 0.631194 |
acf1b6a44fd86e2b9a36c4996da4613242d215d3 | 2,107 | py | Python | teuthology/suite/test/test_placeholder.py | julpark-rh/teuthology | 7cebb3f2319fd6c8340c0f7cd15a137e747fd32e | [
"MIT"
] | 117 | 2015-03-24T17:30:44.000Z | 2022-03-27T13:29:55.000Z | teuthology/suite/test/test_placeholder.py | julpark-rh/teuthology | 7cebb3f2319fd6c8340c0f7cd15a137e747fd32e | [
"MIT"
] | 1,014 | 2015-01-05T21:33:17.000Z | 2022-03-31T13:10:09.000Z | teuthology/suite/test/test_placeholder.py | julpark-rh/teuthology | 7cebb3f2319fd6c8340c0f7cd15a137e747fd32e | [
"MIT"
] | 237 | 2015-01-04T03:37:42.000Z | 2022-03-31T16:53:19.000Z | from teuthology.suite.placeholder import (
substitute_placeholders, dict_templ, Placeholder
)
class TestPlaceholder(object):
    def test_substitute_placeholders(self):
        """All Placeholder leaves in dict_templ get replaced from the input."""
        suite_hash = 'suite_hash'
        input_dict = dict(
            suite='suite',
            suite_branch='suite_branch',
            suite_hash=suite_hash,
            ceph_branch='ceph_branch',
            ceph_hash='ceph_hash',
            teuthology_branch='teuthology_branch',
            teuthology_sha1='teuthology_sha1',
            machine_type='machine_type',
            distro='distro',
            distro_version='distro_version',
            archive_upload='archive_upload',
            archive_upload_key='archive_upload_key',
            suite_repo='https://example.com/ceph/suite.git',
            suite_relpath='',
            ceph_repo='https://example.com/ceph/ceph.git',
            flavor='default'
        )
        output_dict = substitute_placeholders(dict_templ, input_dict)
        assert output_dict['suite'] == 'suite'
        assert output_dict['suite_sha1'] == suite_hash
        # The template itself must keep its Placeholder objects (not mutated).
        assert isinstance(dict_templ['suite'], Placeholder)
        assert isinstance(
            dict_templ['overrides']['admin_socket']['branch'],
            Placeholder)
    def test_null_placeholders_dropped(self):
        """Keys whose substituted value is None are dropped from the output."""
        input_dict = dict(
            suite='suite',
            suite_branch='suite_branch',
            suite_hash='suite_hash',
            ceph_branch='ceph_branch',
            ceph_hash='ceph_hash',
            teuthology_branch='teuthology_branch',
            teuthology_sha1='teuthology_sha1',
            machine_type='machine_type',
            archive_upload='archive_upload',
            archive_upload_key='archive_upload_key',
            distro=None,
            distro_version=None,
            suite_repo='https://example.com/ceph/suite.git',
            suite_relpath='',
            ceph_repo='https://example.com/ceph/ceph.git',
            flavor=None,
        )
        output_dict = substitute_placeholders(dict_templ, input_dict)
        assert 'os_type' not in output_dict
| 37.625 | 69 | 0.610346 |
acf1b7b84e3ae8a948b05e7aef0bbcb32b243a8c | 10,791 | py | Python | tests/test_session.py | kinzhong/kestrel-lang | 060d5fdd9ff251e3bbbd1609f233a54c96202897 | [
"Apache-2.0"
] | 119 | 2021-06-04T15:40:10.000Z | 2022-03-24T09:56:53.000Z | tests/test_session.py | kinzhong/kestrel-lang | 060d5fdd9ff251e3bbbd1609f233a54c96202897 | [
"Apache-2.0"
] | 76 | 2021-06-04T15:06:10.000Z | 2022-03-20T21:03:13.000Z | tests/test_session.py | kinzhong/kestrel-lang | 060d5fdd9ff251e3bbbd1609f233a54c96202897 | [
"Apache-2.0"
] | 28 | 2021-06-05T07:27:15.000Z | 2022-01-20T18:43:47.000Z | import json
import logging
import os
import pytest
import pathlib
import shutil
import tempfile
import pandas as pd
from kestrel.session import Session
def get_df(session, var_name):
    """Return the Kestrel variable *var_name* from *session* as a DataFrame."""
    records = session.get_variable(var_name)
    return pd.DataFrame.from_records(records)
def execute(session, script):
    """Run *script* in *session*; fail the test if the result is an error string."""
    outcome = session.execute(script)
    if isinstance(outcome, str):
        assert not outcome.startswith("[ERROR]")
def _bundle_path(filename):
    """Return the absolute path of a test data file next to this module."""
    cwd = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(cwd, filename)


@pytest.fixture
def fake_bundle_file():
    return _bundle_path("test_bundle.json")


@pytest.fixture
def fake_bundle_2():
    return _bundle_path("test_bundle_2.json")


@pytest.fixture
def fake_bundle_3():
    return _bundle_path("test_bundle_3.json")


@pytest.fixture
def cbcloud_powershell_bundle():
    return _bundle_path("powershell_search_stix_result.json")
def test_session_1(fake_bundle_file):
    """End-to-end GET + SORT + GROUP on a file data source, plus symtable checks."""
    with Session(debug_mode=True) as session:
        execute(
            session,
            f"""conns = get network-traffic
                        from file://{fake_bundle_file}
                        where [network-traffic:dst_port < 10000]""",
        )
        conns = get_df(session, "conns")
        assert len(conns.index) == 100
        # SORT stores its result in the anonymous variable "_".
        execute(session, "sort conns by network-traffic:dst_port asc")
        s = get_df(session, "_")
        assert len(s.index) == 100
        assert s.iloc[0]["dst_port"] == 22
        execute(session, "group conns by network-traffic:dst_port")
        s = get_df(session, "_")
        assert len(s.index) == 5
        port_3128 = s[(s["dst_port"] == 3128)]
        assert len(port_3128.index) == 1
        assert port_3128.iloc[0]["number_observed"] == 14
        # A symbol table entry behaves like a dict mirroring its attributes.
        conns_sym = session.symtable["conns"]
        conns_dict = dict(conns_sym)
        assert conns_dict["type"] == conns_sym.type
        assert conns_dict["entity_table"] == conns_sym.entity_table
def test_session_timeframe(fake_bundle_file):
    """GET with START/STOP should only return matches inside the time range."""
    with Session(debug_mode=True) as session:
        # Bug fix: a stray ``session = Session()`` used to shadow the
        # context-managed debug session here, discarding debug_mode=True
        # and leaking a second, never-closed session.
        script = f"""conns = get network-traffic
                    from file://{fake_bundle_file}
                    where [network-traffic:dst_port = 22] START t'2020-06-30T19:25:00.000Z' STOP t'2020-06-30T19:26:00.000Z'"""
        execute(session, script)
        conns = get_df(session, "conns")
        assert len(conns.index) == 7
@pytest.mark.parametrize(
    "sco_type, prop, op, value, count",
    [
        ("ipv4-addr", "value", "=", "'192.168.121.121'", 1),
        ("network-traffic", "src_ref.value", "=", "'192.168.121.121'", 1),
        ("network-traffic", "dst_port", "=", 22, 29),
        ("user-account", "account_login", "=", "'henry'", 2),
        ("user-account", "account_login", "LIKE", "'hen%'", 2),
        ("user-account", "account_login", "=", "'zane'", 0),
    ],
)
def test_session_simple(fake_bundle_file, sco_type, prop, op, value, count):
    """Single-comparison GET patterns return the expected match counts."""
    with Session(debug_mode=True) as session:
        script = f"""result = get {sco_type} from file://{fake_bundle_file} where [{sco_type}:{prop} {op} {value}]"""
        execute(session, script)
        result = get_df(session, "result")
        assert len(result.index) == count
@pytest.mark.parametrize(
    "sco_type, pattern, count",
    [
        (
            "network-traffic",
            "[network-traffic:dst_ref.value = '10.0.0.91' AND network-traffic:dst_port = 22]",
            3,
        ),
        (
            "network-traffic",
            "[network-traffic:dst_ref.value = '10.0.0.91' OR network-traffic:dst_port = 22]",
            35,
        ),
    ],
)
def test_session_complex(fake_bundle_file, sco_type, pattern, count):
    """Boolean STIX patterns (AND/OR) return the expected row counts."""
    hunt = f"result = get {sco_type} from file://{fake_bundle_file} where {pattern}"
    with Session(debug_mode=True) as hunt_session:
        execute(hunt_session, hunt)
        assert len(get_df(hunt_session, "result").index) == count
def test_generated_pattern(fake_bundle_file, fake_bundle_2):
    """Variable-referencing pattern against a second bundle matches nothing."""
    with Session(debug_mode=True) as sess:
        script_a = f"""conns_a = get network-traffic
                 from file://{fake_bundle_file}
                 where [network-traffic:dst_ref.value = '10.0.0.134']"""
        execute(sess, script_a)
        df_a = get_df(sess, "conns_a")
        script_b = f"""conns_b = get network-traffic
                 from file://{fake_bundle_2}
                 where [network-traffic:dst_port = conns_a.dst_port]"""
        execute(sess, script_b)
        df_b = get_df(sess, "conns_b")
        df_b.to_csv("conns_b.csv")
        # The generated time range excludes every candidate row.
        assert len(df_b.index) == 0
        assert os.path.exists("conns_b.csv")
        os.remove("conns_b.csv")
def test_generated_pattern_match(fake_bundle_file, fake_bundle_3):
    """Variable-referencing pattern against a matching bundle finds rows."""
    with Session(debug_mode=True) as sess:
        script_a = f"""conns_a = get network-traffic
                 from file://{fake_bundle_file}
                 where [network-traffic:dst_ref.value = '10.0.0.134']"""
        execute(sess, script_a)
        df_a = get_df(sess, "conns_a")
        script_b = f"""conns_b = get network-traffic
                 from file://{fake_bundle_3}
                 where [network-traffic:dst_port = conns_a.dst_port]"""
        execute(sess, script_b)
        df_b = get_df(sess, "conns_b")
        df_b.to_csv("conns_b.csv")
        # time range not tested since it is only generated for udi data sources
        assert len(df_b.index) == 3
        # assert len(df_b.index) == 2 # 2/3 matches due to time range
        assert os.path.exists("conns_b.csv")
        os.remove("conns_b.csv")
def test_disp_column_order(fake_bundle_file, caplog):
    """DISP honors the requested attribute order; the SCO-type prefix is
    optional in attribute names, but a wrong prefix raises.

    Fix: dropped the pointless ``f`` prefixes on placeholder-free strings
    (flake8 F541) — the string bytes are unchanged.
    """
    caplog.set_level(logging.DEBUG)
    with Session(debug_mode=True) as session:
        execute(
            session,
            f"""conns = get network-traffic
                 from file://{fake_bundle_file}
                 where [network-traffic:dst_port < 10000]""",
        )
        # SCO type in attr names should be optional
        recs = session.execute("disp conns attr network-traffic:src_port, dst_port")[0]
        conns = recs.dataframe
        print(conns.head())
        cols = conns.columns.to_list()
        # Requested order must be preserved in the output frame
        assert cols.index("src_port") < cols.index("dst_port")
        with pytest.raises(Exception):
            session.execute(
                "disp conns attr process:src_port, dst_port"
            )  # Wrong SCO type
def test_get_set_variable(fake_bundle_file):
    """Round-trip variables through the Session get/create/list APIs."""
    with Session() as sess:
        # Create a normal var
        script = f"x = get ipv4-addr from file://{fake_bundle_file} where [ipv4-addr:value = '192.168.121.121']"
        execute(sess, script)
        assert "x" in sess.get_variable_names()
        entries = sess.get_variable("x")
        assert len(entries) == 1
        record = entries[0]
        assert record["type"] == "ipv4-addr"
        assert record["value"] == "192.168.121.121"
        # Now create a new var using Session API
        names = ["alice", "bob", "carol"]
        sess.create_variable("y", names, object_type="user-account")
        listed = sess.get_variable_names()
        assert "x" in listed
        assert "y" in listed
        accounts = sess.get_variable("y")
        assert len(accounts) == 3
        print(accounts)
        record = accounts[0]
        assert record["type"] == "user-account"
        # Maybe this should be 'account_login'?
        # Order is not preserved, so it could be any of these
        assert record["user_id"] in names
def test_session_runtime_dir():
    """Runtime directory lifecycle: auto-created on start, and removed on
    exit only when the session manages it (i.e. the caller did not pre-create
    the directory).

    Bug fix: a stray ``session = Session()`` inside the third ``with`` block
    used to leak an extra never-closed default session and shadow the
    ``runtime_dir`` session actually under test.
    """
    # standard session
    with Session() as session:
        assert os.path.exists(session.runtime_directory)
        tmp_master = pathlib.Path(tempfile.gettempdir()) / "kestrel"
        if tmp_master.exists():
            d = pathlib.Path(session.runtime_directory).resolve()
            d_master = tmp_master.resolve()
            # Non-debug sessions get a private directory, not the shared one
            assert d != d_master
    assert not os.path.exists(session.runtime_directory)
    # debug session
    with Session(debug_mode=True) as session:
        assert os.path.exists(session.runtime_directory)
        tmp_master = pathlib.Path(tempfile.gettempdir()) / "kestrel"
        assert os.path.exists(session.runtime_directory)
        if tmp_master.exists():
            d = pathlib.Path(session.runtime_directory).resolve()
            d_master = tmp_master.resolve()
            # Debug sessions share the master runtime directory
            assert d == d_master
    # predefined runtime_dir session managed by session
    d = pathlib.Path(tempfile.gettempdir()) / "kestrel-runtime-test"
    d = d.resolve()
    if os.path.exists(d):
        shutil.rmtree(d)
    with Session(runtime_dir=d) as session:
        assert os.path.exists(d)
    # The session created d, so it must clean it up
    assert not os.path.exists(d)
    # predefined runtime_dir session not managed by session
    d = pathlib.Path(tempfile.gettempdir()) / "kestrel-runtime-test"
    d = d.resolve()
    pathlib.Path(d).mkdir(parents=True, exist_ok=True)
    with Session(runtime_dir=d) as session:
        assert os.path.exists(d)
    # Pre-existing directory must survive the session
    assert os.path.exists(d)
@pytest.mark.parametrize(
    "time_string, suffix_ts",
    [
        ("START t'2021", ["-01-01T00:00:00Z'"]),
        ("START t'2021-05", ["-01T00:00:00Z'"]),
        ("START t'2021-05-04", ["T00:00:00Z'"]),
        ("START t'2021-05-04T07:", ["00:00Z'"]),
        ("START t'2021-05-04T07:30", [":00Z'"]),
        ("START t'2021-05-04T07:30:", ["00Z'"]),
        ("STOP t'2021", ["-01-01T00:00:00Z'"]),
        ("STOP t'2021-05", ["-01T00:00:00Z'"]),
        ("STOP t'2021-05-04", ["T00:00:00Z'"]),
        ("STOP t'2021-05-04T07:", ["00:00Z'"]),
        ("STOP t'2021-05-04T07:30", [":00Z'"]),
        ("STOP t'2021-05-04T07:30:", ["00Z'"]),
    ],
)
def test_session_do_complete_timestamp(fake_bundle_file, time_string, suffix_ts):
    """Auto-completion must offer the timestamp suffix for partial inputs."""
    with Session(debug_mode=True) as sess:
        partial = f"""{time_string}"""
        completions = sess.do_complete(partial, len(partial))
        assert completions == suffix_ts
def test_session_debug_from_env():
    """KESTREL_DEBUG env var (any value) must switch on debug mode.

    Bug fix: the env var was never removed afterwards, polluting every test
    that runs later in the same process.
    """
    os.environ["KESTREL_DEBUG"] = "something"
    try:
        with Session() as session:
            assert session.debug_mode == True
    finally:
        # Do not leak the env var into later tests
        del os.environ["KESTREL_DEBUG"]
def test_sha256_attr_name(cbcloud_powershell_bundle):
    """Attribute names containing single quotes (hashes.'SHA-256') work."""
    # Make sure we can handle single quotes in attr names
    with Session() as sess:
        hunt = (
            "x = get process"
            f" from file://{cbcloud_powershell_bundle}"
            " where [process:name = 'powershell.exe']"
        )
        execute(sess, hunt)
        result = sess.execute("DISP x ATTR binary_ref.hashes.'SHA-256'")
        frame = result[0].dataframe
        expected = "de96a6e69944335375dc1ac238336066889d9ffc7d73628ef4fe1b1b160ab32c"
        assert frame["binary_ref.hashes.'SHA-256'"][0] == expected
| 35.613861 | 128 | 0.617181 |
acf1b7bd49ff6ad3978b0779754d529b076dcd63 | 10,724 | py | Python | src/estimators/same_key_aggregator.py | PrivacyAmp/cardinality_estimation_evaluation_framework | c6f16733f821bba99c1e5ca827025a063f5689ae | [
"Apache-2.0"
] | 1 | 2021-02-03T07:54:36.000Z | 2021-02-03T07:54:36.000Z | src/estimators/same_key_aggregator.py | OpenMeasurement/cardinality_estimation_evaluation_framework | c6f16733f821bba99c1e5ca827025a063f5689ae | [
"Apache-2.0"
] | null | null | null | src/estimators/same_key_aggregator.py | OpenMeasurement/cardinality_estimation_evaluation_framework | c6f16733f821bba99c1e5ca827025a063f5689ae | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Same key aggregator for frequency estimation."""
import copy
import functools
import numpy as np
from wfa_cardinality_estimation_evaluation_framework.estimators import any_sketch
from wfa_cardinality_estimation_evaluation_framework.estimators.any_sketch import UniqueKeyFunction
from wfa_cardinality_estimation_evaluation_framework.estimators.base import EstimatorBase
from wfa_cardinality_estimation_evaluation_framework.estimators.base import SketchBase
from wfa_cardinality_estimation_evaluation_framework.estimators.bloom_filter_sketch_operators import SketchOperator
from wfa_cardinality_estimation_evaluation_framework.estimators.bloom_filters import ExponentialBloomFilter
from wfa_cardinality_estimation_evaluation_framework.estimators.bloom_filters import FirstMomentEstimator
from wfa_cardinality_estimation_evaluation_framework.estimators.estimator_noisers import GeometricEstimateNoiser
class ExponentialSameKeyAggregator(SketchBase):
  """Implement a Same Key Aggregator in Exponential bloom filter."""
  @classmethod
  def get_sketch_factory(cls, length, decay_rate):
    """Return a factory that builds an aggregator from a random seed.

    Used by the evaluation framework to construct sketches lazily.
    """
    def f(random_seed):
      return cls(length, decay_rate, random_seed)
    return f
  def __init__(self, length, decay_rate, random_seed):
    """Creates an ExponentialSameKeyAggregator.
    An ExponentialSameKeyAggregator includes three components:
    1. An ExponentialBloomFilter for estimating the reach.
    2. An AnySketch to track the unique key in each register.
    3. Another AnySketch to track the frequency of each effective key.
    Args:
      length: The length of bit vector for the Exponential bloom filter.
      decay_rate: The decay rate of Exponential distribution.
      random_seed: An optional integer specifying the random seed for
        generating the random seeds for hash functions.
    """
    self.length = length
    self.decay_rate = decay_rate
    self.exponential_bloom_filter = ExponentialBloomFilter(
        length=length, decay_rate=decay_rate, random_seed=random_seed)
    # Tracks which single id (if any) owns each register; UniqueKeyFunction
    # flags collided registers so they can be excluded later.
    self.unique_key_tracker = any_sketch.AnySketch(
        any_sketch.SketchConfig([
            any_sketch.IndexSpecification(
                any_sketch.ExponentialDistribution(length, decay_rate), 'exp')
        ], num_hashes=1, value_functions=[any_sketch.UniqueKeyFunction()]),
        random_seed)
    # Counts the number of additions hitting each register (SumFunction);
    # same index distribution/seed as the unique-key tracker so registers align.
    self.frequency_count_tracker = any_sketch.AnySketch(
        any_sketch.SketchConfig([
            any_sketch.IndexSpecification(
                any_sketch.ExponentialDistribution(length, decay_rate), 'exp')
        ], num_hashes=1, value_functions=[any_sketch.SumFunction()]),
        random_seed)
  def add(self, x):
    """Insert id x into all three component sketches."""
    self.exponential_bloom_filter.add(x)
    self.frequency_count_tracker.add(x)
    # The unique_key_sketch needs to be updated in a special way
    indexes = self.unique_key_tracker.get_indexes(x)
    unique_key = UniqueKeyFunction()
    for index in indexes:
      # UniqueKeyFunction combines the existing register value with the new
      # id, marking the register as collided when they differ.
      self.unique_key_tracker.sketch[index] = unique_key(
          self.unique_key_tracker.sketch[index],
          UniqueKeyFunction.get_value_from_id(x))
  def assert_compatible(self, other):
    """Check if the two ExponentialSameKeyAggregator are comparable."""
    assert isinstance(other, ExponentialSameKeyAggregator), (
        'Other is not a ExponentialSameKeyAggregator.')
    assert self.length == other.length, (
        'The sketch lengths are different: '
        f'{self.length} != {other.length}')
    assert self.decay_rate == other.decay_rate, (
        'The decay rates are different: '
        f'{self.decay_rate} != {other.decay_rate}')
class StandardizedHistogramEstimator(EstimatorBase):
  """Frequency estimator from ExponentialSameKeyAggregator.
  """
  def __init__(self,
               max_freq=10,
               noiser_class=GeometricEstimateNoiser,
               epsilon=np.log(3),
               epsilon_split=0.5):
    """Initiate a StandardizedHistogramEstimator.
    Algorithm description:
    Given any ExponentialSameKeyAggregator ska,
    Step 1. Estimate the 1+ reach, from ska.exponential_bloom_filter.
    Step 2. Estimate the histogram of frequency histogram among (only)
    effective keys.
    Step 3. Use the estimated 1+ reach to standardize the frequency histogram
    among effective keys, and thus obtain an estimate of frequnecy histogram
    among all IDs.
    Args:
      max_freq: the maximum targeting frequency level. For example, if it is set
        to 3, then the sketches will include frequency=1, 2, 3+ (frequency >=
        3). Note: we have to set a max_freq; privacy cannot be guaranteed if
        there's no max_freq.
      noiser_class: a class of noiser indicating the distribution of noise.
      epsilon: total privacy budget for a run of frequency estimation. No noise
        is added when epsilon == np.Inf.
      epsilon_split: The proportion of total privacy budget that is assigned to
        the estimate of 1+ reach. The remaining privacy budget is assigned to
        the frequency histogram among effective keys.
    """
    self.max_freq = max_freq
    self.one_plus_reach_noiser = None
    self.histogram_noiser = None
    if noiser_class is not None:
      assert epsilon_split > 0 and epsilon_split < 1, (
          'In StandardizedHistogramEstimator, epsilon_split must be >0 and <1.')
      # self.one_plus_reach_noiser is the noiser on the estimate of 1+ reach.
      self.one_plus_reach_noiser = noiser_class(epsilon=epsilon * epsilon_split)
      # self.histogram_noiser is the noiser on the frequency histogram among
      # effective keys
      # For simplicity, suppose the two noisers share the same noiser_class.
      self.histogram_noiser = noiser_class(
          epsilon=epsilon * (1 - epsilon_split))
  def __call__(self, sketch_list):
    """Merge the sketches and estimate the k+ reach list."""
    ska = StandardizedHistogramEstimator.merge_sketch_list(sketch_list)
    return self.estimate_cardinality(ska)
  @classmethod
  def merge_two_exponential_bloom_filters(cls, this, that):
    """Union the reach components of two aggregators."""
    sketch_operator = SketchOperator(
        estimation_method=FirstMomentEstimator.METHOD_EXP)
    return sketch_operator.union(this, that)
  @classmethod
  def merge_two_unique_key_trackers(cls, this, that):
    """Combine unique-key registers element-wise via UniqueKeyFunction."""
    result = copy.deepcopy(this)
    unique_key_function = UniqueKeyFunction()
    result.sketch = np.array(
        [unique_key_function(x, y) for x, y in zip(this.sketch, that.sketch)])
    return result
  @classmethod
  def merge_two_frequency_count_trackers(cls, this, that):
    """Add per-register counts of two frequency trackers."""
    result = copy.deepcopy(this)
    result.sketch = this.sketch + that.sketch
    return result
  @classmethod
  def merge_two_sketches(cls, this, that):
    """Merge two ExponentialSameKeyAggregators component by component."""
    assert isinstance(this, ExponentialSameKeyAggregator), (
        'This is not a ExponentialSameKeyAggregator.')
    result = copy.deepcopy(this)
    result.exponential_bloom_filter = StandardizedHistogramEstimator.merge_two_exponential_bloom_filters(
        this.exponential_bloom_filter, that.exponential_bloom_filter)
    result.unique_key_tracker = StandardizedHistogramEstimator.merge_two_unique_key_trackers(
        this.unique_key_tracker, that.unique_key_tracker)
    result.frequency_count_tracker = StandardizedHistogramEstimator.merge_two_frequency_count_trackers(
        this.frequency_count_tracker, that.frequency_count_tracker)
    return result
  @classmethod
  def merge_sketch_list(cls, sketch_list):
    """Left-fold a list of aggregators into a single merged aggregator."""
    return functools.reduce(StandardizedHistogramEstimator.merge_two_sketches,
                            sketch_list)
  def estimate_one_plus_reach(self, exponential_same_key_aggregator):
    """Estimate (noised) 1+ reach from the bloom-filter component."""
    estimator = FirstMomentEstimator(
        noiser=self.one_plus_reach_noiser,
        method=FirstMomentEstimator.METHOD_EXP)
    return estimator(
        [exponential_same_key_aggregator.exponential_bloom_filter])[0]
  def estimate_histogram_from_effective_keys(
      self, exponential_same_key_aggregator):
    """Obtain the frequency distribution among effective keys.
    Args:
      exponential_same_key_aggregator: an ExponentialSameKeyAggregator.
    Returns:
      An array with any ith element indicating the number of effective keys
      with frequency = (i + 1); the last element indicating the number of
      effective keys with frequency >= self.max_freq. Each element of this array
      has been independently noised by self.histogram_noiser.
    """
    # A register is "effective" when exactly one id has ever landed in it.
    is_effective_register = np.isin(
        exponential_same_key_aggregator.unique_key_tracker.sketch,
        (UniqueKeyFunction.FLAG_EMPTY_REGISTER,
         UniqueKeyFunction.FLAG_COLLIDED_REGISTER),
        invert=True)
    freq_effective_keys = (
        exponential_same_key_aggregator.frequency_count_tracker.sketch
        [is_effective_register])
    if self.max_freq is not None:
      # Cap frequencies so the last bucket means "max_freq or more".
      freq_effective_keys[freq_effective_keys > self.max_freq] = self.max_freq
    # Drop bucket 0 (frequency zero is meaningless for an effective key).
    raw_histogram_array_from_effective_keys = np.bincount(
        freq_effective_keys, minlength=self.max_freq + 1)[1:]
    if self.histogram_noiser is None:
      return raw_histogram_array_from_effective_keys
    return self.histogram_noiser(raw_histogram_array_from_effective_keys)
  @classmethod
  def standardize_histogram(cls, histogram, total):
    """Scales a histogram (array) so that it sums up to a given total."""
    return histogram / sum(histogram) * total
  def estimate_cardinality(self, exponential_same_key_aggregator):
    """Estimate_cardinality of 1+, 2+, ..., N+ reach, from a SameKeyAggregator.
    Args:
      exponential_same_key_aggregator: an ExponentialSameKeyAggregator.
    Returns:
      A list with the ith element being the estimated (i+1)+ reach, i.e.,
      the number of audience who have been exposed to the ads for at least
      (i+1) times. This has the same format as the output of
      stratified_sketch.SequentialEstimator.
    """
    one_plus_reach = self.estimate_one_plus_reach(
        exponential_same_key_aggregator)
    hist = self.estimate_histogram_from_effective_keys(
        exponential_same_key_aggregator)
    standardized_hist = StandardizedHistogramEstimator.standardize_histogram(
        hist, one_plus_reach)
    # Reverse, cumulative-sum, reverse: converts the per-frequency histogram
    # into the k+ reach curve.
    return list(reversed(np.cumsum(list(reversed(standardized_hist)))))
acf1b7f63c154b75caa0a4e8caee7e34eaf18500 | 794 | py | Python | Packages/Patterns_Package/symbols/filled_symbols/Right_faced_Equilataral_Triangle.py | saribalarakeshreddy/Python-3.9.0 | 25b4c74feb2a27b91e69aa82becde23e356e82c4 | [
"MIT"
] | null | null | null | Packages/Patterns_Package/symbols/filled_symbols/Right_faced_Equilataral_Triangle.py | saribalarakeshreddy/Python-3.9.0 | 25b4c74feb2a27b91e69aa82becde23e356e82c4 | [
"MIT"
] | null | null | null | Packages/Patterns_Package/symbols/filled_symbols/Right_faced_Equilataral_Triangle.py | saribalarakeshreddy/Python-3.9.0 | 25b4c74feb2a27b91e69aa82becde23e356e82c4 | [
"MIT"
def for_Right_faced_Equilataral_Triangle():
    """Print the diamond-like star pattern using nested for loops."""
    for row in range(7):
        line = ""
        for col in range(7):
            if ((row + col >= 3 and col - row <= 3) and row <= 3) or col == 3 or row == 4 and col % 6 != 0 or row == 5 and col in (2, 4):
                line += "*"
        print(line)
def while_Right_faced_Equilataral_Triangle():
    """Print the diamond-like star pattern using nested while loops."""
    row = 0
    while row < 7:
        col = 0
        line = ""
        while col < 7:
            if ((row + col >= 3 and col - row <= 3) and row <= 3) or col == 3 or row == 4 and col % 6 != 0 or row == 5 and col in (2, 4):
                line += "*"
            col += 1
        print(line)
        row += 1
acf1b9165daf53cd9a17b44e3c5e4c03ca954aa3 | 2,431 | py | Python | tests/utils.py | debrief/pepys-import | 12d29c0e0f69e1119400334983947893e7679b6b | [
"Apache-2.0"
] | 4 | 2021-05-14T08:22:47.000Z | 2022-02-04T19:48:25.000Z | tests/utils.py | debrief/pepys-import | 12d29c0e0f69e1119400334983947893e7679b6b | [
"Apache-2.0"
] | 1,083 | 2019-11-06T17:01:07.000Z | 2022-03-25T10:26:51.000Z | tests/utils.py | debrief/pepys-import | 12d29c0e0f69e1119400334983947893e7679b6b | [
"Apache-2.0"
] | 4 | 2019-11-06T12:00:45.000Z | 2021-06-09T04:18:28.000Z | import os
from pepys_import.core.store.data_store import DataStore
from pepys_import.file.file_processor import FileProcessor
from pepys_import.utils.text_formatting_utils import formatted_text_to_str
def check_errors_for_file_contents(file_contents, expected_errors, importer, filename=None):
    """Run *importer* over *file_contents* and assert the reported errors.

    expected_errors may be None (no errors expected), a single substring, or
    an iterable of substrings that must all appear in the importer's errors.

    Bug fix: the temporary input file is now removed in a ``finally`` block so
    it no longer leaks when processor.process raises.
    """
    data_store = DataStore("", "", "", 0, ":memory:", db_type="sqlite")
    data_store.initialise()
    processor = FileProcessor(archive=False)
    processor.register_importer(importer)
    # The fresh in-memory store must start empty
    with data_store.session_scope():
        assert len(data_store.session.query(data_store.db_classes.State).all()) == 0
        assert len(data_store.session.query(data_store.db_classes.Platform).all()) == 0
        assert len(data_store.session.query(data_store.db_classes.Datafile).all()) == 0
    if filename is None:
        filename = "test_input.txt"
    with open(filename, "w") as f:
        f.write(file_contents)
    try:
        # parse the file
        processor.process(filename, data_store, False)
    finally:
        # Delete the temporary file even if parsing raised
        os.remove(filename)
    # A failing/empty parse must not have created any states
    with data_store.session_scope():
        assert len(data_store.session.query(data_store.db_classes.State).all()) == 0
    errors = processor.importers[0].errors
    if expected_errors is None:
        assert len(errors) == 0
        return
    if len(errors) == 0:
        assert False, "No errors reported"
    errors = errors[0]
    joined_errors = "\n".join(errors.values())
    if isinstance(expected_errors, str):
        assert expected_errors in joined_errors
    else:
        for expected_error in expected_errors:
            assert expected_error in joined_errors
def move_and_overwrite(from_path, to_path):
    """
    Moves a file from from_path to to_path, overwriting the destination file
    if it already exists.

    Uses os.replace, which overwrites atomically on both POSIX and Windows —
    the previous remove-then-rename pair had a window where to_path was gone.
    """
    os.replace(from_path, to_path)
# Use normal print() to capture table reports
def side_effect(text):
    """Print the plain-string rendering of a formatted-text object."""
    rendered = formatted_text_to_str(text)
    print(rendered)
| 30.012346 | 98 | 0.694364 |
acf1b9df7e8d672c4118d6f4add3115d5c10ace6 | 12,435 | py | Python | spytest/apis/system/ntp.py | mykolaf/sonic-mgmt | de77268526173c5e3a345f3f3703b56eb40c5eed | [
"Apache-2.0"
] | 1 | 2021-09-15T17:09:13.000Z | 2021-09-15T17:09:13.000Z | spytest/apis/system/ntp.py | mykolaf/sonic-mgmt | de77268526173c5e3a345f3f3703b56eb40c5eed | [
"Apache-2.0"
] | 1 | 2020-02-05T16:51:53.000Z | 2020-02-05T16:51:53.000Z | spytest/apis/system/ntp.py | mykolaf/sonic-mgmt | de77268526173c5e3a345f3f3703b56eb40c5eed | [
"Apache-2.0"
] | null | null | null | from spytest.utils import filter_and_select
from spytest import st
import json
from utilities.utils import ensure_service_params
def add_ntp_servers(dut, iplist=None):
    """
    Configure NTP servers via a JSON config push, then regenerate ntp-config.

    :param dut: device under test
    :param iplist: list of NTP server IPs (at least one required)
    :return: True on success, False when no servers were given

    Bug fix: the default was a mutable list (``iplist=[]``), shared across
    calls; ``None`` is the safe equivalent.
    """
    st.log("add ntp servers")
    if not iplist:
        st.log("please provide atleast 1 server to configure")
        return False
    final_data = {'NTP_SERVER': {ip: {} for ip in iplist}}
    st.apply_json(dut, json.dumps(final_data))
    st.log("Regenerate the ntp-config")
    st.config(dut, "systemctl restart ntp-config")
    return True
def delete_ntp_servers(dut, iplist=None):
    """
    Placeholder: removing NTP servers is not implemented yet; always True.

    :param dut: device under test (unused)
    :param iplist: list of NTP server IPs (unused)
    :return: True

    Fixes the mutable default argument (``iplist=[]`` -> ``None``).
    TODO(review): implement actual removal (e.g. 'config ntp del <ip>').
    """
    return True
def enable_ntp(dut):
    """Turn on NTP synchronization via timedatectl."""
    st.log("enable ntp")
    st.config(dut, "sudo timedatectl set-ntp true")
    return True
def disable_ntp(dut):
    """Turn off NTP synchronization via timedatectl."""
    st.log("disable ntp")
    st.config(dut, "sudo timedatectl set-ntp false")
    return True
def enable_local_rtc(dut):
    """Keep the hardware clock (RTC) in local time instead of UTC."""
    st.log("enable set-local-rtc")
    st.config(dut, "sudo timedatectl set-local-rtc true")
    return True
def disable_local_rtc(dut):
    """Keep the hardware clock (RTC) in UTC rather than local time."""
    st.log("disable set-local-rtc")
    st.config(dut, "sudo timedatectl set-local-rtc false")
    return True
def config_timezone(dut, zone):
    """Set the system timezone; returns False when no zone name was given."""
    st.log("config timezone")
    if not zone:
        st.log("please provide zone name")
        return False
    st.config(dut, "sudo timedatectl set-timezone {}".format(zone))
    return True
def show_ntp_server(dut):
    """Return the parsed records of 'show ntp'."""
    st.log("show ntp servers")
    return st.show(dut, "show ntp")
def verify_ntp_server_details(dut, server_ip=None, **kwargs):
    """Verify fields of the 'show ntp' output for the given server(s).

    :param dut: device under test
    :param server_ip: a single IP string or an iterable of IPs; when None the
        function expects the "no associations" message instead
    :param kwargs: expected field values (remote, refid, st, t, when, poll,
        reach, delay, offset, jitter)
    :return: True when every supplied field matched, else False
    """
    output = show_ntp_server(dut)
    flag = 1
    if server_ip is None:
        # NOTE(review): output is a parsed list of records, so this substring
        # membership test compares a string against dicts — confirm the
        # intended "empty association" detection against st.show()'s format.
        if "No association ID's returned" in output:
            return True
        else:
            return False
    else:
        server_ips = [server_ip] if type(server_ip) is str else list([str(e) for e in server_ip])
        data = kwargs
        for ent in output:
            # Strip the tally character (+, *, #, o, -, x) prefixed by ntpq
            remote_ip = ent["remote"].strip("+*#o-x").strip()
            if remote_ip in server_ips:
                if 'remote' in data and remote_ip not in data['remote']:
                    st.log("Remote Server IP is not matching")
                    flag = 0
                if 'refid' in data and ent["refid"] != data["refid"]:
                    st.log("Ref ID is not matching")
                    flag = 0
                if 'st' in data and ent["st"] != data["st"]:
                    st.log("Stratum value is not matching")
                    flag = 0
                if 't' in data and ent["t"] != data["t"]:
                    st.log("Type is not matching")
                    flag = 0
                if 'when' in data and ent["when"] != data["when"]:
                    st.log("Polling value is not matching")
                    flag = 0
                if 'poll' in data and ent["poll"] != data["poll"]:
                    st.log("Polling in seconds is not matching")
                    flag = 0
                if 'reach' in data and ent["reach"] != data["reach"]:
                    st.log("Reach is not matching")
                    flag = 0
                if 'delay' in data and ent["delay"] != data["delay"]:
                    st.log("Delay is not matching")
                    flag = 0
                if 'offset' in data and ent["offset"] != data["offset"]:
                    st.log("Offset value is not matching")
                    flag = 0
                if 'jitter' in data and ent["jitter"] != data["jitter"]:
                    st.log("Jitter value is not matching")
                    flag = 0
            else:
                # NOTE(review): ANY record whose remote is not in server_ips
                # zeroes the flag, so extra configured servers fail the whole
                # verification — confirm this strictness is intended.
                st.log("Server IP is not matching")
                flag = 0
        if flag:
            st.log("Server IP's matched.")
            return True
        else:
            st.log("Server IP's not matched.")
            return False
def show_ntp_status(dut, mvrf=False):
    """Run ntpstat (inside the mgmt-VRF cgroup when mvrf is True) and return
    a flat list: [server, stratum, time, poll] repeated per record."""
    st.log("show ntp status")
    command = "sudo cgexec -g l3mdev:mgmt ntpstat" if mvrf else "ntpstat"
    output = st.show(dut, command)
    flattened = []
    for entry in filter_and_select(output, ["server", "stratum", "time", "poll"]):
        flattened.append(entry["server"].strip("()"))
        flattened.append(entry["stratum"])
        flattened.append(entry["time"])
        flattened.append(entry["poll"])
    return flattened
def config_date(dut, date):
    """Set the system date/time with the 'date' command."""
    st.log("config date")
    st.config(dut, "date --set='{}'".format(date))
    return True
def set_date_ntp(dut):
    """Force a one-shot clock set from NTP (ntpd -q) in the background."""
    st.log("set date using ntpd")
    st.config(dut, "sudo /usr/sbin/ntpd -q -g -x &")
    return True
def show_timedatectl_status(dut):
    """Return the parsed records of 'timedatectl status'."""
    st.log("timedatectl status")
    return st.show(dut, "timedatectl status")
def show_clock(dut):
    """Return the first parsed record of 'show clock'."""
    st.log("show clock")
    return st.show(dut, "show clock")[0]
def verify_clock(dut, time):
    """Return True when 'show clock' output equals the expected time."""
    st.log("verifying show clock")
    return show_clock(dut) == time
def verify_timedatectl(dut, **kwargs):
    """
    Verify fields of 'timedatectl status' output.

    Supported keys: rtctime, universaltime, networktimeon, ntpsynchronized,
    timezone, localtime. Unknown keys are ignored (matching the previous
    behavior of the hand-written if-chain, which this loop replaces).

    :param dut: device under test
    :param kwargs: expected field values
    :return: True when every supplied supported key matches, else False
    """
    st.log("verifying timedatectl")
    retval = show_timedatectl_status(dut)
    if not kwargs:
        st.error("Please provide details to be verified.")
        return False
    supported = ("rtctime", "universaltime", "networktimeon",
                 "ntpsynchronized", "timezone", "localtime")
    for key in supported:
        if key in kwargs and retval[0][key] != kwargs[key]:
            return False
    return True
def verify_ntp_status(dut, iteration=1, delay=1, mvrf=False, **kwargs):
    """
    Verify NTP status with polling.
    Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com)
    :param dut:
    :param server: single or list of servers.
    :param stratum:
    :param time:
    :param poll:
    :param iteration: 1 sec (default)
    :param delay: 1 sec (default)
    :return: True when, in one polling round, every supplied kwarg matched
    """
    st.log("verifying ntp status")
    i = 0
    if not kwargs:
        st.error("Please provide details to be verified.")
        return False
    else:
        while True:
            # flag counts how many of the supplied kwargs matched this round
            flag = 0
            # retval is the flattened [server, stratum, time, poll] list
            # produced by show_ntp_status
            retval = show_ntp_status(dut,mvrf)
            if not retval:
                st.log("No o/p from ntpstat command")
                if i > iteration:
                    st.log("NTP status failed.")
                    st.log("Max iterations {} reached".format(i))
                    return False
                i += 1
                st.wait(delay)
                continue
            if 'server' in kwargs:
                server_li = list(kwargs['server']) if isinstance(kwargs['server'], list) else [kwargs['server']]
                if retval[0] in server_li:
                    st.log("Detected NTP server - {}".format(retval[0]))
                    flag += 1
            if 'stratum' in kwargs:
                if retval[1] == kwargs['stratum']:
                    flag += 1
            if 'time' in kwargs:
                if retval[2] == kwargs['time']:
                    flag += 1
            if 'poll' in kwargs:
                if retval[3] == kwargs['poll']:
                    flag += 1
            # Success only when every supplied expectation matched at once
            if flag == len(kwargs):
                return True
            if i > iteration:
                st.log("NTP status failed.")
                st.log("Max iterations {} reached".format(i))
                return False
            i += 1
            st.wait(delay)
def verify_ntp_server(dut, serverip, **kwargs):
    """
    Verify fields of the first NTP server record positionally.

    :param dut:
    :param serverip:
    :param kwargs:
    :return: True when all supplied fields matched, else False

    NOTE(review): show_ntp_server() returns st.show() output (a list of
    parsed records), yet this function compares retval[0]..retval[9] —
    whole records — against individual field values, and never uses
    serverip beyond the emptiness check. This looks like it expects a
    flattened field list (as show_ntp_status produces); verify against the
    actual template before relying on it.
    """
    st.log("verifying ntp server")
    flag = 1
    data = kwargs
    if not data or not serverip:
        st.error("Please provide details to be verified.")
        return False
    else:
        retval = show_ntp_server(dut)
        if not retval:
            return False
        else:
            if 'remote' in data:
                if retval[0] != data['remote']:
                    flag = 0
            if 'refid' in data:
                if retval[1] != data['refid']:
                    flag = 0
            if 'st' in data:
                if retval[2] != data['st']:
                    flag = 0
            if 't' in data:
                if retval[3] != data['t']:
                    flag = 0
            if 'when' in data:
                if retval[4] != data['when']:
                    flag = 0
            if 'poll' in data:
                if retval[5] != data['poll']:
                    flag = 0
            if 'reach' in data:
                if retval[6] != data['reach']:
                    flag = 0
            if 'delay' in data:
                if retval[7] != data['delay']:
                    flag = 0
            if 'offset' in data:
                if retval[8] != data['offset']:
                    flag = 0
            if 'jitter' in data:
                if retval[9] != data['jitter']:
                    flag = 0
        if flag:
            return True
        else:
            return False
def verify_ntp_service_status(dut, status, iteration=1, delay=1):
    """
    Verify NTP service status with polling
    Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com)
    :param dut:
    :param status: expected substring of the service's 'Active:' line
    :param iteration: 1 sec (default)
    :param delay: 1 sec (default)
    :return: True when the status appears within the allowed iterations

    Bug fix: the failure log message's '{}' placeholder was never filled
    (missing .format(status)); it now reports the expected status.
    """
    command = "service ntp status | grep Active"
    i = 1
    while True:
        output = st.config(dut, command)
        if status in output:
            st.log("NTP service status is '{}' iteration".format(i))
            return True
        if i > iteration:
            st.log("NTP service status is not '{}'".format(status))
            st.log("Max iterations {} reached".format(i))
            return False
        i += 1
        st.wait(delay)
def verify_ntp_server_exists(dut, server_ip=None, **kwargs):
    """
    Check whether any of the given NTP servers appears in 'show ntp' output.

    :param dut: device under test
    :param server_ip: a single IP string or an iterable of IPs; when None the
        function expects the "no associations" message instead
    :param kwargs: optional 'remote' value the matched IP must also belong to
    :return: True / False

    Fixes: previously the loop could fall through and implicitly return None;
    the negative result is now an explicit False. Unused locals removed.
    """
    output = show_ntp_server(dut)
    if server_ip is None:
        # NOTE(review): output is a parsed list; this substring membership
        # test compares a string against records — confirm it ever matches.
        if "No association ID's returned" in output:
            return True
        else:
            return False
    server_ips = [server_ip] if type(server_ip) is str else list([str(e) for e in server_ip])
    for ent in output:
        # Strip the ntpq tally character (+, *, #, o, -, x) from the remote
        remote_ip = ent["remote"].strip("+*#o-x").strip()
        if remote_ip in server_ips:
            if 'remote' in kwargs and remote_ip not in kwargs['remote']:
                st.log("Remote Server IP is not matching")
                return False
            else:
                return True
    # No record matched any of the requested servers
    return False
def ensure_ntp_config(dut, iplist=None):
    """
    Ensure the given (or service-default) NTP servers are configured.

    :param dut: device under test
    :param iplist: NTP server IPs; falls back to the service defaults
    :return: False when no server IPs could be determined, else True

    Bug fix: the default was a shared mutable list (``iplist=[]``).
    """
    if not iplist:
        iplist = ensure_service_params(dut, "ntp", "default")
    if not iplist:
        st.log("NTP server IPs missing")
        return False
    # Only add servers that are not already present
    commands = ["config ntp add {}".format(ip) for ip in iplist
                if not verify_ntp_server_exists(dut, ip, remote=ip)]
    st.config(dut, commands)
    return True
| 26.97397 | 112 | 0.510897 |
acf1ba2375937e3f54e4e644a6331505c1290b53 | 4,713 | py | Python | main.py | swapnilsparsh/IPL-Score-Viewer | 34a82bbfa05dade89456641d799cd85d95d2a79e | [
"MIT"
] | 3 | 2020-10-02T08:22:58.000Z | 2021-12-23T16:10:44.000Z | main.py | swapnilsparsh/IPL-Score-Viewer | 34a82bbfa05dade89456641d799cd85d95d2a79e | [
"MIT"
] | 20 | 2020-09-30T19:08:21.000Z | 2020-10-27T16:15:15.000Z | main.py | swapnilsparsh/IPL-Score-Viewer | 34a82bbfa05dade89456641d799cd85d95d2a79e | [
"MIT"
] | 15 | 2020-09-30T19:54:58.000Z | 2020-10-27T16:02:25.000Z | from tkinter import *
import requests
from bs4 import BeautifulSoup
# Set our variables
# tkinter's after() takes milliseconds, so this is 2000 ms
refresh_time = 1000* 2 # Refresh every 2 seconds
# Background colors
original_bg = "#CECCBE"
dark_bg = "#2B2B2B"
old_bg = 'sandybrown' # This is no longer used but can be used to replace original_bg
label_old_bg = 'light goldenrod' # This is the old label bg before replacing w/ light/dark theme
# Set up tkinter root window
root = Tk()
root.title("Cricket Score Viewer by SWAPNIL")
root.configure(bg=original_bg)
# Darkmode button images
# PhotoImage requires the PNG files to exist in the working directory,
# and must be created after Tk() is initialised
onImg = PhotoImage(file="onbutton.png")
offImg = PhotoImage(file="offbutton.png")
def darkmode_switch():
    """Function for dark/light theme button.

    Toggles between original_bg and dark_bg based on the root window's
    current background, updating the toggle button image/label and then
    restyling the root plus every widget in all_objects.
    """
    # Check current bg colour
    current_bg = root.cget('bg')
    # If current_bg is original, change new_bg to dark (vice versa)
    # NOTE(review): if the bg were ever neither colour, new_bg would be
    # unbound below (NameError) — confirm only these two values can occur.
    if current_bg == original_bg:
        new_bg = dark_bg
        darkmodetxt_label.config(text="Dark Mode: ON", bg=new_bg)
        darkmode_btn.config(image=onImg, bg=new_bg, activebackground=new_bg)
    elif current_bg == dark_bg:
        new_bg = original_bg
        darkmodetxt_label.config(text="Dark Mode: OFF", bg=new_bg)
        darkmode_btn.config(image=offImg, bg=new_bg, activebackground=new_bg)
    # Set bg to new_bg, fg to current_bg
    # (swapping fg/bg keeps text readable in both themes)
    root.config(bg=new_bg)
    # assumes all_objects is a list of themed widgets defined later in the
    # file — TODO confirm
    for item in all_objects:
        item.config(bg=new_bg, fg=current_bg)
def get_data():
    """Fetch the latest score from cricbuzz.com and refresh the UI labels.

    Scrapes the front page, pulls out the first match's team names, scores
    and status line, writes them into the tkinter labels, and re-schedules
    itself every ``refresh_time`` ms.  Network or markup failures are shown
    in the status label instead of killing the refresh loop (the original
    let any exception escape, so the callback was never re-scheduled).
    """
    try:
        # URL Request
        url = 'https://www.cricbuzz.com/'
        page = requests.get(url)
        soup = BeautifulSoup(page.text, 'html.parser')
        # Find our data
        team_names = soup.find_all(class_='cb-ovr-flo cb-hmscg-tm-nm')
        team_scores = soup.find_all(class_='cb-ovr-flo')
        result1 = soup.find_all(class_='cb-ovr-flo cb-text-live')  # empty if no ongoing game
        result2 = soup.find_all(class_='cb-ovr-flo cb-text-complete')
        # Get name of first 2 teams competing
        dteam1 = team_names[0].get_text()
        dteam2 = team_names[1].get_text()
        # Ongoing game -> "live" markup; otherwise fall back to the
        # completed-match markup (the score cells shift by two entries).
        # NOTE(review): the hard-coded indices 8/10/12 mirror cricbuzz's
        # current page layout and will break if the site changes.
        if not result1:  # check if result1 is an empty list
            dresult = result2[0].get_text()
            dteam1_score = team_scores[10].get_text()
            dteam2_score = team_scores[12].get_text()
        else:
            dresult = result1[0].get_text()
            dteam1_score = team_scores[8].get_text()
            dteam2_score = team_scores[10].get_text()
        # Update the text labels
        team1.config(text=dteam1)
        team2.config(text=dteam2)
        team1_score.config(text=dteam1_score)
        team2_score.config(text=dteam2_score)
        result.config(text=dresult)
    except (requests.RequestException, IndexError) as exc:
        # Transient network problem or unexpected markup: surface it in
        # the status label and keep the app alive.
        result.config(text="fetch failed: %s" % exc)
    finally:
        # Always re-schedule so the loop survives errors.
        root.after(refresh_time, get_data)
# Initialise Tkinter objects (all widgets start in the light theme)
header1 = Label(root, text ='Cricket Live Score by SWAPNIL', font ='arial 8')
team1 = Label(root, text='Team 1', font='arial 20', bg=original_bg)
team2 = Label(root, text='Team 2', font='arial 20', bg=original_bg)
team1_score = Label(root, text='hit refresh', font='arial 20', bg=original_bg)
team2_score = Label(root, text='hit refresh', font='arial 20', bg=original_bg)
result = Label(root, text='hit refresh', font='arial 11', bg=original_bg)
refresh = Button(root, text='Refresh', command=get_data, bg=original_bg, fg=dark_bg) # Force refresh
# NOTE(review): font name 'ariel' below looks like a typo for 'arial'
header2 = Label(root, text='Data Collected from Cricbuzz', font='ariel 8')
darkmodetxt_label = Label(root, text="Dark Mode: OFF", font="FixedSys 17", bg=original_bg, fg="green")
darkmode_btn = Button(root, image=offImg, borderwidth=0, command=darkmode_switch, bg=original_bg, activebackground=original_bg, pady=1)
# Put our Tkinter objects on grid
header1.grid( row=0, columnspan=2, pady=5)
team1.grid( row=1, column=0, padx=15)
team2.grid( row=1, column=1)
team2_score.grid( row=2, column=1, padx=5)
team1_score.grid( row=2, column=0, padx=5)
result.grid( row=3, columnspan=2, pady=5)
refresh.grid( row=4, columnspan=2, pady=5)
header2.grid( row=5, columnspan=2, pady=0)
# Row 6 is unused; the toggle button (row 7) sits above its label (row 8)
darkmodetxt_label.grid( row=8, columnspan=2)
darkmode_btn.grid( row=7, columnspan=2, pady=20)
# Set objects for which we want to follow the dark/light theme
all_objects = [team1, team2, team1_score, team2_score, result, refresh]
# Run get_data after mainloop starts
root.after(0, get_data) # This triggers get_data which has a root.after ==> hence loops itself
# Run the app
try:
    print("CTRL + C to close or click close button")
    root.mainloop()
except KeyboardInterrupt:
    print("Thanks for using Cricket Score Viewer")
except Exception as e:
    print("UnKnownError:%s. Please report to the author"%str(e)) | 38.950413 | 135 | 0.690006 |
acf1baa7c183794c174af5cb75f202ea0cf6a7b2 | 81 | py | Python | hackerRank/Problem Solving/Data Structures/Arrays/leftRotation.py | paritoshtripathi935/laughing-disco | 3ee690fba469b1e07846af5c85b1dd8675656520 | [
"MIT"
] | 4 | 2021-12-09T06:44:24.000Z | 2021-12-24T08:09:19.000Z | hackerRank/Problem Solving/Data Structures/Arrays/leftRotation.py | paritoshtripathi935/laughing-disco | 3ee690fba469b1e07846af5c85b1dd8675656520 | [
"MIT"
] | null | null | null | hackerRank/Problem Solving/Data Structures/Arrays/leftRotation.py | paritoshtripathi935/laughing-disco | 3ee690fba469b1e07846af5c85b1dd8675656520 | [
"MIT"
] | null | null | null | def rotateLeft(d, arr):
    # Left rotation: move the first d elements to the end.
    # (HackerRank guarantees 1 <= d <= len(arr), so no modulo is needed.)
    return arr[d:] + arr[: d]
| 20.25 | 29 | 0.592593 |
acf1baf9d263b3c161d976b5f6a1634d85d2b13f | 25,366 | py | Python | modoboa/admin/tests/test_account.py | antoniotrento/modoboa | 98eea782a080a3cdfea5abea7d288ff3d49595c6 | [
"ISC"
] | 1 | 2019-06-12T19:24:42.000Z | 2019-06-12T19:24:42.000Z | modoboa/admin/tests/test_account.py | antoniotrento/modoboa | 98eea782a080a3cdfea5abea7d288ff3d49595c6 | [
"ISC"
] | null | null | null | modoboa/admin/tests/test_account.py | antoniotrento/modoboa | 98eea782a080a3cdfea5abea7d288ff3d49595c6 | [
"ISC"
] | 1 | 2020-11-20T00:25:23.000Z | 2020-11-20T00:25:23.000Z | # coding: utf-8
from __future__ import unicode_literals
from unittest import skipIf
from django.core.urlresolvers import reverse
from django.test import override_settings
from modoboa.core import factories as core_factories
from modoboa.core.tests import test_ldap
from modoboa.core.models import User
from modoboa.lib.tests import ModoTestCase
from modoboa.lib.tests import NO_LDAP
from modoboa.limits import utils as limits_utils
from .. import factories
from .. import models
class AuthenticationTestCase(ModoTestCase):
    """Check authentication."""
    @classmethod
    def setUpTestData(cls):
        """Create test data."""
        super(AuthenticationTestCase, cls).setUpTestData()
        # A plain user with a mailbox on test.com to log in as.
        cls.mb = factories.MailboxFactory(
            domain__name="test.com", address="user",
            user__username="user@test.com",
            user__groups=('SimpleUsers',)
        )
    def test_authentication_unicode(self):
        """Test with unicode password."""
        self.client.logout()
        # A non-ASCII password must survive hashing and the login form.
        password = "Tété1234"
        self.mb.user.set_password(password)
        self.mb.user.save(update_fields=["password"])
        data = {"username": self.mb.full_address, "password": password}
        response = self.client.post(reverse("core:login"), data)
        # Successful login redirects (302) to the user index page.
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response.url.endswith(reverse("core:user_index")))
class AccountTestCase(ModoTestCase):
    @classmethod
    def setUpTestData(cls):
        """Create test data."""
        super(AccountTestCase, cls).setUpTestData()
        # Shared fixture: domains, mailboxes, aliases and admin accounts.
        factories.populate_database()
def test_crud(self):
values = dict(
username="tester@test.com", first_name="Tester", last_name="Toto",
password1="Toto1234", password2="Toto1234", role="SimpleUsers",
quota_act=True,
is_active=True, email="tester@test.com", stepid='step2'
)
self.ajax_post(reverse("admin:account_add"), values)
account = User.objects.get(username="tester@test.com")
mb = account.mailbox
self.assertEqual(mb.full_address, "tester@test.com")
self.assertEqual(mb.quota, 10)
self.assertTrue(mb.enabled)
self.assertEqual(mb.quota_value.username, "tester@test.com")
self.assertEqual(account.username, mb.full_address)
self.assertTrue(account.check_password("Toto1234"))
self.assertEqual(account.first_name, "Tester")
self.assertEqual(account.last_name, "Toto")
self.assertEqual(mb.domain.mailbox_count, 3)
# Check if self alias has been created
self.assertTrue(
models.AliasRecipient.objects.select_related("alias").filter(
alias__address=mb.full_address, address=mb.full_address,
alias__internal=True).exists()
)
values.update({
"username": "pouet@test.com", "language": "en",
"secondary_email": "homer@simpson.com",
"phone_number": "123456789"
})
self.ajax_post(
reverse("admin:account_change", args=[account.id]), values
)
account.refresh_from_db()
self.assertEqual(account.secondary_email, values["secondary_email"])
mb = models.Mailbox.objects.get(pk=mb.pk)
self.assertEqual(mb.full_address, "pouet@test.com")
self.assertEqual(mb.quota_value.username, "pouet@test.com")
# Check if self alias has been updated
self.assertTrue(
models.AliasRecipient.objects.select_related("alias").filter(
alias__address=mb.full_address, address=mb.full_address,
alias__internal=True).exists()
)
self.ajax_post(
reverse("admin:account_delete", args=[account.id]), {}
)
# Check if self alias has been deleted
self.assertFalse(
models.AliasRecipient.objects.select_related("alias").filter(
alias__address=mb.full_address, address=mb.full_address,
alias__internal=True).exists()
)
def test_aliases_update_on_rename(self):
"""Check if aliases are updated on mailbox rename."""
account = User.objects.get(username="user@test.com")
values = {
"username": "new_user@test.com",
"first_name": account.first_name,
"last_name": account.last_name,
"role": account.role,
"quota_act": True,
"is_active": account.is_active,
"email": "new_user@test.com",
"language": "en",
"create_alias_with_old_address": False,
"aliases_1": "alias@test.com"
}
url = reverse("admin:account_change", args=[account.pk])
self.ajax_post(url, values)
qset = account.mailbox.aliasrecipient_set.filter(alias__internal=False)
for alr in qset:
self.assertEqual(alr.address, values["email"])
def test_create_alias_on_rename(self):
"""Check if alias is automatically created."""
account = User.objects.get(username="user@test.com")
values = {
"username": "new_user@test.com",
"first_name": account.first_name,
"last_name": account.last_name,
"role": account.role,
"quota_act": True,
"is_active": account.is_active,
"email": "new_user@test.com",
"language": "en",
"create_alias_with_old_address": False
}
url = reverse("admin:account_change", args=[account.pk])
# Rename while option is set to False -> no alias created
self.ajax_post(url, values)
self.assertFalse(
models.AliasRecipient.objects.filter(
address="new_user@test.com", alias__address="user@test.com",
alias__internal=False
).exists()
)
# Now rename while option set to True -> alias created
values.update({
"username": "user@test.com",
"email": "user@test.com",
"create_alias_with_old_address": True}
)
self.ajax_post(url, values)
self.assertTrue(
models.AliasRecipient.objects.filter(
address="user@test.com", alias__address="new_user@test.com",
alias__internal=False
).exists()
)
# Change domain while option set to True -> alias created
values.update({
"username": "new_user@test2.com",
"email": "new_user@test2.com",
})
self.ajax_post(url, values)
self.assertTrue(
models.AliasRecipient.objects.filter(
address="new_user@test2.com",
alias__address="user@test.com",
alias__domain__name="test.com",
alias__internal=False
).exists()
)
def test_password_constraints(self):
"""Check password constraints."""
values = {
"username": "tester@test.com",
"first_name": "Tester", "last_name": "Toto",
"password1": "", "password2": "",
"role": "SimpleUsers",
"quota_act": True, "is_active": True, "email": "tester@test.com",
"stepid": "step2"
}
resp = self.ajax_post(reverse("admin:account_add"), values, 400)
self.assertEqual(
resp["form_errors"]["password1"][0],
"This field is required.")
values["password1"] = "Toto1234"
values["password2"] = "Toto12345"
resp = self.ajax_post(reverse("admin:account_add"), values, 400)
self.assertEqual(
resp["form_errors"]["password2"][0],
"The two password fields didn't match.")
values["password1"] = "toto1234"
values["password2"] = "toto1234"
resp = self.ajax_post(reverse("admin:account_add"), values, 400)
self.assertEqual(
resp["form_errors"]["password2"][0],
"Password must contain at least 1 uppercase letter.")
def test_random_password(self):
"""Try to create an account with a random password."""
values = {
"username": "tester@test.com",
"first_name": "Tester", "last_name": "Toto",
"random_password": True, "role": "SimpleUsers",
"quota_act": True, "is_active": True, "email": "tester@test.com",
"stepid": "step2"
}
self.ajax_post(reverse("admin:account_add"), values)
account = User.objects.get(username=values["username"])
password = account.password
values["language"] = "en"
# Since 'random_password' is still True, a new password should
# be generated
self.ajax_post(
reverse("admin:account_change", args=[account.pk]), values
)
account.refresh_from_db()
self.assertNotEqual(password, account.password)
password = account.password
values["random_password"] = False
self.ajax_post(
reverse("admin:account_change", args=[account.pk]), values
)
account.refresh_from_db()
self.assertEqual(password, account.password)
def test_delete_default_superadmin(self):
"""Delete default superadmin."""
sadmin2 = core_factories.UserFactory(
username="admin2", is_superuser=True)
sadmin = User.objects.get(username="admin")
self.client.force_login(sadmin2)
self.ajax_post(
reverse("admin:account_delete", args=[sadmin.pk]), {}
)
values = {
"username": "user@test.com", "role": "DomainAdmins",
"is_active": True, "email": "user@test.com",
"language": "en"
}
account = User.objects.get(username="user@test.com")
self.ajax_post(
reverse("admin:account_change", args=[account.pk]), values
)
def test_sender_address(self):
"""Check if sender addresses are saved."""
account = User.objects.get(username="user@test.com")
values = {
"username": "user@test.com", "first_name": "Tester",
"last_name": "Toto", "role": "SimpleUsers",
"quota_act": True, "is_active": True, "email": "user@test.com",
"senderaddress": "test@titi.com", "senderaddress_1": "toto@go.com",
"language": "en"
}
self.ajax_post(
reverse("admin:account_change", args=[account.pk]), values)
self.assertEqual(
models.SenderAddress.objects.filter(
mailbox__address="user").count(),
2)
del values["senderaddress_1"]
self.ajax_post(
reverse("admin:account_change", args=[account.pk]), values)
self.assertEqual(
models.SenderAddress.objects.filter(
mailbox__address="user").count(),
1)
def test_sender_address_as_domainadmin(self):
"""Check that restrictions are applied."""
admin = User.objects.get(username="admin@test.com")
self.client.force_login(admin)
account = User.objects.get(username="user@test.com")
values = {
"username": "user@test.com", "first_name": "Tester",
"last_name": "Toto", "role": "SimpleUsers",
"quota_act": True, "is_active": True, "email": "user@test.com",
"senderaddress": "test@titi.com",
"senderaddress_1": "toto@test2.com"
}
response = self.ajax_post(
reverse("admin:account_change", args=[account.pk]), values, 400)
self.assertEqual(
response["form_errors"]["senderaddress_1"][0],
"You don't have access to this domain")
def test_conflicts(self):
"""Check if unicity constraints are respected."""
values = {
"username": "user@test.com",
"password1": "Toto1234", "password2": "Toto1234",
"role": "SimpleUsers", "quota_act": True,
"is_active": True, "email": "user@test.com",
"stepid": "step2"
}
self.ajax_post(reverse("admin:account_add"), values, status=400)
values.update({"username": "fakeuser@test.com",
"email": "fakeuser@test.com"})
self.ajax_post(reverse("admin:account_add"), values)
account = User.objects.get(username="fakeuser@test.com")
values = {
"username": "user@test.com",
"role": "SimpleUsers", "quota_act": True,
"is_active": True, "email": "user@test.com",
}
self.ajax_post(
reverse("admin:account_change", args=[account.pk]), values,
status=400
)
def test_utf8_username(self):
"""Create an account with non-ASCII characters."""
values = dict(
username="téster@test.com", first_name="Tester", last_name="Toto",
password1="Toto1234", password2="Toto1234", role="SimpleUsers",
quota_act=True,
is_active=True, email="téster@test.com", stepid="step2"
)
self.ajax_post(reverse("admin:account_add"), values)
def _set_quota(self, email, value, expected_status=200):
account = User.objects.get(username=email)
values = {
"username": email, "role": "SimpleUsers", "quota_act": False,
"is_active": True, "quota": value, "email": email,
"language": "en"
}
self.ajax_post(
reverse("admin:account_change", args=[account.id]),
values, status=expected_status
)
def test_set_nul_quota_as_superadmin(self):
self._set_quota("user@test.com", 0)
def test_set_nul_quota_as_domainadmin(self):
"""Check cases where a domain admin set unlimited quota."""
self.client.logout()
self.assertTrue(
self.client.login(username="admin@test.com", password="toto")
)
# Fails because domain has a quota
self._set_quota("user@test.com", 0, 400)
self.client.logout()
self.assertTrue(
self.client.login(username="admin@test2.com", password="toto")
)
# Ok because domain has no quota
self._set_quota("user@test2.com", 0)
def test_domain_quota(self):
"""Check domain quota."""
dom = models.Domain.objects.get(name="test.com")
dom.quota = 100
dom.save(update_fields=["quota"])
# 2 x 10MB
self.assertEqual(dom.allocated_quota, 20)
self._set_quota("user@test.com", 80)
del dom.allocated_quota
# 10 + 80 < 100 => ok
self.assertEqual(dom.allocated_quota, 90)
# 30 + 80 > 100 => failure
self._set_quota("admin@test.com", 30, 400)
def test_master_user(self):
"""Validate the master user mode."""
values = {
"username": "masteruser", "role": "SuperAdmins",
"quota_act": False,
"is_active": True, "master_user": True, "stepid": "step2",
"password1": "Toto1234", "password2": "Toto1234"
}
self.ajax_post(
reverse("admin:account_add"), values
)
self.assertTrue(User.objects.get(username="masteruser").master_user)
values = {
"username": "testuser", "role": "DomainAdmins",
"quota_act": False,
"is_active": True, "master_user": True, "stepid": "step2",
"password1": "Toto1234", "password2": "Toto1234"
}
self.ajax_post(
reverse("admin:account_add"), values, status=400
)
def test_account_detail_view(self):
"""Test account detail view."""
self.set_global_parameter("enable_admin_limits", False, app="limits")
account = User.objects.get(username="admin@test.com")
url = reverse("admin:account_detail", args=[account.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn("Summary", response.content.decode())
self.assertIn("Administered domains", response.content.decode())
self.assertNotIn("Resources usage", response.content.decode())
self.set_global_parameter("enable_admin_limits", True, app="limits")
response = self.client.get(url)
self.assertIn("Resources usage", response.content.decode())
def test_quota_list_view(self):
"""Test quota list view."""
models.Quota.objects.filter(username="user@test.com").update(
bytes=5 * 1048576)
url = reverse("admin:identity_quota_list")
response = self.ajax_get(url)
self.assertIn("5M", response["rows"])
self.assertIn('title="50%"', response["rows"])
self.assertIn("user@test.com", response["rows"])
old_rows = response["rows"]
response = self.ajax_get(
"{}?sort_order=-quota_value__bytes".format(url))
self.assertNotEqual(old_rows, response["rows"])
old_rows = response["rows"]
response = self.ajax_get(
"{}?sort_order=-quota_usage".format(url))
self.assertEqual(old_rows, response["rows"])
response = self.ajax_get(
"{}?sort_order=-unknown".format(url), status=400)
@skipIf(NO_LDAP, "No ldap module installed")
@override_settings(AUTHENTICATION_BACKENDS=(
'modoboa.lib.authbackends.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
))
class LDAPAccountTestCase(test_ldap.LDAPTestCaseMixin, ModoTestCase):
"""Check LDAP related code."""
def test_autocreate_disabled(self):
"""Check if objects are not created as expected."""
self.activate_ldap_authentication()
self.searchbind_mode()
self.set_global_parameter("auto_create_domain_and_mailbox", False)
username = "testuser@example.com"
self.authenticate(username, "test")
self.assertFalse(
models.Domain.objects.filter(name="example.com").exists())
self.assertFalse(
models.Mailbox.objects.filter(address="testuser").exists())
class PermissionsTestCase(ModoTestCase):
@classmethod
def setUpTestData(cls):
"""Create test data."""
super(PermissionsTestCase, cls).setUpTestData()
parameters = {}
for name, tpl in limits_utils.get_user_limit_templates():
parameters["deflt_user_{0}_limit".format(name)] = 2
cls.localconfig.parameters.set_values(parameters, app="limits")
cls.localconfig.save()
factories.populate_database()
cls.reseller = core_factories.UserFactory(username="reseller")
cls.reseller.role = "Resellers"
cls.user = User.objects.get(username="user@test.com")
def setUp(self):
"""Initiate test context."""
super(PermissionsTestCase, self).setUp()
self.values = {
"username": self.user.username, "role": "DomainAdmins",
"is_active": self.user.is_active, "email": "user@test.com",
"quota_act": True, "language": "en"
}
def tearDown(self):
self.client.logout()
def test_domain_admins(self):
self.ajax_post(
reverse("admin:account_change", args=[self.user.id]),
self.values
)
self.assertEqual(self.user.role, "DomainAdmins")
self.values["role"] = "SimpleUsers"
self.ajax_post(
reverse("admin:account_change", args=[self.user.id]),
self.values
)
self.assertNotEqual(
self.user.groups.first().name, "DomainAdmins")
def test_superusers(self):
self.values["role"] = "SuperAdmins"
self.ajax_post(
reverse("admin:account_change", args=[self.user.id]),
self.values
)
self.assertEqual(
User.objects.get(username="user@test.com").is_superuser, True
)
self.values["role"] = "SimpleUsers"
self.ajax_post(
reverse("admin:account_change", args=[self.user.id]),
self.values
)
self.assertEqual(User.objects.get(
username="user@test.com").is_superuser, False)
def test_self_modif(self):
self.client.logout()
self.assertTrue(
self.client.login(username="admin@test.com", password="toto")
)
admin = User.objects.get(username="admin@test.com")
values = {
"username": "admin@test.com", "first_name": "Admin",
"password1": "", "password2": "",
"quota": 10, "is_active": True, "email": "admin@test.com",
"language": "en"
}
self.ajax_post(
reverse("admin:account_change", args=[admin.id]),
values
)
self.assertEqual(admin.role, "DomainAdmins")
self.assertTrue(admin.can_access(
models.Domain.objects.get(name="test.com")))
values["role"] = "SuperAdmins"
self.ajax_post(
reverse("admin:account_change", args=[admin.id]),
values
)
admin.refresh_from_db()
self.assertEqual(admin.role, "DomainAdmins")
self.client.logout()
self.client.login(username=self.reseller.username, password="toto")
self.assertTrue(self.reseller.can_access(self.reseller))
values = {
"username": self.reseller.username, "first_name": "Reseller",
"password1": "", "password2": "",
"is_active": True, "language": "en"
}
self.ajax_post(
reverse("admin:account_change", args=[self.reseller.pk]),
values
)
self.assertEqual(self.reseller.role, "Resellers")
values["role"] = "SuperAdmins"
self.ajax_post(
reverse("admin:account_change", args=[self.reseller.pk]),
values
)
self.assertEqual(self.reseller.role, "Resellers")
def test_domadmin_access(self):
self.client.logout()
self.assertEqual(
self.client.login(username="admin@test.com", password="toto"),
True)
response = self.client.get(reverse("admin:domain_list"))
self.assertEqual(response.status_code, 200)
response = self.client.get(
reverse("admin:account_change", args=[self.user.id]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertNotEqual(response["Content-Type"], "application/json")
def test_domainadmin_deletes_superadmin(self):
"""Check domain admins restrictions about super admins
When a super admin owns a mailbox and a domain admin exists
for the associated domain, this domain admin must not be able
to access the super admin.
"""
values = dict(
username="superadmin2@test.com", first_name="Super",
last_name="Admin", password1="Toto1234", password2="Toto1234",
role="SuperAdmins", is_active=True,
email="superadmin2@test.com", stepid='step2'
)
self.ajax_post(
reverse("admin:account_add"),
values
)
account = User.objects.get(username="superadmin2@test.com")
self.client.logout()
self.client.login(username="admin@test.com", password="toto")
self.ajax_post(
reverse("admin:account_delete", args=[account.id]), {}, 403
)
def test_domainadmin_dlist_local_domain_not_owned(self):
"""Check if a domain admin can use a local mailbox he can't
access as a recipient in a distribution list"""
values = dict(
address="all@test.com",
recipients="user@test.com",
recipients_1="user@test2.com",
enabled=True
)
self.ajax_post(reverse("admin:alias_add"), values)
def test_domainadmin_master_user(self):
"""Check domain administrator is not allowed to access this feature."""
values = dict(
username="user10@test.com", first_name="Test",
last_name="Test", password1="Toto1234", password2="Toto1234",
role="SimpleUsers", is_active=True, master_user=True,
email="user10@test.com", stepid='step2'
)
self.ajax_post(
reverse("admin:account_add"),
values, status=400
)
def test_domadmins_permissions(self):
"""
Check that two domain admins in the same domains see the same
content.
"""
dom = models.Domain.objects.get(name="test.com")
mb = factories.MailboxFactory(
domain=dom, address="admin2",
user__username="admin2@test.com", user__groups=('DomainAdmins', ),
user__password="{PLAIN}toto")
dom.add_admin(mb.user)
self.client.logout()
self.assertTrue(
self.client.login(username="admin@test.com", password="toto"))
values = {
"username": "new@test.com", "password1": "Toto1234",
"password2": "Toto1234", "role": "SimpleUsers", "quota_act": True,
"is_active": True, "email": "new@test.com", "stepid": "step2"
}
self.ajax_post(reverse("admin:account_add"), values)
new_mb = models.Mailbox.objects.get(user__username="new@test.com")
self.assertTrue(mb.user.can_access(new_mb))
| 38.726718 | 79 | 0.595916 |
acf1bb6feda7f7ab6a9f0d41f1c628f537c65f6e | 583 | py | Python | 2_fade_in.py | nixielive/nxyi2c-python-samples | 3d5a21decc02540c4b3890b8887b0a5de9291db5 | [
"MIT"
] | null | null | null | 2_fade_in.py | nixielive/nxyi2c-python-samples | 3d5a21decc02540c4b3890b8887b0a5de9291db5 | [
"MIT"
] | null | null | null | 2_fade_in.py | nixielive/nxyi2c-python-samples | 3d5a21decc02540c4b3890b8887b0a5de9291db5 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import smbus
def usage():
    """Print a short command-line usage hint: <target address> <digit>."""
    print(sys.argv[0] + ' [target address] [digit]')
def app_main():
    """application main procedure

    Usage: <target address> <digit>.  Programs the device over I2C bus 1:
    command 1 = pattern, 2 = duration, 3 = number, 0 = start.
    (Register semantics taken from the original inline comments; the value
    2 written as the pattern presumably selects the fade-in effect --
    see the script name -- TODO confirm against the device docs.)
    """
    if len(sys.argv) < 3:
        usage()
        return
    # Parse once instead of re-parsing argv for every write.
    address = int(sys.argv[1])
    digit = int(sys.argv[2])
    bus = smbus.SMBus(1)
    try:
        # set pattern
        bus.write_word_data(address, 1, 2)
        # set duration
        bus.write_word_data(address, 2, 500)
        # set number
        bus.write_word_data(address, 3, digit)
        # set start
        bus.write_word_data(address, 0, 0)
    finally:
        # Always release the I2C bus, even if a write fails
        # (the original leaked the handle on any write error).
        bus.close()
if __name__ == '__main__':
app_main()
| 17.666667 | 59 | 0.624357 |
acf1bcf829310a6be06f03f2445ea9a9fc82c08a | 4,255 | py | Python | test_cases/protobuf3_adapter.py | QratorLabs/ritfest2016 | cddaaa9e827f5315d2e426c083029124649d6f50 | [
"MIT"
] | null | null | null | test_cases/protobuf3_adapter.py | QratorLabs/ritfest2016 | cddaaa9e827f5315d2e426c083029124649d6f50 | [
"MIT"
] | null | null | null | test_cases/protobuf3_adapter.py | QratorLabs/ritfest2016 | cddaaa9e827f5315d2e426c083029124649d6f50 | [
"MIT"
] | null | null | null | import sys
sys.path.insert(0, './codegen')
# The generated protobuf3 bindings live under ./codegen; if they cannot be
# imported, a stub adapter exposing only NAME is defined instead (see the
# except ImportError branch at the bottom).
try:
    import protobuf3_gen.struct_map as struct_map_pb2
    import protobuf3_gen.test as test_pb2
    class Protobuf3Adapter(object):
        """Benchmark adapter: each encoder_* wraps a plain Python value in
        the matching generated message and returns its serialized bytes;
        each decoder_* is the inverse."""
        NAME = 'protobuf3'
        def encoder_string(self, data):
            obj = test_pb2.Str()
            obj.data = data
            return obj.encode_to_bytes()
        def decoder_string(self, data):
            obj = test_pb2.Str()
            obj.parse_from_bytes(data)
            return obj.data
        def encoder_bytes(self, data):
            obj = test_pb2.Bin()
            obj.data = data
            return obj.encode_to_bytes()
        def decoder_bytes(self, data):
            obj = test_pb2.Bin()
            obj.parse_from_bytes(data)
            return obj.data
        def encoder_integer(self, data):
            obj = test_pb2.Int()
            obj.data = data
            return obj.encode_to_bytes()
        def decoder_integer(self, data):
            obj = test_pb2.Int()
            obj.parse_from_bytes(data)
            return obj.data
        def encoder_float(self, data):
            obj = test_pb2.Float()
            obj.data = data
            return obj.encode_to_bytes()
        def decoder_float(self, data):
            obj = test_pb2.Float()
            obj.parse_from_bytes(data)
            return obj.data
        def encoder_boolean(self, data):
            obj = test_pb2.Bool()
            obj.data = data
            return obj.encode_to_bytes()
        def decoder_boolean(self, data):
            obj = test_pb2.Bool()
            obj.parse_from_bytes(data)
            return obj.data
        def encoder_array(self, data):
            obj = test_pb2.Array()
            obj.data.extend(data)
            return obj.encode_to_bytes()
        def decoder_array(self, data):
            obj = test_pb2.Array()
            obj.parse_from_bytes(data)
            return list(obj.data)
        # Maps are modelled as a repeated MapEntry (proto2-style), so the
        # dict is flattened to entries on encode and rebuilt on decode.
        def encoder_map(self, data):
            obj = test_pb2.Map()
            for key, value in data.items():
                entry = test_pb2.MapEntry()
                entry.key = key
                entry.value = value
                obj.data.append(entry)
            return obj.encode_to_bytes()
        def decoder_map(self, data):
            obj = test_pb2.Map()
            obj.parse_from_bytes(data)
            return {
                entry.key: entry.value
                for entry in obj.data
            }
        def encoder_struct_10(self, data):
            obj = test_pb2.Struct10()
            for k, v in data.items():
                setattr(obj, k, v)
            return obj.encode_to_bytes()
        # NOTE: decoding reads the private _Message__fields mapping of the
        # protobuf3 library to enumerate field names generically.
        def decoder_struct_10(self, data):
            obj = test_pb2.Struct10()
            obj.parse_from_bytes(data)
            return {
                v.field_name: getattr(obj, v.field_name)
                for v in obj._Message__fields.values()
            }
        def encoder_struct_map(self, data):
            obj = struct_map_pb2.StructMap()
            for k, v in data.items():
                setattr(obj, k, v)
            return obj.encode_to_bytes()
        def decoder_struct_map(self, data):
            obj = struct_map_pb2.StructMap()
            obj.parse_from_bytes(data)
            return {
                v.field_name: getattr(obj, v.field_name)
                for v in obj._Message__fields.values()
            }
        def encoder_simple_list(self, data):
            obj = test_pb2.SimpleList()
            obj.ints.extend(data)
            return obj.encode_to_bytes()
        def decoder_simple_list(self, data):
            obj = test_pb2.SimpleList()
            obj.parse_from_bytes(data)
            return list(obj.ints)
        # Points are (x, y) pairs; decoded back as 2-element lists.
        def encoder_points_list(self, data):
            obj = test_pb2.PointsList()
            for p in data:
                po = test_pb2.Point()
                po.x, po.y = p
                obj.points.append(po)
            return obj.encode_to_bytes()
        def decoder_points_list(self, data):
            obj = test_pb2.PointsList()
            obj.parse_from_bytes(data)
            return [[p.x, p.y] for p in obj.points]
except ImportError:
    # Fallback stub so the benchmark can still list the adapter by name.
    class Protobuf3Adapter(object):
        NAME = 'protobuf3'
| 28.557047 | 56 | 0.532315 |
acf1bd18ea6b87b25a8020c052a445499b41e392 | 535 | py | Python | shophub/manage.py | Saiful1721648/Product-Recommendation-System-Using-Machine-Learning-through-Big-Data-in-E-commerce-Website | cd2fb19ae1bc6a2d0382e3f5288243681ebd7e88 | [
"Apache-2.0"
] | null | null | null | shophub/manage.py | Saiful1721648/Product-Recommendation-System-Using-Machine-Learning-through-Big-Data-in-E-commerce-Website | cd2fb19ae1bc6a2d0382e3f5288243681ebd7e88 | [
"Apache-2.0"
] | null | null | null | shophub/manage.py | Saiful1721648/Product-Recommendation-System-Using-Machine-Learning-through-Big-Data-in-E-commerce-Website | cd2fb19ae1bc6a2d0382e3f5288243681ebd7e88 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Django management entry point (e.g. `python manage.py runserver`).
    # NOTE(review): settings module is 'mac.settings' -- confirm this
    # matches the project package (the repo is named 'shophub').
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mac.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| 33.4375 | 73 | 0.684112 |
acf1be2d6ed05d8221a0240dfe99acf2c4beb66d | 3,360 | py | Python | dsp-module/dsp/core/filter.py | RNatvik/rntools | ddaf8f9cc440bcd0ed0439f087bc951e0add6dce | [
"MIT"
] | null | null | null | dsp-module/dsp/core/filter.py | RNatvik/rntools | ddaf8f9cc440bcd0ed0439f087bc951e0add6dce | [
"MIT"
] | 1 | 2020-08-11T16:05:51.000Z | 2020-08-11T16:05:51.000Z | dsp-module/dsp/core/filter.py | RNatvik/rntools | ddaf8f9cc440bcd0ed0439f087bc951e0add6dce | [
"MIT"
] | null | null | null | class Filter:
def __init__(self, b: list, a: list = None, k: float = 1):
"""
Creates a filter object
:param b: numerator coefficients of the transfer function (coeffs of X)
:param a: denominator coefficients of the transfer function (coeffs of Y)
:param k: output gain (default 1)
"""
if not a:
a = [1]
self.b = b
self.a = a
self.k = k
self.input = [0] * len(b)
self.output = [0] * (len(a) - 1)
def filter_value(self, x_new: float):
"""
Passes a single value through the filter
TODO: Find a better way to do the calculation than to use pop and insert for self.a[0]
:param x_new: the value to be passed through the filter
:return: the filter's output value
"""
self.input = self._shift_list(self.input, x_new)
a0 = self.a.pop(0)
y_new = a0 * (
sum([b * x for b, x in zip(self.b, self.input)])
- sum([a * y for a, y in zip(self.a, self.output)])
)
self.a.insert(0, a0)
self.output = self._shift_list(self.output, y_new)
return y_new
def filter_list(self, xn: list):
"""
Passes a list of values through the filter
:param xn: the input vector
:return: the filter output vector
"""
y = []
for x in xn:
y.append(self.filter_value(x))
return y
def get_latest(self):
"""
Returns the latest output value of the filter
:return: the latest output value of the filter
"""
return self.output[0]
def clear(self):
"""
Clear the filter's stored input and output list
:return: None
"""
self.input = [0] * len(self.input)
self.output = [0] * len(self.output)
def _shift_list(self, lst: list, val: float):
if len(lst) > 0:
lst.pop()
lst.insert(0, val)
return lst
class MultiChannelFilter:
    """Apply one set of filter coefficients independently to several signals.

    Holds one :class:`Filter` per channel so each signal keeps its own history.
    """

    def __init__(self, channels: int, b: list, a: list = None, k: float = 1):
        """
        :param channels: number of channels / signals to filter
        :param b: numerator coefficients of the transfer function (coeffs of X)
        :param a: denominator coefficients of the transfer function (coeffs of Y)
        :param k: output gain (default 1)
        """
        self.channels = channels
        self.filters = [Filter(b, a=a, k=k) for _ in range(channels)]

    def filter_values(self, values: list):
        """
        Filter one sample per channel.

        :param values: list with exactly one value per channel
        :return: list of filtered values, or None when the number of
            elements does not match the channel count
        """
        if len(values) != self.channels:
            return None
        return [flt.filter_value(v) for flt, v in zip(self.filters, values)]

    def get_latest(self):
        """
        :return: each channel's latest output, as a list
        """
        return [flt.get_latest() for flt in self.filters]
| 30.825688 | 130 | 0.566071 |
acf1be2e2beda337c1d62cd28bfeadc19e04eeec | 15,666 | py | Python | bayesian/auth.py | miteshvp/fabric8-analytics-server | df54bb0944efff586ee4e602876e8af579a36e75 | [
"Apache-2.0"
] | null | null | null | bayesian/auth.py | miteshvp/fabric8-analytics-server | df54bb0944efff586ee4e602876e8af579a36e75 | [
"Apache-2.0"
] | null | null | null | bayesian/auth.py | miteshvp/fabric8-analytics-server | df54bb0944efff586ee4e602876e8af579a36e75 | [
"Apache-2.0"
] | 1 | 2019-06-06T12:10:59.000Z | 2019-06-06T12:10:59.000Z | """Authorization token handling."""
import datetime
import enum
import functools
from os import getenv

from flask import current_app, request, g
from flask_principal import Permission as PrincipalPermission
from flask_security import RoleMixin, UserMixin, current_user, login_user
from itsdangerous import BadSignature, SignatureExpired, TimedJSONWebSignatureSerializer
import jwt
from jwt.contrib.algorithms.pycrypto import RSAAlgorithm
from sqlalchemy.exc import SQLAlchemyError

from . import rdb
from .exceptions import HTTPError
from .utils import fetch_public_key
jwt.register_algorithm('RS256', RSAAlgorithm(RSAAlgorithm.SHA256))
def decode_token():
    """Decode the JWT found in the request's Authorization header.

    Returns an empty dict when no header is present.  Every audience listed
    in the BAYESIAN_JWT_AUDIENCE config value is tried in turn; the first
    one that verifies wins.

    :return: the decoded token payload (dict)
    :raises jwt.InvalidTokenError: when no configured audience verifies
    """
    token = request.headers.get('Authorization')
    if token is None:
        return {}
    if token.startswith('Bearer '):
        _, token = token.split(' ', 1)
    pub_key = fetch_public_key(current_app)
    decoded_token = None
    for aud in current_app.config.get('BAYESIAN_JWT_AUDIENCE').split(','):
        try:
            decoded_token = jwt.decode(token, pub_key, audience=aud)
            break
        except jwt.InvalidTokenError:
            current_app.logger.error('Auth Token could not be decoded for audience {}'.format(aud))
    if decoded_token is None:
        raise jwt.InvalidTokenError('Auth token audience cannot be verified.')
    return decoded_token
def login_required(view):
    """Decorator that rejects requests lacking a valid JWT.

    On success the authenticated user is stored in ``g.current_user`` and the
    wrapped view is invoked; any failure raises :class:`HTTPError` 401.
    Authentication is skipped entirely when the DISABLE_AUTHENTICATION
    environment variable is set (local development setup).

    Fixes over the previous version: the wrapper is decorated with
    ``functools.wraps`` so the view keeps its name/docs (Flask endpoint names
    stay unique), and dead ``decoded = {...}`` assignments placed before
    unconditional raises were removed.

    :param view: the Flask view callable to protect
    :return: the wrapped view
    """
    @functools.wraps(view)
    def wrapper(*args, **kwargs):
        # Disable authentication for local setup
        if getenv('DISABLE_AUTHENTICATION') in ('1', 'True', 'true'):
            return view(*args, **kwargs)
        lgr = current_app.logger
        try:
            decoded = decode_token()
            if not decoded:
                lgr.exception('Provide an Authorization token with the API request')
                # NOTE(review): this HTTPError is raised inside the try block,
                # so it is caught by the broad handler below and re-raised
                # with the "could not decode" message — preserved as-is.
                raise HTTPError(401, 'Authentication failed - token missing')
            lgr.info('Successfuly authenticated user {e} using JWT'.
                     format(e=decoded.get('email')))
        except jwt.ExpiredSignatureError as exc:
            lgr.exception('Expired JWT token')
            raise HTTPError(401, 'Authentication failed - token has expired') from exc
        except Exception as exc:
            lgr.exception('Failed decoding JWT token')
            raise HTTPError(401, 'Authentication failed - could not decode JWT token') from exc
        g.current_user = APIUser(decoded.get('email', 'nobody@nowhere.nodomain'))
        return view(*args, **kwargs)
    return wrapper
class APIUser(UserMixin):
    """Lightweight object representing the user calling the API."""

    def __init__(self, email):
        """Store the e-mail address extracted from the caller's JWT.

        :param email: e-mail claim of the authenticated user
        """
        self.email = email
# NOTE: the stuff below is obsolete and we'll most likely want to drop it in future
# Association table for the many-to-many User <-> Role relationship.
roles_users = rdb.Table('roles_users',
                        rdb.Column('user_id', rdb.Integer(), rdb.ForeignKey('user.id')),
                        rdb.Column('role_id', rdb.Integer(), rdb.ForeignKey('role.id')))
# Association table for the many-to-many Role <-> Permission relationship.
permissions_roles = rdb.Table('permissions_roles',
                              rdb.Column('role_id', rdb.Integer(), rdb.ForeignKey('role.id')),
                              rdb.Column('permission_id', rdb.Integer(),
                                         rdb.ForeignKey('permission.id')))
class LazyRowBasedPermission(PrincipalPermission):
    """This class represents a lazily-checked row-based permission.

    Subclass it for specific checks; a subclass typically overrides ``name``
    (must contain ``"{arg}"``), ``view_arg`` (the Flask view argument that
    carries the row id) and ``allows(identity)``.  Example::

        class ModifyFooPermission(LazyRowBasedPermission):
            name = 'modify Foo id {arg}'
            view_arg = 'foo_id'

            def allows(self, identity):
                foo = rdb.session.query(Foo).get(self.get_arg())
                if foo is None:
                    raise HTTPError(404, 'foo not found')
                return foo.created_by == identity.id

    Use instances with ``require_permissions`` or check inline with
    ``ModifyFooPermission(need=foo_id).can()``.

    This implementation uses one of the proposals for flask-principal lazy
    permissions as a base:
    https://github.com/mattupstate/flask-principal/issues/6#issuecomment-24750550
    """

    # Human-readable template for __str__; "{arg}" is filled with the row id.
    name = 'modify row {arg}'
    # Name of the view argument holding the row id; subclasses override it.
    view_arg = None

    def __init__(self, need=None, view_arg=None):
        """Construct the instance of LazyRowBasedPermission class.

        :param need: explicit need (row id); falls back to the view argument
        :param view_arg: per-instance override of the class-level view_arg
        """
        # Bug fix: the previous code called ``super().__init__(self, *[need])``
        # which also registered the permission instance itself as a need,
        # because ``super()`` already binds ``self``.
        super().__init__(need)
        self.only_need = need  # we only assume one need for our permissions ATM
        self.view_arg = view_arg or type(self).view_arg

    def get_arg(self):
        """Get the arg from self.only_need or from the current request."""
        return self.only_need or request.view_args.get(self.view_arg)

    def __str__(self):
        """Return the 'informal' string representation of this permission."""
        try:
            arg = self.get_arg()
        except Exception:
            # Outside a request context get_arg() can fail; degrade gracefully.
            arg = 'unknown'
        return self.name.format(arg=arg)
def _check_one_perm(perm, has_perms):
"""Check the permission.
Helper function that evaluates one permission (be it PermEnum instance or
LazyRowBasedPermission instance) and returns True/False.
"""
if isinstance(perm, enum.Enum):
return perm.value in has_perms
elif isinstance(perm, LazyRowBasedPermission):
return perm.can()
else:
raise HTTPError(500, 'Internal server error while checking permissions')
def check_permissions_and(needs_perms, has_perms):
    """Require that ALL permissions in `needs_perms` are satisfied.

    Succeeds silently; raises :class:`HTTPError` 403 otherwise (logical "and").
    Members of `needs_perms` may be PermEnum instances (checked against
    *has_perms*), LazyRowBasedPermission instances (checked against the
    current identity), or lists/tuples, which are interpreted as logical "or"
    and delegated to :func:`check_permissions_or`.

    :param needs_perms: (nested) list of permissions to check for
    :param has_perms: list of strings, permissions to check against
    """
    for requirement in needs_perms:
        if isinstance(requirement, (list, tuple)):
            check_permissions_or(requirement, has_perms)
        elif not _check_one_perm(requirement, has_perms):
            raise HTTPError(403, 'User doesn\'t have permission "{}"'.format(str(requirement)))
def check_permissions_or(needs_perms, has_perms):
    """Require that AT LEAST ONE permission in `needs_perms` is satisfied.

    Succeeds silently; raises :class:`HTTPError` 403 otherwise (logical "or").
    Members follow the same rules as for :func:`check_permissions_and`,
    except that nested lists/tuples are interpreted as logical "and".

    :param needs_perms: (nested) list of permissions to check for
    :param has_perms: list of strings, permissions to check against
    """
    for requirement in needs_perms:
        if isinstance(requirement, (list, tuple)):
            try:
                check_permissions_and(requirement, has_perms)
            except HTTPError:
                continue  # this alternative failed; try the next one
            return
        if _check_one_perm(requirement, has_perms):
            return
    raise HTTPError(403, 'User doesn\'t have any permission of required: "{}"'.format(needs_perms))
def require_permissions(*needs_perms):
    """Check user permissions.

    View decorator which checks that the current user has sufficient
    permissions to access the decorated view; raises HTTPError 403 if not.
    A user's permissions are the union of the permissions of all roles
    currently assigned in the DB, plus dynamic row-based permissions
    (LazyRowBasedPermission instances).

    Fix over the previous version: the inner wrapper now carries
    ``functools.wraps`` so the view's name/docs survive decoration.

    :param needs_perms: PermEnum/LazyRowBasedPermission instances or (nested)
        lists of them.  Nesting alternates logic: even levels (0th, 2nd, ...)
        express "and", odd levels express "or".  For example::

            require_permissions(1, 2, [3, 4])   # "1 and 2 and (3 or 4)"
            require_permissions(1, [2, [3, 4]]) # "1 and (2 or (3 and 4))"
    """
    def func_decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            if needs_perms:
                if not current_user.is_authenticated:
                    raise HTTPError(401, 'Unauthenticated user can\'t access this view')
                # Fetch the user with roles and permissions eagerly joined.
                try:
                    user = rdb.session.query(User).\
                        outerjoin(User.roles).\
                        outerjoin(Role.permissions).\
                        filter(User.id == current_user.id).first()
                except SQLAlchemyError:
                    rdb.session.rollback()
                    raise
                # Flatten role -> permission names the user actually holds.
                has_perms = [perm.name
                             for role in user.roles
                             for perm in role.permissions]
                check_permissions_and(needs_perms, has_perms)
            # else no permissions are needed => don't check anything
            return func(*args, **kwargs)
        return inner
    return func_decorator
class PermEnum(enum.Enum):
    """Enumeration base for the permissions known to the system."""

    # NOTE: when adding/changing/deleting these, you need to manually create a migration that
    # adds/updates/deletes the appropriate Permission row in DB

    def __str__(self):
        """Return the permission's string value (used for DB comparisons)."""
        return self.value
class Permission(rdb.Model):
    """Data structure representing permission (assigned to role)."""
    # Surrogate primary key.
    id = rdb.Column(rdb.Integer(), primary_key=True)
    # Permission name; unique. Presumably mirrors a PermEnum value — TODO confirm.
    name = rdb.Column(rdb.String(80), unique=True)
class Role(rdb.Model, RoleMixin):
    """Data structure representing role (assigned to user)."""
    # Surrogate primary key.
    id = rdb.Column(rdb.Integer(), primary_key=True)
    # Unique role name.
    name = rdb.Column(rdb.String(80), unique=True)
    # Free-form human-readable description of the role.
    description = rdb.Column(rdb.String(255))
    # Many-to-many link to Permission via the permissions_roles table.
    permissions = rdb.relationship('Permission', secondary=permissions_roles,
                                   backref=rdb.backref('roles', lazy='dynamic'))
class User(rdb.Model, UserMixin):
    """Structure representing a user accessing the system via a security token.

    Fix over the previous version: the 401 error message for an unknown user
    id read "Unknow user" — corrected to "Unknown user".
    """
    # Surrogate primary key.
    id = rdb.Column(rdb.Integer(), primary_key=True)
    # Unique login name.
    login = rdb.Column(rdb.String(255), unique=True)
    email = rdb.Column(rdb.String(255))
    password = rdb.Column(rdb.String(255))
    # Flask-Security "active" flag; inactive users cannot authenticate.
    active = rdb.Column(rdb.Boolean(), default=True)
    # Many-to-many link to Role via the roles_users table.
    roles = rdb.relationship('Role', secondary=roles_users,
                             backref=rdb.backref('users', lazy='dynamic'))
    # Current (single) API token and its expiration; None when revoked.
    token = rdb.Column(rdb.String(255))
    token_expires = rdb.Column(rdb.DateTime())

    def generate_auth_token(self, expiration=None):
        """Generate new security token.

        Note: calling this automatically rewrites (== revokes) previous token.

        :param expiration: token lifetime in seconds; defaults to the
            API_TOKEN_LIFETIME config value
        :return: tuple (token string, expiration datetime)
        """
        expires_in = expiration or current_app.config['API_TOKEN_LIFETIME']
        s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'],
                                            expires_in=expires_in)
        self.token = s.dumps({'id': str(self.id)})
        # time based signers in itsdangerous always return bytes, so we decode to store in DB
        # (there should be no harm done storing the token decoded)
        self.token = self.token.decode('utf-8')
        # we need to store the token expiration time since user may change it by revoking the token
        self.token_expires = s.get_issue_date(s.loads(self.token, return_header=True)[1]) + \
            datetime.timedelta(seconds=expires_in)
        rdb.session.add(self)
        rdb.session.commit()
        return self.token, self.token_expires

    def revoke_auth_token(self):
        """Revoke security token by clearing it and its expiration in the DB."""
        self.token = None
        self.token_expires = None
        rdb.session.add(self)
        rdb.session.commit()

    @classmethod
    def get_by_token(cls, token):
        """Find the owner of given security token.

        :param token: the serialized token string presented by the caller
        :return: the matching User
        :raises HTTPError: 401 when the token references an unknown user
        :raises SignatureExpired: when the token is past its stored expiry
        :raises BadSignature: when the token does not match the stored one
        """
        s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'])
        # may raise BadSignature or SignatureExpired
        data = s.loads(token)
        user = current_app.user_datastore.find_user(id=data['id'])
        if not user:
            raise HTTPError(401, 'Unknown user with id {}'.format(data['id']))
        if user.token == token:
            if datetime.datetime.utcnow() < user.token_expires:
                return user
            raise SignatureExpired('bad token')
        raise BadSignature('bad token')
| 41.11811 | 99 | 0.64324 |
acf1bef97db15ec6e18f12a653dca7af32e12d46 | 16,794 | py | Python | src/utils/movielens_etl/movielens.py | LaudateCorpus1/hermes-5 | d9b50452379fe636da96c2bad2d286afa15cd7b9 | [
"Apache-2.0"
] | 135 | 2015-11-17T09:04:37.000Z | 2022-01-14T07:00:34.000Z | src/utils/movielens_etl/movielens.py | cacan/hermes | d9b50452379fe636da96c2bad2d286afa15cd7b9 | [
"Apache-2.0"
] | 16 | 2015-11-19T18:04:13.000Z | 2016-11-19T00:30:12.000Z | src/utils/movielens_etl/movielens.py | cacan/hermes | d9b50452379fe636da96c2bad2d286afa15cd7b9 | [
"Apache-2.0"
] | 68 | 2015-11-13T22:51:57.000Z | 2022-01-26T01:51:09.000Z | #!/usr/bin/env python
"""Translate the MovieLens 20M dataset to JSON.
This script takes the four files from the MovieLens 20M dataset and
rewrites each row as a JSON object. This makes it very easy to work
with in Spark.
Attributes:
RATINGS (dict): A dictionary that stores information from all of
the rating actions taken by the users in the dataset. The
variables are as follows:
- user_id (int): A unique identifier for each user.
- movie_id (int): A unique identifier for each movie.
- rating (float): The user's star rating for a movie,
from 0.5 to 5.0 stars with steps of half a star.
- timestamp (int): Seconds since the Unix epoch.
TAGS (dict): A dictionary that stores information from all of the
tagging actions taken by the users in the dataset. The
variables are as follows:
- user_id (int): A unique identifier for each user.
- movie_id (int): A unique identifier for each movie.
- tag (str): The tag applied by the user.
- timestamp (int): Seconds since the Unix epoch.
MOVIES (dict): A dictionary that stores information about each
movie in the dataset. The variables are as follows:
- movie_id (int): A unique identifier for each movie.
- imdb_id (bool): The unique identifier for each movie
used by IMDb.
- tmdb_id (bool): The unique identifier for each movie
used by TMDb (The Movie Database).
- title (str): The title of the movie.
- year (int): The year included with the title in
movies.csv. It is not necessarily the year the movie
was released, sometimes it is the DVD release date,
or some other date. If date is important to your
model, you should probably use an alternative source
to get it.
- genre_action (bool): True if the movie was categorized
by MovieLens as an action movie, else false.
- genre_adventure (bool): True if the movie was
categorized by MovieLens as an adventure movie, else
false.
- genre_animation (bool): True if the movie was
categorized by MovieLens as an animated movie, else
false.
- genre_childrens (bool): True if the movie was
categorized by MovieLens as a children's movie, else
false.
- genre_comedy (bool): True if the movie was categorized
by MovieLens as a comedy, else false.
- genre_crime (bool): True if the movie was categorized
by MovieLens as an crime movie, else false.
- genre_documentary (bool): True if the movie was
categorized by MovieLens as a documentary, else false.
- genre_drama (bool): True if the movie was categorized
by MovieLens as a drama, else false.
- genre_fantasy (bool): True if the movie was categorized
by MovieLens as an fantasy movie, else false.
- genre_filmnoir (bool): True if the movie was
categorized by MovieLens as film-noir, else false.
- genre_horror (bool): True if the movie was categorized
by MovieLens as a horror movie, else false.
- genre_musical (bool): True if the movie was categorized
by MovieLens as a musical, else false.
- genre_mystery (bool): True if the movie was categorized
by MovieLens as a mystery, else false.
- genre_romance (bool): True if the movie was categorized
by MovieLens as a romance movie, else false.
- genre_scifi (bool): True if the movie was categorized
by MovieLens as an sci-fi movie, else false.
- genre_thriller (bool): True if the movie was
categorized by MovieLens as a thriller movie, else
false.
- genre_war (bool): True if the movie was categorized by
MovieLens as a war movie, else false.
- genre_western (bool): True if the movie was categorized
by MovieLens as a western, else false.
- genre_none (bool): True if the movie was not
categorized by MovieLens.
"""
from copy import deepcopy
import json
import csv
# JSON rating object
# Template for one rating event; a copy is emitted per output row.
RATINGS = {
    "user_id": None,
    "movie_id": None,
    "rating": None,
    "timestamp": None,
}
# JSON tag object
# Template for one tagging event; a copy is emitted per output row.
TAGS = {
    "user_id": None,
    "movie_id": None,
    "tag": None,
    "timestamp": None,
}
# JSON movie object
# Template for one movie record; genre flags default to False and are
# switched on by set_genres().
MOVIES = {
    "movie_id": None,
    "imdb_id": None,
    "tmdb_id": None,
    "title": None,
    "year": None,
    "genre_action": False,
    "genre_adventure": False,
    "genre_animation": False,
    "genre_childrens": False,
    "genre_comedy": False,
    "genre_crime": False,
    "genre_documentary": False,
    "genre_drama": False,
    "genre_fantasy": False,
    "genre_filmnoir": False,
    "genre_horror": False,
    "genre_musical": False,
    "genre_mystery": False,
    "genre_romance": False,
    "genre_scifi": False,
    "genre_thriller": False,
    "genre_war": False,
    "genre_western": False,
    "genre_none": False,
}
def ratings_to_json_20m(ratings_csv, output_directory):
    """Convert the ratings.csv file to a file containing a collection of JSON
    objects for the 20M dataset.

    Fixes over the previous version: the input is opened in text mode (the
    csv module rejects bytes under Python 3), and the per-row deepcopy of the
    RATINGS template is replaced by a direct dict literal (same key order).

    Args:
        - ratings_csv (str): The ratings file.
        - output_directory (str): The directory to write the output
          file to.

    Returns:
        None
    """
    with open(ratings_csv, 'r') as csv_file:
        with open(output_directory + "/movielens_20m_ratings.json", 'w') as out:
            reader = csv.reader(csv_file)
            next(reader, None)  # Skip the header
            for line in reader:
                row = {
                    "user_id": int(line[0]),
                    "movie_id": int(line[1]),
                    "rating": float(line[2]),
                    "timestamp": int(line[3]),
                }
                out.write(json.dumps(row) + '\n')
def ratings_to_json_10m(ratings_csv, output_directory, output_file="movielens_10m_ratings.json"):
    """Convert the ratings file to a file containing a collection of JSON
    objects for the 10M dataset ("::"-separated format).

    Fixes over the previous version: the input is opened in text mode
    (splitting bytes with a str separator fails under Python 3) and the
    per-row deepcopy of the RATINGS template is replaced by a dict literal.

    Args:
        - ratings_csv (str): The ratings file.
        - output_directory (str): The directory to write the output
          file to.
        - output_file (str,optional): The name of the file to write, by default
          "movielens_10m_ratings.json"

    Returns:
        None
    """
    with open(ratings_csv, 'r') as in_file:
        with open(output_directory + "/" + output_file, 'w') as out:
            for line in in_file:
                fields = line.split("::")
                row = {
                    "user_id": int(fields[0]),
                    "movie_id": int(fields[1]),
                    "rating": float(fields[2]),
                    "timestamp": int(fields[3]),  # int() tolerates the trailing newline
                }
                out.write(json.dumps(row) + '\n')
def ratings_to_json_1m(ratings_csv, output_directory):
    """Convert the 1M ratings file; identical to ratings_to_json_10m() with
    output_file hardcoded to "movielens_1m_ratings.json".
    """
    return ratings_to_json_10m(ratings_csv, output_directory,
                               output_file="movielens_1m_ratings.json")
def tags_to_json_20m(tags_csv, output_directory):
    """Convert the tags.csv file to a file containing a collection of JSON
    objects for the 20M dataset.

    Fixes over the previous version: the input is opened in text mode (the
    csv module rejects bytes under Python 3) and the per-row deepcopy of the
    TAGS template is replaced by a direct dict literal (same key order).

    Args:
        - tags_csv (str): The tags file.
        - output_directory (str): The directory to write the output
          file to.

    Returns:
        None
    """
    with open(tags_csv, 'r') as csv_file:
        with open(output_directory + "/movielens_20m_tags.json", 'w') as out:
            reader = csv.reader(csv_file)
            next(reader, None)  # Skip the header
            for line in reader:
                row = {
                    "user_id": int(line[0]),
                    "movie_id": int(line[1]),
                    "tag": line[2],
                    "timestamp": int(line[3]),
                }
                out.write(json.dumps(row) + '\n')
def tags_to_json_10m(tags_csv, output_directory, output_file="movielens_10m_tags.json"):
    """Convert the tags file to a file containing a collection of JSON
    objects for the 1M and 10M datasets ("::"-separated format).

    Fixes over the previous version: the input is opened in text mode
    (splitting bytes with a str separator fails under Python 3) and the
    per-row deepcopy of the TAGS template is replaced by a dict literal.

    Args:
        - tags_csv (str): The tags file.
        - output_directory (str): The directory to write the output
          file to.
        - output_file (str,optional): The name of the file to write, by default
          "movielens_10m_tags.json"

    Returns:
        None
    """
    with open(tags_csv, 'r') as in_file:
        with open(output_directory + "/" + output_file, 'w') as out:
            for line in in_file:
                fields = line.split('::')
                row = {
                    "user_id": int(fields[0]),
                    "movie_id": int(fields[1]),
                    "tag": fields[2],
                    "timestamp": int(fields[3]),  # int() tolerates the trailing newline
                }
                out.write(json.dumps(row) + '\n')
def tags_to_json_1m(tags_csv, output_directory):
    """Convert the 1M tags file; identical to tags_to_json_10m() with
    output_file hardcoded to "movielens_1m_tags.json".
    """
    return tags_to_json_10m(tags_csv, output_directory,
                            output_file="movielens_1m_tags.json")
def extract_title_and_year(orig_title, encoding=None):
    """Extract the title and year from the title provided in movies.csv.

    Titles are of the form ``Movie Name (Year)``; the name portion may itself
    contain parenthetical pieces, and the year — when present — is always the
    last parenthetical part.

    Fixes over the previous version: removed the unused ``stripped_title``
    variable, and replaced the Python-2-only ``unicode(...)`` call with an
    explicit ``bytes.decode`` that works on both Python 2 and 3.

    Args:
        - orig_title (str): The title string from the data.
        - encoding (str): The encoding of the file, often "UTF-8" or
          "latin-1". If None, the title is returned as-is.

    Returns:
        (return_title, return_year) (str, int): The title of the movie and
        the year extracted from orig_title; return_year is None if no year
        was included in orig_title.
    """
    # The year, when present, lives in the last parenthesized group.
    paren_location = orig_title.rfind('(')
    if paren_location == -1:
        return (orig_title, None)
    tmp_title = orig_title[:paren_location].strip()
    # On Python 2, str is bytes, so this matches the old unicode(s, encoding);
    # on Python 3 the input is already decoded text and is left untouched.
    if encoding is not None and isinstance(tmp_title, bytes):
        tmp_title = tmp_title.decode(encoding)
    tmp_year = orig_title[paren_location:]
    # A year looks like "(1995)": exactly six characters starting with 1 or 2.
    if len(tmp_year) == 6 and tmp_year[1] in ("1", "2"):
        return (tmp_title, int(tmp_year.strip('()')))
    # The parenthetical is probably an alternate title: keep the original
    # string and report no year.
    return (orig_title, None)
def set_genres(genre_string, row):
    """Set genre values in a movie object given a genre string.

    MovieLens passes genres in a string of the form "genre1|genre2|...".
    This function sets the matching ``genre_*`` fields in *row* to True;
    the special string "(no genres listed)" sets ``genre_none`` instead.

    Fix over the previous version: iterate with ``dict.items()`` instead of
    the Python-2-only ``iteritems()`` (AttributeError on Python 3).

    Returns:
        row (dict): The movie object that was passed in (also mutated
        in place).
    """
    # Sometimes the genres are unset, so we return early.
    if genre_string == "(no genres listed)":
        row['genre_none'] = True
        return row
    # Map each MovieLens genre label to its output field name.
    genre_map = {
        "Action": "genre_action",
        "Adventure": "genre_adventure",
        "Animation": "genre_animation",
        "Children": "genre_childrens",
        "Comedy": "genre_comedy",
        "Crime": "genre_crime",
        "Documentary": "genre_documentary",
        "Drama": "genre_drama",
        "Fantasy": "genre_fantasy",
        "Film-Noir": "genre_filmnoir",
        "Horror": "genre_horror",
        "Musical": "genre_musical",
        "Mystery": "genre_mystery",
        "Romance": "genre_romance",
        "Sci-Fi": "genre_scifi",
        "Thriller": "genre_thriller",
        "War": "genre_war",
        "Western": "genre_western",
    }
    for genre, key in genre_map.items():
        # Substring match, mirroring the original behaviour.
        if genre in genre_string:
            row[key] = True
    return row
def movies_to_json_20m(movies_csv, links_csv, output_directory):
    """Convert the movies.csv and links.csv files to a file
    containing a collection of JSON objects for the 20M dataset.

    Fixes over the previous version: both inputs are opened in text mode
    (the csv module rejects bytes under Python 3) and the MOVIES template is
    shallow-copied — it holds only scalar values, so deepcopy was wasted work.

    Args:
        - movies_csv (str): The movies file.
        - links_csv (str): The links file.
        - output_directory (str): The directory to write the output
          file to.

    Returns:
        None
    """
    # Cache the external ids keyed by MovieLens movie id.
    link_cache = {}
    with open(links_csv, 'r') as csv_file:
        reader = csv.reader(csv_file)
        next(reader, None)  # Skip the header
        for line in reader:
            movie_id = int(line[0])
            # The external IDs are sometimes blank; store None in that case.
            try:
                imdb_id = int(line[1])
            except ValueError:
                imdb_id = None
            try:
                tmdb_id = int(line[2])
            except ValueError:
                tmdb_id = None
            link_cache[movie_id] = (imdb_id, tmdb_id)
    # Process the movie rows and join them with the cached links.
    with open(movies_csv, 'r') as csv_file:
        with open(output_directory + "/movielens_20m_movies.json", 'w') as out:
            reader = csv.reader(csv_file)
            next(reader, None)  # Skip the header
            for line in reader:
                row = MOVIES.copy()
                row['movie_id'] = int(line[0])
                (row['imdb_id'], row['tmdb_id']) = link_cache[row['movie_id']]
                (row['title'], row['year']) = extract_title_and_year(line[1])
                row = set_genres(line[2], row)
                out.write(json.dumps(row) + '\n')
def movies_to_json_10m(movies_csv, output_directory, output_file="movielens_10m_movies.json", encoding=None):
    """Convert the movies file to a file containing a collection of JSON
    objects for the 10M dataset ("::"-separated format).

    Fixes over the previous version: the input is opened in text mode
    (splitting bytes with a str separator fails under Python 3) and the
    MOVIES template is shallow-copied — it holds only scalar values, so
    deepcopy was wasted work.

    Args:
        - movies_csv (str): The movies file.
        - output_directory (str): The directory to write the output
          file to.
        - output_file (str,optional): The name of the file to write, by default
          "movielens_10m_movies.json"
        - encoding (str,optional): The encoding of the file, often "UTF-8" or
          "latin-1". If None, titles are used as-is.

    Returns:
        None
    """
    with open(movies_csv, 'r') as in_file:
        with open(output_directory + "/" + output_file, 'w') as out:
            for line in in_file:
                fields = line.split('::')
                row = MOVIES.copy()
                row['movie_id'] = int(fields[0])
                (row['title'], row['year']) = extract_title_and_year(fields[1], encoding=encoding)
                row = set_genres(fields[2], row)
                out.write(json.dumps(row) + '\n')
def movies_to_json_1m(movies_csv, output_directory):
    """Convert the 1M movies file; identical to movies_to_json_10m() with
    output_file hardcoded to "movielens_1m_movies.json" and encoding
    hardcoded to "latin-1".
    """
    return movies_to_json_10m(movies_csv, output_directory,
                              output_file="movielens_1m_movies.json",
                              encoding="latin-1")
acf1bf19c7fc97c0a55671528f4b139c7bc74577 | 4,298 | py | Python | fairseq/models/squad.py | lynli/fairseq | 70b02caf633ef9041b033941bd90306c36cdc5b7 | [
"BSD-3-Clause"
] | null | null | null | fairseq/models/squad.py | lynli/fairseq | 70b02caf633ef9041b033941bd90306c36cdc5b7 | [
"BSD-3-Clause"
] | null | null | null | fairseq/models/squad.py | lynli/fairseq | 70b02caf633ef9041b033941bd90306c36cdc5b7 | [
"BSD-3-Clause"
] | null | null | null | import torch
import torch.nn as nn
from fairseq.tasks.language_modeling import LanguageModelingTask
from fairseq.modules import (
ElmoTokenEmbedder, MultiheadAttention,
CharacterTokenEmbedder)
from . import (
BaseFairseqModel, register_model, register_model_architecture,
)
from fairseq import options
from fairseq import utils
@register_model('finetuning_squad')
class FinetuningSquad(BaseFairseqModel):
    """SQuAD span-prediction head on top of a pre-trained language model.

    Wraps a language model and adds two bias-free linear projections that
    score each paragraph token as a candidate answer start / end position.
    Non-paragraph positions are filled with -inf so downstream scoring
    effectively ignores them.
    """
    def __init__(self, args, language_model, eos_idx, pad_idx, unk_idx):
        """Build the model.

        :param args: parsed architecture arguments (model_dim, dropouts, ...)
        :param language_model: the pre-trained LM producing token features
        :param eos_idx: EOS symbol index in the dictionary
        :param pad_idx: padding symbol index in the dictionary
        :param unk_idx: unknown symbol index in the dictionary
        """
        super().__init__()
        self.language_model = language_model
        self.eos_idx = eos_idx
        self.pad_idx = pad_idx
        self.unk_idx = unk_idx
        self.last_dropout = nn.Dropout(args.last_dropout)
        # Optional non-affine layer norm applied to LM features before dropout.
        self.ln = nn.LayerNorm(args.model_dim, elementwise_affine=False) if args.layer_norm else None
        # One scalar score per token for answer start and end, respectively.
        self.start_proj = torch.nn.Linear(args.model_dim, 1, bias=False)
        self.end_proj = torch.nn.Linear(args.model_dim, 1, bias=False)
        if isinstance(self.language_model.decoder.embed_tokens, CharacterTokenEmbedder):
            print('disabling training char convolutions')
            self.language_model.decoder.embed_tokens.disable_convolutional_grads()
        self.reset_parameters()
    def reset_parameters(self):
        """Zero-initialize the start/end projection weights."""
        torch.nn.init.constant_(self.start_proj.weight, 0)
        # torch.nn.init.constant_(self.start_proj.bias, 0)
        torch.nn.init.constant_(self.end_proj.weight, 0)
        # torch.nn.init.constant_(self.end_proj.bias, 0)
    def forward(self, text, paragraph_mask):
        """Score each paragraph token as an answer start/end position.

        :param text: input token tensor fed to the language model
        :param paragraph_mask: boolean tensor (same shape as the score
            output) marking which positions belong to the paragraph;
            must contain at least one True entry
        :return: tuple (start, end) of score tensors shaped like
            paragraph_mask, with -inf at non-paragraph positions
        """
        x, _ = self.language_model(text)
        # Some LMs return a list of layer outputs; use the first one.
        if isinstance(x, list):
            x = x[0]
        if self.ln is not None:
            x = self.ln(x)
        x = self.last_dropout(x)
        assert paragraph_mask.any()
        paragraph_toks = x[paragraph_mask]
        # Fill with -inf so only paragraph positions carry finite scores.
        start = x.new_full(paragraph_mask.shape, float('-inf'))
        end = start.clone()
        start[paragraph_mask] = self.start_proj(paragraph_toks).squeeze(-1)
        end[paragraph_mask] = self.end_proj(paragraph_toks).squeeze(-1)
        return start, end
    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--lm-path', metavar='PATH', help='path to elmo model')
        parser.add_argument('--model-dim', type=int, metavar='N', help='decoder input dimension')
        parser.add_argument('--last-dropout', type=float, metavar='D', help='dropout before projection')
        parser.add_argument('--model-dropout', type=float, metavar='D', help='lm dropout')
        parser.add_argument('--attention-dropout', type=float, metavar='D', help='lm dropout')
        parser.add_argument('--relu-dropout', type=float, metavar='D', help='lm dropout')
        parser.add_argument('--proj-unk', action='store_true', help='if true, also includes unk emb in projection')
        parser.add_argument('--layer-norm', action='store_true', help='if true, does non affine layer norm before proj')
    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance.

        Loads the pre-trained LM from ``args.lm_path`` (head removed,
        dropouts overridden from args) and wraps it in a FinetuningSquad.
        """
        # make sure all arguments are present in older models
        base_architecture(args)
        dictionary = task.dictionary
        assert args.lm_path is not None
        task = LanguageModelingTask(args, dictionary, dictionary)
        models, _ = utils.load_ensemble_for_inference([args.lm_path], task, {
            'remove_head': True,
            'dropout': args.model_dropout,
            'attention_dropout': args.attention_dropout,
            'relu_dropout': args.relu_dropout,
        })
        assert len(models) == 1, 'ensembles are currently not supported for elmo embeddings'
        return FinetuningSquad(args, models[0], dictionary.eos(), dictionary.pad(), dictionary.unk())
@register_model_architecture('finetuning_squad', 'finetuning_squad')
def base_architecture(args):
    """Fill in any hyper-parameter the CLI/checkpoint did not provide."""
    defaults = (
        ('model_dim', 1024),
        ('last_dropout', 0.1),
        ('model_dropout', 0.1),
        ('attention_dropout', 0.1),
        ('relu_dropout', 0.05),
        ('layer_norm', False),
        ('proj_unk', False),
    )
    # Keep any value already present on args; otherwise install the default.
    for name, default in defaults:
        setattr(args, name, getattr(args, name, default))
| 39.431193 | 120 | 0.68148 |
acf1bf473625f08201fbcddadcae88408642bdb8 | 3,838 | py | Python | digitick_client/models/department.py | frague59/digitick-client | b8787438cddc60720c60c8b23826185a7d0988d5 | [
"MIT"
] | null | null | null | digitick_client/models/department.py | frague59/digitick-client | b8787438cddc60720c60c8b23826185a7d0988d5 | [
"MIT"
] | null | null | null | digitick_client/models/department.py | frague59/digitick-client | b8787438cddc60720c60c8b23826185a7d0988d5 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Digitick REST API
The Digitick REST API is a set of methods giving access to catalog, user and cart management.
OpenAPI spec version: v1.0
Contact: contact@digitick.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Department(object):
    """Swagger-generated model for a Digitick department.

    Mirrors the swagger-codegen template: simple property accessors over
    ``_id``/``_name``/``_code`` plus dict/str serialisation helpers.
    """

    def __init__(self, id=None, name=None, code=None):
        """Build a Department from its optional id, name and code."""
        # attribute name -> swagger type, used by to_dict()/serialisers
        self.swagger_types = {
            'id': 'int',
            'name': 'str',
            'code': 'str'
        }
        # attribute name -> JSON key in the API definition
        self.attribute_map = {
            'id': 'id',
            'name': 'name',
            'code': 'code'
        }
        self._id = id
        self._name = name
        self._code = code

    @property
    def id(self):
        """Numeric identifier of this Department."""
        return self._id

    @id.setter
    def id(self, id):
        """Set the identifier of this Department."""
        self._id = id

    @property
    def name(self):
        """Display name of this Department."""
        return self._name

    @name.setter
    def name(self, name):
        """Set the display name of this Department."""
        self._name = name

    @property
    def code(self):
        """Short code of this Department."""
        return self._code

    @code.setter
    def code(self, code):
        """Set the short code of this Department."""
        self._code = code

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested swagger models (anything exposing ``to_dict``)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    element.to_dict() if hasattr(element, "to_dict") else element
                    for element in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Departments are equal when all of their attributes match."""
        if not isinstance(other, Department):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
acf1bf9ded443d04e1267020308e45345f0348bb | 138 | py | Python | tools/datasets/__init__.py | SuzukiDaichi-git/ai_edge_contest | 9fc62503dc755da33d7ffa6f85862964dba1c8d5 | [
"MIT"
] | null | null | null | tools/datasets/__init__.py | SuzukiDaichi-git/ai_edge_contest | 9fc62503dc755da33d7ffa6f85862964dba1c8d5 | [
"MIT"
] | null | null | null | tools/datasets/__init__.py | SuzukiDaichi-git/ai_edge_contest | 9fc62503dc755da33d7ffa6f85862964dba1c8d5 | [
"MIT"
] | null | null | null | from .signate import Signate
from .cityscapes import Cityscapes
from .camvid import CamVid
__all__ = ['Signate', 'Cityscapes', 'CamVid']
| 23 | 45 | 0.768116 |
acf1bfb49b1986508d0dccb07c78434d9b38b3be | 14,070 | py | Python | Code/Python/Kamaelia/Kamaelia/Visualisation/PhysicsGraph3D/TopologyViewer3DWithParams.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Code/Python/Kamaelia/Kamaelia/Visualisation/PhysicsGraph3D/TopologyViewer3DWithParams.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Code/Python/Kamaelia/Kamaelia/Visualisation/PhysicsGraph3D/TopologyViewer3DWithParams.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
===========================================================
Generic 3D Topology Viewer With more Parameters supports
===========================================================
Extend TopologyViewer3D by supporting additional parameters of "ADD" and "UPDATE" commands.
Example Usage
-------------
A simple console driven topology viewer::
Pipeline( ConsoleReader(),
lines_to_tokenlists(),
TopologyViewer3DWithParams(),
).run()
Then at runtime try typing these commands to change the topology in real time::
>>> DEL ALL
>>> ADD NODE 1 "1st node" (0,0,-10) teapot
>>> ADD NODE 2 "2nd node" randompos sphere image=../../../Docs/cat.gif
>>> ADD NODE 3 "3rd node" randompos - bgcolour=(255,255,0);bgcolour=(0,255,255)
>>> UPDATE NODE 1 name=1st;bgcolour=(0,255,0)
>>> UPDATE NODE 3 name=3rd;bgcolour=(255,0,0);fgcolour=(0,0,255);fontsize=100
>>> ADD NODE 1:1 "1st child node of the 1st node" " ( 0 , 0 , -10 ) " -
>>> ADD NODE 1:2 "2nd child node of the 1st node" randompos - "fontsize = 20"
>>> ADD LINK 1 2
>>> ADD LINK 3 2
>>> DEL LINK 1 2
>>> ADD LINK 1:1 1:2
>>> DEL NODE 1
How does it work?
-----------------
Extend TopologyViewer3D by supporting additional parameters of "ADD" and "UPDATE" commands.
The format of "ADD" commands:
[ "ADD", "NODE", <id>, <name>, <positionSpec>, <particle type>, <parameters> ]
The format of "UPDATE" commands:
[ "UPDATE", "NODE", <id>, <parameters> ]
The format of parameters: pa=pa_value;pb=pb_value
Add quotation if there are spaces within parameters.
Available parameters:
- bgcolour -- Colour of surfaces behind text label (default=(230,230,230)), only apply to label texture
- fgcolour -- Colour of the text label (default=(0,0,0), only apply to label texture
- sidecolour -- Colour of side planes (default=(200,200,244)), only apply to CuboidParticle3D
- bgcolourselected -- Background colour when the particle is selected (default=(0,0,0)
- bgcolourselected -- Frontground colour when the particle is selected (default=(244,244,244))
- sidecolourselected -- Side colour when the particle is selected (default=(0,0,100))
- margin -- Margin size in pixels (default=8)
- fontsize -- Font size for label text (default=50)
- pixelscaling -- Factor to convert pixels to units in 3d, ignored if size is specified (default=100)
- thickness -- Thickness of button widget, ignored if size is specified (default=0.3)
- image -- The uri of image, image texture instead of label texture is used if specified
See Kamaelia.PhysicsGraph3D.TopologyViewer3D.TopologyViewer3D for more information.
"""
import re
def paramStr2paramDict(string):
    """Parse a "pa=pa_value;pb=pb_value" parameters string into a dict.

    Values shaped like "(r, g, b)" colour triples become lists of ints,
    plain decimals become int/float, and anything else is kept as the
    stripped string.
    """
    colourRegex = re.compile(r"^\( *(\d{1,3}) *, *(\d{1,3}) *, *(\d{1,3}) *\)$")
    decimalRegex = re.compile(r'^\d*\.?\d*$')
    dictionary = {}
    string = string.strip().strip(';')
    for item in string.split(';'):
        # BUG FIX: split only on the first '=' so values that themselves
        # contain '=' (e.g. "image=http://host/x.gif?a=b") are not truncated.
        result = item.split('=', 1)
        param = result[0].strip()
        value = result[1].strip()
        mColour = colourRegex.match(value)
        if mColour:  # colour triple "(r, g, b)" -> [r, g, b]
            value = list(map(int, mColour.groups()))
        else:
            # The decimal regex also matches "", so guard against int('').
            if value and decimalRegex.match(value):
                value = float(value) if '.' in value else int(value)
        dictionary[param] = value
    return dictionary
from TopologyViewer3D import TopologyViewer3D
class TopologyViewer3DWithParams(TopologyViewer3D):
    """\
    TopologyViewer3DWithParams(...) -> new TopologyViewer3DWithParams component.

    A component that takes incoming topology (change) data and displays it live
    using pygame OpenGL. A simple physics model assists with visual layout. Particle
    types, appearance and physics interactions can be customised.

    It extends TopologyViewer3D by supporting additional parameters of "ADD" commands.

    Keyword arguments (in order):

    - screensize          -- (width,height) of the display area (default = (800,600))
    - fullscreen          -- True to start up in fullscreen mode (default = False)
    - caption             -- Caption for the pygame window (default = "Topology Viewer")
    - particleTypes       -- dict("type" -> klass) mapping types of particle to classes used to render them (default = {"-":RenderingParticle})
    - initialTopology     -- (nodes,bonds) where bonds=list((src,dst)) starting state for the topology (default=([],[]))
    - laws                -- Physics laws to apply between particles (default = SimpleLaws(bondlength=100))
    - simCyclesPerRedraw  -- number of physics sim cycles to run between each redraw (default=1)
    - border              -- Minimum distance from edge of display area that new particles appear (default=100)
    """

    def __init__(self, **argd):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        super(TopologyViewer3DWithParams, self).__init__(**argd)

    def updateParticle(self, node_id, **params):
        """\
        updateParticle(node_id, \*\*params) -> updates the given node's parameters/attributes if it exists

        - node_id  -- an id for an already existing node
        - params -- the updated parameters/attributes dictionary of the particle, e.g. name, texture, colour and size
        """
        for p in self.physics.particles:
            if p.ID == node_id:
                p.updateAttrs(**params)
                # Force the renderer to rebuild this particle's texture/label.
                p.needRedraw = True
                return

    def doCommand(self, msg):
        """\
        Proceses a topology command tuple:
            [ "ADD", "NODE", <id>, <name>, <positionSpec>, <particle type> ]
            [ "DEL", "NODE", <id> ]
            [ "ADD", "LINK", <id from>, <id to> ]
            [ "DEL", "LINK", <id from>, <id to> ]
            [ "DEL", "ALL" ]
            [ "GET", "ALL" ]
        """
        #print ('doCommand')
        if len(msg) >= 2:
            cmd = msg[0].upper(), msg[1].upper()

            # Add default arguments when they are not provided
            if cmd == ("ADD", "NODE"):
                if len(msg) == 4:
                    # neither <positionSpec> nor <particle type> given
                    msg += ['randompos', '-']
                elif len(msg) == 5:
                    # <particle type> missing; '-' selects the default type
                    msg += ['-']

            if cmd == ("ADD", "NODE") and (len(msg) == 6 or len(msg) == 7):
                # Optional 7th element is a "pa=..;pb=.." parameters string.
                if len(msg) == 7 and msg[6].strip() != "":
                    params = paramStr2paramDict(msg[6])
                else:
                    params = {}
                if msg[2] in [p.ID for p in self.physics.particles]:
                    print ("Node exists, please use a new node ID!")
                else:
                    if ( msg[5] in self.particleTypes ):
                        #print ('ADD NODE begin')
                        ptype = self.particleTypes[msg[5]]
                        ident = msg[2]
                        name = msg[3]
                        posSpec = msg[4]
                        pos = self._generatePos(posSpec)
                        particle = ptype(position = pos, ID=ident, name=name, **params)
                        # remember the type string so the topology can be re-emitted
                        particle.originaltype = msg[5]
                        #self.particles.append(particle)
                        #print (self.particles[0])
                        self.addParticle(particle)
                        self.isNewNode = True
                        #print (id(particle))
                        #print ('ADD NODE end')

            elif cmd == ("DEL", "NODE") and len(msg) == 3:
                ident = msg[2]
                self.removeParticle(ident)

            elif cmd == ("ADD", "LINK") and len(msg) == 4:
                src = msg[2]
                dst = msg[3]
                self.makeBond(src, dst)

            elif cmd == ("DEL", "LINK") and len(msg) == 4:
                src = msg[2]
                dst = msg[3]
                self.breakBond(src, dst)

            elif cmd == ("DEL", "ALL") and len(msg) == 2:
                self.removeParticle(*self.physics.particleDict.keys())
                # reset the navigation state to the top level
                self.currentLevel = 0
                self.currentParentParticleID = ''

            elif cmd == ("GET", "ALL") and len(msg) == 2:
                # Emit a full snapshot, prefixed with DEL ALL so the receiver
                # can rebuild from scratch.
                topology = [("DEL","ALL")]
                topology.extend(self.getTopology())
                self.send( ("TOPOLOGY", topology), "outbox" )

            elif cmd == ("UPDATE_NAME", "NODE") and len(msg) == 4:
                node_id = msg[2]
                new_name = msg[3]
                self.updateParticleLabel(node_id, new_name)
                self.send( ("UPDATE_NAME", "NODE", node_id, new_name), "outbox" )

            elif cmd == ("GET_NAME", "NODE") and len(msg) == 3:
                node_id = msg[2]
                name = self.getParticleLabel(node_id)
                self.send( ("GET_NAME", "NODE", node_id, name), "outbox" )

            elif cmd == ("UPDATE", "NODE") and len(msg) == 4:
                # msg[3] is a "pa=..;pb=.." parameters string (see module doc)
                node_id = msg[2]
                params = paramStr2paramDict(msg[3])
                self.updateParticle(node_id, **params)
                self.send( ("UPDATE", "NODE", node_id, msg[3]), "outbox" )

            else:
                print ("Command Error: please check your command format!")
        else:
            print ("Command Error: not enough parameters!")
# Components exported by this module to the Kamaelia framework.
__kamaelia_components__ = ( TopologyViewer3DWithParams, )


if __name__ == "__main__":
    # Interactive demo: commands arrive both from a scripted DataSource and
    # from the console, are tokenised, then drive the 3D viewer.
    from Kamaelia.Util.DataSource import DataSource
    from Kamaelia.Visualisation.PhysicsGraph.lines_to_tokenlists import lines_to_tokenlists
    from Kamaelia.Util.Console import ConsoleEchoer,ConsoleReader
    from Kamaelia.Chassis.Graphline import Graphline

    # Data can be from both DataSource and console inputs
    print ("Please type the command you want to draw")
    Graphline(
        CONSOLEREADER = ConsoleReader(">>> "),
#         DATASOURCE = DataSource(['ADD NODE 1Node 1Node randompos -', 'ADD NODE 2Node 2Node randompos -',
#                                  'ADD NODE 3Node 3Node randompos -', 'ADD NODE 4Node 4Node randompos -',
#                                  'ADD LINK 1Node 2Node','ADD LINK 2Node 3Node', 'ADD LINK 3Node 4Node',
#                                  'ADD LINK 4Node 1Node']),
        DATASOURCE = DataSource(['ADD NODE 1Node 1Node randompos teapot image=../../../Docs/cat.gif',
                                 'ADD NODE 2Node 2Node randompos - image=../../../Docs/cat.gif',
                                 'ADD NODE 3Node 3Node randompos sphere image=../../../Docs/cat.gif',
                                 'ADD NODE 4Node 4Node randompos - image=http://kamaelia.sourceforge.net/Kamaelia.gif',
                                 'ADD NODE 5Node 5Node randompos sphere image=http://edit.kamaelia.org/Kamaelia.gif',
                                 'ADD NODE 6Node 6Node randompos -',
                                 'ADD NODE 7Node 7Node randompos sphere',
                                 'ADD LINK 1Node 2Node',
                                 'ADD LINK 1Node 3Node', 'ADD LINK 1Node 4Node',
                                 'ADD LINK 1Node 5Node','ADD LINK 1Node 6Node', 'ADD LINK 1Node 7Node',
                                 'ADD NODE 1Node:1Node 1Node:1Node randompos - image=../../../Docs/cat.gif',
                                 'ADD NODE 1Node:2Node 1Node:2Node randompos -',
                                 'ADD NODE 1Node:3Node 1Node:3Node randompos -',
                                 'ADD NODE 1Node:4Node 1Node:4Node randompos -',
                                 'ADD LINK 1Node:1Node 1Node:2Node', 'ADD LINK 1Node:2Node 1Node:3Node',
                                 'ADD LINK 1Node:3Node 1Node:4Node', 'ADD LINK 1Node:4Node 1Node:1Node',
                                 'ADD NODE 1Node:1Node:1Node 1Node:1Node:1Node randompos - image=../../../Docs/cat.gif',
                                 'ADD NODE 1Node:1Node:2Node 1Node:1Node:2Node randompos -',
                                 'ADD LINK 1Node:1Node:1Node 1Node:1Node:2Node',
                                 'ADD NODE 5Node:1Node 5Node:1Node randompos sphere image=../../../Docs/cat.gif',
                                 'ADD NODE 5Node:2Node 5Node:2Node randompos sphere',
                                 'ADD LINK 5Node:1Node 5Node:2Node'
                                 ]),
        TOKENS = lines_to_tokenlists(),
        VIEWER = TopologyViewer3DWithParams(),
        CONSOLEECHOER = ConsoleEchoer(),
        linkages = {
            ("CONSOLEREADER","outbox") : ("TOKENS","inbox"),
            ("DATASOURCE","outbox") : ("TOKENS","inbox"),
            ("TOKENS","outbox") : ("VIEWER","inbox"),
            ("VIEWER","outbox") : ("CONSOLEECHOER","inbox"),
        }
    ).run()
acf1c0ecc9987d0fb7d8178920f8e6222aab79f2 | 3,528 | py | Python | app.py | imlonghao/Ether-Faucet | be449fae2c3ae1c8bb470e5766e9ae88c31e1777 | [
"MIT"
] | 1 | 2017-10-30T01:35:33.000Z | 2017-10-30T01:35:33.000Z | app.py | imlonghao/Ether-Faucet | be449fae2c3ae1c8bb470e5766e9ae88c31e1777 | [
"MIT"
] | null | null | null | app.py | imlonghao/Ether-Faucet | be449fae2c3ae1c8bb470e5766e9ae88c31e1777 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import rlp
import redis
import binascii
import requests
import tornado.ioloop
import tornado.web
import tornado.gen
import tornado.options
from os import environ
from ethereum.transactions import Transaction
tornado.options.define('port', default=9999, type=int)
def get_price():
    """Return the current gas price in wei from the Infura Ropsten endpoint."""
    payload = {
        'method': 'eth_gasPrice',
        'id': 1,
        'jsonrpc': '2.0'
    }
    reply = requests.post('https://ropsten.infura.io/mew', json=payload, timeout=5)
    # JSON-RPC returns the price as a 0x-prefixed hex string.
    return int(reply.json()['result'], 16)
def get_tx_count():
    """Return the faucet account's latest transaction count (nonce)."""
    payload = {
        'method': 'eth_getTransactionCount',
        'params': [
            environ['ADDRESS'],  # faucet address supplied via environment
            'latest'
        ],
        'id': 1,
        'jsonrpc': '2.0'
    }
    reply = requests.post('https://ropsten.infura.io/mew', json=payload, timeout=5)
    # JSON-RPC returns the count as a 0x-prefixed hex string.
    return int(reply.json()['result'], 16)
def send_tx(data):
    """Broadcast a raw signed transaction (0x-hex string) via Infura."""
    payload = {
        'method': 'eth_sendRawTransaction',
        'params': [data],
        'id': 1,
        'jsonrpc': '2.0'
    }
    # The response body is decoded (as in the original) but not used.
    requests.post('https://ropsten.infura.io/mew', json=payload, timeout=5).json()
class BaseHandler(tornado.web.RequestHandler):
    """Base request handler exposing the application-wide Redis client."""

    @property
    def redis(self):
        # Shared redis.Redis instance attached to the Application in __main__.
        return self.application.redis
class IndexHandler(BaseHandler):
    """GET / — serve a minimal, swagger-like description of the API."""

    @tornado.gen.coroutine
    def get(self):
        # Static JSON document describing the two available endpoints.
        return self.write({
            'paths': {
                '/': {
                    'get': {
                        'responses': {
                            '200': {
                                'description': 'Return this page'
                            }
                        }
                    }
                },
                '/{Address}': {
                    'get': {
                        'parameters': [
                            {
                                'name': 'Address',
                                'in': 'path',
                                'type': 'address',
                                'description': 'Address to claim the faucet',
                                'required': True
                            }
                        ],
                        'responses': {
                            '200': {
                                'description': 'Success to claim the faucet'
                            },
                            '429': {
                                'description': 'Rate limit reached'
                            }
                        }
                    }
                }
            }
        })
class AddressHandler(BaseHandler):
    """GET /<address> — send test ether to the requested address."""

    @tornado.gen.coroutine
    def get(self, address):
        ip = self.request.remote_ip
        # Rate limit: refuse if either this client IP or this address has
        # claimed within the TTL set below.
        # NOTE(review): check-then-set is racy under concurrent requests for
        # the same ip/address — confirm whether that matters here.
        if self.redis.get(ip) is not None or self.redis.get(address) is not None:
            return self.send_error(429)
        # value = 2 * 10**18 wei (2 ether); gas limit 21000 = plain transfer.
        tx = Transaction(get_tx_count(), get_price(), 21000, address, 2 * 1000000000000000000, '')
        # network id 3 — presumably Ropsten, matching the Infura endpoint
        # used by the helper functions above; key comes from the environment.
        tx.sign(environ['PRIVATE_KEY'], 3)
        data = '0x%s' % binascii.hexlify(rlp.encode(tx)).decode()
        send_tx(data)
        # Remember the claim for 24 hours (seconds TTL).
        self.redis.set(ip, 1, 60 * 60 * 24)
        self.redis.set(address, 1, 60 * 60 * 24)
        return self.write({'success': True})
if __name__ == "__main__":
    tornado.options.parse_command_line()
    ioloop = tornado.ioloop.IOLoop.instance()
    application = tornado.web.Application([
        (r'/', IndexHandler),
        # 20-byte hex address, captured and passed to AddressHandler.get
        (r'/(0x[0-9a-fA-F]{40})', AddressHandler),
    ])
    # Shared Redis client; password comes from the environment.
    application.redis = redis.Redis(password=environ['REDIS_PASSWORD'])
    # Bind to localhost only; xheaders=True trusts X-Real-IP/X-Forwarded-For
    # from the reverse proxy so rate limiting sees the real client IP.
    application.listen(tornado.options.options.port, '127.0.0.1', xheaders=True)
    ioloop.start()
acf1c1d9ccb30bc5bb098c1b442822b84b74ff18 | 531 | py | Python | zvt/domain/quotes/stock/stock_4h_kdata.py | alucardxh/zvt | 02a2c64828146f4e15e702150f26a5ab6647a91c | [
"MIT"
] | 1 | 2021-09-08T17:00:23.000Z | 2021-09-08T17:00:23.000Z | zvt/domain/quotes/stock/stock_4h_kdata.py | alucardxh/zvt | 02a2c64828146f4e15e702150f26a5ab6647a91c | [
"MIT"
] | 1 | 2021-12-02T11:21:52.000Z | 2021-12-02T11:21:52.000Z | zvt/domain/quotes/stock/stock_4h_kdata.py | alucardxh/zvt | 02a2c64828146f4e15e702150f26a5ab6647a91c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# this file is generated by gen_kdata_schema function, dont't change it
from sqlalchemy.orm import declarative_base
from zvt.contract.register import register_schema
from zvt.domain.quotes import StockKdataCommon
# Dedicated declarative base so this schema gets its own metadata/table set.
KdataBase = declarative_base()


class Stock4hKdata(KdataBase, StockKdataCommon):
    # 4-hour candlestick data for stocks; columns come from StockKdataCommon.
    __tablename__ = 'stock_4h_kdata'


# Register the schema with both data providers; records are stored in the
# 'stock_4h_kdata' database.
register_schema(providers=['joinquant', 'em'], db_name='stock_4h_kdata', schema_base=KdataBase, entity_type='stock')

# the __all__ is generated
__all__ = ['Stock4hKdata']
acf1c28c596822afbae7358577a325cbedea0de7 | 2,835 | py | Python | DL/CNN-Numpy/layers/bn.py | ForrestPi/DL_module | 1ddd041ac742b670217fab0098b3939ff252ee26 | [
"MIT"
] | 1 | 2019-11-14T10:34:39.000Z | 2019-11-14T10:34:39.000Z | DL/CNN-Numpy/layers/bn.py | ForrestPi/DL_module | 1ddd041ac742b670217fab0098b3939ff252ee26 | [
"MIT"
] | null | null | null | DL/CNN-Numpy/layers/bn.py | ForrestPi/DL_module | 1ddd041ac742b670217fab0098b3939ff252ee26 | [
"MIT"
] | null | null | null | import numpy as np
import math
class BatchNorm(object):
def __init__(self, shape):
self.output_shape = shape
self.batch_size = shape[0]
self.input_data = np.zeros(shape)
self.alpha = np.ones(shape[-1])
self.beta = np.zeros(shape[-1])
self.a_gradient = np.zeros(shape[-1])
self.b_gradient = np.zeros(shape[-1])
self.moving_mean = np.zeros(shape[-1])
self.moving_var = np.zeros(shape[-1])
self.epsilon = 0.00001
self.moving_decay = 0.997
def forward(self, x, phase='train'):
self.input_data = x
self.mean = np.mean(x, axis=(0, 1, 2))
self.var = self.batch_size / (self.batch_size - 1) * np.var(x,
axis=(0, 1, 2)) if self.batch_size > 1 else np.var(
x, axis=(0, 1, 2))
# initialize shadow_variable with mean
if np.sum(self.moving_mean) == 0 and np.sum(self.moving_var) == 0:
self.moving_mean = self.mean
self.moving_var = self.var
# update shadow_variable with mean, var, moving_decay
else:
self.moving_mean = self.moving_decay * self.moving_mean + (1 - self.moving_decay)*self.mean
self.moving_var = self.moving_decay * self.moving_var + (1 - self.moving_decay)*self.var
if phase == 'train':
self.normed_x = (x - self.mean)/np.sqrt(self.var+self.epsilon)
if phase == 'test':
self.normed_x = (x - self.moving_mean)/np.sqrt(self.moving_var+self.epsilon)
return self.normed_x*self.alpha+self.beta
def gradient(self, eta):
self.a_gradient = np.sum(eta * self.normed_x, axis=(0, 1, 2))
self.b_gradient = np.sum(eta * self.normed_x, axis=(0, 1, 2))
normed_x_gradient = eta * self.alpha
var_gradient = np.sum(-1.0/2*normed_x_gradient*(self.input_data - self.mean)/(self.var+self.epsilon)**(3.0/2), axis=(0,1,2))
mean_gradinet = np.sum(-1/np.sqrt(self.var+self.epsilon)*normed_x_gradient, axis=(0,1,2))
x_gradient = normed_x_gradient*np.sqrt(self.var+self.epsilon)+2*(self.input_data-self.mean)*var_gradient/self.batch_size+mean_gradinet/self.batch_size
return x_gradient
def backward(self, alpha=0.0001):
self.alpha -= alpha * self.a_gradient
self.beta -= alpha * self.b_gradient
if __name__ == "__main__":
    # Smoke test.  NOTE: Python 2 print statements — this demo block does not
    # run under Python 3 as written.
    shape = [12,3]
    bn = BatchNorm(shape)
    print 'batch:', bn.batch_size
    a = np.random.random(shape)
    # Perturb the input both ways to eyeball gradients numerically.
    epsilon = 1e-4
    out1 = bn.forward(a+epsilon)
    out2 = bn.forward(a-epsilon)
    # print bn.mean
    # print bn.var
    # print bn.moving_mean
    # print bn.moving_var
    # print bn.mean.shape
    print out1
    # print (out1-out2)/(2*epsilon)
    print bn.gradient(np.ones(shape))
acf1c30020a719c920148b7dee36a48379d7ed11 | 6,410 | py | Python | assignment2/cpeaks.py | jeffrobots/gatech_ml | 2f98e56da1cc6b0b846d34111a37c0735abaa5a3 | [
"MIT"
] | null | null | null | assignment2/cpeaks.py | jeffrobots/gatech_ml | 2f98e56da1cc6b0b846d34111a37c0735abaa5a3 | [
"MIT"
] | null | null | null | assignment2/cpeaks.py | jeffrobots/gatech_ml | 2f98e56da1cc6b0b846d34111a37c0735abaa5a3 | [
"MIT"
] | 3 | 2020-10-01T03:59:24.000Z | 2021-02-26T22:25:41.000Z | import mlrose
import numpy as np
import pickle
import logging
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.metrics import accuracy_score
import os
from alg_runner import sim_annealing_runner, rhc_runner, ga_runner, mimic_runner
from plotting import plot_montecarlo_sensitivity
from datetime import datetime
import pandas as pd
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
np.random.seed(1)
def run_cpeaks():
    """Benchmark RHC / SA / GA / MIMIC on the Continuous Peaks problem.

    Part 1 runs montecarlo hyper-parameter sensitivity sweeps at a fixed
    problem size and pickles the raw results; part 2 sweeps the problem
    size and writes best-fitness / wall-time / per-iteration-fitness CSVs.
    All artifacts land under ./output/CPeaks/.
    """
    # If the output/CPeaks directory doesn't exist, create it.
    if not os.path.exists('./output/CPeaks/'):
        os.mkdir('./output/CPeaks/')

    # ---- Part 1: montecarlo sensitivity at a fixed problem size ----
    problem_size = 50
    peaks_fit = mlrose.ContinuousPeaks(t_pct=.1)
    cpeaks_state_gen = lambda: np.random.randint(2, size=problem_size)
    init_state = cpeaks_state_gen()
    problem = mlrose.DiscreteOpt(length=problem_size, fitness_fn=peaks_fit, maximize=True, max_val=2)

    all_results = {}

    print("Running simulated annealing montecarlos")
    sa_results, sa_timing = sim_annealing_runner(problem, init_state, state_regenerator=cpeaks_state_gen)
    plot_montecarlo_sensitivity('CPeaks', 'sim_anneal', sa_results)
    plot_montecarlo_sensitivity('CPeaks', 'sim_anneal_timing', sa_timing)
    all_results['SA'] = [sa_results, sa_timing]

    print("Running random hill montecarlos")
    rhc_results, rhc_timing = rhc_runner(problem, init_state, state_regenerator=cpeaks_state_gen)
    plot_montecarlo_sensitivity('CPeaks', 'rhc', rhc_results)
    # BUG FIX: this previously plotted sa_timing under the 'rhc_timing' name
    # (copy-paste from the SA section above).
    plot_montecarlo_sensitivity('CPeaks', 'rhc_timing', rhc_timing)
    all_results['RHC'] = [rhc_results, rhc_timing]

    print("Running genetic algorithm montecarlos")
    ga_results, ga_timing = ga_runner(problem, init_state, state_regenerator=cpeaks_state_gen)
    plot_montecarlo_sensitivity('CPeaks', 'ga', ga_results)
    plot_montecarlo_sensitivity('CPeaks', 'ga_timing', ga_timing)
    all_results['GA'] = [ga_results, ga_timing]

    print("Running MIMIC montecarlos")
    mimic_results, mimic_timing = mimic_runner(problem, init_state, state_regenerator=cpeaks_state_gen)
    plot_montecarlo_sensitivity('CPeaks', 'mimic', mimic_results)
    plot_montecarlo_sensitivity('CPeaks', 'mimic_timing', mimic_timing)
    all_results['MIMIC'] = [mimic_results, mimic_timing]

    with open('./output/CPeaks/cpeaks_data.pickle', 'wb') as handle:
        pickle.dump(all_results, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # ---- Part 2: best fitness and wall time as the problem size grows ----
    problem_size_space = np.linspace(10, 100, 20, dtype=int)

    best_fit_dict = {}
    best_fit_dict['Problem Size'] = problem_size_space
    best_fit_dict['Random Hill Climbing'] = []
    best_fit_dict['Simulated Annealing'] = []
    best_fit_dict['Genetic Algorithm'] = []
    best_fit_dict['MIMIC'] = []

    times = {}
    times['Problem Size'] = problem_size_space
    times['Random Hill Climbing'] = []
    times['Simulated Annealing'] = []
    times['Genetic Algorithm'] = []
    times['MIMIC'] = []

    for prob_size in problem_size_space:
        logger.info("---- Problem size: " + str(prob_size) + " ----")
        prob_size_int = int(prob_size)
        peaks_fit = mlrose.ContinuousPeaks(t_pct=.2)
        problem = mlrose.DiscreteOpt(length=prob_size_int, fitness_fn=peaks_fit, maximize=True, max_val=2)
        cpeaks_state_gen = lambda: np.random.randint(2, size=prob_size_int)
        init_state = cpeaks_state_gen()

        start = datetime.now()
        _, best_fitness_sa, fit_array_sa = mlrose.simulated_annealing(
            problem,
            schedule=mlrose.ExpDecay(exp_const=.001, init_temp=5, min_temp=.01),
            max_attempts=50,
            max_iters=20000, init_state=init_state, track_fits=True)
        best_fit_dict['Simulated Annealing'].append(best_fitness_sa)
        end = datetime.now()
        times['Simulated Annealing'].append((end-start).total_seconds())

        start = datetime.now()
        _, best_fitness_rhc, fit_array_rhc = mlrose.random_hill_climb(
            problem, max_attempts=100, max_iters=3000,
            restarts=50, track_fits=True)
        best_fit_dict['Random Hill Climbing'].append(best_fitness_rhc)
        end = datetime.now()
        times['Random Hill Climbing'].append((end-start).total_seconds())

        start = datetime.now()
        _, best_fitness_ga, fit_array_ga = mlrose.genetic_alg(
            problem, pop_size=prob_size_int*10,
            mutation_prob=.025, max_attempts=30, track_fits=True, max_iters=1000)
        best_fit_dict['Genetic Algorithm'].append(best_fitness_ga)
        end = datetime.now()
        times['Genetic Algorithm'].append((end-start).total_seconds())

        start = datetime.now()
        _, best_fitness_mimic, fit_array_mimic = mlrose.mimic(
            problem, pop_size=prob_size_int*10,
            keep_pct=.1, max_attempts=30, track_fits=True, max_iters=2000)
        best_fit_dict['MIMIC'].append(best_fitness_mimic)
        end = datetime.now()
        times['MIMIC'].append((end-start).total_seconds())

        # NOTE(review): this dict is rebuilt every iteration, so the CSV
        # written below only contains the LAST problem size's fitness
        # curves — looks unintentional, but preserved as-is.
        fits_per_iteration = {}
        fits_per_iteration['Random Hill Climbing'] = fit_array_rhc
        fits_per_iteration['Simulated Annealing'] = fit_array_sa
        fits_per_iteration['Genetic Algorithm'] = fit_array_ga
        fits_per_iteration['MIMIC'] = fit_array_mimic

    fit_frame = pd.DataFrame.from_dict(best_fit_dict, orient='index').transpose()
    # fit_frame.pop('Unnamed: 0') # idk why this shows up.
    time_frame = pd.DataFrame.from_dict(times, orient='index').transpose()
    # time_frame.pop('Unnamed: 0') # idk why this shows up.
    fit_iteration_frame = pd.DataFrame.from_dict(fits_per_iteration, orient='index').transpose()

    fit_frame.to_csv('./output/CPeaks/problem_size_fit.csv')
    time_frame.to_csv('./output/CPeaks/problem_size_time.csv')
    fit_iteration_frame.to_csv('./output/CPeaks/fit_per_iteration.csv')
if __name__ == "__main__":
    # Entry point: generate all CPeaks benchmark artifacts.
    run_cpeaks()
    # Run fitness at each iteration for a large sample size.
| 40.56962 | 129 | 0.673947 |
acf1c37f6fcb6bec8074656f903c5593b01eab4a | 6,050 | py | Python | discovery-infra/create_triage_tickets.py | cwilkers/assisted-test-infra | 0e72f773d52d7adb6f3cdc431201416b52789a93 | [
"Apache-2.0"
] | null | null | null | discovery-infra/create_triage_tickets.py | cwilkers/assisted-test-infra | 0e72f773d52d7adb6f3cdc431201416b52789a93 | [
"Apache-2.0"
] | 114 | 2021-01-10T11:38:38.000Z | 2022-03-28T02:04:34.000Z | discovery-infra/create_triage_tickets.py | oshercc/assisted-test-infra | d6ab6df315e8b12065ad6c0147889a1d8af6641b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# This script gets a list of the filed clusters from the assisted-logs-server
# For each cluster, which does not already has a triaging Jira ticket, it creates one
#
import argparse
import logging
import netrc
import os
import sys
from urllib.parse import urlparse
import requests
import jira
import add_triage_signature
DEFAULT_WATCHERS = ["ronniela", "romfreiman", "ealster", "sarahlav"]
LOGS_COLLECTOR = "http://assisted-logs-collector.usersys.redhat.com"
JIRA_SERVER = "https://issues.redhat.com/"
DEFAULT_NETRC_FILE = "~/.netrc"
JIRA_SUMMARY = "cloud.redhat.com failure: {failure_id}"
JIRA_DESCRIPTION = """
h1. Cluster Info
*Cluster ID:* [{cluster_id}|https://cloud.redhat.com/openshift/assisted-installer/clusters/{cluster_id}]
*Username:* {username}
*Created_at:* {created_at}
*Installation started at:* {installation_started_at}
*Failed on:* {failed_on}
*status:* {status}
*status_info:* {status_info}
*OpenShift version:* {openshift_version}
*logs:* [{logs_collector}/#/{failure_id}/]
h1. Triage Results
h2. Failure Reason:
h2. Comments:
"""
def get_credentials_from_netrc(server, netrc_file=DEFAULT_NETRC_FILE):
    """Return the (login, password) pair stored for *server* in a netrc file."""
    credentials = netrc.netrc(os.path.expanduser(netrc_file))
    login, _account, password = credentials.authenticators(server)
    return login, password
def get_jira_client(username, password):
    """Log in to the Red Hat Jira server with basic auth and return the client."""
    logger.info("log-in with username: %s", username)
    return jira.JIRA(JIRA_SERVER, basic_auth=(username, password))
def format_description(failure_data):
    """Render the ticket body from the JIRA_DESCRIPTION template.

    ``failure_data`` must provide every placeholder the template uses
    (cluster_id, username, created_at, installation_started_at, failed_on,
    status, status_info, openshift_version, failure_id).
    """
    return JIRA_DESCRIPTION.format(logs_collector=LOGS_COLLECTOR, **failure_data)
def format_summary(failure_data):
    """Render the ticket summary; also the de-duplication key for tickets."""
    return JIRA_SUMMARY.format(**failure_data)
def format_labels(failure_data):
    """Jira labels identifying this auto-triage ticket.

    Includes per-cluster and per-user labels so tickets can be searched
    by either dimension.
    """
    cluster_label = "AI_CLUSTER_{cluster_id}".format(**failure_data)
    user_label = "AI_USER_{username}".format(**failure_data)
    return ["no-qe", "AI_CLOUD_TRIAGE", cluster_label, user_label]
def get_all_triage_tickets(jclient):
    """Return the set of summaries of every existing triage ticket.

    Pages through the Jira search results 100 issues at a time until an
    empty page is returned.
    """
    query = 'component = "Assisted-Installer Triage"'
    page_size = 100
    start_at = 0
    summaries = []
    while True:
        page = jclient.search_issues(query, maxResults=page_size, startAt=start_at,
                                     fields=['summary', 'key'])
        if len(page) == 0:
            break
        summaries.extend(issue.fields.summary for issue in page)
        start_at += page_size
    return set(summaries)
def add_watchers(jclient, issue):
    """Subscribe the default triage team (DEFAULT_WATCHERS) to *issue*."""
    for w in DEFAULT_WATCHERS:
        jclient.add_watcher(issue.key, w)
def create_jira_ticket(jclient, existing_tickets, failure_data):
    """Open a triage ticket for this failure unless one already exists.

    Returns the newly created issue, or None when a ticket with the same
    summary is already present in ``existing_tickets``.
    """
    summary = format_summary(failure_data)
    if summary in existing_tickets:
        logger.debug("issue found: %s", summary)
        return None

    # NOTE(review): component casing differs from the JQL used in
    # get_all_triage_tickets ("Assisted-Installer Triage") — Jira usually
    # matches case-insensitively, but confirm both refer to one component.
    new_issue = jclient.create_issue(project="MGMT",
                                     summary=summary,
                                     components=[{'name': "Assisted-installer Triage"}],
                                     priority={'name': 'Blocker'},
                                     issuetype={'name': 'Bug'},
                                     labels=format_labels(failure_data),
                                     description=format_description(failure_data))
    logger.info("issue created: %s", new_issue)
    add_watchers(jclient, new_issue)
    return new_issue
def main(arg):
    """Scan the logs collector for failed clusters and open a JIRA triage
    ticket (plus triage signatures) for every failure without one.

    *arg* is the parsed argparse namespace.  Exits the process when the
    credentials cannot be obtained or the logs collector is unreachable.
    """
    if arg.user_password is None:
        username, password = get_credentials_from_netrc(urlparse(JIRA_SERVER).hostname, arg.netrc)
    else:
        try:
            username, password = arg.user_password.split(":", 1)
        except ValueError:
            # Bug fix: the original bare `except` logged and fell through,
            # then crashed with NameError on undefined username/password.
            logger.error("Failed to parse user:password")
            sys.exit(1)
    jclient = get_jira_client(username, password)
    try:
        res = requests.get("{}/files/".format(LOGS_COLLECTOR))
    except Exception:
        logger.exception("Error getting list of failed clusters")
        sys.exit(1)
    res.raise_for_status()
    failed_clusters = res.json()
    existing_tickets = get_all_triage_tickets(jclient)
    for failure in failed_clusters:
        # NOTE: "metdata.json" (sic) is the actual file name served by the
        # logs collector -- do not "fix" the spelling here.
        res = requests.get("{}/files/{}/metdata.json".format(LOGS_COLLECTOR, failure['name']))
        res.raise_for_status()
        cluster = res.json()['cluster']
        if cluster['status'] == "error":
            cluster_data = {"cluster_id": cluster['id'],
                            "failure_id": failure['name'],
                            "openshift_version": cluster['openshift_version'],
                            "created_at": add_triage_signature.format_time(cluster['created_at']),
                            "installation_started_at": add_triage_signature.format_time(cluster['install_started_at']),
                            "failed_on": add_triage_signature.format_time(cluster['status_updated_at']),
                            "status": cluster['status'],
                            "status_info": cluster['status_info'],
                            "username": cluster['user_name']}
            new_issue = create_jira_ticket(jclient, existing_tickets, cluster_data)
            if new_issue is not None:
                logs_url = "{}/files/{}".format(LOGS_COLLECTOR, failure['name'])
                add_triage_signature.add_signatures(jclient, logs_url, new_issue.key)
if __name__ == "__main__":
    # CLI entry point: credentials come either from a netrc file (default)
    # or inline via -up user:pass (mutually exclusive options).
    parser = argparse.ArgumentParser()
    loginGroup = parser.add_argument_group(title="login options")
    loginArgs = loginGroup.add_mutually_exclusive_group()
    loginArgs.add_argument("--netrc", default="~/.netrc", required=False, help="netrc file")
    loginArgs.add_argument("-up", "--user-password", required=False, help="Username and password in the format of user:pass")
    parser.add_argument("-v", "--verbose", action="store_true", help="Output verbose logging")
    args = parser.parse_args()
    # Root logging stays at WARN; this script's own logger runs at INFO,
    # or DEBUG when -v is given.
    logging.basicConfig(level=logging.WARN, format='%(levelname)-10s %(message)s')
    logger = logging.getLogger(__name__)
    logging.getLogger("__main__").setLevel(logging.INFO)
    if args.verbose:
        logging.getLogger("__main__").setLevel(logging.DEBUG)
    main(args)
| 35.588235 | 125 | 0.650413 |
acf1c47c5a7cabed7c642260cf6d5c1ea4174d52 | 5,064 | py | Python | tests/integration/states/pip.py | gotcha/salt | 7b84c704777d3d2062911895dc3fdf93d40e9848 | [
"Apache-2.0"
] | 1 | 2015-10-06T22:25:22.000Z | 2015-10-06T22:25:22.000Z | tests/integration/states/pip.py | gotcha/salt | 7b84c704777d3d2062911895dc3fdf93d40e9848 | [
"Apache-2.0"
] | null | null | null | tests/integration/states/pip.py | gotcha/salt | 7b84c704777d3d2062911895dc3fdf93d40e9848 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
tests.integration.states.pip
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2012 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import python libs
import os
import shutil
# Import salt libs
import integration
class PipStateTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
    """Integration tests for the salt ``pip.installed`` state.

    Runs state.sls against a live salt minion; every test cleans up the
    virtualenv it creates.  Written for Python 2 (note ``except OSError, err``).
    """
    def setUp(self):
        # All tests need the virtualenv binary on the minion.
        super(PipStateTest, self).setUp()
        ret = self.run_function('cmd.has_exec', ['virtualenv'])
        if not ret:
            self.skipTest('virtualenv not installed')
    def test_pip_installed_errors(self):
        """pip.installed must fail cleanly for a missing virtualenv, then
        succeed once the virtualenv is created."""
        venv_dir = os.path.join(
            integration.SYS_TMP_DIR, 'pip-installed-errors'
        )
        try:
            # Since we don't have the virtualenv created, pip.installed will
            # throw an error.
            ret = self.run_function('state.sls', mods='pip-installed-errors')
            self.assertSaltFalseReturn(ret)
            self.assertSaltCommentRegexpMatches(
                ret,
                'Error installing \'supervisor\': .* '
                '[nN]o such file or directory'
            )
            # We now create the missing virtualenv
            ret = self.run_function('virtualenv.create', [venv_dir])
            self.assertEqual(ret['retcode'], 0)
            # The state should not have any issues running now
            ret = self.run_function('state.sls', mods='pip-installed-errors')
            self.assertSaltTrueReturn(ret)
        finally:
            if os.path.isdir(venv_dir):
                shutil.rmtree(venv_dir)
    def test_pip_installed_weird_install(self):
        """Packages like carbon install into /opt/graphite instead of
        site-packages; the state must still report success."""
        ographite = '/opt/graphite'
        if os.path.isdir(ographite):
            self.skipTest(
                'You already have \'{0}\'. This test would overwrite this '
                'directory'.format(ographite)
            )
        try:
            os.makedirs(ographite)
        except OSError, err:
            if err.errno == 13:
                # Permission denied
                self.skipTest(
                    'You don\'t have the required permissions to run this test'
                )
        finally:
            if os.path.isdir(ographite):
                shutil.rmtree(ographite)
        venv_dir = os.path.join(
            integration.SYS_TMP_DIR, 'pip-installed-weird-install'
        )
        try:
            # Since we don't have the virtualenv created, pip.installed will
            # throw an error.
            ret = self.run_function(
                'state.sls', mods='pip-installed-weird-install'
            )
            self.assertSaltTrueReturn(ret)
            # We cannot use assertInSaltComment here because we need to skip
            # some of the state return parts
            for key in ret.keys():
                self.assertTrue(ret[key]['result'])
                if ret[key]['comment'] == 'Created new virtualenv':
                    continue
                self.assertEqual(
                    ret[key]['comment'],
                    'There was no error installing package \'carbon\' '
                    'although it does not show when calling \'pip.freeze\'.'
                )
        finally:
            if os.path.isdir(venv_dir):
                shutil.rmtree(venv_dir)
            if os.path.isdir('/opt/graphite'):
                shutil.rmtree('/opt/graphite')
    def test_issue_2028_pip_installed_state(self):
        """Regression test for issue #2028: pip.installed must place console
        scripts into the virtualenv's bin directory."""
        ret = self.run_function('state.sls', mods='issue-2028-pip-installed')
        venv_dir = os.path.join(
            integration.SYS_TMP_DIR, 'issue-2028-pip-installed'
        )
        try:
            self.assertSaltTrueReturn(ret)
            self.assertTrue(
                os.path.isfile(os.path.join(venv_dir, 'bin', 'supervisord'))
            )
        finally:
            if os.path.isdir(venv_dir):
                shutil.rmtree(venv_dir)
    def test_issue_2087_missing_pip(self):
        """Regression test for issue #2087: a virtualenv without a pip binary
        must produce an explicit error, not a crash."""
        venv_dir = os.path.join(
            integration.SYS_TMP_DIR, 'issue-2087-missing-pip'
        )
        try:
            # Let's create the testing virtualenv
            ret = self.run_function('virtualenv.create', [venv_dir])
            self.assertEqual(ret['retcode'], 0)
            # Let's remove the pip binary
            pip_bin = os.path.join(venv_dir, 'bin', 'pip')
            if not os.path.isfile(pip_bin):
                self.skipTest(
                    'Failed to find the pip binary to the test virtualenv'
                )
            os.remove(pip_bin)
            # Let's run the state which should fail because pip is missing
            ret = self.run_function('state.sls', mods='issue-2087-missing-pip')
            self.assertSaltFalseReturn(ret)
            self.assertInSaltComment(
                ret,
                'Error installing \'pep8\': Could not find a `pip` binary'
            )
        finally:
            if os.path.isdir(venv_dir):
                shutil.rmtree(venv_dir)
acf1c4dd60e23462abf109dca07760bde31edb35 | 626 | py | Python | check_binarization.py | unam3/a3-mission-check-helper-core | ad42f1d42d9c885d24517bd8e9d57bd963d09290 | [
"BSD-3-Clause"
] | null | null | null | check_binarization.py | unam3/a3-mission-check-helper-core | ad42f1d42d9c885d24517bd8e9d57bd963d09290 | [
"BSD-3-Clause"
] | null | null | null | check_binarization.py | unam3/a3-mission-check-helper-core | ad42f1d42d9c885d24517bd8e9d57bd963d09290 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python2
# coding: utf-8
from __future__ import unicode_literals
def was_mission_binarized(mission_sqm_path):
    """Return True when the mission.sqm at *mission_sqm_path* was binarized.

    A binarized (then de-rapped) mission.sqm carries a ``//DeRap: wog_``
    signature on its second line, so only that line needs inspecting.
    Returns False when the signature is absent or the file has fewer than
    two lines (the original implicitly returned None in that case).
    """
    reference_line_part = '//DeRap: wog_'
    with open(mission_sqm_path, 'r') as f:
        for line_number, line in enumerate(f):
            if line_number == 1:
                # Second line: the only place the signature can appear.
                return line.startswith(reference_line_part)
    # Fewer than two lines: the signature cannot be present.
    return False
| 25.04 | 90 | 0.555911 |
acf1c538f16f1a893a49581ed4f7aea4cdd6046c | 7,409 | py | Python | stacks/XIAOMATECH/1.0/services/DATA_ANALYTICS_STUDIO/service_advisor.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | [
"Apache-2.0"
] | 3 | 2019-08-13T01:44:16.000Z | 2019-12-10T04:05:56.000Z | stacks/XIAOMATECH/1.0/services/DATA_ANALYTICS_STUDIO/service_advisor.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | [
"Apache-2.0"
] | null | null | null | stacks/XIAOMATECH/1.0/services/DATA_ANALYTICS_STUDIO/service_advisor.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | [
"Apache-2.0"
] | 7 | 2019-05-29T17:35:25.000Z | 2021-12-04T07:55:10.000Z | #!/usr/bin/env ambari-python-wrap
import imp
import os
import socket
import traceback
# Ambari convention: load the stack-level base service_advisor.py by file
# path (it lives four directories up and is not importable as a package).
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../../')
PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
try:
    with open(PARENT_FILE, 'rb') as fp:
        service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
except Exception as e:
    # Best-effort: print the traceback and continue; later class definitions
    # will fail loudly if the parent module did not load.
    traceback.print_exc()
def get_from_dict(d, keys, default_value=None):
    """Walk *keys* through nested dict *d*; return *default_value* on any miss.

    With an empty *keys* sequence, *d* itself is returned unchanged.
    """
    node = d
    for key in keys:
        if not isinstance(node, dict) or key not in node:
            return default_value
        node = node[key]
    return node
class DATAANALYTICSSTUDIOServiceAdvisor(service_advisor.ServiceAdvisor):
    """Ambari service advisor for Data Analytics Studio (DAS).

    Handles component placement (keeping the DAS webapp off hosts that
    already run PostgreSQL) and configuration recommendations (admin user,
    Knox SSO, HDFS proxy-user hosts).
    """
    def __init__(self, *args, **kwargs):
        self.as_super = super(DATAANALYTICSSTUDIOServiceAdvisor, self)
        self.as_super.__init__(*args, **kwargs)
    def getHostForComponent(self, component, hostsList):
        """Pick a host for *component*; for the DAS webapp, prefer a host
        other than the Ambari server when Ambari runs its own PostgreSQL."""
        if len(hostsList) == 0:
            return None
        componentName = self.getComponentName(component)
        if componentName == "DATA_ANALYTICS_STUDIO_WEBAPP":
            # Presence of `psql` on this (Ambari) host is used as the signal
            # that a local PostgreSQL server exists.
            self.logger.info("Checking for postgres on host: " + socket.getfqdn());
            result = os.system("which psql > /dev/null 2>&1")
            if result == 0:
                self.logger.info(
                    "Ambari host ({0}) has postgresql db, looking for other host for DATA_ANALYTICS_STUDIO_WEBAPP"
                    .format(socket.getfqdn()))
                for host in hostsList:
                    if host != socket.getfqdn():
                        self.logger.info("DATA_ANALYTICS_STUDIO_WEBAPP was put into " + host)
                        return host
        # Fall back to the stack-level placement logic.
        return super(DATAANALYTICSSTUDIOServiceAdvisor, self).getHostForComponent(component, hostsList)
    def colocateService(self, hostsComponentsMap, hostsComponentsMap_unused=None):
        pass
    def getServiceComponentLayoutValidations(self, services, hosts):
        """Warn when the DAS webapp (with its bundled PostgreSQL) ended up on
        the Ambari host that already runs PostgreSQL."""
        items = []
        componentsListList = [service["components"] for service in services["services"]]
        componentsList = [item["StackServiceComponents"] for sublist in componentsListList for item in sublist]
        dasPostgresqlServerHost = self.getHosts(componentsList, "DATA_ANALYTICS_STUDIO_WEBAPP")[0]
        result = os.system("which psql > /dev/null 2>&1")
        if result == 0 and socket.getfqdn() == dasPostgresqlServerHost:
            items.append({"type": 'host-component',
                          "level": 'WARN',
                          "message": "Data Analytics Studio PostgreSQL Server is put on the same host as Ambari, where it is running it's own PostgreSQL server. The two may collide.",
                          "component-name": 'DATA_ANALYTICS_STUDIO_WEBAPP',
                          "host": dasPostgresqlServerHost})
        return items
    def appendToProperty(self, configurations, services, configType, propertyName, propertyValues, setAllValue):
        """Append *propertyValues* to a comma-separated config property,
        skipping duplicates; a no-op when the property already equals
        *setAllValue* (e.g. the '*' wildcard)."""
        currentValue = get_from_dict(services, ("configurations", configType, "properties", propertyName),
                                     default_value="")
        if currentValue != setAllValue:
            putProperty = self.putProperty(configurations, configType, services)
            diff = set(propertyValues) - set(currentValue.split(','))
            if len(diff) > 0:
                propertyValue = ','.join(diff)
                newValue = currentValue + ',' + propertyValue if len(currentValue) > 0 else propertyValue
                self.logger.info("Setting {0} to {1}".format(propertyName, newValue))
                putProperty(propertyName, newValue)
    def getServiceConfigurationRecommendations(self, configurations, clusterSummary, services, hosts):
        """Recommend DAS security config: admin user (the hive user), the
        Knox SSO URL when Knox is installed, and HDFS proxy-user hosts."""
        servicesList = set([service['StackServices']['service_name'] for service in services['services']])
        self.logger.info("Conf recommendations for DAS")
        putDasSecurityProperty = self.putProperty(configurations, 'data_analytics_studio-security-site', services)
        das_user = get_from_dict(services, ("configurations", "hive-env", "properties", "hive_user"),
                                 default_value=None)
        putDasSecurityProperty('admin_users', das_user)
        if 'KNOX' in servicesList:
            self.logger.info("configuring knox ...")
            knox_port = '8443'
            knox_hosts = self.getComponentHostNames(services, "KNOX", "KNOX_GATEWAY")
            if len(knox_hosts) > 0:
                # Multiple gateways: deterministically pick the first host
                # in sorted order.
                knox_hosts.sort()
                knox_host = knox_hosts[0]
                knox_port = get_from_dict(services, ("configurations", "gateway-site", "properties", "gateway.port"),
                                          default_value=knox_port)
                putDasSecurityProperty('knox_sso_url',
                                       'https://{0}:{1}/gateway/knoxsso/api/v1/websso'.format(knox_host, knox_port))
                self.logger.info("knox host: {0}, knox port: {1}".format(knox_host, knox_port))
        if 'HDFS' in servicesList and 'core-site' in services['configurations']:
            self.logger.info("setting up proxy hosts")
            das_hosts = self.getComponentHostNames(services, 'DATA_ANALYTICS_STUDIO', 'DATA_ANALYTICS_STUDIO_WEBAPP')
            das_hosts = das_hosts + self.getComponentHostNames(services, 'DATA_ANALYTICS_STUDIO',
                                                               'DATA_ANALYTICS_STUDIO_EVENT_PROCESSOR')
            propertyName = 'hadoop.proxyuser.{0}.hosts'.format(das_user)
            self.appendToProperty(configurations, services, 'core-site', propertyName, das_hosts, '*')
    def getServiceConfigurationRecommendationsForSSO(self, configurations, clusterData, services, hosts):
        """Enable or disable Knox SSO for DAS according to Ambari's own
        SSO management settings."""
        self.logger.info("setting up conf for sso")
        ambari_configuration = self.get_ambari_configuration(services)
        ambari_sso_details = ambari_configuration.get_ambari_sso_details() if ambari_configuration else None
        if ambari_sso_details and ambari_sso_details.is_managing_services():
            putProperty = self.putProperty(configurations, "data_analytics_studio-security-site", services)
            if ambari_sso_details.should_enable_sso('DATA_ANALYTICS_STUDIO'):
                self.logger.info("enabling sso for das")
                putProperty('knox_sso_enabled', 'true')
                putProperty('knox_sso_url', ambari_sso_details.get_sso_provider_url())
                putProperty('knox_publickey', ambari_sso_details.get_sso_provider_certificate(False, True))
            elif ambari_sso_details.should_disable_sso('DATA_ANALYTICS_STUDIO'):
                self.logger.info("disabling sso for das")
                putProperty('knox_sso_enabled', 'false')
class HDP30DATAANALYTICSSTUDIOServiceAdvisor(DATAANALYTICSSTUDIOServiceAdvisor):
    """HDP 3.0 variant of the DAS advisor; only customizes the logger name."""
    def __init__(self, *args, **kwargs):
        self.as_super = super(HDP30DATAANALYTICSSTUDIOServiceAdvisor, self)
        self.as_super.__init__(*args, **kwargs)
        self.initialize_logger("HDP30DATAANALYTICSSTUDIOServiceAdvisor")
class HDP26DATAANALYTICSSTUDIOServiceAdvisor(DATAANALYTICSSTUDIOServiceAdvisor):
    """HDP 2.6 variant of the DAS advisor.

    Unlike the HDP 3.0 subclass it wires in resource_management's Logger
    directly instead of calling initialize_logger -- presumably because the
    2.6 base class lacks that helper; TODO confirm.
    """
    def __init__(self, *args, **kwargs):
        self.as_super = super(HDP26DATAANALYTICSSTUDIOServiceAdvisor, self)
        self.as_super.__init__(*args, **kwargs)
        from resource_management.core.logger import Logger
        self.logger = Logger
| 52.546099 | 183 | 0.653934 |
acf1c53b38ac74282a2a1504a59193b059e86c35 | 5,213 | gyp | Python | chromium/third_party/WebKit/Source/config.gyp | wedataintelligence/vivaldi-source | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | [
"BSD-3-Clause"
] | null | null | null | chromium/third_party/WebKit/Source/config.gyp | wedataintelligence/vivaldi-source | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | [
"BSD-3-Clause"
] | null | null | null | chromium/third_party/WebKit/Source/config.gyp | wedataintelligence/vivaldi-source | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
{
'variables': {
# If set to 1, doesn't compile debug symbols into webcore reducing the
# size of the binary and increasing the speed of gdb. gcc only.
'remove_webcore_debug_symbols%': 0,
# Enables the Oilpan garbage-collection infrastructure.
# If you update the default value below, be sure to update the one in
# build/features.gypi, too!
'enable_oilpan%': 0,
# If set to 1 (default) and using clang, the Blink GC plugin will check the
# usage of the garbage-collection infrastructure during compilation.
'blink_gc_plugin%': 1,
# Additional flags for the Blink GC plugin.
'blink_gc_plugin_flags%': '',
# If set to 1, the Blink will use the base allocator instead of
# PartitionAlloc. so that the top of stack-unwinding becomes the caller
# which requests memory allocation in blink.
'blink_disable_partition_allocator%': 0,
},
'targets': [
{
# GN version: //third_party/WebKit/Source:config
# (In GN this is a config rather than a target.)
'target_name': 'config',
'type': 'none',
'direct_dependent_settings': {
'include_dirs': [
'.',
'..',
],
'msvs_disabled_warnings': [
4305, 4324, 4714, 4800, 4996,
],
'variables': {
'chromium_code': 1,
'clang_warning_flags': [ '-Wglobal-constructors' ],
},
'conditions': [
['OS=="win" and component=="shared_library"', {
'defines': [
'USING_V8_SHARED',
],
}],
['OS=="win"', {
'sources/': [
['exclude', 'Posix\\.cpp$'],
],
},{ # OS!="win"
'sources/': [
['exclude', 'Win\\.cpp$'],
],
}],
['OS!="mac"', {
'sources/': [
['exclude', 'Mac\\.mm$'],
],
}],
['OS!="android"', {
'sources/': [
['exclude', 'Android\\.cpp$'],
],
}],
['OS!="win" and remove_webcore_debug_symbols==1', {
# Remove -g from all targets defined here.
'cflags!': ['-g'],
}],
# Only enable the blink_gc_plugin when using clang and chrome plugins.
['blink_gc_plugin==1 and clang==1 and clang_use_chrome_plugins==1', {
'cflags': ['<!@(python <(DEPTH)/tools/clang/scripts/blink_gc_plugin_flags.py enable-oilpan=<(enable_oilpan) <(blink_gc_plugin_flags))'],
'xcode_settings': {
'OTHER_CFLAGS': ['<!@(python <(DEPTH)/tools/clang/scripts/blink_gc_plugin_flags.py enable-oilpan=<(enable_oilpan) <(blink_gc_plugin_flags))'],
},
'msvs_settings': {
'VCCLCompilerTool': {
'AdditionalOptions': ['<!@(python <(DEPTH)/tools/clang/scripts/blink_gc_plugin_flags.py enable-oilpan=<(enable_oilpan) <(blink_gc_plugin_flags))'],
},
},
}],
['blink_disable_partition_allocator==1', {
'defines': [
'MEMORY_TOOL_REPLACES_ALLOCATOR',
],
}],
['use_system_icu==1', {
'defines': [
'USING_SYSTEM_ICU',
],
}],
],
},
},
{
'target_name': 'unittest_config',
'type': 'none',
'dependencies': [
'config',
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(DEPTH)/testing/gtest.gyp:gtest',
],
'export_dependent_settings': [
'config',
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(DEPTH)/testing/gtest.gyp:gtest',
],
'direct_dependent_settings': {
'variables': {
'chromium_code': 1,
'clang_warning_flags_unset': [ '-Wglobal-constructors' ],
},
},
}
],
}
| 36.454545 | 161 | 0.61174 |
acf1c55b872c9f411d9e39add2db944814c0e19d | 863 | py | Python | controllers/search.py | toutpuissantged/youtubeSlack | 8855ad63519ec7fdb6dbf7933f3459c77b3c1a36 | [
"MIT"
] | 2 | 2021-04-09T21:18:29.000Z | 2021-11-14T20:16:27.000Z | controllers/search.py | toutpuissantged/youtubeSlack | 8855ad63519ec7fdb6dbf7933f3459c77b3c1a36 | [
"MIT"
] | null | null | null | controllers/search.py | toutpuissantged/youtubeSlack | 8855ad63519ec7fdb6dbf7933f3459c77b3c1a36 | [
"MIT"
] | null | null | null | from pytube import Playlist
from pytube import YouTube
import threading
class Search():
    """Downloads the audio stream of a YouTube URL taken from *InputVal*.

    *InputVal* is expected to expose a ``get()`` method returning the URL
    (e.g. a Tkinter variable/entry widget) -- TODO confirm against callers.
    """
    def __init__(self,InputVal):
        super().__init__()
        # Input widget/variable providing the URL on demand.
        self.input=InputVal
        # Highest progress percentage printed so far (suppresses duplicates).
        self.previousprogress = 0
    @staticmethod
    def Test(InputVal):
        # Debug helper: print the current value held by the input object.
        print(InputVal.get())
    def Download(self):
        """Download the first audio-only stream of the video at the input URL."""
        print('started')
        url=self.input.get()
        yt = YouTube(url)
        yt.register_on_progress_callback(self.on_progress)
        yt.streams.filter(only_audio=True).first().download()
    def on_progress(self,stream, chunk, bytes_remaining):
        """pytube progress callback: print each new integer percentage once."""
        total_size = stream.filesize
        bytes_downloaded = total_size - bytes_remaining
        liveprogress = (int)(bytes_downloaded / total_size * 100)
        if liveprogress > self.previousprogress:
            self.previousprogress = liveprogress
            print(liveprogress)
acf1c5c44e9515ff8909ef7a6d6ec21bcda25e30 | 2,046 | py | Python | setup.py | dalejung/zipline | e19f02a2ecb24baebddbeb17060d7b068c710e4d | [
"Apache-2.0"
] | null | null | null | setup.py | dalejung/zipline | e19f02a2ecb24baebddbeb17060d7b068c710e4d | [
"Apache-2.0"
] | null | null | null | setup.py | dalejung/zipline | e19f02a2ecb24baebddbeb17060d7b068c710e4d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Extension
from Cython.Build import cythonize
import numpy as np
# Cython extension for the fast asset lookup code; needs NumPy headers.
ext_modules = [
    Extension(
        'zipline.assets._securities',
        ['zipline/assets/_securities.pyx'],
        include_dirs=[np.get_include()],
    ),
]
setup(
    name='zipline',
    version='0.8.0rc1',
    description='A backtester for financial algorithms.',
    author='Quantopian Inc.',
    author_email='opensource@quantopian.com',
    packages=find_packages(),
    # Compile the .pyx sources above at build time.
    ext_modules=cythonize(ext_modules),
    scripts=['scripts/run_algo.py'],
    include_package_data=True,
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Operating System :: OS Independent',
        'Intended Audience :: Science/Research',
        'Topic :: Office/Business :: Financial',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: System :: Distributed Computing',
    ],
    install_requires=[
        'Logbook',
        'pytz',
        'requests',
        'numpy',
        'pandas',
        'six',
        'Cython==0.20.1'
    ],
    # TA-Lib support is optional: pip install zipline[talib]
    extras_require={
        'talib': ["talib"],
    },
    url="http://zipline.io"
)
| 30.537313 | 74 | 0.646139 |
acf1c739c23c2d10805cf5743067242d8917fcde | 1,667 | py | Python | test/test_experiment.py | lkylych/lagom | 64777be7f09136072a671c444b5b3fbbcb1b2f18 | [
"MIT"
] | null | null | null | test/test_experiment.py | lkylych/lagom | 64777be7f09136072a671c444b5b3fbbcb1b2f18 | [
"MIT"
] | null | null | null | test/test_experiment.py | lkylych/lagom | 64777be7f09136072a671c444b5b3fbbcb1b2f18 | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from lagom.experiment import Config
from lagom.experiment import BaseExperimentWorker
from lagom.experiment import BaseExperimentMaster
from lagom import BaseAlgorithm
class SimpleAlgorithm(BaseAlgorithm):
    """Trivial algorithm that echoes the config ID back with a message."""
    def __call__(self, config):
        return config['ID'], 'Finish the work now !'
class ExperimentWorker(BaseExperimentWorker):
    """Worker that instantiates SimpleAlgorithm for each received config."""
    def make_algo(self):
        algo = SimpleAlgorithm(name='Simple algorithm')
        return algo
class ExperimentMaster(BaseExperimentMaster):
    """Master that generates the search space and validates worker results."""
    def process_algo_result(self, config, result):
        # The worker echoes back the config ID; verify the pairing is intact.
        result, msg = result
        assert result == config['ID']
        print(msg)
    def make_configs(self):
        """Build the configuration space -- presumably the cross product of
        the three random samplings (10 lr x 5 values x 10 select = 500
        configs; matches the assertion in test_experiment)."""
        config = Config()
        config.add_item(name='iter', val=30)
        config.add_item(name='hidden_sizes', val=[64, 32, 16])
        config.add_random_eps(name='lr', base=10, low=-6, high=0, num_sample=10)
        config.add_random_continuous(name='values', low=-5, high=5, num_sample=5)
        config.add_random_discrete(name='select', list_val=[43223, 5434, 21314], num_sample=10, replace=True)
        configs = config.make_configs()
        return configs
def test_experiment():
    """End-to-end check: run the master/worker experiment over all configs."""
    experiment = ExperimentMaster(worker_class=ExperimentWorker,
                                  num_worker=128,
                                  daemonic_worker=None)
    experiment()
    # Each iteration dispatches at most num_worker configs, so the total
    # config count must fall within the last (partial) iteration's range.
    assert len(experiment.configs) <= experiment.num_iteration*experiment.num_worker
    assert len(experiment.configs) > (experiment.num_iteration - 1)*experiment.num_worker
    assert len(experiment.configs) == 500
acf1c7ec44b3423f2afa2b360cfb1172fbed9118 | 4,347 | py | Python | vtpl_api/models/crowd_dispersion_events_response.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | vtpl_api/models/crowd_dispersion_events_response.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | vtpl_api/models/crowd_dispersion_events_response.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
The version of the OpenAPI document: 1.0.4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
# NOTE(review): generated model -- fix defects in the OpenAPI Generator
# templates/spec, not by hand-editing this class.
class CrowdDispersionEventsResponse(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'items': 'list[CrowdDispersionEvent]',
        'links': 'Links',
        'meta': 'Meta'
    }
    attribute_map = {
        'items': 'items',
        'links': 'links',
        'meta': 'meta'
    }
    def __init__(self, items=None, links=None, meta=None):  # noqa: E501
        """CrowdDispersionEventsResponse - a model defined in OpenAPI"""  # noqa: E501
        self._items = None
        self._links = None
        self._meta = None
        self.discriminator = None
        # Setters are only invoked for provided values so unset attributes
        # stay None rather than triggering validation.
        if items is not None:
            self.items = items
        if links is not None:
            self.links = links
        if meta is not None:
            self.meta = meta
    @property
    def items(self):
        """Gets the items of this CrowdDispersionEventsResponse.  # noqa: E501
        :return: The items of this CrowdDispersionEventsResponse.  # noqa: E501
        :rtype: list[CrowdDispersionEvent]
        """
        return self._items
    @items.setter
    def items(self, items):
        """Sets the items of this CrowdDispersionEventsResponse.
        :param items: The items of this CrowdDispersionEventsResponse.  # noqa: E501
        :type: list[CrowdDispersionEvent]
        """
        self._items = items
    @property
    def links(self):
        """Gets the links of this CrowdDispersionEventsResponse.  # noqa: E501
        :return: The links of this CrowdDispersionEventsResponse.  # noqa: E501
        :rtype: Links
        """
        return self._links
    @links.setter
    def links(self, links):
        """Sets the links of this CrowdDispersionEventsResponse.
        :param links: The links of this CrowdDispersionEventsResponse.  # noqa: E501
        :type: Links
        """
        self._links = links
    @property
    def meta(self):
        """Gets the meta of this CrowdDispersionEventsResponse.  # noqa: E501
        :return: The meta of this CrowdDispersionEventsResponse.  # noqa: E501
        :rtype: Meta
        """
        return self._meta
    @meta.setter
    def meta(self, meta):
        """Sets the meta of this CrowdDispersionEventsResponse.
        :param meta: The meta of this CrowdDispersionEventsResponse.  # noqa: E501
        :type: Meta
        """
        self._meta = meta
    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recursively serializes nested models (anything with a to_dict),
        # including models inside lists and dict values.
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CrowdDispersionEventsResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.345455 | 86 | 0.570048 |
acf1c82fe1ef5a2e9ffc5514d181e778151f28e4 | 7,849 | py | Python | readthedocs/gold/tests/test_forms.py | nikhilgondane/RTD | f7926c45386f793f797f927363daed6a559a0a96 | [
"MIT"
] | 1 | 2021-06-08T02:09:31.000Z | 2021-06-08T02:09:31.000Z | readthedocs/gold/tests/test_forms.py | Alig1493/readthedocs.org | c37b00995c1bbc5ee51d3552ef176546373bb912 | [
"MIT"
] | 1 | 2018-12-24T04:01:31.000Z | 2018-12-24T04:01:31.000Z | readthedocs/gold/tests/test_forms.py | Alig1493/readthedocs.org | c37b00995c1bbc5ee51d3552ef176546373bb912 | [
"MIT"
] | 6 | 2019-02-13T16:08:41.000Z | 2020-03-12T14:17:14.000Z | from __future__ import absolute_import
import mock
import django_dynamic_fixture as fixture
from django.test import TestCase
from django.contrib.auth.models import User
from readthedocs.projects.models import Project
from ..models import GoldUser
from ..forms import GoldSubscriptionForm
class GoldSubscriptionFormTests(TestCase):
    def setUp(self):
        """Create fixture users/project and patch Stripe's HTTP layer so no
        real API calls are made."""
        self.owner = fixture.get(User)
        self.user = fixture.get(User)
        self.project = fixture.get(Project, users=[self.user])
        # Mocking
        self.patches = {}
        self.mocks = {}
        self.patches['requestor'] = mock.patch('stripe.api_requestor.APIRequestor')
        for patch in self.patches:
            self.mocks[patch] = self.patches[patch].start()
        self.mocks['request'] = self.mocks['requestor'].return_value
        # Default canned reply so stray Stripe calls do not explode.
        self.mock_request([({}, 'reskey')])
    def mock_request(self, resp):
        """Queue *resp* -- a list of (json_body, api_key) tuples -- as the
        successive replies returned by the mocked Stripe requestor."""
        self.mocks['request'].request = mock.Mock(side_effect=resp)
    def test_add_subscription(self):
        """Valid subscription form: a new Stripe customer is created and
        subscribed, and the GoldUser record mirrors the Stripe data."""
        subscription_list = {
            'object': 'list',
            'data': [],
            'has_more': False,
            'total_count': 1,
            'url': '/v1/customers/cus_12345/subscriptions',
        }
        customer_obj = {
            'id': 'cus_12345',
            'description': self.user.get_full_name(),
            'email': self.user.email,
            'subscriptions': subscription_list
        }
        subscription_obj = {
            'id': 'sub_12345',
            'object': 'subscription',
            'customer': 'cus_12345',
            'plan': {
                'id': 'v1-org-5',
                'object': 'plan',
                'amount': 1000,
                'currency': 'usd',
                'name': 'Test',
            }
        }
        # Canned Stripe replies: create customer, list subscriptions,
        # create subscription (in call order).
        self.mock_request([
            (customer_obj, ''),
            (subscription_list, ''),
            (subscription_obj, ''),
        ])
        # Create user and subscription
        subscription_form = GoldSubscriptionForm({
            'level': 'v1-org-5',
            'last_4_card_digits': '0000',
            'stripe_token': 'GARYBUSEY',
            'business_vat_id': 'business-vat-id',
            },
            customer=self.user,
        )
        self.assertTrue(subscription_form.is_valid())
        subscription = subscription_form.save()
        self.assertEqual(subscription.level, 'v1-org-5')
        self.assertEqual(subscription.stripe_id, 'cus_12345')
        self.assertEqual(subscription.business_vat_id, 'business-vat-id')
        self.assertIsNotNone(self.user.gold)
        self.assertEqual(self.user.gold.first().level, 'v1-org-5')
        # The form must have issued exactly these Stripe API calls.
        self.mocks['request'].request.assert_has_calls([
            mock.call('post',
                      '/v1/customers',
                      {'description': mock.ANY, 'email': mock.ANY, 'business_vat_id': 'business-vat-id'},
                      mock.ANY),
            mock.call('get',
                      '/v1/customers/cus_12345/subscriptions',
                      mock.ANY,
                      mock.ANY),
            mock.call('post',
                      '/v1/customers/cus_12345/subscriptions',
                      {'source': mock.ANY, 'plan': 'v1-org-5'},
                      mock.ANY),
        ])
    def test_add_subscription_update_user(self):
        """A valid form with an existing GoldUser updates the existing
        Stripe customer instead of creating a new one."""
        # Stripe "list object" wrapper for the customer's subscriptions
        subscription_list = {
            'object': 'list',
            'data': [],
            'has_more': False,
            'total_count': 1,
            'url': '/v1/customers/cus_12345/subscriptions',
        }
        customer_obj = {
            'id': 'cus_12345',
            'description': self.user.get_full_name(),
            'email': self.user.email,
            'subscriptions': subscription_list
        }
        subscription_obj = {
            'id': 'sub_12345',
            'object': 'subscription',
            'customer': 'cus_12345',
            'plan': {
                'id': 'v1-org-5',
                'object': 'plan',
                'amount': 1000,
                'currency': 'usd',
                'name': 'Test',
            }
        }
        # Responses consumed in order: retrieve customer, update customer,
        # list subscriptions, create subscription.
        self.mock_request([
            (customer_obj, ''),
            (customer_obj, ''),
            (subscription_list, ''),
            (subscription_obj, ''),
        ])
        # Create user and update the current gold subscription
        golduser = fixture.get(GoldUser, user=self.user, stripe_id='cus_12345')
        subscription_form = GoldSubscriptionForm(
            {'level': 'v1-org-5',
             'last_4_card_digits': '0000',
             'stripe_token': 'GARYBUSEY'},
            customer=self.user,
            instance=golduser
        )
        self.assertTrue(subscription_form.is_valid())
        subscription = subscription_form.save()
        self.assertEqual(subscription.level, 'v1-org-5')
        self.assertEqual(subscription.stripe_id, 'cus_12345')
        self.assertIsNotNone(self.user.gold)
        self.assertEqual(self.user.gold.first().level, 'v1-org-5')
        # The existing customer must be fetched and updated, not recreated
        self.mocks['request'].request.assert_has_calls([
            mock.call('get',
                      '/v1/customers/cus_12345',
                      {},
                      mock.ANY),
            mock.call('post',
                      '/v1/customers/cus_12345',
                      {'description': mock.ANY, 'email': mock.ANY},
                      mock.ANY),
            mock.call('get',
                      '/v1/customers/cus_12345/subscriptions',
                      mock.ANY,
                      mock.ANY),
            mock.call('post',
                      '/v1/customers/cus_12345/subscriptions',
                      {'source': mock.ANY, 'plan': 'v1-org-5'},
                      mock.ANY),
        ])
    def test_update_subscription_plan(self):
        """When the customer already has a subscription, saving the form
        updates that subscription in place (POST /v1/subscriptions/<id>)."""
        subscription_obj = {
            'id': 'sub_12345',
            'object': 'subscription',
            'customer': 'cus_12345',
            'plan': {
                'id': 'v1-org-5',
                'object': 'plan',
                'amount': 1000,
                'currency': 'usd',
                'name': 'Test',
            }
        }
        # Unlike the creation tests, the subscriptions list already
        # contains an existing subscription.
        subscription_list = {
            'object': 'list',
            'data': [subscription_obj],
            'has_more': False,
            'total_count': 1,
            'url': '/v1/customers/cus_12345/subscriptions',
        }
        customer_obj = {
            'id': 'cus_12345',
            'description': self.user.get_full_name(),
            'email': self.user.email,
            'subscriptions': subscription_list
        }
        self.mock_request([
            (customer_obj, ''),
            (subscription_list, ''),
            (subscription_obj, ''),
        ])
        subscription_form = GoldSubscriptionForm(
            {'level': 'v1-org-5',
             'last_4_card_digits': '0000',
             'stripe_token': 'GARYBUSEY'},
            customer=self.user
        )
        self.assertTrue(subscription_form.is_valid())
        subscription = subscription_form.save()
        self.assertEqual(subscription.level, 'v1-org-5')
        self.assertIsNotNone(self.user.gold)
        self.assertEqual(self.user.gold.first().level, 'v1-org-5')
        # The final call targets the existing subscription id directly
        self.mocks['request'].request.assert_has_calls([
            mock.call('post',
                      '/v1/customers',
                      {'description': mock.ANY, 'email': mock.ANY},
                      mock.ANY),
            mock.call('get',
                      '/v1/customers/cus_12345/subscriptions',
                      mock.ANY,
                      mock.ANY),
            mock.call('post',
                      '/v1/subscriptions/sub_12345',
                      {'source': mock.ANY, 'plan': 'v1-org-5'},
                      mock.ANY),
        ])
| 34.275109 | 105 | 0.498917 |
acf1c872a9f312a58e80af977d7d429a8776960c | 11,343 | py | Python | cdd/tests/test_gen.py | SamuelMarks/docstring2class | a70745134bcb069fffdf1d9210c17129925c8fd5 | [
"Apache-2.0",
"MIT"
] | 4 | 2021-05-28T14:50:58.000Z | 2022-03-01T23:22:14.000Z | cdd/tests/test_gen.py | SamuelMarks/docstring2class | a70745134bcb069fffdf1d9210c17129925c8fd5 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-02-19T04:13:38.000Z | 2021-02-19T04:13:38.000Z | cdd/tests/test_gen.py | SamuelMarks/docstring2class | a70745134bcb069fffdf1d9210c17129925c8fd5 | [
"Apache-2.0",
"MIT"
] | null | null | null | """ Tests for gen """
import ast
import os
import sys
from ast import (
Assign,
ClassDef,
Dict,
Expr,
Import,
ImportFrom,
List,
Load,
Module,
Name,
Store,
alias,
)
from copy import deepcopy
from io import StringIO
from os.path import extsep
from shutil import rmtree
from tempfile import mkdtemp
from unittest import TestCase
from unittest.mock import patch
from cdd import emit, parse
from cdd.ast_utils import maybe_type_comment, set_value
from cdd.gen import gen
from cdd.pure_utils import rpartial
from cdd.source_transformer import to_code
from cdd.tests.mocks.methods import function_adder_ast
from cdd.tests.utils_for_tests import run_ast_test
# Turn the mocked module-level function AST into a staticmethod suitable
# for embedding in a class: replace its docstring and add the decorator.
method_adder_ast = deepcopy(function_adder_ast)
method_adder_ast.body[0] = Expr(set_value(" C class (mocked!) "))
method_adder_ast.decorator_list = [Name("staticmethod", Load())]
del function_adder_ast  # prevent accidental use of the unmodified mock
def populate_files(tempdir, input_module_str=None):
    """
    Write a mock input module into ``tempdir`` — a class ``Foo`` plus an
    ``input_map`` dict and ``__all__`` — and build the class AST that
    ``gen`` is expected to emit from it.

    :param tempdir: Temporary directory
    :type tempdir: ```str```

    :param input_module_str: Input string to write to the input_filename. If None, uses preset mock module.
    :type input_module_str: ```Optional[str]```

    :return: input filename, input module AST, input class AST, expected output class AST
    :rtype: ```Tuple[str, Module, ClassDef, ClassDef]```
    """
    input_filename = os.path.join(tempdir, "input{extsep}py".format(extsep=extsep))
    input_class_name = "Foo"
    # Build class `Foo` (without __call__) from the mocked method
    input_class_ast = emit.class_(
        parse.function(deepcopy(method_adder_ast)),
        emit_call=False,
        class_name=input_class_name,
    )
    # Module body: the class, `input_map = {"Foo": Foo}`, and `__all__`
    input_module_ast = Module(
        body=[
            input_class_ast,
            Assign(
                targets=[Name("input_map", Store())],
                value=Dict(
                    keys=[set_value(input_class_name)],
                    values=[Name(input_class_name, Load())],
                    expr=None,
                ),
                expr=None,
                lineno=None,
                **maybe_type_comment
            ),
            Assign(
                targets=[Name("__all__", Store())],
                value=List(
                    ctx=Load(),
                    elts=[set_value(input_class_name), set_value("input_map")],
                    expr=None,
                ),
                expr=None,
                lineno=None,
                **maybe_type_comment
            ),
        ],
        type_ignores=[],
        stmt=None,
    )
    input_module_str = input_module_str or to_code(input_module_ast)
    # expected_output_class_str = (
    #     "class FooConfig(object):\n"
    #     '    """\n'
    #     "    The amazing Foo\n\n"
    #     "    :cvar a: An a. Defaults to 5\n"
    #     '    :cvar b: A b. Defaults to 16"""\n'
    #     "    a = 5\n"
    #     "    b = 16\n\n"
    #     "    def __call__(self):\n"
    #     "        self.a = 5\n"
    #     "        self.b = 16\n"
    # )
    # The class `gen` should emit: same method, but with __call__
    expected_class_ast = emit.class_(
        parse.function(deepcopy(method_adder_ast)),
        emit_call=True,
        class_name="{input_class_name}Config".format(input_class_name=input_class_name),
    )
    with open(input_filename, "wt") as f:
        f.write(input_module_str)
    return input_filename, input_module_ast, input_class_ast, expected_class_ast
# AST for `from .input import input_map, Foo` (level=1 → package-relative
# import) — the line written into the temporary package's __init__.py.
_import_star_from_input_ast = ImportFrom(
    module="input",
    names=[
        alias(
            name="input_map",
            asname=None,
            identifier=None,
            identifier_name=None,
        ),
        alias(
            name="Foo",
            asname=None,
            identifier=None,
            identifier_name=None,
        ),
    ],
    level=1,
    identifier=None,
)
_import_star_from_input_str = to_code(_import_star_from_input_ast)
# AST/source for `import gen_test_module`, used as a `prepend` in one test
_import_gen_test_module_ast = Import(
    names=[
        alias(
            name="gen_test_module",
            asname=None,
            identifier=None,
            identifier_name=None,
        )
    ],
    alias=None,
)
_import_gen_test_module_str = "{}\n".format(
    to_code(_import_gen_test_module_ast).rstrip("\n")
)
class TestGen(TestCase):
    """Test class for gen.py"""

    # Snapshot of sys.path, restored in tearDownClass
    sys_path = deepcopy(sys.path)
    # Temporary directory holding the generated test package
    tempdir = None

    @classmethod
    def setUpClass(cls) -> None:
        """Construct temporary module for use by tests"""
        cls.tempdir = mkdtemp()
        temp_module_dir = os.path.join(cls.tempdir, "gen_test_module")
        os.mkdir(temp_module_dir)
        (
            cls.input_filename,
            cls.input_module_ast,
            cls.input_class_ast,
            cls.expected_class_ast,
        ) = populate_files(temp_module_dir)
        # Re-export the mock module's names from the package __init__
        with open(
            os.path.join(temp_module_dir, "__init__{extsep}py".format(extsep=extsep)),
            "w",
        ) as f:
            f.write(_import_star_from_input_str)
        # Make `gen_test_module` importable by the code under test
        sys.path.append(cls.tempdir)

    @classmethod
    def tearDownClass(cls) -> None:
        """Drop the new module from the path and delete the temporary directory"""
        sys.path = cls.sys_path
        # input("removing: {tempdir!r}".format(tempdir=cls.tempdir))
        rmtree(cls.tempdir)

    def test_gen(self) -> None:
        """`gen` emits the expected Config class for the mock input_map"""
        output_filename = os.path.join(
            self.tempdir, "test_gen_output{extsep}py".format(extsep=extsep)
        )
        # Silence gen's console output during the run
        with patch("sys.stdout", new_callable=StringIO), patch(
            "sys.stderr", new_callable=StringIO
        ):
            self.assertIsNone(
                gen(
                    name_tpl="{name}Config",
                    input_mapping="gen_test_module.input_map",
                    emit_name="class",
                    parse_name="infer",
                    output_filename=output_filename,
                    prepend="PREPENDED\n",
                    emit_call=True,
                    emit_default_doc=False,
                )
            )
        with open(output_filename, "rt") as f:
            gen_module_str = f.read()
        gen_module_ast = ast.parse(gen_module_str)
        # Compare only the first ClassDef in the generated module
        run_ast_test(
            self,
            gen_ast=next(filter(rpartial(isinstance, ClassDef), gen_module_ast.body)),
            gold=self.expected_class_ast,
        )

    def test_gen_with_imports_from_file(self) -> None:
        """`imports_from_file` injects that module's imports into the output"""
        output_filename = os.path.join(
            self.tempdir,
            "test_gen_with_imports_from_file_output{extsep}py".format(extsep=extsep),
        )
        with patch("sys.stdout", new_callable=StringIO), patch(
            "sys.stderr", new_callable=StringIO
        ):
            self.assertIsNone(
                gen(
                    name_tpl="{name}Config",
                    input_mapping="gen_test_module.input_map",
                    imports_from_file="gen_test_module",
                    emit_name="class",
                    parse_name="infer",
                    output_filename=output_filename,
                    emit_call=True,
                    emit_default_doc=False,
                )
            )
        with open(output_filename, "rt") as f:
            gen_ast = ast.parse(f.read())
        # Expected: the relative import, the Config class, and __all__
        run_ast_test(
            self,
            gen_ast=gen_ast,
            gold=Module(
                body=[
                    _import_star_from_input_ast,
                    self.expected_class_ast,
                    Assign(
                        targets=[Name("__all__", Store())],
                        value=List(
                            ctx=Load(),
                            elts=[set_value("FooConfig")],
                            expr=None,
                        ),
                        expr=None,
                        lineno=None,
                        **maybe_type_comment
                    ),
                ],
                type_ignores=[],
                stmt=None,
            ),
        )

    def test_gen_with_imports_from_file_and_prepended_import(self) -> None:
        """`prepend`ed import appears before the `imports_from_file` imports"""
        output_filename = os.path.join(
            self.tempdir,
            "test_gen_with_imports_from_file_and_prepended_import_output{extsep}py".format(
                extsep=extsep
            ),
        )
        with patch("sys.stdout", new_callable=StringIO), patch(
            "sys.stderr", new_callable=StringIO
        ):
            self.assertIsNone(
                gen(
                    name_tpl="{name}Config",
                    input_mapping="gen_test_module.input_map",
                    imports_from_file="gen_test_module",
                    emit_name="class",
                    parse_name="infer",
                    prepend=_import_gen_test_module_str,
                    output_filename=output_filename,
                    emit_call=True,
                    emit_default_doc=False,
                )
            )
        with open(output_filename, "rt") as f:
            gen_ast = ast.parse(f.read())
        # Expected: prepended import first, then relative import, class, __all__
        gold = Module(
            body=[
                _import_gen_test_module_ast,
                _import_star_from_input_ast,
                self.expected_class_ast,
                # self.input_module_ast.body[1],
                Assign(
                    targets=[Name("__all__", Store())],
                    value=List(
                        ctx=Load(),
                        elts=[set_value("FooConfig")],
                        expr=None,
                    ),
                    expr=None,
                    lineno=None,
                    **maybe_type_comment
                ),
            ],
            type_ignores=[],
            stmt=None,
        )
        run_ast_test(
            self,
            gen_ast=gen_ast,
            gold=gold,
        )
# unittest_main()
# mock_class = ClassDef(
# name="ClassyB",
# bases=tuple(),
# decorator_list=[],
# body=[FunctionDef(
# name="add_6_5",
# args=arguments(
# posonlyargs=[],
# args=list(map(set_arg, ("a", "b"))),
# kwonlyargs=[],
# kw_defaults=[],
# vararg=None,
# kwarg=None,
# defaults=list(map(set_value, (6, 5))),
# arg=None,
# ),
# body=[
# Expr(
# set_value(
# "\n :param a: first param\n "
# ":type a: ```int```\n\n "
# ":param b: second param\n "
# ":type b: ```int```\n\n "
# ":return: Aggregated summation of `a` and `b`.\n "
# ":rtype: ```int```\n ",
# )
# ),
# Return(
# value=Call(
# func=Attribute(Name("operator", Load()), "add", Load()),
# args=[Name("a", Load()), Name("b", Load())],
# keywords=[],
# expr=None,
# expr_func=None,
# ),
# expr=None,
# ),
# ],
# decorator_list=[],
# arguments_args=None,
# identifier_name=None,
# stmt=None,
# )],
# keywords=tuple(),
# identifier_name=None,
# expr=None,
# )
# print("===============================================\n",
# to_code(mock_class),
# "===============================================",)
| 30.328877 | 107 | 0.503923 |
acf1c883a538c320b666e04d02b994b09179543e | 173 | py | Python | tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_MovingMedian_Seasonal_MonthOfYear_NoAR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_MovingMedian_Seasonal_MonthOfYear_NoAR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_MovingMedian_Seasonal_MonthOfYear_NoAR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['MovingMedian'] , ['Seasonal_MonthOfYear'] , ['NoAR'] ); | 43.25 | 95 | 0.768786 |
acf1c94162d9767e5d385a7a5d767d7f130c485b | 1,018 | py | Python | oops/cursor_functions.py | mtasa-typescript/mtasa-wiki-dump | edea1746850fb6c99d6155d1d7891e2cceb33a5c | [
"MIT"
] | null | null | null | oops/cursor_functions.py | mtasa-typescript/mtasa-wiki-dump | edea1746850fb6c99d6155d1d7891e2cceb33a5c | [
"MIT"
] | 1 | 2021-02-24T21:50:18.000Z | 2021-02-24T21:50:18.000Z | oops/cursor_functions.py | mtasa-typescript/mtasa-wiki-dump | edea1746850fb6c99d6155d1d7891e2cceb33a5c | [
"MIT"
] | null | null | null | # Autogenerated file. ANY CHANGES WILL BE OVERWRITTEN
from to_python.core.types import FunctionType, \
FunctionArgument, \
FunctionArgumentValues, \
FunctionReturnTypes, \
FunctionSignature, \
FunctionDoc, \
FunctionOOP, \
FunctionOOPField, \
CompoundOOPData, \
FunctionData, \
CompoundFunctionData
# Placeholder dump: six CompoundOOPData entries, each pairing empty
# server-side and client-side lists (no cursor OOP bindings generated).
# NOTE: this file is autogenerated (see header) — edits will be overwritten.
DUMP_PARTIAL = [
    CompoundOOPData(
        server=[
        ],
        client=[
        ],
    ),
    CompoundOOPData(
        server=[
        ],
        client=[
        ],
    ),
    CompoundOOPData(
        server=[
        ],
        client=[
        ],
    ),
    CompoundOOPData(
        server=[
        ],
        client=[
        ],
    ),
    CompoundOOPData(
        server=[
        ],
        client=[
        ],
    ),
    CompoundOOPData(
        server=[
        ],
        client=[
        ],
    )
]
| 15.90625 | 53 | 0.409627 |
acf1c9e4c794a4dd6785333797b6b383d4e6c632 | 23,891 | py | Python | experiments/lattice_model.py | crocha700/pylattice | 54c13735fecee121ffea8048f0f37d9b196f8e54 | [
"MIT"
] | null | null | null | experiments/lattice_model.py | crocha700/pylattice | 54c13735fecee121ffea8048f0f37d9b196f8e54 | [
"MIT"
] | null | null | null | experiments/lattice_model.py | crocha700/pylattice | 54c13735fecee121ffea8048f0f37d9b196f8e54 | [
"MIT"
] | null | null | null | from __future__ import division
import numpy as np
from numpy import pi, cos, sin, exp
class LatticeModel():
    """A two-dimensional lattice model of tracer advection-diffusion with
    a large-scale sinusoidal source.

    The tracer field ``th`` is advected on the lattice by a synthetic
    random velocity field (a sum of sinusoidal modes with phases redrawn
    every step), forced by a ``cos(y)`` source during the x-substeps, and
    diffused exactly in spectral space.

    Callers must assign an initial tracer field ``self.th`` — an
    ``(ny, nx)`` array — before stepping the model.
    """

    def __init__(self,
                 nx=128,
                 ny=None,
                 Lx=2*pi,
                 Ly=None,
                 dt=0.5,
                 tmax=1000,
                 tavestart=500,
                 kappa=1.e-5,
                 urms=1.,
                 power=3.5,
                 nmin=5.,
                 nmax=None,
                 source=True,
                 diagnostics_list='all'):
        """Build the grid, velocity-mode amplitudes, and diagnostics.

        :param nx, ny: lattice points in x and y (``ny`` defaults to ``nx``)
        :param Lx, Ly: domain size (``Ly`` defaults to ``Lx``)
        :param dt: time step
        :param tmax: total integration time
        :param tavestart: time at which diagnostic averaging starts
        :param kappa: tracer diffusivity
        :param urms: rms velocity of the synthetic flow
        :param power: spectral slope of the velocity-mode amplitudes
        :param nmin, nmax: lowest/highest velocity mode numbers
            (``nmax`` defaults to ``nx``)
        :param source: kept for API compatibility; the cos(y) source is
            applied unconditionally in the x-substeps
        :param diagnostics_list: 'all', 'none', or a list of names
        """
        if ny is None:
            ny = nx
        if Ly is None:
            Ly = Lx
        self.nx = nx
        self.ny = ny
        self.Lx = Lx
        self.Ly = Ly
        self.dt = dt
        self.dt_2 = dt/2.
        self.dt_4 = dt/4.
        self.tmax = tmax
        self.tavestart = tavestart
        self.t = 0.    # model time
        self.tc = 0    # step counter
        self.kappa = kappa
        self.nmin = nmin
        self.nmax = nmax if nmax else nx
        self.power = power
        self.urms = urms
        self.source = source
        self.diagnostics_list = diagnostics_list
        self._initialize_grid()
        self._init_velocity()
        self._initialize_diagnostics()
        self.even = True
        self.odd = False

    def _initialize_grid(self):
        """Initialize the physical lattice and the spectral grid."""
        # physical space grids
        self.dx, self.dy = self.Lx/self.nx, self.Ly/self.ny
        self.x = np.linspace(0., self.Lx-self.dx, self.nx)
        self.y = np.linspace(0., self.Ly-self.dy, self.ny)
        self.xi, self.yi = np.meshgrid(self.x, self.y)
        self.ix, self.iy = np.meshgrid(range(self.nx),
                                       range(self.ny))
        # wavenumber grids (real FFT in x: nk = nx//2 + 1 modes)
        self.dk = 2.*pi/self.Lx
        self.dl = 2.*pi/self.Ly
        self.nl = self.ny
        # fix: integer division; true division produced a float mode count
        self.nk = self.nx//2 + 1
        self.ll = self.dl*np.append(np.arange(0., self.nx//2),
                                    np.arange(-self.nx//2, 0.))
        self.kk = self.dk*np.arange(0., self.nk)
        self.k, self.l = np.meshgrid(self.kk, self.ll)
        self.ik = 1j*self.k
        self.il = 1j*self.l
        # constants for spectral normalizations
        self.M = self.nx*self.ny
        self.M2 = self.M**2
        self.wv2 = self.k**2 + self.l**2
        self.wv = np.sqrt(self.wv2)

    def _velocity(self):
        """Draw a fresh random-phase sinusoidal velocity field (u, v).

        u varies only with y and v only with x, so they are stored as
        broadcastable column/row vectors.
        """
        # fix: use the integer mode count; nmax - nmin may be a float
        # (default nmin=5.), which modern NumPy rejects as a dimension
        phase = 2*pi*np.random.rand(2, self.n.size)
        Yn = self.n*self.y[..., np.newaxis] + phase[0][np.newaxis, ...]
        Xn = self.n*self.x[..., np.newaxis] + phase[1][np.newaxis, ...]
        u = (self.An*cos(Yn*self.dl)).sum(axis=1)
        v = (self.An*cos(Xn*self.dk)).sum(axis=1)
        self.u = u[..., np.newaxis]
        self.v = v[np.newaxis, ...]

    def _init_velocity(self):
        """Set mode amplitudes An ~ n^(-power/2), normalized so the flow
        has rms speed ``urms``, and estimate the Batchelor scale."""
        self.n = np.arange(self.nmin, self.nmax)[np.newaxis, ...]
        An = (self.n/self.nmin)**(-self.power/2.)
        N = 2*self.urms/(np.sqrt(((self.n/self.nmin)**-self.power).sum()))
        self.An = N*An
        # Batchelor scale lb = sqrt(kappa/S), with S an rms strain estimate
        S = np.sqrt(((self.An*self.n*self.dk)**2).sum()/2.)
        self.lb = np.sqrt(self.kappa/S)
        # NOTE(review): resolution check deliberately disabled upstream:
        # assert self.lb > self.dx, "**Warning: Batchelor scale not resolved."

    def _advect(self, direction='x', n=1):
        """Advect th on the lattice by shifting rows/columns.

        Displacements are rounded to an integer number of cells and
        wrapped periodically.  ``n`` is the number of substeps (n=2
        advects for half of dt/2, etc.).
        """
        if direction == 'x':
            dindx = -np.round(self.u*self.dt_2/n/self.dx).astype(int)
            ix_new = self.ix + dindx
            ix_new[ix_new < 0] = ix_new[ix_new < 0] + self.nx
            ix_new[ix_new > self.nx-1] = ix_new[ix_new > self.nx-1] - self.nx
            self.th = self.th[self.iy, ix_new]
        elif direction == 'y':
            dindy = -np.round(self.v*self.dt_2/n/self.dy).astype(int)
            iy_new = self.iy + dindy
            iy_new[iy_new < 0] = iy_new[iy_new < 0] + self.ny
            iy_new[iy_new > self.ny-1] = iy_new[iy_new > self.ny-1] - self.ny
            self.th = self.th[iy_new, self.ix]

    def _diffuse(self, n=1):
        """Apply exact spectral diffusion over a time dt/n."""
        self.thh = np.fft.rfft2(self.th)
        self.thh = self.thh*exp(-(self.dt/n)*self.kappa*self.wv2)
        self.th = np.fft.irfft2(self.thh)

    def _source(self, direction='x', n=1):
        """Apply the large-scale cos(y) source for a time dt/n.

        Only the x-substeps carry the forcing; the y-branch is a
        deliberate no-op (the source was folded into the x split).
        """
        if direction == 'x':
            self.th += (self.dt/n)*np.cos(self.dl*self.y)[..., np.newaxis]
        elif direction == 'y':
            pass

    def _step_forward(self):
        """Advance one time step: half-steps of advection + source in x
        and y, interleaved with quarter-steps of diffusion."""
        self._velocity()
        # x-direction substeps
        self._advect(direction='x', n=2)
        self._source(direction='x', n=2)
        self._diffuse(n=4)
        self._advect(direction='x', n=2)
        self._source(direction='x', n=2)
        self._diffuse(n=4)
        # y-direction substeps
        self._advect(direction='y', n=2)
        self._source(direction='y', n=2)
        self._diffuse(n=4)
        self._advect(direction='y', n=2)
        self._source(direction='y', n=2)
        self._diffuse(n=4)
        self._calc_diagnostics()
        self.tc += 1
        self.t += self.dt

    def run_with_snapshots(self, tsnapstart=0., tsnap=1):
        """Run the model forward, yielding to user code every ``tsnap``
        time units once ``tsnapstart`` is reached."""
        tsnapint = np.ceil(tsnap/self.dt)
        while self.t < self.tmax:
            self._step_forward()
            if self.t >= tsnapstart and (self.tc % tsnapint) == 0:
                yield self.t
        return

    def run(self):
        """Run the model forward without stopping until the end."""
        while self.t < self.tmax:
            self._step_forward()

    def _calc_diagnostics(self):
        # accumulate diagnostics once past the averaging start time
        if (self.t >= self.dt) and (self.t >= self.tavestart):
            self._increment_diagnostics()

    # diagnostic machinery follows

    def _initialize_diagnostics(self):
        """Create the diagnostics table and activate the requested subset."""
        self.diagnostics = dict()
        self._setup_diagnostics()
        if self.diagnostics_list == 'all':
            pass  # by default, all diagnostics are active
        elif self.diagnostics_list == 'none':
            # fix: was self.set_active_diagnostics (nonexistent name)
            self._set_active_diagnostics([])
        else:
            self._set_active_diagnostics(self.diagnostics_list)

    def _setup_diagnostics(self):
        """Register all available diagnostics."""
        self.add_diagnostic('var',
                            description='Tracer variance',
                            function=(lambda self: self.spec_var(self.thh))
                            )
        self.add_diagnostic('thbar',
                            description='x-averaged tracer',
                            function=(lambda self: self.thm)
                            )
        self.add_diagnostic('grad2_th_bar',
                            description='x-averaged gradient square of th',
                            function=(lambda self: self.gradth2m)
                            )
        self.add_diagnostic('vth2m',
                            description='x-averaged triple advective term v th2',
                            function=(lambda self: self.vth2m)
                            )
        self.add_diagnostic('th2m',
                            description='x-averaged th2',
                            function=(lambda self: self.th2m)
                            )
        self.add_diagnostic('vthm',
                            description='x-averaged, y-direction tracer flux',
                            function=(lambda self: (self.v*self.tha).mean(axis=1))
                            )
        self.add_diagnostic('fluxy',
                            description='x-averaged, y-direction tracer flux',
                            function=(lambda self: (self.v*self.th).mean(axis=1))
                            )
        self.add_diagnostic('spec',
                            description='spec of anomalies about x-averaged flow',
                            function=(lambda self: np.abs(np.fft.rfft2(
                                self.th-self.th.mean(axis=1)[..., np.newaxis]))**2/self.M2)
                            )

    def _set_active_diagnostics(self, diagnostics_list):
        """Activate exactly the diagnostics named in ``diagnostics_list``."""
        for d in self.diagnostics:
            # fix: was `==` (a no-op comparison) instead of assignment
            self.diagnostics[d]['active'] = (d in diagnostics_list)

    def add_diagnostic(self, diag_name, description=None, units=None, function=None):
        """Register a diagnostic; ``function(self)`` returns one sample."""
        # make sure the function is callable and the name is valid
        assert hasattr(function, '__call__')
        assert isinstance(diag_name, str)
        # by default, diagnostic is active
        self.diagnostics[diag_name] = {
            'description': description,
            'units': units,
            'active': True,
            'count': 0,
            'function': function, }

    def describe_diagnostics(self):
        """Print a human-readable summary of the available diagnostics."""
        print('NAME | DESCRIPTION')
        print(80*'-')
        for k in sorted(self.diagnostics):
            d = self.diagnostics[k]
            # fix: was `print(...).format(...)`, which calls .format on the
            # None returned by print() and raises AttributeError on Python 3
            print('{:<10} | {:<54}'.format(k, d['description']))

    def _increment_diagnostics(self):
        """Sample every active diagnostic and add it to its running sum."""
        self._calc_derived_fields()
        for dname in self.diagnostics:
            if self.diagnostics[dname]['active']:
                res = self.diagnostics[dname]['function'](self)
                if self.diagnostics[dname]['count'] == 0:
                    self.diagnostics[dname]['value'] = res
                else:
                    self.diagnostics[dname]['value'] += res
                self.diagnostics[dname]['count'] += 1

    def _calc_derived_fields(self):
        """Calculate derived fields necessary for diagnostics."""
        self.thh = np.fft.rfft2(self.th)
        # x-averaged tracer field
        self.thm = self.th.mean(axis=1)
        # anomaly about the x-averaged field
        self.tha = self.th - self.thm[..., np.newaxis]
        self.thah = np.fft.rfft2(self.tha)
        # x-averaged squared gradient of the anomaly
        gradx = np.fft.irfft2(1j*self.k*self.thah)
        grady = np.fft.irfft2(1j*self.l*self.thah)
        self.gradth2m = (gradx**2 + grady**2).mean(axis=1)
        # triple (advective transport) term
        self.vth2m = (self.v*(self.tha**2)).mean(axis=1)
        # anomaly variance (diffusive transport term)
        self.th2m = (self.tha**2).mean(axis=1)

    def get_diagnostic(self, dname):
        """Return the time-averaged value of diagnostic ``dname``."""
        return (self.diagnostics[dname]['value'] /
                self.diagnostics[dname]['count'])

    def spec_var(self, ph):
        """Compute the variance of the field whose rfft2 coefficients are ph."""
        var_dens = 2. * np.abs(ph)**2 / self.M**2
        # only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
        var_dens[..., 0] = var_dens[..., 0]/2.
        var_dens[..., -1] = var_dens[..., -1]/2.
        return var_dens.sum()

    def block_average(self, A, nblocks=256):
        """Block-average the 1-d array ``A`` onto ``nblocks`` blocks."""
        # fix: integer block length; true division gave a float, which
        # breaks slicing under Python 3
        nave = self.nx//nblocks
        Ab = np.empty(nblocks)
        for i in range(nblocks):
            Ab[i] = A[i*nave:(i+1)*nave].mean()
        return Ab
class LatticeModelGy():
    """A two-dimensional lattice model of advection-diffusion with a
    uniform background tracer gradient ``G`` in y.

    Identical machinery to ``LatticeModel`` except that the forcing is a
    mean-gradient term ``G*v`` applied during the y-advection substeps
    (there is no cos(y) source), and diagnostics are sampled at a
    configurable ``cadence``.

    Callers must assign an initial tracer field ``self.th`` — an
    ``(ny, nx)`` array — before stepping the model.
    """

    def __init__(self,
                 nx=128,
                 ny=None,
                 Lx=2*pi,
                 Ly=None,
                 dt=0.5,
                 tmax=1000,
                 tavestart=500,
                 kappa=1.e-5,
                 urms=1.,
                 power=3.5,
                 nmin=5.,
                 nmax=None,
                 G=1.,
                 diagnostics_list='all',
                 cadence=5):
        """Build the grid, velocity-mode amplitudes, and diagnostics.

        :param G: magnitude of the imposed mean tracer gradient in y
        :param cadence: step interval used by the diagnostic sampler
        (remaining parameters as in ``LatticeModel``)
        """
        if ny is None:
            ny = nx
        if Ly is None:
            Ly = Lx
        self.nx = nx
        self.ny = ny
        self.Lx = Lx
        self.Ly = Ly
        self.dt = dt
        self.dt_2 = dt/2.
        self.dt_4 = dt/4.
        self.tmax = tmax
        self.tavestart = tavestart
        self.t = 0.    # model time
        self.tc = 0    # step counter
        self.G = G
        self.kappa = kappa
        self.nmin = nmin
        self.nmax = nmax if nmax else nx
        self.power = power
        self.urms = urms
        self.diagnostics_list = diagnostics_list
        self.cadence = cadence
        self._initialize_grid()
        self._init_velocity()
        self._initialize_diagnostics()
        self.even = True
        self.odd = False

    def _initialize_grid(self):
        """Initialize the physical lattice and the spectral grid."""
        # physical space grids
        self.dx, self.dy = self.Lx/self.nx, self.Ly/self.ny
        self.x = np.linspace(0., self.Lx-self.dx, self.nx)
        self.y = np.linspace(0., self.Ly-self.dy, self.ny)
        self.xi, self.yi = np.meshgrid(self.x, self.y)
        self.ix, self.iy = np.meshgrid(range(self.nx),
                                       range(self.ny))
        # wavenumber grids (real FFT in x: nk = nx//2 + 1 modes)
        self.dk = 2.*pi/self.Lx
        self.dl = 2.*pi/self.Ly
        self.nl = self.ny
        # fix: integer division; true division produced a float mode count
        self.nk = self.nx//2 + 1
        self.ll = self.dl*np.append(np.arange(0., self.nx//2),
                                    np.arange(-self.nx//2, 0.))
        self.kk = self.dk*np.arange(0., self.nk)
        self.k, self.l = np.meshgrid(self.kk, self.ll)
        self.ik = 1j*self.k
        self.il = 1j*self.l
        # constants for spectral normalizations
        self.M = self.nx*self.ny
        self.M2 = self.M**2
        self.wv2 = self.k**2 + self.l**2
        self.wv = np.sqrt(self.wv2)

    def _velocity(self):
        """Draw a fresh random-phase sinusoidal velocity field (u, v)."""
        # fix: use the integer mode count; nmax - nmin may be a float
        # (default nmin=5.), which modern NumPy rejects as a dimension
        phase = 2*pi*np.random.rand(2, self.n.size)
        Yn = self.n*self.y[..., np.newaxis] + phase[0][np.newaxis, ...]
        Xn = self.n*self.x[..., np.newaxis] + phase[1][np.newaxis, ...]
        u = (self.An*cos(Yn*self.dl)).sum(axis=1)
        v = (self.An*cos(Xn*self.dk)).sum(axis=1)
        self.u = u[..., np.newaxis]
        self.v = v[np.newaxis, ...]

    def _init_velocity(self):
        """Set mode amplitudes An ~ n^(-power/2), normalized so the flow
        has rms speed ``urms``, and estimate the Batchelor scale."""
        self.n = np.arange(self.nmin, self.nmax)[np.newaxis, ...]
        An = (self.n/self.nmin)**(-self.power/2.)
        N = 2*self.urms/(np.sqrt(((self.n/self.nmin)**-self.power).sum()))
        self.An = N*An
        # Batchelor scale lb = sqrt(kappa/S), with S an rms strain estimate
        S = np.sqrt(((self.An*self.n*self.dk)**2).sum()/2.)
        self.lb = np.sqrt(self.kappa/S)
        # NOTE(review): resolution check deliberately disabled upstream:
        # assert self.lb > self.dx, "**Warning: Batchelor scale not resolved."

    def _advect(self, direction='x', n=1):
        """Advect th on the lattice by shifting rows/columns.

        Displacements are rounded to an integer number of cells and
        wrapped periodically.  The y-substeps also add the mean-gradient
        forcing ``G*v*dt/(2n)``.
        """
        if direction == 'x':
            dindx = -np.round(self.u*self.dt_2/n/self.dx).astype(int)
            ix_new = self.ix + dindx
            ix_new[ix_new < 0] = ix_new[ix_new < 0] + self.nx
            ix_new[ix_new > self.nx-1] = ix_new[ix_new > self.nx-1] - self.nx
            self.th = self.th[self.iy, ix_new]
        elif direction == 'y':
            dindy = -np.round(self.v*self.dt_2/n/self.dy).astype(int)
            iy_new = self.iy + dindy
            iy_new[iy_new < 0] = iy_new[iy_new < 0] + self.ny
            iy_new[iy_new > self.ny-1] = iy_new[iy_new > self.ny-1] - self.ny
            self.th = self.th[iy_new, self.ix] + self.G*self.v*self.dt_2/n

    def _diffuse(self, n=1):
        """Apply exact spectral diffusion over a time dt/n."""
        self.thh = np.fft.rfft2(self.th)
        self.thh = self.thh*exp(-(self.dt/n)*self.kappa*self.wv2)
        self.th = np.fft.irfft2(self.thh)

    def _step_forward(self):
        """Advance one time step, sampling diagnostics after each
        advection substep (as in the original scheme)."""
        self._velocity()
        # x-direction substeps
        self._advect(direction='x', n=2)
        self._calc_diagnostics()
        self._diffuse(n=4)
        self._advect(direction='x', n=2)
        self._calc_diagnostics()
        self._diffuse(n=4)
        # y-direction substeps
        self._advect(direction='y', n=2)
        self._calc_diagnostics()
        self._diffuse(n=4)
        self._advect(direction='y', n=2)
        self._calc_diagnostics()
        self._diffuse(n=4)
        self.tc += 1
        self.t += self.dt

    def run_with_snapshots(self, tsnapstart=0., tsnap=1):
        """Run the model forward, yielding to user code every ``tsnap``
        time units once ``tsnapstart`` is reached."""
        tsnapint = np.ceil(tsnap/self.dt)
        while self.t < self.tmax:
            self._step_forward()
            if self.t >= tsnapstart and (self.tc % tsnapint) == 0:
                yield self.t
        return

    def run(self):
        """Run the model forward without stopping until the end."""
        while self.t < self.tmax:
            self._step_forward()

    def _calc_diagnostics(self):
        # NOTE(review): `self.tc % self.cadence` is truthy when tc is NOT a
        # multiple of cadence; if "sample every cadence steps" was intended
        # this should be `== 0`.  Behavior preserved as-is.
        if (self.t >= self.dt) and (self.t >= self.tavestart) and (self.tc % self.cadence):
            self._increment_diagnostics()

    # diagnostic machinery follows

    def _initialize_diagnostics(self):
        """Create the diagnostics table and activate the requested subset."""
        self.diagnostics = dict()
        self._setup_diagnostics()
        if self.diagnostics_list == 'all':
            pass  # by default, all diagnostics are active
        elif self.diagnostics_list == 'none':
            # fix: was self.set_active_diagnostics (nonexistent name)
            self._set_active_diagnostics([])
        else:
            self._set_active_diagnostics(self.diagnostics_list)

    def _setup_diagnostics(self):
        """Register all available diagnostics."""
        self.add_diagnostic('var',
                            description='Tracer variance',
                            function=(lambda self: self.spec_var(self.thh))
                            )
        self.add_diagnostic('thbar',
                            description='x-averaged tracer',
                            function=(lambda self: self.thm)
                            )
        self.add_diagnostic('grad2_th_bar',
                            description='x-averaged gradient square of th',
                            function=(lambda self: self.gradth2m)
                            )
        self.add_diagnostic('vth2m',
                            description='x-averaged triple advective term v th2',
                            function=(lambda self: self.vth2m)
                            )
        self.add_diagnostic('th2m',
                            description='x-averaged th2',
                            function=(lambda self: self.th2m)
                            )
        self.add_diagnostic('vthm',
                            description='x-averaged, y-direction tracer flux',
                            function=(lambda self: (self.v*self.tha).mean(axis=1))
                            )
        self.add_diagnostic('fluxy',
                            description='x-averaged, y-direction tracer flux',
                            function=(lambda self: (self.v*self.th).mean(axis=1))
                            )
        self.add_diagnostic('spec',
                            description='spec of anomalies about x-averaged flow',
                            function=(lambda self: np.abs(np.fft.rfft2(
                                self.th-self.th.mean(axis=1)[..., np.newaxis]))**2/self.M2)
                            )

    def _set_active_diagnostics(self, diagnostics_list):
        """Activate exactly the diagnostics named in ``diagnostics_list``."""
        for d in self.diagnostics:
            # fix: was `==` (a no-op comparison) instead of assignment
            self.diagnostics[d]['active'] = (d in diagnostics_list)

    def add_diagnostic(self, diag_name, description=None, units=None, function=None):
        """Register a diagnostic; ``function(self)`` returns one sample."""
        # make sure the function is callable and the name is valid
        assert hasattr(function, '__call__')
        assert isinstance(diag_name, str)
        # by default, diagnostic is active
        self.diagnostics[diag_name] = {
            'description': description,
            'units': units,
            'active': True,
            'count': 0,
            'function': function, }

    def describe_diagnostics(self):
        """Print a human-readable summary of the available diagnostics."""
        print('NAME | DESCRIPTION')
        print(80*'-')
        for k in sorted(self.diagnostics):
            d = self.diagnostics[k]
            # fix: was `print(...).format(...)`, which calls .format on the
            # None returned by print() and raises AttributeError on Python 3
            print('{:<10} | {:<54}'.format(k, d['description']))

    def _increment_diagnostics(self):
        """Sample every active diagnostic and add it to its running sum."""
        self._calc_derived_fields()
        for dname in self.diagnostics:
            if self.diagnostics[dname]['active']:
                res = self.diagnostics[dname]['function'](self)
                if self.diagnostics[dname]['count'] == 0:
                    self.diagnostics[dname]['value'] = res
                else:
                    self.diagnostics[dname]['value'] += res
                self.diagnostics[dname]['count'] += 1

    def _calc_derived_fields(self):
        """Calculate derived fields necessary for diagnostics."""
        self.thh = np.fft.rfft2(self.th)
        # x-averaged tracer field
        self.thm = self.th.mean(axis=1)
        # anomaly about the x-averaged field
        self.tha = self.th - self.thm[..., np.newaxis]
        self.thah = np.fft.rfft2(self.tha)
        # x-averaged squared gradient of the anomaly
        gradx = np.fft.irfft2(1j*self.k*self.thah)
        grady = np.fft.irfft2(1j*self.l*self.thah)
        self.gradth2m = (gradx**2 + grady**2).mean(axis=1)
        # triple (advective transport) term
        self.vth2m = (self.v*(self.tha**2)).mean(axis=1)
        # anomaly variance (diffusive transport term)
        self.th2m = (self.tha**2).mean(axis=1)

    def get_diagnostic(self, dname):
        """Return the time-averaged value of diagnostic ``dname``."""
        return (self.diagnostics[dname]['value'] /
                self.diagnostics[dname]['count'])

    def spec_var(self, ph):
        """Compute the variance of the field whose rfft2 coefficients are ph."""
        var_dens = 2. * np.abs(ph)**2 / self.M**2
        # only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
        var_dens[..., 0] = var_dens[..., 0]/2.
        var_dens[..., -1] = var_dens[..., -1]/2.
        return var_dens.sum()

    def block_average(self, A, nblocks=256):
        """Block-average the 1-d array ``A`` onto ``nblocks`` blocks."""
        # fix: integer block length; true division gave a float, which
        # breaks slicing under Python 3
        nave = self.nx//nblocks
        Ab = np.empty(nblocks)
        for i in range(nblocks):
            Ab[i] = A[i*nave:(i+1)*nave].mean()
        return Ab
#grad2 = (wv2*(np.abs(thh)**2)).sum()/(N**2)
# a test initial concentration
#x0,y0 = pi,pi
#r = np.sqrt((x-x0)[np.newaxis,...]**2+(y-y0)[...,np.newaxis]**2)
#th = np.zeros(N,N)
#th = np.exp(-(r**2))
| 31.189295 | 85 | 0.544054 |
acf1c9fef9d74cd913985e1ebdd71fd42de7b1a3 | 8,343 | py | Python | tests/test_job.py | chrieke/up42-py | 01c779354f336d83c18eb0d4c48ca0842fa7278d | [
"MIT"
] | 72 | 2020-04-06T16:36:36.000Z | 2022-03-30T23:39:14.000Z | tests/test_job.py | j-tr/up42-py | 163b5a324775998987fc646afe8d5c257dd559ee | [
"MIT"
] | 59 | 2020-04-08T14:50:40.000Z | 2022-03-23T22:14:46.000Z | tests/test_job.py | j-tr/up42-py | 163b5a324775998987fc646afe8d5c257dd559ee | [
"MIT"
] | 40 | 2020-04-07T22:34:05.000Z | 2022-03-23T10:57:15.000Z | import os
from pathlib import Path
import json
import time
import tempfile
import pytest
# pylint: disable=unused-import
from .context import Job, JobTask
from .fixtures import (
auth_mock,
auth_live,
job_mock,
job_live,
jobtask_mock,
workflow_live,
)
from .fixtures import DOWNLOAD_URL, JOBTASK_ID
def test_job_info(job_mock):
    """Accessing the ``info`` property lazily repopulates the ``_info`` cache."""
    del job_mock._info
    assert isinstance(job_mock, Job)
    assert job_mock.info["xyz"] == 789
    # The property access above must have refilled the cache.
    assert job_mock._info["xyz"] == 789
# pylint: disable=unused-argument
@pytest.mark.parametrize("status", ["NOT STARTED", "PENDING", "RUNNING"])
def test_job_status(job_mock, status, requests_mock):
    """``Job.status`` reflects the status returned by the job-info endpoint."""
    del job_mock._info
    url_job_info = (
        f"{job_mock.auth._endpoint()}/projects/"
        f"{job_mock.project_id}/jobs/{job_mock.job_id}"
    )
    requests_mock.get(url=url_job_info, json={"data": {"status": status}, "error": {}})
    assert job_mock.status == status
# pylint: disable=unused-argument
@pytest.mark.parametrize(
    "status,expected",
    [
        ("NOT STARTED", False),
        ("PENDING", False),
        ("RUNNING", False),
        ("FAILED", False),
        ("SUCCEEDED", True),
    ],
)
def test_is_succeeded(job_mock, status, expected, requests_mock):
    """``Job.is_succeeded`` is True only for the SUCCEEDED status."""
    del job_mock._info
    url_job_info = (
        f"{job_mock.auth._endpoint()}/projects/"
        f"{job_mock.project_id}/jobs/{job_mock.job_id}"
    )
    requests_mock.get(url=url_job_info, json={"data": {"status": status}, "error": {}})
    assert job_mock.is_succeeded == expected
@pytest.mark.parametrize("status", ["SUCCEEDED"])
def test_track_status_pass(job_mock, status, requests_mock):
    """``track_status`` returns the terminal status on success."""
    del job_mock._info
    url_job_info = (
        f"{job_mock.auth._endpoint()}/projects/"
        f"{job_mock.project_id}/jobs/{job_mock.job_id}"
    )
    requests_mock.get(url=url_job_info, json={"data": {"status": status}, "error": {}})
    job_status = job_mock.track_status()
    assert job_status == status
@pytest.mark.parametrize("status", ["FAILED", "ERROR", "CANCELLED", "CANCELLING"])
def test_track_status_fail(job_mock, status, requests_mock):
    """``track_status`` raises ValueError for failed/cancelled jobs."""
    del job_mock._info
    url_job_info = (
        f"{job_mock.auth._endpoint()}/projects/"
        f"{job_mock.project_id}/jobs/{job_mock.job_id}"
    )
    requests_mock.get(url=url_job_info, json={"data": {"status": status}, "error": {}})
    with pytest.raises(ValueError):
        job_mock.track_status()
def test_cancel_job(job_mock, requests_mock):
    """``cancel_job`` POSTs to the job's cancel endpoint without raising."""
    url = f"{job_mock.auth._endpoint()}/projects/{job_mock.project_id}/jobs/{job_mock.job_id}/cancel/"
    requests_mock.post(url, status_code=200)
    job_mock.cancel_job()
def test_download_quicklook(job_mock, requests_mock):
    """Quicklook download writes exactly one ``.png`` file to the target dir."""
    url = (
        f"{job_mock.auth._endpoint()}/projects/{job_mock.project_id}/jobs/{job_mock.job_id}"
        f"/tasks/{JOBTASK_ID}/outputs/quicklooks/a_quicklook.png"
    )
    quicklook_file = Path(__file__).resolve().parent / "mock_data/a_quicklook.png"
    # read_bytes() opens and closes the file; the bare open(...).read() it
    # replaces leaked the file handle.
    requests_mock.get(url, content=quicklook_file.read_bytes())
    with tempfile.TemporaryDirectory() as tempdir:
        quick = job_mock.download_quicklooks(tempdir)
        assert len(quick) == 1
        assert Path(quick[0]).exists()
        assert Path(quick[0]).suffix == ".png"
def test_get_result_json(job_mock):
    """The mocked job exposes an empty GeoJSON FeatureCollection result."""
    assert job_mock.get_results_json() == {
        "type": "FeatureCollection",
        "features": [],
    }
def test_get_logs(job_mock):
    """Logs are empty for the mocked job; printing mode returns None."""
    assert job_mock.get_logs(as_return=True)[JOBTASK_ID] == ""
    assert not job_mock.get_logs()
def test_get_jobtasks(job_mock):
    """``get_jobtasks`` yields JobTask objects with the expected id."""
    job_tasks = job_mock.get_jobtasks()
    assert isinstance(job_tasks[0], JobTask)
    assert job_tasks[0].jobtask_id == JOBTASK_ID
def test_get_jobtasks_result_json(job_mock):
    """Per-task results are keyed by jobtask id."""
    res = job_mock.get_jobtasks_results_json()
    assert len(res) == 1
    assert res[JOBTASK_ID] == {
        "type": "FeatureCollection",
        "features": [],
    }
def test_job_download_result(job_mock, requests_mock):
    """Downloading a tgz result unpacks it into a tif and a data.json."""
    out_tgz = Path(__file__).resolve().parent / "mock_data/result_tif.tgz"
    with open(out_tgz, "rb") as src_tgz:
        out_tgz_file = src_tgz.read()
    requests_mock.get(
        url=DOWNLOAD_URL,
        content=out_tgz_file,
        headers={"x-goog-stored-content-length": "163"},
    )
    with tempfile.TemporaryDirectory() as tempdir:
        out_files = job_mock.download_results(tempdir)
        out_paths = [Path(p) for p in out_files]
        for path in out_paths:
            assert path.exists()
        assert len(out_paths) == 2
        # Order of the two unpacked files is not guaranteed.
        assert out_paths[0].name in [
            "7e17f023-a8e3-43bd-aaac-5bbef749c7f4_0-0.tif",
            "data.json",
        ]
        assert out_paths[1].name in [
            "7e17f023-a8e3-43bd-aaac-5bbef749c7f4_0-0.tif",
            "data.json",
        ]
        assert out_paths[0] != out_paths[1]
        assert out_paths[1].parent.exists()
        assert out_paths[1].parent.is_dir()
def test_job_download_result_nounpacking(job_mock, requests_mock):
    """With ``unpacking=False`` the tgz archive is kept as a single file."""
    out_tgz = Path(__file__).resolve().parent / "mock_data/result_tif.tgz"
    with open(out_tgz, "rb") as src_tgz:
        out_tgz_file = src_tgz.read()
    requests_mock.get(
        url=DOWNLOAD_URL,
        content=out_tgz_file,
        headers={"x-goog-stored-content-length": "163"},
    )
    with tempfile.TemporaryDirectory() as tempdir:
        out_files = job_mock.download_results(tempdir, unpacking=False)
        for file in out_files:
            assert Path(file).exists()
        assert len(out_files) == 1
@pytest.mark.skip(reason="Sometimes takes quite long to cancel the job on the server.")
@pytest.mark.live
def test_cancel_job_live(workflow_live):
    """Live: a test (DRY_RUN) job can be cancelled after launch."""
    input_parameters_json = (
        Path(__file__).resolve().parent / "mock_data/input_params_simple.json"
    )
    jb = workflow_live.test_job(
        input_parameters=input_parameters_json, track_status=False
    )
    # Can happen that the test job is finished before the cancellation kicks in server-side.
    jb.cancel_job()
    # Give service time to cancel job before assertions
    time.sleep(3)
    assert jb.status in ["CANCELLED", "CANCELLING"]
    assert isinstance(jb, Job)
    with open(input_parameters_json) as src:
        job_info_params = json.load(src)
    job_info_params.update({"config": {"mode": "DRY_RUN"}})
    assert jb._info["inputs"] == job_info_params
    assert jb._info["mode"] == "DRY_RUN"
@pytest.mark.live
def test_job_download_result_live(job_live):
    """Live: downloading results of a finished job yields two files."""
    with tempfile.TemporaryDirectory() as tempdir:
        out_files = job_live.download_results(Path(tempdir))
        for file in out_files:
            assert Path(file).exists()
        assert len(out_files) == 2
@pytest.mark.live
def test_job_download_result_no_tiff_live(auth_live):
    """Live: a NetCDF-result job downloads the .nc file plus data.json."""
    with tempfile.TemporaryDirectory() as tempdir:
        job = Job(
            auth=auth_live,
            project_id=auth_live.project_id,
            job_id=os.getenv("TEST_UP42_JOB_ID_NC_FILE"),
        )
        out_files = job.download_results(Path(tempdir))
        assert Path(out_files[0]).exists()
        assert Path(out_files[1]).exists()
        assert any(".nc" in s for s in out_files)
        assert any("data.json" in s for s in out_files)
        assert len(out_files) == 2
@pytest.mark.live
def test_job_download_result_dimap_live(auth_live):
    """Live: a DIMAP-result job downloads all 54 files incl. data.json."""
    with tempfile.TemporaryDirectory() as tempdir:
        job = Job(
            auth=auth_live,
            project_id=auth_live.project_id,
            job_id=os.getenv("TEST_UP42_JOB_ID_DIMAP_FILE"),
        )
        out_files = job.download_results(Path(tempdir))
        # Removed leftover debug print(out_files); spot-check a few paths
        # rather than all 54.
        assert Path(out_files[0]).exists()
        assert Path(out_files[20]).exists()
        assert Path(out_files[-1]).exists()
        assert "data.json" in [Path(of).name for of in out_files]
        assert len(out_files) == 54
@pytest.mark.skip
@pytest.mark.live
def test_job_download_result_live_2gb_big_exceeding_2min_gcs_treshold(auth_live):
    """Live (skipped by default): very large result download exceeding the
    2-minute GCS signed-url threshold still retrieves all 490 files."""
    job = Job(
        auth=auth_live,
        project_id=auth_live.project_id,
        job_id="30f82b44-1505-4773-ab23-31fa61ba9b4c",
    )
    with tempfile.TemporaryDirectory() as tempdir:
        out_files = job.download_results(Path(tempdir))
        for file in out_files:
            assert Path(file).exists()
        assert len(out_files) == 490
| 31.130597 | 102 | 0.664989 |
acf1ca612b60ca7aa85bcec6d5b290d2de296bcd | 104,594 | py | Python | src/blp/test/test_blp.py | nickmik33/blp | d5d45c141973fe66482169d5763476f26acb7077 | [
"Apache-2.0"
] | 24 | 2021-05-31T03:32:51.000Z | 2022-02-26T23:19:47.000Z | src/blp/test/test_blp.py | nickmik33/blp | d5d45c141973fe66482169d5763476f26acb7077 | [
"Apache-2.0"
] | 10 | 2021-06-25T11:57:30.000Z | 2022-03-18T13:48:34.000Z | src/blp/test/test_blp.py | nickmik33/blp | d5d45c141973fe66482169d5763476f26acb7077 | [
"Apache-2.0"
] | 9 | 2021-06-04T03:14:46.000Z | 2022-03-19T15:20:49.000Z | import copy
import datetime
import itertools
import queue
import blpapi
import numpy
import pandas
import pytest
import pytz
from pandas import Timestamp as TS
from pandas.testing import assert_frame_equal, assert_series_equal
from blp import blp
# Seconds to wait for subscription status events in streaming tests.
STREAM_SUBSCRIPTION_TIMEOUT = 20
# Timeout passed to BlpQuery — presumably milliseconds (blpapi convention);
# confirm against BlpQuery's signature.
QUERY_TIMEOUT = 50000
# Default Bloomberg API endpoint (local desktop API).
HOST = "localhost"
PORT = 8194
# Request type -> Bloomberg service that serves it.
SERVICES = {
    "HistoricalDataRequest": "//blp/refdata",
    "ReferenceDataRequest": "//blp/refdata",
    "GetBrokerSpecForUuid": "//blp/emsx.brokerspec",
    "CreateOrderAndRouteEx": "//blp/emapisvc_beta",
    "GetBrokerStrategyInfoWithAssetClass": "//blp/emapisvc_beta",
}
class MockEventQueue:
    """Minimal stand-in for ``blpapi.EventQueue`` backed by a plain list."""

    def __init__(self):
        self._values = []

    def nextEvent(self, timeout=None):
        # timeout is accepted for interface parity with blpapi but ignored.
        head = self._values[0]
        del self._values[0]
        return head

    def extend(self, value):
        self._values.extend(value)

    def __iter__(self):
        return iter(self._values)
class MockBlpQuery(blp.BlpQuery):
    """A mock class for :class:`blp.blp.BlpQuery`.

    Args:
        cache_data (dict): A cache of requests and responses. Keys should be
            the id of the request_data as it would appear in BlpQuery.query(),
            i.e. id(request_data). Values should be a list of responses
            conforming to the spec that would be yielded from get_response.
    """

    # Request type -> response message type, mirroring the live service.
    lookup = {
        "HistoricalDataRequest": "HistoricalDataResponse",
        "ReferenceDataRequest": "ReferenceDataResponse",
        "IntradayTickRequest": "IntradayTickResponse",
        "IntradayBarRequest": "IntradayBarResponse",
        "FieldInfoRequest": "FieldInfoResponse",
    }

    def __init__(
        self,
        host="localhost",
        port=8194,
        timeout=10000,
        parser=None,
        cache_data=None,
        **kwargs,
    ):
        self._cache = cache_data
        super().__init__(host=host, port=port, timeout=timeout, parser=parser, **kwargs)

    def start(self):
        # Nothing to start — the mock never opens a session.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to tear down.
        pass

    def create_request(self, request_data):
        # The object id doubles as the cache key for send_request.
        return id(request_data)

    def send_request(self, request, data_queue, correlation_id=None):
        data_queue.extend(self._cache[request])

    def get_response(self, data_queue, timeout=None):
        yield from data_queue
@pytest.fixture(
    scope="class",
    params=[pytest.param((HOST, PORT), marks=pytest.mark.bbg)],
    ids=lambda host_port: f"{host_port[0]}:{host_port[1]}",
)
def bstream(request):
    """Fresh BlpStream per test class; one start attempt keeps failures fast."""
    host, port = request.param
    return blp.BlpStream(host=host, port=port, setNumStartAttempts=1)
@pytest.fixture(
    scope="module",
    params=[pytest.param((HOST, PORT), marks=pytest.mark.bbg)],
    ids=lambda host_port: f"{host_port[0]}:{host_port[1]}",
)
def bquery(request):
    """Module-scoped BlpQuery, opened/closed via its context manager."""
    host, port = request.param
    with blp.BlpQuery(host, port, timeout=QUERY_TIMEOUT) as _bquery:
        yield _bquery
@pytest.fixture(
    scope="module",
    params=[pytest.param((HOST, PORT), marks=pytest.mark.bbg)],
    ids=lambda host_port: f"{host_port[0]}:{host_port[1]}",
)
def bcon(request):
    """Raw blpapi Session with the services used by the tests pre-opened."""
    host, port = request.param
    sopts = blpapi.SessionOptions()
    sopts.setServerHost(host)
    sopts.setServerPort(port)
    session = blpapi.Session(sopts)
    session.start()
    session.openService("//blp/refdata")
    session.openService("//blp/emsx.brokerspec")
    session.openService("//blp/emapisvc_beta")
    yield session
    session.stop()
def is_not_market_hours(now=None):
    """Return True when *now* (default: current New York time) falls inside
    the weekend close, i.e. between Friday 17:00 and Sunday 18:00 inclusive."""
    if not now:
        now = datetime.datetime.now(tz=pytz.timezone("America/New_York"))
    day_seconds = 24 * 60 * 60
    # Boundaries expressed as seconds elapsed since Monday 00:00.
    friday_close = 4 * day_seconds + 17 * 60 * 60
    sunday_open = 6 * day_seconds + 18 * 60 * 60
    midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
    elapsed_today = (now - midnight).seconds
    week_seconds = now.weekday() * day_seconds + elapsed_today
    return friday_close <= week_seconds <= sunday_open
def get_intraday_dates():
    """Return (start, end) ISO-format timestamps for a 2-minute window
    beginning 8 hours into the day 7 business days ago."""
    start = pandas.Timestamp.now() + pandas.offsets.BusinessDay(-7) + pandas.Timedelta(hours=8)
    end = start + pandas.Timedelta(minutes=2)
    fmt = "%Y-%m-%dT%H:%M:%S"
    return start.strftime(fmt), end.strftime(fmt)
def assert_streaming_equal(data, cid):
    """Validate the shape of a single streaming market-data event dict."""
    if not isinstance(data, dict):
        raise ValueError(f"data is unknown type {type(data)}, should be dict")
    # Keys whose value must match exactly.
    fixed = {
        "fragmentType": 0,
        "correlationIds": [cid],
        "messageType": "MarketDataEvents",
        "topicName": "",
    }
    for key, value in data.items():
        if key in fixed:
            assert value == fixed[key]
        elif key == "timeReceived":
            assert isinstance(value, pandas.Timestamp)
        elif key == "element":
            assert list(value.keys()) == ["MarketDataEvents"]
        else:
            raise KeyError(f"Unknown key {key} in data")
def assert_eventdata_equal(data, exp_data):
    """Assert two event lists are equal apart from their (session-assigned)
    correlation ids, which only need to be single-int lists.

    NOTE: mutates both inputs by popping ``correlationIds``.
    """
    assert len(data) == len(exp_data)
    for actual, expected in zip(data, exp_data):
        actual_cids = actual["message"].pop("correlationIds")
        expected["message"].pop("correlationIds")
        assert len(actual_cids) == 1
        assert isinstance(actual_cids[0], int)
    assert data == exp_data
def assert_info_equal(data, exp_data, ignore_overrides=True):
    """Assert two FieldInfo responses are equal, ignoring correlation ids and
    (optionally) the unstable ``overrides`` / error ``source`` details.

    Works on deep copies, so the inputs are not mutated. Field responses are
    sorted by field id before comparison since their order is not guaranteed.
    """
    data = copy.deepcopy(data)
    exp_data = copy.deepcopy(exp_data)
    assert len(data) == len(exp_data)
    for d, ed in zip(data, exp_data):
        # Correlation ids are session-assigned; only check shape, not value.
        cid = d["message"].pop("correlationIds")
        ed["message"].pop("correlationIds")
        assert len(cid) == 1
        assert isinstance(cid[0], int)
        d["message"]["element"]["fieldResponse"] = sorted(
            d["message"]["element"]["fieldResponse"], key=lambda x: x["fieldData"]["id"]
        )
        ed["message"]["element"]["fieldResponse"] = sorted(
            ed["message"]["element"]["fieldResponse"],
            key=lambda x: x["fieldData"]["id"],
        )
        if ignore_overrides:
            # Overrides lists and error sources vary between terminals, so
            # strip them from both sides before comparing.
            for di in [d, ed]:
                for resp in di["message"]["element"]["fieldResponse"]:
                    if "fieldInfo" in resp["fieldData"]:
                        del resp["fieldData"]["fieldInfo"]["fieldInfo"]["overrides"]
                    if "fieldError" in resp["fieldData"]:
                        del resp["fieldData"]["fieldError"]["fieldError"]["source"]
    assert data == exp_data
def assert_bar_parsed(data, security, event):
    """Validate parsed intraday-bar records for *security* and *event*."""
    # Bar fields checked in a fixed order, each with its expected type.
    typed_fields = [
        ("time", pandas.Timestamp),
        ("open", float),
        ("high", float),
        ("low", float),
        ("close", float),
        ("volume", int),
        ("numEvents", int),
        ("value", float),
    ]
    expected_keys = {name for name, _ in typed_fields}
    for datum in data:
        assert set(datum) == {"security", "data", "events"}
        assert datum["security"] == security
        assert datum["events"] == [event]
        for bar in datum["data"]:
            assert set(bar) == expected_keys
            for field, ftype in typed_fields:
                assert isinstance(bar[field], ftype)
def assert_tick_parsed(data, security, event):
    """Validate parsed intraday-tick records for *security* and *event*."""
    for datum in data:
        assert set(datum) == {"security", "data", "events"}
        assert datum["security"] == security
        assert datum["events"] == [event]
        for tick in datum["data"]:
            assert set(tick) == {"time", "type", "value", "size"}
            assert isinstance(tick["time"], pandas.Timestamp)
            # Each tick echoes the requested event type.
            assert tick["type"] == event
            for field, ftype in (("value", float), ("size", int)):
                assert isinstance(tick[field], ftype)
def params_from_funcs(funcs):
    """Build ``pytest.mark.parametrize`` kwargs from fixture-builder
    functions: each function's name becomes an id and its return value
    an argvalue."""
    return {
        "ids": [func.__name__ for func in funcs],
        "argvalues": [func() for func in funcs],
    }
def test_connect(bstream):
    """The stream context manager opens and closes without error."""
    with bstream:
        pass
def test_iterate_empty_timeout(bstream):
    """Iterating an idle stream raises queue.Empty after the timeout."""
    with bstream:
        with pytest.raises(queue.Empty):
            for _ in bstream.events(timeout=0.1):
                pass
def test_subscribe_data(bstream):
    """Subscription results report per-ticker success / bad fields."""
    sub_dict = {
        "DOESCRUD Index": {"fields": ["LAST_PRICE"]},
        "BAD_TICKER": {"fields": ["LAST_PRICE"]},
        "USDCAD Curncy": {"fields": ["BAD_FIELD"]},
        "EURUSD Curncy": {"fields": ["LAST_PRICE", "BAD_FIELD"]},
    }
    res_exp = {
        "DOESCRUD Index": True,
        "BAD_TICKER": False,
        "USDCAD Curncy": False,
        "EURUSD Curncy": ["BAD_FIELD"],
    }
    with bstream:
        res = bstream.subscribe(sub_dict, timeout=STREAM_SUBSCRIPTION_TIMEOUT)
    assert res == res_exp
def test_unsubscribe_data(bstream):
    """Unsubscribing an active subscription succeeds."""
    sub_dict = {"DOESCRUD Index": {"fields": ["LAST_PRICE"]}}
    res_exp = {"DOESCRUD Index": True}
    with bstream:
        res1 = bstream.subscribe(sub_dict, timeout=STREAM_SUBSCRIPTION_TIMEOUT)
        res2 = bstream.unsubscribe(sub_dict, timeout=STREAM_SUBSCRIPTION_TIMEOUT)
    assert res1 == res_exp
    assert res2 == res_exp
def test_resubscribe_data(bstream):
    """Resubscribing an active subscription succeeds."""
    sub_dict = {"DOESCRUD Index": {"fields": ["LAST_PRICE"]}}
    res_exp = {"DOESCRUD Index": True}
    with bstream:
        res1 = bstream.subscribe(sub_dict, timeout=STREAM_SUBSCRIPTION_TIMEOUT)
        res2 = bstream.resubscribe(sub_dict, timeout=STREAM_SUBSCRIPTION_TIMEOUT)
    assert res1 == res_exp
    assert res2 == res_exp
def test_resubscribe_missing_correlationid(bstream):
    """Resubscribing with an unknown ticker raises NotFoundException."""
    sub_dict1 = {"DOESCRUD Index": {"fields": ["LAST_PRICE"]}}
    sub_dict2 = {
        "DOESCRUD Index": {"fields": ["LAST_PRICE"]},
        "USDCAD Curncy": {"fields": ["PX_LAST"]},
    }
    with bstream:
        bstream.subscribe(sub_dict1, timeout=STREAM_SUBSCRIPTION_TIMEOUT)
        with pytest.raises(blpapi.exception.NotFoundException):
            bstream.resubscribe(sub_dict2, timeout=STREAM_SUBSCRIPTION_TIMEOUT)
def test_unsubscribe_missing_correlationid(bstream):
    """Unsubscribing an unknown ticker times out with queue.Empty."""
    sub_dict1 = {"DOESCRUD Index": {"fields": ["LAST_PRICE"]}}
    sub_dict2 = {
        "DOESCRUD Index": {"fields": ["LAST_PRICE"]},
        "USDCAD Curncy": {"fields": ["PX_LAST"]},
    }
    with bstream:
        bstream.subscribe(sub_dict1, timeout=STREAM_SUBSCRIPTION_TIMEOUT)
        with pytest.raises(queue.Empty):
            bstream.unsubscribe(sub_dict2, timeout=1)
@pytest.mark.skipif(is_not_market_hours(), reason="Requires market to be open")
def test_iter_data(bstream):
    """Streaming events for a live ticker have the expected structure."""
    sub_dict = {"USDCAD Curncy": {"fields": ["LAST_PRICE"]}}
    data = []
    max_events = 3
    with bstream:
        bstream.subscribe(sub_dict, timeout=STREAM_SUBSCRIPTION_TIMEOUT)
        # add timeout to avoid hanging indefinitely if no market data is arriving
        for datum in bstream.events(timeout=60):
            data.append(datum)
            if len(data) == max_events:
                break
    for d in data:
        assert_streaming_equal(d, "USDCAD Curncy")
def bbg_request_params():
    """Parametrize cases for request round-tripping.

    Each case is an (id, request_type, payload) triple, returned in the
    ``ids``/``argvalues`` form expected by ``pytest.mark.parametrize``.
    """
    cases = [
        (
            "historical",
            "HistoricalDataRequest",
            {
                "fields": ["PX_LAST", "PX_VOLUME"],
                "securities": ["SPY US Equity", "IBM US Equity"],
                "startDate": "20180101",
                "endDate": "20180105",
            },
        ),
        (
            "historical_adjustments",
            "HistoricalDataRequest",
            {
                "fields": ["PX_LAST", "PX_VOLUME"],
                "securities": ["SPY US Equity", "IBM US Equity"],
                "startDate": "20180101",
                "endDate": "20180105",
                "periodicityAdjustment": "ACTUAL",
            },
        ),
        (
            "reference",
            "ReferenceDataRequest",
            {
                "fields": ["PX_LAST", "PX_VOLUME"],
                "securities": ["SPY US Equity", "IBM US Equity"],
            },
        ),
        (
            "reference_overrides",
            "ReferenceDataRequest",
            {
                "fields": ["SETTLE_DT"],
                "securities": ["AUD Curncy"],
                "overrides": [{"overrides": {"fieldId": "REFERENCE_DATE", "value": "20180101"}}],
            },
        ),
        ("broker_spec", "GetBrokerSpecForUuid", {"uuid": 1234567}),
        (
            "order_and_route",
            "CreateOrderAndRouteEx",
            {
                "EMSX_TICKER": "IBM US Equity",
                "EMSX_AMOUNT": 1,
                "EMSX_ORDER_TYPE": "MKT",
                "EMSX_TIF": "DAY",
                "EMSX_HAND_INSTRUCTION": "ANY",
                "EMSX_SIDE": "BUY",
                "EMSX_BROKER": "BB",
            },
        ),
        (
            "broker_strategy",
            "GetBrokerStrategyInfoWithAssetClass",
            {
                "EMSX_REQUEST_SEQ": 1,
                "EMSX_ASSET_CLASS": "FUT",
                "EMSX_BROKER": "BMTB",
                "EMSX_STRATEGY": "VWAP",
            },
        ),
    ]
    return {
        "ids": [case_id for case_id, _, _ in cases],
        "argvalues": [(req_type, payload) for _, req_type, payload in cases],
    }
@pytest.mark.parametrize("request_type, request_data", **bbg_request_params())
def test_dict_to_request(bcon, request_type, request_data):
    """A dict -> blpapi request -> dict round trip preserves the payload."""
    bbg_request = bcon.getService(SERVICES[request_type]).createRequest(request_type)
    round_trip = blp.element_to_dict(blp.dict_to_req(bbg_request, request_data).asElement())
    # The round-tripped dict is nested under the request-type key.
    round_trip_data = round_trip[list(round_trip)[0]]
    assert request_data == round_trip_data
@pytest.mark.parametrize(
    "request_data",
    ids=["reference_empty", "reference", "historical"],
    argvalues=[
        {"ReferenceDataRequest": {}},
        {"ReferenceDataRequest": {"fields": ["PX_LAST"], "securities": ["SPY US Equity"]}},
        {
            "HistoricalDataRequest": {
                "fields": ["PX_LAST"],
                "securities": ["SPY US Equity"],
                "startDate": "20180101",
                "endDate": "20180105",
            }
        },
    ],
)
def test_create_request_smoketest(bquery, request_data):
    """create_request builds a blpapi request without raising."""
    bquery.create_request(request_data)
def historical_response():
    """Expected raw (unparsed) HistoricalDataResponse events for the SPY/IBM
    PX_LAST + PX_VOLUME 2018-01-01..05 request used in the tests below."""
    return [
        {
            "eventType": 6,
            "eventTypeName": "blpapi.Event.PARTIAL_RESPONSE",
            "messageNumber": 0,
            "message": {
                "fragmentType": 0,
                "correlationIds": [1],
                "messageType": "HistoricalDataResponse",
                "timeReceived": None,
                "element": {
                    "HistoricalDataResponse": {
                        "securityData": {
                            "security": "SPY US Equity",
                            "eidData": [],
                            "sequenceNumber": 0,
                            "fieldExceptions": [],
                            "fieldData": [
                                {
                                    "fieldData": {
                                        "date": TS("2018-01-02 00:00:00"),
                                        "PX_LAST": 268.77,
                                        "PX_VOLUME": 86655749.0,
                                    }
                                },
                                {
                                    "fieldData": {
                                        "date": TS("2018-01-03 00:00:00"),
                                        "PX_LAST": 270.47,
                                        "PX_VOLUME": 90070416.0,
                                    }
                                },
                                {
                                    "fieldData": {
                                        "date": TS("2018-01-04 00:00:00"),
                                        "PX_LAST": 271.61,
                                        "PX_VOLUME": 80636408.0,
                                    }
                                },
                                {
                                    "fieldData": {
                                        "date": TS("2018-01-05 00:00:00"),
                                        "PX_LAST": 273.42,
                                        "PX_VOLUME": 83523995.0,
                                    }
                                },
                            ],
                        }
                    }
                },
            },
        },
        {
            "eventType": 5,
            "eventTypeName": "blpapi.Event.RESPONSE",
            "messageNumber": 0,
            "message": {
                "fragmentType": 0,
                "correlationIds": [1],
                "messageType": "HistoricalDataResponse",
                "timeReceived": None,
                "element": {
                    "HistoricalDataResponse": {
                        "securityData": {
                            "security": "IBM US Equity",
                            "eidData": [],
                            "sequenceNumber": 1,
                            "fieldExceptions": [],
                            "fieldData": [
                                {
                                    "fieldData": {
                                        "date": TS("2018-01-02 00:00:00"),
                                        "PX_LAST": 154.25,
                                        "PX_VOLUME": 4202503.0,
                                    }
                                },
                                {
                                    "fieldData": {
                                        "date": TS("2018-01-03 00:00:00"),
                                        "PX_LAST": 158.49,
                                        "PX_VOLUME": 9441567.0,
                                    }
                                },
                                {
                                    "fieldData": {
                                        "date": TS("2018-01-04 00:00:00"),
                                        "PX_LAST": 161.7,
                                        "PX_VOLUME": 7556249.0,
                                    }
                                },
                                {
                                    "fieldData": {
                                        "date": TS("2018-01-05 00:00:00"),
                                        "PX_LAST": 162.49,
                                        "PX_VOLUME": 5195764.0,
                                    }
                                },
                            ],
                        }
                    }
                },
            },
        },
    ]
def test_get_response(bquery):
    """Manually sending a request and draining the queue yields the expected
    partial + final response events."""
    reqd = {
        "HistoricalDataRequest": {
            "fields": ["PX_LAST", "PX_VOLUME"],
            "securities": ["SPY US Equity", "IBM US Equity"],
            "startDate": "20180101",
            "endDate": "20180105",
        }
    }
    req = bquery.create_request(reqd)
    data_queue = blpapi.EventQueue()
    bquery.send_request(req, data_queue)
    res_list = [data for data in bquery.get_response(data_queue)]
    exp_res = historical_response()
    assert_eventdata_equal(res_list, exp_res)
def test_bquery_timeout(bquery):
    """Draining an empty queue raises ConnectionError once the timeout hits."""
    with pytest.raises(ConnectionError):
        [i for i in bquery.get_response(blpapi.EventQueue(), timeout=1)]
def test_query_historical_data(bquery):
    """The high-level query() returns the same events as manual send/drain."""
    reqd = {
        "HistoricalDataRequest": {
            "fields": ["PX_LAST", "PX_VOLUME"],
            "securities": ["SPY US Equity", "IBM US Equity"],
            "startDate": "20180101",
            "endDate": "20180105",
        }
    }
    res_list = bquery.query(reqd, parse=False, collector=list)
    exp_res = historical_response()
    assert_eventdata_equal(res_list, exp_res)
def test_query_reference_data(bquery):
    """A NAME reference query returns one response with both securities."""
    reqd = {"ReferenceDataRequest": {"fields": ["NAME"], "securities": ["AUD Curncy", "EUR Curncy"]}}
    res_list = bquery.query(reqd, parse=False, collector=list)
    exp_res = [
        {
            "eventType": 5,
            "eventTypeName": "blpapi.Event.RESPONSE",
            "messageNumber": 0,
            "message": {
                "fragmentType": 0,
                "correlationIds": [1],
                "messageType": "ReferenceDataResponse",
                "timeReceived": None,
                "element": {
                    "ReferenceDataResponse": [
                        {
                            "securityData": {
                                "security": "AUD Curncy",
                                "eidData": [],
                                "fieldExceptions": [],
                                "sequenceNumber": 0,
                                "fieldData": {"fieldData": {"NAME": "Australian Dollar Spot"}},
                            }
                        },
                        {
                            "securityData": {
                                "security": "EUR Curncy",
                                "eidData": [],
                                "fieldExceptions": [],
                                "sequenceNumber": 1,
                                "fieldData": {"fieldData": {"NAME": "Euro Spot"}},
                            }
                        },
                    ]
                },
            },
        }
    ]
    assert_eventdata_equal(res_list, exp_res)
def test_query_bulk_reference_data(bquery):
    """A bulk FUT_CHAIN query (with a CHAIN_DATE override) returns the full
    historical gold futures chain as of 2005-01-01."""
    reqd = {
        "ReferenceDataRequest": {
            "securities": ["GC1 Comdty"],
            "fields": ["FUT_CHAIN"],
            "overrides": [{"overrides": {"fieldId": "CHAIN_DATE", "value": "20050101"}}],
        }
    }
    res_list = bquery.query(reqd, parse=False, collector=list)
    exp_res = [
        {
            "eventType": 5,
            "eventTypeName": "blpapi.Event.RESPONSE",
            "messageNumber": 0,
            "message": {
                "fragmentType": 0,
                "correlationIds": [18],
                "messageType": "ReferenceDataResponse",
                "timeReceived": None,
                "element": {
                    "ReferenceDataResponse": [
                        {
                            "securityData": {
                                "security": "GC1 Comdty",
                                "eidData": [],
                                "fieldExceptions": [],
                                "sequenceNumber": 0,
                                "fieldData": {
                                    "fieldData": {
                                        "FUT_CHAIN": [
                                            {"FUT_CHAIN": {"Security Description": "GCF05 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCG05 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCH05 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCJ05 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCM05 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCQ05 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCV05 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCZ05 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCG06 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCJ06 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCM06 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCQ06 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCV06 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCZ06 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCM07 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCZ07 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCM08 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCZ08 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCM09 Comdty"}},
                                            {"FUT_CHAIN": {"Security Description": "GCZ09 Comdty"}},
                                        ]
                                    }
                                },
                            }
                        }
                    ]
                },
            },
        }
    ]
    assert_eventdata_equal(res_list, exp_res)
def field_info_one_field():
    """(request, expected response) for a single-field FieldInfoRequest."""
    request = {"FieldInfoRequest": {"id": ["PX_LAST"]}}
    response = [
        {
            "eventType": 5,
            "eventTypeName": "blpapi.Event.RESPONSE",
            "messageNumber": 0,
            "message": {
                "fragmentType": 0,
                "correlationIds": [1],
                "messageType": "fieldResponse",
                "timeReceived": None,
                "element": {
                    "fieldResponse": [
                        {
                            "fieldData": {
                                "id": "PR005",
                                "fieldInfo": {
                                    "fieldInfo": {
                                        "mnemonic": "PX_LAST",
                                        "description": "Last Price",
                                        "datatype": "Double",
                                        "categoryName": [],
                                        "property": [],
                                        "overrides": [
                                            "PX628",
                                            "DY628",
                                            "DZ066",
                                            "DT454",
                                            "FL021",
                                            "PX957",
                                            "FL039",
                                            "FL037",
                                            "FL026",
                                            "SP162",
                                            "DS028",
                                            "FL235",
                                            "FO009",
                                            "FL024",
                                            "DT456",
                                            "FL023",
                                            "DY719",
                                            "AN175",
                                            "PX342",
                                            "DS029",
                                            "DY630",
                                            "DY629",
                                            "DT455",
                                            "DS170",
                                            "YL112",
                                        ],
                                        "ftype": "Price",
                                    }
                                },
                            }
                        }
                    ]
                },
            },
        }
    ]
    return request, response
def field_info_one_field_with_docs():
request = {"FieldInfoRequest": {"id": ["PX_LAST"], "returnFieldDocumentation": True}}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [1],
"messageType": "fieldResponse",
"timeReceived": None,
"element": {
"fieldResponse": [
{
"fieldData": {
"id": "PR005",
"fieldInfo": {
"fieldInfo": {
"mnemonic": "PX_LAST",
"description": "Last Price",
"datatype": "Double",
"documentation": "Last price for the security.\n\nEquities:\n Returns the last price provided by the exchange. For securities that trade Monday through Friday, this field will be populated only if such information has been provided by the exchange in the past 30 trading days. For initial public offerings (IPO), the day before the first actual trading day may return the IPO price. For all other securities, this field will be populated only if such information was provided by the exchange in the last 30 calendar days. This applies to common stocks, receipts, warrants, and real estate investment trusts (REITs).\n\nEquity Derivatives:\n Equity Options, Spot Indices, Index Futures and Commodity Futures:\n Returns the last trade price. No value is returned for expired contracts.\n\n Synthetic Options:\n Returns N.A.\n\nFixed Income:\n Returns the last price received from the current pricing source. The last price will always come from the date and time in LAST_UPDATE/LAST_UPDATE_DT. If there was no contributed last at that time the first valid value from mid/bid/ask will be used. The value returned will be a discount if Pricing Source Quote Type (DS962, PCS_QUOTE_TYP) is 2 (Discount Quoted). For information specific to the last trade see the price (PR088, PX_LAST_ACTUAL), time (P2788, LAST_TRADE_TIME), and date (P2789, LAST_TRADE_DATE) fields.\n\nReturns the last price received from the current pricing source. If last price is not available, then a mid computed from bid and ask will be returned. If either bid or ask is not available to compute mid, the field returns whichever side that is received.\n\nEquity Indices:\n Returns either the current quote price of the index or the last available close price of the index.\n\nCustom Indices:\n Returns the value the custom index expression evaluates to. Since the expression is user defined, the value has no units.\n\nEconomic Statistics:\n Provides the revision of the prior release. 
\n\nFutures and Options:\n Returns the last traded price until settlement price is received, at which time the settlement price is returned. If no trade or settlement price is available for the current day, then the last settlement price received is provided. No value is returned for expired contracts.\nSettlement Price (PR277, PX_SETTLE) and Futures Trade Price (PR083, FUT_PX) can be used instead to return settlement price and closing price respectively at all times regardless of these parameters.\n\nSwaps and Credit Default Swaps:\n Not supported for synthetics.\n\nMutual Funds:\n Closed-End, Exchange Traded and Open-End Funds Receiving Intraday Pricing from Exchange Feeds:\n Returns the most recent trade price.\n\n Open-End and Hedge Funds:\n Returns the net asset value (NAV). If no NAV is available, the bid is returned, and if no bid is available then the ask is returned.\n\n Money Market Funds that Display Days to Maturity and Yield:\n Returns a yield.\n\nCurrencies:\n Broken Date Type Currencies (e.g. USD/JPY 3M Curncy):\n Returns the average of the bid and ask.\n\n For All Other Currency Types:\n Returns the last trade price if it is valid and available. If last trade is not available then mid price is returned. Mid price is the average of the bid and ask. If a valid bid and ask are not available, then a bid or ask is returned based on which is non-zero. If no data is available for the current day, then the previous day's last trade is returned.\n\nOTC FX Options:\n Returns the premium of the option in nominal amount. Returns the price of the option expressed in a currency opposite of the notional currency.\n\nMortgages:\n Returns the last price received from the current pricing source. 
If this field is empty for any reason, then last ask is returned and if no ask is available, then last bid is returned.\n\nMunicipals:\n Returns the last price received from the current pricing source.\n\nPortfolio:\nNet asset value (NAV) as computed in the Portfolio & Risk Analytics function and used for Total Return computations. It is the cumulated daily total returns applied to the user-defined price/value at portfolio's inception date.", # noqa 501
"categoryName": [],
"property": [],
"overrides": [
"PX628",
"DY628",
"DZ066",
"DT454",
"FL021",
"PX957",
"FL039",
"FL037",
"FL026",
"SP162",
"DS028",
"FL235",
"FO009",
"FL024",
"DT456",
"FL023",
"DY719",
"AN175",
"PX342",
"DS029",
"DY630",
"DY629",
"DT455",
"DS170",
"YL112",
],
"ftype": "Price",
}
},
}
}
]
},
},
}
]
return request, response
def field_info_two_fields():
    """(request, expected response) for a two-field FieldInfoRequest."""
    request = {"FieldInfoRequest": {"id": ["PX_LAST", "NAME"]}}
    response = [
        {
            "eventType": 5,
            "eventTypeName": "blpapi.Event.RESPONSE",
            "messageNumber": 0,
            "message": {
                "fragmentType": 0,
                "correlationIds": [1],
                "messageType": "fieldResponse",
                "timeReceived": None,
                "element": {
                    "fieldResponse": [
                        {
                            "fieldData": {
                                "id": "PR005",
                                "fieldInfo": {
                                    "fieldInfo": {
                                        "mnemonic": "PX_LAST",
                                        "description": "Last Price",
                                        "datatype": "Double",
                                        "categoryName": [],
                                        "property": [],
                                        "overrides": [
                                            "PX628",
                                            "DY628",
                                            "DZ066",
                                            "DT454",
                                            "FL021",
                                            "PX957",
                                            "FL039",
                                            "FL037",
                                            "FL026",
                                            "SP162",
                                            "DS028",
                                            "FL235",
                                            "FO009",
                                            "FL024",
                                            "DT456",
                                            "FL023",
                                            "DY719",
                                            "AN175",
                                            "PX342",
                                            "DS029",
                                            "DY630",
                                            "DY629",
                                            "DT455",
                                            "DS170",
                                            "YL112",
                                        ],
                                        "ftype": "Price",
                                    }
                                },
                            }
                        },
                        {
                            "fieldData": {
                                "id": "DS002",
                                "fieldInfo": {
                                    "fieldInfo": {
                                        "mnemonic": "NAME",
                                        "description": "Name",
                                        "datatype": "String",
                                        "categoryName": [],
                                        "property": [],
                                        "overrides": [],
                                        "ftype": "Character",
                                    }
                                },
                            }
                        },
                    ]
                },
            },
        }
    ]
    return request, response
def field_info_bad_field():
request = {"FieldInfoRequest": {"id": ["bad_field"]}}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [1],
"messageType": "fieldResponse",
"timeReceived": None,
"element": {
"fieldResponse": [
{
"fieldData": {
"id": "BAD_FIELD",
"fieldError": {
"fieldError": {
"source": "apiflds@tkusr-ob-009",
"code": -103,
"category": "BAD_FLD",
"message": "Unknown Field Id/Mnemonic",
}
},
}
}
]
},
},
}
]
return request, response
def field_info_params():
funcs = [
field_info_one_field,
field_info_one_field_with_docs,
field_info_two_fields,
field_info_bad_field,
]
return params_from_funcs(funcs)
@pytest.mark.parametrize("bbg_request, exp_res", **field_info_params())
def test_query_field_info(bquery, bbg_request, exp_res):
res_list = bquery.query(bbg_request, parse=False, collector=list)
assert_info_equal(res_list, exp_res)
def test_bquery_context_smoketest(bquery):
reqd = {
"HistoricalDataRequest": {
"fields": ["PX_LAST"],
"securities": ["SPY US Equity"],
"startDate": "20180101",
"endDate": "20180105",
}
}
with blp.BlpQuery(HOST, PORT):
bquery.query(reqd)
def parser_raises_params():
bad_field = {
"HistoricalDataRequest": {
"fields": ["BAD_FIELD"],
"securities": ["SPY US Equity"],
"startDate": "20180101",
"endDate": "20180105",
}
}
bad_security = {
"HistoricalDataRequest": {
"fields": ["PX_LAST"],
"securities": ["BAD_SECURITY"],
"startDate": "20180101",
"endDate": "20180105",
}
}
missing_start_date = {"HistoricalDataRequest": {"fields": ["PX_LAST"], "securities": ["BAD_SECURITY"]}}
non_applicable_field_hist = {
"HistoricalDataRequest": {
"fields": ["PX_VOLUME"],
"securities": ["DOESCRUD Index"],
"startDate": "20180102",
"endDate": "20180203",
}
}
non_applicable_field_one_valid_hist = {
"HistoricalDataRequest": {
"fields": ["PX_VOLUME", "PX_LAST"],
"securities": ["DOESCRUD Index"],
"startDate": "20180102",
"endDate": "20180203",
}
}
bad_fld_invalid_ref = {"ReferenceDataRequest": {"fields": ["not_a_field"], "securities": ["RSF82 Comdty"]}}
return {
"ids": [
"bad_field",
"bad_security",
"missing_start_date",
"non_applicable_field_hist",
"non_applicable_field_one_valid_hist",
"bad_fld_invalid_ref",
],
"argvalues": [
(bad_field, blp.BlpParser._process_field_exception),
(bad_security, blp.BlpParser._validate_security_error),
(missing_start_date, blp.BlpParser._validate_response_error),
(non_applicable_field_hist, blp.BlpParser._validate_fields_exist),
(non_applicable_field_one_valid_hist, blp.BlpParser._validate_fields_exist),
(bad_fld_invalid_ref, blp.BlpParser._process_field_exception),
],
}
@pytest.mark.parametrize("bbg_request, processor", **parser_raises_params())
def test_parser_processors_raises(bquery, bbg_request, processor):
response = bquery.query(bbg_request, parse=False, collector=next)
with pytest.raises(TypeError):
processor(response, bbg_request)
def parser_valid_params():
valid_eco_hist = {
"HistoricalDataRequest": {
"fields": ["PX_LAST"],
"securities": ["DOESCRUD Index"],
"startDate": "20180102",
"endDate": "20180203",
}
}
non_applicable_field_ref = {"ReferenceDataRequest": {"fields": ["ID_EXCH_SYMBOL"], "securities": ["RSF82 Comdty"]}}
return {
"ids": ["valid_eco_hist", "non_applicable_field_ref"],
"argvalues": [
(valid_eco_hist, blp.BlpParser._validate_fields_exist),
(non_applicable_field_ref, blp.BlpParser._process_field_exception),
],
}
@pytest.mark.parametrize("bbg_request, processor", **parser_valid_params())
def test_parser_processors_valid(bquery, bbg_request, processor):
response = bquery.query(bbg_request, parse=False, collector=next)
processor(response, bbg_request)
def mock_timeout_response():
request = {"HistoricalDataRequest": {}}
response = [{"eventType": 10, "eventTypeName": "blpapi.Event.TIMEOUT", "message": {}, "messageNumber": 0}]
data = {id(request): response}
return data, request, blp.BlpParser._validate_event
def mock_unknown_response():
request = {"HistoricalDataRequest": {}}
response = [{"eventType": -1, "eventTypeName": "blpapi.Event.UNKNOWN", "message": {}, "messageNumber": 0}]
data = {id(request): response}
return data, request, blp.BlpParser._validate_event
def mock_bad_field():
request = {
"HistoricalDataRequest": {
"fields": ["BAD_FIELD"],
"securities": ["SPY US Equity"],
"startDate": "20180101",
"endDate": "20180105",
}
}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [35],
"messageType": "HistoricalDataResponse",
"timeReceived": None,
"element": {
"HistoricalDataResponse": {
"securityData": {
"security": "SPY US Equity",
"eidData": [],
"sequenceNumber": 0,
"fieldExceptions": [
{
"fieldExceptions": {
"fieldId": "BAD_FIELD",
"errorInfo": {
"errorInfo": {
"source": "2979::bbdbh1",
"code": 1,
"category": "BAD_FLD",
"message": "Invalid field",
"subcategory": "NOT_APPLICABLE_TO_HIST_DATA",
}
},
}
}
],
"fieldData": [],
}
}
},
},
}
]
data = {id(request): response}
return data, request, blp.BlpParser._process_field_exception
def mock_bad_security():
request = {
"HistoricalDataRequest": {
"fields": ["PX_LAST"],
"securities": ["BAD_SECURITY"],
"startDate": "20180101",
"endDate": "20180105",
}
}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [30],
"messageType": "HistoricalDataResponse",
"timeReceived": None,
"element": {
"HistoricalDataResponse": {
"securityData": {
"security": "BAD_SECURITY",
"eidData": [],
"sequenceNumber": 0,
"securityError": {
"securityError": {
"source": "2979::bbdbh4",
"code": 15,
"category": "BAD_SEC",
"message": "Unknown/Invalid securityInvalid Security [nid:2979] ",
"subcategory": "INVALID_SECURITY",
}
},
"fieldExceptions": [],
"fieldData": [],
}
}
},
},
}
]
data = {id(request): response}
return data, request, blp.BlpParser._validate_security_error
def mock_missing_start_date():
request = {"HistoricalDataRequest": {"fields": ["PX_LAST"], "securities": ["BAD_SECURITY"]}}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [25],
"messageType": "HistoricalDataResponse",
"timeReceived": None,
"element": {
"HistoricalDataResponse": {
"responseError": {
"source": "bbdbh4",
"code": 30,
"category": "BAD_ARGS",
"message": "Invalid start date specified [nid:2979] ",
"subcategory": "INVALID_START_DATE",
}
}
},
},
}
]
data = {id(request): response}
return data, request, blp.BlpParser._validate_response_error
def mock_non_applicable_field_hist():
request = {
"HistoricalDataRequest": {
"fields": ["PX_VOLUME"],
"securities": ["DOESCRUD Index"],
"startDate": "20180105",
"endDate": "20180112",
}
}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [20],
"messageType": "HistoricalDataResponse",
"timeReceived": None,
"element": {
"HistoricalDataResponse": {
"securityData": {
"security": "DOESCRUD Index",
"eidData": [],
"sequenceNumber": 0,
"fieldExceptions": [],
"fieldData": [],
}
}
},
},
}
]
data = {id(request): response}
return data, request, blp.BlpParser._validate_fields_exist
def mock_non_applicable_field_one_valid_hist():
request = {
"HistoricalDataRequest": {
"fields": ["PX_VOLUME", "PX_LAST"],
"securities": ["DOESCRUD Index"],
"startDate": "20180105",
"endDate": "20180112",
}
}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [15],
"messageType": "HistoricalDataResponse",
"timeReceived": None,
"element": {
"HistoricalDataResponse": {
"securityData": {
"security": "DOESCRUD Index",
"eidData": [],
"sequenceNumber": 0,
"fieldExceptions": [],
"fieldData": [
{"fieldData": {"date": TS("2018-01-05 00:00:00"), "PX_LAST": 419515.0}},
{"fieldData": {"date": TS("2018-01-12 00:00:00"), "PX_LAST": 412654.0}},
],
}
}
},
},
}
]
data = {id(request): response}
return data, request, blp.BlpParser._validate_fields_exist
def mock_bad_fld_invalid_ref():
request = {"ReferenceDataRequest": {"fields": ["not_a_field"], "securities": ["RSF82 Comdty"]}}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [5],
"messageType": "ReferenceDataResponse",
"timeReceived": None,
"element": {
"ReferenceDataResponse": [
{
"securityData": {
"security": "RSF82 Comdty",
"eidData": [],
"fieldExceptions": [
{
"fieldExceptions": {
"fieldId": "not_a_field",
"errorInfo": {
"errorInfo": {
"source": "180::bbdbl9",
"code": 9,
"category": "BAD_FLD",
"message": "Field not valid",
"subcategory": "INVALID_FIELD",
}
},
}
}
],
"sequenceNumber": 0,
"fieldData": {"fieldData": {}},
}
}
]
},
},
}
]
data = {id(request): response}
return data, request, blp.BlpParser._process_field_exception
def mock_parser_raises_params():
funcs = [
mock_timeout_response,
mock_unknown_response,
mock_bad_field,
mock_bad_security,
mock_missing_start_date,
mock_non_applicable_field_hist,
mock_non_applicable_field_one_valid_hist,
mock_bad_fld_invalid_ref,
]
return params_from_funcs(funcs)
@pytest.mark.parametrize("data, bbg_request, processor", **mock_parser_raises_params())
def test_mocked_parser_processors_raises(data, bbg_request, processor, mocker):
mocker.patch("blpapi.EventQueue", MockEventQueue)
mocker.patch("blp.blp.BlpQuery", MockBlpQuery)
response = blp.BlpQuery(cache_data=data).query(bbg_request, parse=False, collector=next)
with pytest.raises(TypeError):
processor(response, bbg_request)
def mock_valid_eco_hist():
request = {
"HistoricalDataRequest": {
"fields": ["PX_LAST"],
"securities": ["DOESCRUD Index"],
"startDate": "20180105",
"endDate": "20180112",
}
}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [10],
"messageType": "HistoricalDataResponse",
"timeReceived": None,
"element": {
"HistoricalDataResponse": {
"securityData": {
"security": "DOESCRUD Index",
"eidData": [],
"sequenceNumber": 0,
"fieldExceptions": [],
"fieldData": [
{"fieldData": {"date": TS("2018-01-05 00:00:00"), "PX_LAST": 419515.0}},
{"fieldData": {"date": TS("2018-01-12 00:00:00"), "PX_LAST": 412654.0}},
],
}
}
},
},
}
]
data = {id(request): response}
return data, request, blp.BlpParser._validate_fields_exist
def mock_non_applicable_field_ref():
request = {"ReferenceDataRequest": {"fields": ["ID_EXCH_SYMBOL"], "securities": ["RSF82 Comdty"]}}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [11],
"messageType": "ReferenceDataResponse",
"timeReceived": None,
"element": {
"ReferenceDataResponse": [
{
"securityData": {
"security": "RSF82 Comdty",
"eidData": [],
"fieldExceptions": [
{
"fieldExceptions": {
"fieldId": "ID_EXCH_SYMBOL",
"errorInfo": {
"errorInfo": {
"source": "3589::bbdbd14",
"code": 9,
"category": "BAD_FLD",
"message": "Field not applicable to security",
"subcategory": "NOT_APPLICABLE_TO_REF_DATA",
}
},
}
}
],
"sequenceNumber": 0,
"fieldData": {"fieldData": {}},
}
}
]
},
},
}
]
data = {id(request): response}
return data, request, blp.BlpParser._process_field_exception
def mock_parser_valid_params():
funcs = [mock_valid_eco_hist, mock_non_applicable_field_ref]
return params_from_funcs(funcs)
@pytest.mark.parametrize("data, bbg_request, processor", **mock_parser_valid_params())
def test_mocked_parser_processors_valid(data, bbg_request, processor, mocker):
mocker.patch("blpapi.EventQueue", MockEventQueue)
mocker.patch("blp.blp.BlpQuery", MockBlpQuery)
response = blp.BlpQuery(cache_data=data).query(bbg_request, parse=False, collector=next)
processor(response, bbg_request)
def parser_parse_historical():
request = {
"HistoricalDataRequest": {
"fields": ["PX_LAST"],
"securities": ["SPY US Equity"],
"startDate": "20180102",
"endDate": "20180103",
}
}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [15],
"messageType": "HistoricalDataResponse",
"timeReceived": None,
"element": {
"HistoricalDataResponse": {
"securityData": {
"security": "SPY US Equity",
"eidData": [],
"sequenceNumber": 0,
"fieldExceptions": [],
"fieldData": [
{"fieldData": {"date": TS("2018-01-02 00:00:00"), "PX_LAST": 268.77}},
{"fieldData": {"date": TS("2018-01-03 00:00:00"), "PX_LAST": 270.47}},
],
}
}
},
},
}
]
parsed = [
{
"security": "SPY US Equity",
"fields": ["PX_LAST"],
"data": [{"date": TS(2018, 1, 2), "PX_LAST": 268.77}, {"date": TS(2018, 1, 3), "PX_LAST": 270.47}],
}
]
parse_steps = None
return parse_steps, request, response, parsed
def parser_parse_multi_historical():
request = {
"HistoricalDataRequest": {
"fields": ["PX_LAST"],
"securities": ["SPY US Equity", "TLT US Equity"],
"startDate": "20180103",
"endDate": "20180103",
}
}
response = [
{
"eventType": 6,
"eventTypeName": "blpapi.Event.PARTIAL_RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [8],
"messageType": "HistoricalDataResponse",
"timeReceived": None,
"element": {
"HistoricalDataResponse": {
"securityData": {
"security": "SPY US Equity",
"eidData": [],
"sequenceNumber": 0,
"fieldExceptions": [],
"fieldData": [{"fieldData": {"date": TS("2018-01-03 00:00:00"), "PX_LAST": 270.47}}],
}
}
},
},
},
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [8],
"messageType": "HistoricalDataResponse",
"timeReceived": None,
"element": {
"HistoricalDataResponse": {
"securityData": {
"security": "TLT US Equity",
"eidData": [],
"sequenceNumber": 1,
"fieldExceptions": [],
"fieldData": [{"fieldData": {"date": TS("2018-01-03 00:00:00"), "PX_LAST": 126.09}}],
}
}
},
},
},
]
parsed = [
{"security": "SPY US Equity", "fields": ["PX_LAST"], "data": [{"date": TS(2018, 1, 3), "PX_LAST": 270.47}]},
{"security": "TLT US Equity", "fields": ["PX_LAST"], "data": [{"date": TS(2018, 1, 3), "PX_LAST": 126.09}]},
]
parse_steps = None
return parse_steps, request, response, parsed
def parser_parse_reference():
request = {"ReferenceDataRequest": {"fields": ["NAME"], "securities": ["SPY US Equity", "TLT US Equity"]}}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [10],
"messageType": "ReferenceDataResponse",
"timeReceived": None,
"element": {
"ReferenceDataResponse": [
{
"securityData": {
"security": "SPY US Equity",
"eidData": [],
"fieldExceptions": [],
"sequenceNumber": 0,
"fieldData": {"fieldData": {"NAME": "SPDR S&P 500 ETF TRUST"}},
}
},
{
"securityData": {
"security": "TLT US Equity",
"eidData": [],
"fieldExceptions": [],
"sequenceNumber": 1,
"fieldData": {"fieldData": {"NAME": "ISHARES 20+ YEAR TREASURY BO"}},
}
},
]
},
},
}
]
parsed = [
{"security": "SPY US Equity", "fields": ["NAME"], "data": {"NAME": "SPDR S&P 500 ETF TRUST"}},
{"security": "TLT US Equity", "fields": ["NAME"], "data": {"NAME": "ISHARES 20+ YEAR TREASURY BO"}},
]
parse_steps = None
return parse_steps, request, response, parsed
def parser_parse_bulk_reference():
request = {
"ReferenceDataRequest": {
"fields": ["FUT_CHAIN"],
"securities": ["C 1 Comdty"],
"overrides": [{"overrides": {"fieldId": "CHAIN_DATE", "value": "20100101"}}],
}
}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [5],
"messageType": "ReferenceDataResponse",
"timeReceived": None,
"element": {
"ReferenceDataResponse": [
{
"securityData": {
"security": "C 1 Comdty",
"eidData": [],
"fieldExceptions": [],
"sequenceNumber": 0,
"fieldData": {
"fieldData": {
"FUT_CHAIN": [
{"FUT_CHAIN": {"Security Description": "C H10 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C K10 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C N10 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C U10 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C Z10 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C H11 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C K11 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C N11 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C U11 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C Z11 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C H12 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C K12 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C N12 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C U12 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C Z12 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C N13 Comdty"}},
{"FUT_CHAIN": {"Security Description": "C Z13 Comdty"}},
]
}
},
}
}
]
},
},
}
]
parsed = [
{
"security": "C 1 Comdty",
"fields": ["FUT_CHAIN"],
"data": {
"FUT_CHAIN": [
{"Security Description": "C H10 Comdty"},
{"Security Description": "C K10 Comdty"},
{"Security Description": "C N10 Comdty"},
{"Security Description": "C U10 Comdty"},
{"Security Description": "C Z10 Comdty"},
{"Security Description": "C H11 Comdty"},
{"Security Description": "C K11 Comdty"},
{"Security Description": "C N11 Comdty"},
{"Security Description": "C U11 Comdty"},
{"Security Description": "C Z11 Comdty"},
{"Security Description": "C H12 Comdty"},
{"Security Description": "C K12 Comdty"},
{"Security Description": "C N12 Comdty"},
{"Security Description": "C U12 Comdty"},
{"Security Description": "C Z12 Comdty"},
{"Security Description": "C N13 Comdty"},
{"Security Description": "C Z13 Comdty"},
]
},
}
]
parse_steps = None
return parse_steps, request, response, parsed
def parser_parse_bad_field_reference_unprocessed():
request = {"ReferenceDataRequest": {"fields": ["not_a_field"], "securities": ["RSF82 Comdty"]}}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [5],
"messageType": "ReferenceDataResponse",
"timeReceived": None,
"element": {
"ReferenceDataResponse": [
{
"securityData": {
"security": "RSF82 Comdty",
"eidData": [],
"fieldExceptions": [
{
"fieldExceptions": {
"fieldId": "not_a_field",
"errorInfo": {
"errorInfo": {
"source": "180::bbdbl9",
"code": 9,
"category": "BAD_FLD",
"message": "Field not valid",
"subcategory": "INVALID_FIELD",
}
},
}
}
],
"sequenceNumber": 0,
"fieldData": {"fieldData": {}},
}
}
]
},
},
}
]
parsed = [{"security": "RSF82 Comdty", "fields": ["not_a_field"], "data": {}}]
parse_steps = []
return parse_steps, request, response, parsed
def parser_parse_empty_historical():
request = {
"HistoricalDataRequest": {
"fields": ["PX_VOLUME"],
"securities": ["DOESCRUD Index"],
"startDate": "20180102",
"endDate": "20180203",
}
}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [10],
"messageType": "HistoricalDataResponse",
"timeReceived": None,
"element": {
"HistoricalDataResponse": {
"securityData": {
"security": "DOESCRUD Index",
"eidData": [],
"sequenceNumber": 0,
"fieldExceptions": [],
"fieldData": [],
}
}
},
},
}
]
parsed = [{"security": "DOESCRUD Index", "fields": ["PX_VOLUME"], "data": []}]
parse_steps = None
return parse_steps, request, response, parsed
def parser_parse_empty_reference():
request = {"ReferenceDataRequest": {"fields": ["ID_EXCH_SYMBOL"], "securities": ["RSF82 Comdty"]}}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [11],
"messageType": "ReferenceDataResponse",
"timeReceived": None,
"element": {
"ReferenceDataResponse": [
{
"securityData": {
"security": "RSF82 Comdty",
"eidData": [],
"fieldExceptions": [
{
"fieldExceptions": {
"fieldId": "ID_EXCH_SYMBOL",
"errorInfo": {
"errorInfo": {
"source": "3589::bbdbd14",
"code": 9,
"category": "BAD_FLD",
"message": "Field not applicable to security",
"subcategory": "NOT_APPLICABLE_TO_REF_DATA",
}
},
}
}
],
"sequenceNumber": 0,
"fieldData": {"fieldData": {}},
}
}
]
},
},
}
]
parsed = [{"security": "RSF82 Comdty", "fields": ["ID_EXCH_SYMBOL"], "data": {"ID_EXCH_SYMBOL": None}}]
parse_steps = None
return parse_steps, request, response, parsed
def parser_parse_empty_reference_unprocessed():
request = {"ReferenceDataRequest": {"fields": ["ID_EXCH_SYMBOL"], "securities": ["RSF82 Comdty"]}}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [11],
"messageType": "ReferenceDataResponse",
"timeReceived": None,
"element": {
"ReferenceDataResponse": [
{
"securityData": {
"security": "RSF82 Comdty",
"eidData": [],
"fieldExceptions": [
{
"fieldExceptions": {
"fieldId": "ID_EXCH_SYMBOL",
"errorInfo": {
"errorInfo": {
"source": "3589::bbdbd14",
"code": 9,
"category": "BAD_FLD",
"message": "Field not applicable to security",
"subcategory": "NOT_APPLICABLE_TO_REF_DATA",
}
},
}
}
],
"sequenceNumber": 0,
"fieldData": {"fieldData": {}},
}
}
]
},
},
}
]
parsed = [{"security": "RSF82 Comdty", "fields": ["ID_EXCH_SYMBOL"], "data": {}}]
parse_steps = []
return parse_steps, request, response, parsed
def parser_parse_field_info_multi():
request = {"FieldInfoRequest": {"id": ["NAME", "VOLUME"]}}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [20],
"messageType": "fieldResponse",
"timeReceived": None,
"element": {
"fieldResponse": [
{
"fieldData": {
"id": "DS002",
"fieldInfo": {
"fieldInfo": {
"mnemonic": "NAME",
"description": "Name",
"datatype": "String",
"categoryName": [],
"property": [],
"overrides": [],
"ftype": "Character",
}
},
}
},
{
"fieldData": {
"id": "RQ013",
"fieldInfo": {
"fieldInfo": {
"mnemonic": "VOLUME",
"description": "Volume - Realtime",
"datatype": "Int64",
"categoryName": [],
"property": [],
"overrides": [],
"ftype": "Real",
}
},
}
},
]
},
},
}
]
parsed = [
{
"id": ["NAME", "VOLUME"],
"data": {
"DS002": {
"mnemonic": "NAME",
"description": "Name",
"datatype": "String",
"categoryName": [],
"property": [],
"overrides": [],
"ftype": "Character",
},
"RQ013": {
"mnemonic": "VOLUME",
"description": "Volume - Realtime",
"datatype": "Int64",
"categoryName": [],
"property": [],
"overrides": [],
"ftype": "Real",
},
},
}
]
parse_steps = []
return parse_steps, request, response, parsed
def parser_parse_field_list():
request = {"FieldListRequest": {"fieldType": "All"}}
response = [
{
"eventType": 6,
"eventTypeName": "blpapi.Event.PARTIAL_RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [20],
"messageType": "fieldResponse",
"timeReceived": None,
"element": {
"fieldResponse": [
{
"fieldData": {
"id": "DS002",
"fieldInfo": {
"fieldInfo": {
"mnemonic": "NAME",
"description": "Name",
"datatype": "String",
"categoryName": [],
"property": [],
"overrides": [],
"ftype": "Character",
}
},
}
},
{
"fieldData": {
"id": "RQ013",
"fieldInfo": {
"fieldInfo": {
"mnemonic": "VOLUME",
"description": "Volume - Realtime",
"datatype": "Int64",
"categoryName": [],
"property": [],
"overrides": [],
"ftype": "Real",
}
},
}
},
]
},
},
},
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [20],
"messageType": "fieldResponse",
"timeReceived": None,
"element": {
"fieldResponse": [
{
"fieldData": {
"id": "A0001",
"fieldInfo": {
"fieldInfo": {
"mnemonic": "ARD_REVENUES",
"description": "ARD Revenues",
"datatype": "Double",
"categoryName": [
"Fundamentals/Bloomberg Fundamentals/Standard Labels ARD/Income Statement/Revenues/Income/Gains/Losses on RE/Inv't" # noqa 501
],
"property": [],
"overrides": [],
"ftype": "Real",
}
},
}
}
]
},
},
},
]
parsed = [
{
"id": ["DS002", "RQ013"],
"data": {
"DS002": {
"mnemonic": "NAME",
"description": "Name",
"datatype": "String",
"categoryName": [],
"property": [],
"overrides": [],
"ftype": "Character",
},
"RQ013": {
"mnemonic": "VOLUME",
"description": "Volume - Realtime",
"datatype": "Int64",
"categoryName": [],
"property": [],
"overrides": [],
"ftype": "Real",
},
},
},
{
"id": ["A0001"],
"data": {
"A0001": {
"mnemonic": "ARD_REVENUES",
"description": "ARD Revenues",
"datatype": "Double",
"categoryName": [
"Fundamentals/Bloomberg Fundamentals/Standard Labels ARD/Income Statement/Revenues/Income/Gains/Losses on RE/Inv't" # noqa 501
],
"property": [],
"overrides": [],
"ftype": "Real",
},
},
},
]
parse_steps = []
return parse_steps, request, response, parsed
def parser_parse_intraday_bar():
request = {
"IntradayBarRequest": {
"eventType": "TRADE",
"security": "CL1 Comdty",
"interval": 1,
"startDateTime": "2019-04-24T08:00:00",
"endDateTime": "2019-04-24T08:02:00",
}
}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [7],
"messageType": "IntradayBarResponse",
"timeReceived": None,
"element": {
"IntradayBarResponse": {
"barData": {
"eidData": [],
"delayedSecurity": True,
"barTickData": [
{
"barTickData": {
"time": TS("2019-04-24 08:00:00"),
"open": 65.85,
"high": 65.89,
"low": 65.85,
"close": 65.86,
"volume": 565,
"numEvents": 209,
"value": 37215.16,
}
},
{
"barTickData": {
"time": TS("2019-04-24 08:01:00"),
"open": 65.87,
"high": 65.87,
"low": 65.83,
"close": 65.86,
"volume": 382,
"numEvents": 117,
"value": 25154.7,
}
},
],
}
}
},
},
}
]
parsed = [
{
"security": "CL1 Comdty",
"data": [
{
"time": TS("2019-04-24 08:00:00"),
"open": 65.85,
"high": 65.89,
"low": 65.85,
"close": 65.86,
"volume": 565,
"numEvents": 209,
"value": 37215.16,
},
{
"time": TS("2019-04-24 08:01:00"),
"open": 65.87,
"high": 65.87,
"low": 65.83,
"close": 65.86,
"volume": 382,
"numEvents": 117,
"value": 25154.7,
},
],
"events": ["TRADE"],
}
]
parse_steps = None
return parse_steps, request, response, parsed
def parser_parse_intraday_tick():
request = {
"IntradayTickRequest": {
"eventTypes": ["TRADE"],
"security": "CL1 Comdty",
"startDateTime": "2019-04-24T08:00:00",
"endDateTime": "2019-04-24T08:00:00",
}
}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [23],
"messageType": "IntradayTickResponse",
"timeReceived": None,
"element": {
"IntradayTickResponse": {
"tickData": {
"eidData": [],
"tickData": [
{
"tickData": {
"time": TS("2019-04-24 08:00:00"),
"type": "TRADE",
"value": 65.85,
"size": 4,
}
}, # noqa: E501
{
"tickData": {
"time": TS("2019-04-24 08:00:00"),
"type": "TRADE",
"value": 65.85,
"size": 2,
}
}, # noqa: E501
],
}
}
},
},
}
]
parsed = [
{
"security": "CL1 Comdty",
"data": [
{"time": TS("2019-04-24 08:00:00"), "type": "TRADE", "value": 65.85, "size": 4},
{"time": TS("2019-04-24 08:00:00"), "type": "TRADE", "value": 65.85, "size": 2},
],
"events": ["TRADE"],
}
]
parse_steps = None
return parse_steps, request, response, parsed
def parser_parse_intraday_tick_multi():
request = {
"IntradayTickRequest": {
"eventTypes": ["BID", "ASK"],
"security": "CL1 Comdty",
"startDateTime": "2019-04-24T08:00:00",
"endDateTime": "2019-04-24T08:00:01",
}
}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [23],
"messageType": "IntradayTickResponse",
"timeReceived": None,
"element": {
"IntradayTickResponse": {
"tickData": {
"eidData": [],
"tickData": [
{
"tickData": {
"time": TS("2019-04-24 08:00:00"),
"type": "BID",
"value": 65.85,
"size": 4,
}
},
{
"tickData": {
"time": TS("2019-04-24 08:00:00"),
"type": "BID",
"value": 65.85,
"size": 9,
}
},
{
"tickData": {
"time": TS("2019-04-24 08:00:00"),
"type": "ASK",
"value": 65.86,
"size": 50,
}
},
],
}
}
},
},
}
]
parsed = [
{
"security": "CL1 Comdty",
"data": [
{"time": TS("2019-04-24 08:00:00"), "type": "BID", "value": 65.85, "size": 4},
{"time": TS("2019-04-24 08:00:00"), "type": "BID", "value": 65.85, "size": 9},
{"time": TS("2019-04-24 08:00:00"), "type": "ASK", "value": 65.86, "size": 50},
],
"events": ["BID", "ASK"],
}
]
parse_steps = None
return parse_steps, request, response, parsed
def parser_parse_instrument_list():
request = {"instrumentListRequest": {"maxResults": 3}}
response = [
{
"eventType": 5,
"eventTypeName": "blpapi.Event.RESPONSE",
"messageNumber": 0,
"message": {
"fragmentType": 0,
"correlationIds": [7],
"messageType": "InstrumentListResponse",
"timeReceived": None,
"element": {
"InstrumentListResponse": {
"results": [
{"results": {"security": "SPX<index>", "description": "S&P 500 Index"}},
{
"results": {
"security": "USGG10YR<index>",
"description": "US Generic Govt 10 Year Yield",
}
},
{"results": {"security": "XAU<crncy>", "description": "Gold United States Dollar Spot"}},
]
}
},
},
}
]
parsed = [
{"security": "SPX<index>", "description": "S&P 500 Index"},
{"security": "USGG10YR<index>", "description": "US Generic Govt 10 Year Yield"},
{"security": "XAU<crncy>", "description": "Gold United States Dollar Spot"},
]
parse_steps = None
return parse_steps, request, response, parsed
def parser_data_params(mock=True):
funcs = [
parser_parse_historical,
parser_parse_reference,
parser_parse_bulk_reference,
parser_parse_bad_field_reference_unprocessed,
parser_parse_empty_historical,
parser_parse_empty_reference,
parser_parse_empty_reference_unprocessed,
parser_parse_field_info_multi,
]
# only test mocked versions of these since live data changes
if mock:
prefix = "mock_"
funcs.extend(
[
parser_parse_multi_historical,
parser_parse_intraday_bar,
parser_parse_intraday_tick,
parser_parse_intraday_tick_multi,
parser_parse_field_list,
parser_parse_instrument_list,
]
)
else:
prefix = ""
ids = []
argvalues = []
for f in funcs:
params = f()
if not mock:
params = params[0], params[1], params[3]
ids.append(prefix + f.__name__)
argvalues.append(params)
return {"ids": ids, "argvalues": argvalues}
@pytest.mark.parametrize("parse_steps, bbg_request, bbg_responses, exp_res", **parser_data_params(mock=True))
def test_parser_parse_mock_data_processed(parse_steps, bbg_request, bbg_responses, exp_res):
parser = blp.BlpParser(parse_steps)
res = list(itertools.chain.from_iterable((parser(r, bbg_request) for r in bbg_responses)))
assert res == exp_res
@pytest.mark.parametrize("parse_steps, bbg_request, exp_res", **parser_data_params(mock=False))
def test_parser_parse_real_data_processed(bquery, parse_steps, bbg_request, exp_res):
parser = blp.BlpParser(parse_steps)
res = bquery.query(bbg_request, parser, collector=list)
assert res == exp_res
def test_parser_parse_intraday_bar(bquery):
sd, ed = get_intraday_dates()
bar_request = {
"IntradayBarRequest": {
"eventType": "TRADE",
"security": "CL1 Comdty",
"interval": 1,
"startDateTime": sd,
"endDateTime": ed,
}
}
parser = blp.BlpParser()
res = bquery.query(bar_request, parser, collector=list)
assert_bar_parsed(res, "CL1 Comdty", "TRADE")
def test_parser_parse_intraday_tick(bquery):
sd, ed = get_intraday_dates()
tick_request = {
"IntradayTickRequest": {
"eventTypes": ["TRADE"],
"security": "CL1 Comdty",
"startDateTime": sd,
"endDateTime": ed,
}
}
parser = blp.BlpParser()
res = bquery.query(tick_request, parser, collector=list)
assert_tick_parsed(res, "CL1 Comdty", "TRADE")
def collector_params():
bulk_reference_parsed_data = [
{
"security": "C 1 Comdty",
"fields": ["FUT_CHAIN"],
"data": {"FUT_CHAIN": [{"Security Description": "C H10 Comdty"}]},
},
{
"security": "S 1 Comdty",
"fields": ["FUT_CHAIN"],
"data": {"FUT_CHAIN": [{"Security Description": "S F10 Comdty"}, {"Security Description": "S H10 Comdty"}]},
},
]
bulk_reference_collected_exp = {
"C 1 Comdty": {"FUT_CHAIN": pandas.DataFrame(["C H10 Comdty"], columns=["Security Description"])},
"S 1 Comdty": {
"FUT_CHAIN": pandas.DataFrame(["S F10 Comdty", "S H10 Comdty"], columns=["Security Description"])
},
}
bulk_reference_parsed_data_multi = [
{
"security": "BCOM Index",
"fields": ["INDX_MWEIGHT_HIST"],
"data": {
"INDX_MWEIGHT_HIST": [
{"Index Member": "BON9", "Percent Weight": 2.89},
{"Index Member": "C N9", "Percent Weight": 5.32},
]
},
}
]
bulk_reference_collected_multi_exp = {
"BCOM Index": {
"INDX_MWEIGHT_HIST": pandas.DataFrame({"Index Member": ["BON9", "C N9"], "Percent Weight": [2.89, 5.32]})
}
}
# test for https://github.com/matthewgilbert/blp/pull/8
bulk_reference_parsed_data_empty = [
{"security": "TRAD3 BZ Equity", "fields": ["DVD_HIST_ALL"], "data": {"DVD_HIST_ALL": None}}
]
bulk_reference_empty_collected_exp = {"TRAD3 BZ Equity": {}}
return {
"ids": [
"collect_many_to_bds_single_bulk",
"collect_many_to_bds_with_multi_bulk",
"collect_many_empty_to_bds_single_bulk",
],
"argvalues": [
(bulk_reference_parsed_data, blp.BlpQuery().collect_many_to_bds, bulk_reference_collected_exp),
(bulk_reference_parsed_data_multi, blp.BlpQuery().collect_many_to_bds, bulk_reference_collected_multi_exp),
(bulk_reference_parsed_data_empty, blp.BlpQuery().collect_many_to_bds, bulk_reference_empty_collected_exp),
],
}
@pytest.mark.parametrize("parsed_data, collector, exp_res", **collector_params())
def test_collectors(parsed_data, collector, exp_res):
    """Collectors should reproduce the expected security -> field -> DataFrame mapping."""
    collected = collector(parsed_data)
    assert collected.keys() == exp_res.keys()
    for security, fields in collected.items():
        assert fields.keys() == exp_res[security].keys()
        for field_name, frame in fields.items():
            assert_frame_equal(frame, exp_res[security][field_name])
def test_bdh(bquery):
    """Daily historical data should come back as a tidy date/security/field frame."""
    expected = pandas.DataFrame(
        [(TS(2018, 1, 2), "SPY US Equity", 86655749.0), (TS(2018, 1, 3), "SPY US Equity", 90070416.0)],
        columns=["date", "security", "PX_VOLUME"],
    )
    result = bquery.bdh(["SPY US Equity"], ["PX_VOLUME"], start_date="20180102", end_date="20180103")
    assert_frame_equal(result, expected)
@pytest.mark.bbg
def test_bdh_infer():
    """With an empty field_column_map, column dtypes should be inferred from the data."""
    bquery = blp.BlpQuery(HOST, PORT, timeout=QUERY_TIMEOUT, field_column_map={}).start()
    result = bquery.bdh(
        ["SPY US Equity"],
        ["PX_LAST_ACTUAL"],
        start_date="20180102",
        end_date="20180103",
    )
    expected = pandas.DataFrame(
        [(TS(2018, 1, 2), "SPY US Equity", 268.77), (TS(2018, 1, 3), "SPY US Equity", 270.47)],
        columns=["date", "security", "PX_LAST_ACTUAL"],
    )
    assert_frame_equal(result, expected)
@pytest.mark.bbg
def test_bdh_coerce_none():
    """A field_column_map entry should coerce a missing value to the mapped dtype."""
    bquery = blp.BlpQuery(
        HOST, PORT, timeout=QUERY_TIMEOUT, field_column_map={"PX_VOLUME": lambda x: pandas.Series(x, dtype="float64")}
    ).start()
    result = bquery.bdh(
        ["DOESCRUD Index"],
        ["PX_VOLUME", "PX_LAST"],
        start_date="20180105",
        end_date="20180105",
    )
    expected = pandas.DataFrame(
        [(TS(2018, 1, 5), "DOESCRUD Index", None, 419515.0)],
        columns=["date", "security", "PX_VOLUME", "PX_LAST"],
    ).astype({"PX_VOLUME": numpy.dtype("float64")})
    assert_frame_equal(result, expected)
def test_bdh_empty(bquery):
    """A query returning no rows should yield an empty, object-dtype frame."""
    result = bquery.bdh(["DOESCRUD Index"], ["PX_VOLUME"], start_date="20180105", end_date="20180105")
    expected = pandas.DataFrame({"date": [], "security": [], "PX_VOLUME": []}).astype("O")
    assert_frame_equal(result, expected)
def test_bdp(bquery):
    """Reference data should come back as one row per security."""
    expected = pandas.DataFrame([("SPY US Equity", "SPY")], columns=["security", "ID_EXCH_SYMBOL"])
    result = bquery.bdp(["SPY US Equity"], ["ID_EXCH_SYMBOL"])
    assert_frame_equal(result, expected)
@pytest.mark.bbg
def test_bdp_infer():
    """Reference fields absent from the default field_column_map should still be inferred."""
    bquery = blp.BlpQuery(HOST, PORT, timeout=QUERY_TIMEOUT).start()
    result = bquery.bdp(["SPY US Equity"], ["NAME"])
    expected = pandas.DataFrame([("SPY US Equity", "SPDR S&P 500 ETF TRUST")], columns=["security", "NAME"])
    assert_frame_equal(result, expected)
def test_bdp_not_applicable(bquery):
    """Securities reporting NOT_APPLICABLE_TO_REF_DATA should yield a None value."""
    result = bquery.bdp(["RSF82 Comdty", "PLG18 Comdty"], ["ID_EXCH_SYMBOL"])
    expected = pandas.DataFrame(
        [["RSF82 Comdty", None], ["PLG18 Comdty", "PL"]],
        columns=["security", "ID_EXCH_SYMBOL"],
    ).astype({"security": numpy.dtype("O"), "ID_EXCH_SYMBOL": numpy.dtype("O")})
    assert_frame_equal(result, expected)
def test_bdp_bulk(bquery):
    # Requesting a bulk reference field through the scalar bdp() endpoint
    # should be rejected with a TypeError.
    with pytest.raises(TypeError):
        bquery.bdp(["BCOM Index"], ["INDX_MWEIGHT"])
def test_bds_non_bulk(bquery):
    # Requesting a scalar (non-bulk) field through bds() should be rejected
    # with a TypeError.
    with pytest.raises(TypeError):
        bquery.bds("SPY US Equity", "PX_LAST")
def test_bdib(bquery):
    """Intraday bars should produce the expected OHLCV column dtypes."""
    start, end = get_intraday_dates()
    bars = bquery.bdib("CL1 Comdty", "TRADE", interval=1, start_datetime=start, end_datetime=end)
    expected_dtypes = pandas.Series(
        {
            "time": numpy.dtype("datetime64[ns]"),
            "open": numpy.dtype("float64"),
            "high": numpy.dtype("float64"),
            "low": numpy.dtype("float64"),
            "close": numpy.dtype("float64"),
            "volume": numpy.dtype("int64"),
            "numEvents": numpy.dtype("int64"),
            "value": numpy.dtype("float64"),
        }
    )
    assert_series_equal(bars.dtypes, expected_dtypes)
def bdit_params():
    """Parametrize data for ``test_bdit``: tick requests with and without condition codes."""
    base_dtypes = pandas.Series(
        {
            "time": numpy.dtype("datetime64[ns]"),
            "type": numpy.dtype("O"),
            "value": numpy.dtype("float64"),
            "size": numpy.dtype("int64"),
        }
    )
    # With includeConditionCodes the response grows an extra object column.
    cc_dtypes = pandas.Series(
        {
            "time": numpy.dtype("datetime64[ns]"),
            "type": numpy.dtype("O"),
            "value": numpy.dtype("float64"),
            "size": numpy.dtype("int64"),
            "conditionCodes": numpy.dtype("O"),
        }
    )
    return {
        "ids": ["bdit", "bdit_with_condition_codes"],
        "argvalues": [
            ("CL1 Comdty", ["TRADE"], None, base_dtypes),
            ("CL1 Comdty", ["TRADE"], [("includeConditionCodes", True)], cc_dtypes),
        ],
    }
@pytest.mark.parametrize("ticker, fields, options, exp_res", **bdit_params())
def test_bdit(bquery, ticker, fields, options, exp_res):
    # Tick data column dtypes should match the parametrized expectation,
    # including the extra conditionCodes column when requested via options.
    sd, ed = get_intraday_dates()
    df = bquery.bdit(ticker, fields, start_datetime=sd, end_datetime=ed, options=options)
    assert_series_equal(df.dtypes, exp_res)
def test_create_historical_query():
    """A bare string and a single-element list should build the same request dict."""
    exp_res = {
        "HistoricalDataRequest": {
            "securities": ["SPY US Equity"],
            "fields": ["PX_LAST"],
            "startDate": "20190101",
            "endDate": "20190110",
        }
    }
    assert blp.create_historical_query("SPY US Equity", "PX_LAST", "20190101", "20190110") == exp_res
    assert blp.create_historical_query(["SPY US Equity"], ["PX_LAST"], "20190101", "20190110") == exp_res
    # Options should be merged into the request as additional key/value pairs.
    exp_res = {
        "HistoricalDataRequest": {
            "securities": ["SPY US Equity"],
            "fields": ["PX_LAST"],
            "startDate": "20190101",
            "endDate": "20190110",
            "periodicitySelection": "DAILY",
        }
    }
    res = blp.create_historical_query(
        ["SPY US Equity"],
        ["PX_LAST"],
        "20190101",
        "20190110",
        options=[("periodicitySelection", "DAILY")],
    )
    assert res == exp_res
def test_create_reference_query():
    """A bare string and a single-element list should build the same request dict."""
    exp_res = {"ReferenceDataRequest": {"securities": ["SPY US Equity"], "fields": ["PX_LAST"]}}
    assert blp.create_reference_query("SPY US Equity", "PX_LAST") == exp_res
    assert blp.create_reference_query(["SPY US Equity"], ["PX_LAST"]) == exp_res
    # Overrides become a list of nested {"overrides": {"fieldId": ..., "value": ...}} records.
    exp_res = {
        "ReferenceDataRequest": {
            "securities": ["SPY US Equity"],
            "fields": ["PX_LAST"],
            "overrides": [{"overrides": {"fieldId": "TIME_ZONE_OVERRIDE", "value": 39}}],
        }
    }
    assert blp.create_reference_query(["SPY US Equity"], ["PX_LAST"], overrides=[("TIME_ZONE_OVERRIDE", 39)]) == exp_res
| 39.144461 | 4,213 | 0.416401 |
acf1cba079e6cf841ba9fa8e033a88bd15e624ad | 208 | py | Python | hrpro/hrpro/doctype/pm_competency/test_pm_competency.py | thispl/hrpro | 1d477893777d30e3cfc853bb10553c668bdfa46b | [
"MIT"
] | null | null | null | hrpro/hrpro/doctype/pm_competency/test_pm_competency.py | thispl/hrpro | 1d477893777d30e3cfc853bb10553c668bdfa46b | [
"MIT"
] | null | null | null | hrpro/hrpro/doctype/pm_competency/test_pm_competency.py | thispl/hrpro | 1d477893777d30e3cfc853bb10553c668bdfa46b | [
"MIT"
] | 1 | 2019-12-31T06:52:12.000Z | 2019-12-31T06:52:12.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018, VHRS and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestPM_Competency(unittest.TestCase):
    """Placeholder test case for the PM Competency doctype; no tests implemented yet."""
    pass
| 18.909091 | 43 | 0.769231 |
acf1ccea2ea13889f16afb87e0d693220e182a44 | 5,520 | py | Python | pushmanager/core/xmppclient.py | ymilki/pushmanager | 76943467476e5a725e9df9e3398bc39d24f409e1 | [
"Apache-2.0"
] | null | null | null | pushmanager/core/xmppclient.py | ymilki/pushmanager | 76943467476e5a725e9df9e3398bc39d24f409e1 | [
"Apache-2.0"
] | null | null | null | pushmanager/core/xmppclient.py | ymilki/pushmanager | 76943467476e5a725e9df9e3398bc39d24f409e1 | [
"Apache-2.0"
] | null | null | null | import logging
import time
from multiprocessing import JoinableQueue
from multiprocessing import Lock
from multiprocessing import Process
import xmpp
from pushmanager.core.settings import Settings
class XMPPQueue(object):
    """Process-based XMPP message sender (Python 2 syntax).

    Messages are pushed onto a multiprocessing JoinableQueue via
    enqueue_xmpp()/enqueue_user_xmpp() and drained by a daemon worker
    process (started with start_worker()) that keeps a jabber connection
    alive and retries failed sends up to MAX_RETRY_COUNT times.
    """
    # Maximum number of delivery attempts per (recipient, message) tuple.
    MAX_RETRY_COUNT = 3
    # msg tuple -> attempt count; guarded by retry_messages_lock.
    retry_messages = {}
    retry_messages_lock = Lock()
    message_queue = None
    worker_process = None
    @classmethod
    def start_worker(cls):
        """Start the queue-draining worker process; returns its pid in a list (idempotent)."""
        if cls.worker_process is not None:
            return []
        cls.message_queue = JoinableQueue()
        cls.worker_process = Process(target=cls.process_queue, name='xmpp-queue')
        cls.worker_process.daemon = True
        cls.worker_process.start()
        return [cls.worker_process.pid]
    @classmethod
    def _retry_message(cls, msg):
        # Record an attempt for msg; returns True while another retry is
        # allowed, False (and forgets the message) once MAX_RETRY_COUNT is hit.
        ret = True
        cls.retry_messages_lock.acquire(True)
        cls.retry_messages[msg] = cls.retry_messages.get(msg, 0) + 1
        if cls.retry_messages[msg] >= cls.MAX_RETRY_COUNT:
            del cls.retry_messages[msg]
            ret = False
        cls.retry_messages_lock.release()
        return ret
    @classmethod
    def _del_retry_message(cls, msg):
        # Drop the retry counter after a successful send (no-op if absent).
        cls.retry_messages_lock.acquire(True)
        if cls.retry_messages.has_key(msg):
            del cls.retry_messages[msg]
        cls.retry_messages_lock.release()
    @classmethod
    def _process_queue_item(cls, jabber_client):
        # Pop one (recipient, message) tuple and attempt delivery; IOError
        # triggers the retry path, any other failure is logged and dropped.
        msg = cls.message_queue.get(True) # Blocks until a message is queued
        recipient, message = msg
        # Apply alias mapping, if any exists
        recipient = Settings['aliases'].get(recipient, recipient)
        xmpp_message = xmpp.protocol.Message(recipient, message)
        try:
            jabber_client.send(xmpp_message)
            cls._del_retry_message(msg)
        except IOError, e:
            if cls._retry_message(msg):
                logging.warning("Couldn't send the message, will retry... %s" % repr(msg))
                cls.message_queue.put(msg)
            else:
                logging.error("Couldn't send the message %s" % repr(msg))
                logging.error(repr(e))
        except Exception, e:
            logging.error("Couldn't send the message %s" % repr(msg))
            logging.error(repr(e))
        finally:
            cls.message_queue.task_done()
    @classmethod
    def _xmpp_connect_and_auth(cls):
        # Connect and authenticate with the configured XMPP server;
        # returns the ready client, or None on connect/auth failure.
        # Open connection to XMPP server
        jabber_id = xmpp.protocol.JID(Settings['xmpp']['username'])
        logging.info("Connecting to XMPP server...")
        jabber_client = xmpp.Client(jabber_id.getDomain(), debug=[])
        connected = jabber_client.connect(server=(Settings['xmpp']['server'], 5222))
        if not connected:
            logging.error("Unable to connect to XMPP server!")
            return None
        logging.info("Connected to XMPP server - %s" % connected)
        authed = jabber_client.auth(jabber_id.getNode(), Settings['xmpp']['password'], resource=jabber_id.getResource())
        if not authed:
            logging.error("Unable to authenticate with XMPP server!")
            return None
        logging.info("Authenticated with XMPP server - %s" % authed)
        jabber_client.sendInitPresence()
        return jabber_client
    @classmethod
    def _xmpp_check_and_reconnect(cls, jabber_client):
        # Give the client a 1s slice to handle incoming stanzas, then
        # re-establish the session if the connection dropped.
        jabber_client.Process(1)
        if not jabber_client.isConnected():
            logging.warning("Client is disconnected from XMPP server, reconnecting...")
            jabber_client.reconnectAndReauth()
    @classmethod
    def process_queue(cls):
        """Worker-process main loop: connect, drain the queue forever, reconnect on errors."""
        while True:
            try:
                jabber_client = cls._xmpp_connect_and_auth()
                if not jabber_client:
                    return
                while True:
                    cls._process_queue_item(jabber_client)
                    cls._xmpp_check_and_reconnect(jabber_client)
            except Exception, e:
                logging.error("Error processing queue, retrying... %s" % e)
            finally:
                try:
                    jabber_client.disconnect()
                except IOError:
                    pass
            time.sleep(3)
    @classmethod
    def enqueue_xmpp(cls, recipients, message):
        """Queue *message* for each recipient (a string or an iterable of strings)."""
        if isinstance(recipients, (list,set,tuple)):
            # Flatten non-string iterables
            for recipient in recipients:
                cls.enqueue_xmpp(recipient, message)
        elif isinstance(recipients, (str,unicode)):
            if cls.message_queue is not None:
                cls.message_queue.put( (recipients, message) )
            else:
                logging.error("Could not enqueue XMPP message: XMPPQueue has not been initialized!")
        else:
            raise ValueError('Recipient(s) must be a string or iterable of strings')
    @classmethod
    def enqueue_user_xmpp(cls, recipients, *args, **kwargs):
        """Transforms a list of 'user' to 'user@default_domain', then invokes enqueue_xmpp."""
        domain = Settings['xmpp']['default_domain']
        if isinstance(recipients, (list,set,tuple)):
            recipients = ['%s@%s' % (recepient, domain) if '@' not in recepient else recepient for recepient in recipients]
        elif isinstance(recipients, (str,unicode)):
            recipients = '%s@%s' % (recipients, domain) if '@' not in recipients else recipients
        else:
            raise ValueError('Recipient(s) must be a string or iterable of strings')
        return cls.enqueue_xmpp(recipients, *args, **kwargs)
__all__ = ['XMPPQueue']
| 36.315789 | 123 | 0.620833 |
acf1cea3cb160e8f08b2efd1360ddc100de19d6c | 389 | py | Python | app.py | berndhader/cicd-buzz2 | c56e756e899597711b30e66dbacc8f97195f53bb | [
"Apache-2.0"
] | null | null | null | app.py | berndhader/cicd-buzz2 | c56e756e899597711b30e66dbacc8f97195f53bb | [
"Apache-2.0"
] | 3 | 2021-05-26T13:20:57.000Z | 2021-05-27T08:22:41.000Z | app.py | berndhader/cicd-buzz2 | c56e756e899597711b30e66dbacc8f97195f53bb | [
"Apache-2.0"
] | null | null | null | import os
import signal
from flask import Flask, render_template
from buzz import generator
app = Flask(__name__)
# Exit the whole process immediately on Ctrl-C instead of unwinding Flask.
signal.signal(signal.SIGINT, lambda s, f: os._exit(0))
@app.route("/")
def generate_buzz():
    """Serve the index page populated with a freshly generated buzz phrase."""
    return render_template('index.html', buzz=generator.generate_buzz())
if __name__ == "__main__":
    # Bind on all interfaces; honour the PORT env var (default 5000).
    app.run(host='0.0.0.0', port=int(os.getenv('PORT', 5000)))
| 22.882353 | 62 | 0.712082 |
acf1cecbb6ef1302cdbbccee991f0f4e3c3088b8 | 3,019 | py | Python | rmg_utils/resize_and_organize_images.py | Guthman/stylegan2-ada | 68b56bb7b362f9a4a5e19d8eae6fae88ff1d977a | [
"BSD-Source-Code"
] | null | null | null | rmg_utils/resize_and_organize_images.py | Guthman/stylegan2-ada | 68b56bb7b362f9a4a5e19d8eae6fae88ff1d977a | [
"BSD-Source-Code"
] | null | null | null | rmg_utils/resize_and_organize_images.py | Guthman/stylegan2-ada | 68b56bb7b362f9a4a5e19d8eae6fae88ff1d977a | [
"BSD-Source-Code"
] | null | null | null | import glob
import csv
from pathlib import Path
from PIL import Image
from tqdm.auto import tqdm
# Load metadata etc
# TSV mapping image file names to their labels.
filenames_and_labels = r'F:\temp\thesisdata\SAATCHI_DATASET_FULL.tsv'
delimiter = '\t'
# Source images and destination root for the resized, label-sorted copies.
image_input_folder = r'C:\Users\R\PycharmProjects\Thesis_Saatchi_scraper\micro_dataset1'
image_output_folder = r'F:\temp\thesisdata\saatchi_micro_resized512'
# Edge length (pixels) of the square output images.
size_ = 512
def resize_and_pad_image(input_path: str,
                         output_path: str,
                         desired_size: int):
    """Resize an image to fit a desired_size square, preserving aspect ratio.

    The image is scaled so its longest side equals ``desired_size``, centred
    on a black RGB canvas of that size, and saved under its original file
    name inside ``output_path``. Zero-byte input files are skipped.
    """
    input_path_ = Path(input_path)
    output_path_ = Path(output_path)
    assert input_path_.is_file()
    # Bug fix: the assertion message used to be `print(...)`, which returns
    # None — now the message string itself is attached to the assertion.
    assert output_path_.is_dir(), 'Supplied output path is not a directory:' + str(output_path_)
    if input_path_.stat().st_size == 0:
        print(f'Filesize is 0, skipping file: {input_path_}')
        return
    filename = input_path_.name
    # Close the file handle deterministically via the context manager.
    with Image.open(input_path) as img:
        old_size = img.size
        ratio = float(desired_size) / max(old_size)
        new_size = tuple(int(dim * ratio) for dim in old_size)
        # Image.LANCZOS is the same filter as the removed Image.ANTIALIAS alias.
        resized = img.resize(new_size, Image.LANCZOS)
        # create a new (black) image and paste the resized one centred on it
        canvas = Image.new('RGB', (desired_size, desired_size))
        canvas.paste(resized, ((desired_size - new_size[0]) // 2,
                               (desired_size - new_size[1]) // 2))
        canvas.save(output_path_ / filename)
# Create a dict with all filenames and associated labels
with open(filenames_and_labels, 'rt')as f:
    data = list(csv.reader(f, delimiter=delimiter))
file_dict = {}
for row in data:
    file_dict.update({row[0]: row[1]})
# Create list of sanitized labels to be used as folder names
# NOTE(review): '__' is collapsed twice to handle runs created by the earlier
# replacements; labels with longer underscore runs would still keep some.
label_folder_list = [s.replace(' ', '_')
                     .replace('&', '_')
                     .replace('/', '_')
                     .replace('__', '_')
                     .replace('__', '_')
                     .lower()
                     for s in set(file_dict.values())]
# Create dict for lookup up the correct folder name given a label
label_folder_lookup = {}
for s in set(file_dict.values()):
    label_folder_lookup.update({s: s.replace(' ', '_')
                               .replace('&', '_')
                               .replace('/', '_')
                               .replace('__', '_')
                               .replace('__', '_')
                               .lower()})
print(f'Lookup dict: {label_folder_lookup}')
# Create the folders
for folder in label_folder_list:
    Path(image_output_folder + '/' + folder).mkdir(parents=True, exist_ok=True)
print('Resizing and moving files...')
# Resize every input image into the folder named after its label; files
# without a label entry in the TSV are skipped with a message.
for file in tqdm(glob.glob(image_input_folder + '*/*')):
    try:
        image_output_folder_with_label = image_output_folder + '\\' + label_folder_lookup[file_dict[Path(file).name]]
        resize_and_pad_image(file, image_output_folder_with_label, size_)
    except KeyError:
        print(f'Label not found for file {file}, skipping!')
| 35.517647 | 117 | 0.611792 |
acf1cf4e07390e4edab8ff9af0a5470314cff8c2 | 843 | py | Python | compound_selection/zinc15_eMolecules_similarity_set/20170730_eMolecules_similarity_set_pKa_filter/extract_smiles_from_sdf.py | choderalab/sampl6-logD-compound-selection | b14d82e8d3dd1091baeeff62b3fe6f093c03e321 | [
"MIT"
] | 3 | 2020-07-13T23:48:50.000Z | 2022-02-28T23:29:39.000Z | compound_selection/zinc15_eMolecules_similarity_set/20170730_eMolecules_similarity_set_pKa_filter/extract_smiles_from_sdf.py | choderalab/sampl6-logD-compound-selection | b14d82e8d3dd1091baeeff62b3fe6f093c03e321 | [
"MIT"
] | 1 | 2018-02-16T16:24:59.000Z | 2018-02-16T17:09:09.000Z | compound_selection/zinc15_eMolecules_similarity_set/20170730_eMolecules_similarity_set_pKa_filter/extract_smiles_from_sdf.py | choderalab/sampl6-logD-compound-selection | b14d82e8d3dd1091baeeff62b3fe6f093c03e321 | [
"MIT"
] | 4 | 2019-06-28T14:11:22.000Z | 2022-01-06T13:22:16.000Z | ##### IMPORT STARTING SET OF MOLECULES AS SMILES #####
# Convert molecules in SDF file to canonical isomeric smiles
ifs = oemolistream()
ofs = oemolostream()
ifs.SetFormat(OEFormat_SDF) # MDL SD File
ofs.SetFormat(OEFormat_SMI) # Canonical Isomeric Smiles
input_file_name = "eMol_similarity_set_2017_07.sdf"
output_file_name = input_file_name.split(".")[0] + "_CanIsoSMILES.smi"
print("Output file: ", output_file_name)
i=0 # To count molecules
if ifs.open(input_file_name):
if ofs.open(output_file_name):
for mol in ifs.GetOEGraphMols():
OEWriteMolecule(ofs, mol)
# print("Molecule ",i)
i=i+1
else:
OEThrow.Fatal("Unable to create output file.")
else:
OEThrow.Fatal("Unable to open input file.")
print("Done creating canonical isomeric SMILES for molecules in SDF file.")
| 30.107143 | 75 | 0.70344 |
acf1cf69a4f5a1a2738958a29710c1a1ee928008 | 244 | py | Python | geoscript/util/data.py | dvntucker/geoscript-py | ea1b6ac584f3813b542204370399bb8f3209d214 | [
"MIT"
] | null | null | null | geoscript/util/data.py | dvntucker/geoscript-py | ea1b6ac584f3813b542204370399bb8f3209d214 | [
"MIT"
] | null | null | null | geoscript/util/data.py | dvntucker/geoscript-py | ea1b6ac584f3813b542204370399bb8f3209d214 | [
"MIT"
] | null | null | null | from org.geotools.data.collection import ListFeatureCollection
def readFeatures(it, type, chunk):
  """Drain up to *chunk* features from iterator *it* into a ListFeatureCollection.

  Stops early when the iterator is exhausted, so the returned collection may
  contain fewer than *chunk* features.
  """
  collected = ListFeatureCollection(type)
  count = 0
  while it.hasNext() and count < chunk:
    collected.add(it.next())
    count += 1
  return collected
| 24.4 | 62 | 0.713115 |
acf1d06753ac2c57f0b2db1f7210adab39885237 | 1,401 | py | Python | pyigm/metallicity/tests/test_metal_pdf.py | pyigm/pyigm | 8b4bc7f7f1c9f1c280720a4cc0693cd7cb79e9cb | [
"BSD-3-Clause"
] | 16 | 2016-02-12T19:03:38.000Z | 2021-08-11T15:16:25.000Z | pyigm/metallicity/tests/test_metal_pdf.py | pyigm/pyigm | 8b4bc7f7f1c9f1c280720a4cc0693cd7cb79e9cb | [
"BSD-3-Clause"
] | 204 | 2015-12-06T13:40:05.000Z | 2021-02-11T21:48:39.000Z | pyigm/metallicity/tests/test_metal_pdf.py | pyigm/pyigm | 8b4bc7f7f1c9f1c280720a4cc0693cd7cb79e9cb | [
"BSD-3-Clause"
] | 16 | 2015-12-06T23:27:49.000Z | 2021-11-01T10:08:24.000Z | # Module to run tests on FNModel
# TEST_UNICODE_LITERALS
import numpy as np
import os, pdb
from astropy import units as u
from ..pdf import MetallicityPDF
#def data_path(filename):
# data_dir = os.path.join(os.path.dirname(__file__), 'files')
# return os.path.join(data_dir, filename)
def test_init_pdf():
    """A MetallicityPDF should construct from a metallicity grid and matching PDF."""
    grid = np.linspace(-5, 0., 25)
    weights = np.exp(-(grid + 1.5) ** 2 / 0.2 ** 2)
    MetallicityPDF(grid, weights)
def test_stats():
    """Mean and median metallicity should match the reference values."""
    grid = np.linspace(-5, 0., 25)
    weights = np.exp(-(grid + 1.5) ** 2 / 0.2 ** 2)
    mpdf = MetallicityPDF(grid, weights)
    np.testing.assert_allclose(mpdf.meanZH, -1.4998713559597918)
    np.testing.assert_allclose(mpdf.medianZH, -1.5967047485449448)
def test_cl():
    """The 68% confidence interval should match the reference bounds."""
    grid = np.linspace(-5, 0., 25)
    weights = np.exp(-(grid + 1.5) ** 2 / 0.2 ** 2)
    mpdf = MetallicityPDF(grid, weights)
    lower_upper = mpdf.confidence_limits(0.68)
    np.testing.assert_allclose(lower_upper, (-1.7738954485146285, -1.4708227781158598))
def test_add():
    """Adding two PDFs and normalizing should give the expected combined mean."""
    grid = np.linspace(-5, 0., 25)
    first = MetallicityPDF(grid, np.exp(-(grid + 1.5) ** 2 / 0.2 ** 2))
    second = MetallicityPDF(grid, np.exp(-(grid + 1.7) ** 2 / 0.3 ** 2))
    combined = first + second
    combined.normalize()
    np.testing.assert_allclose(combined.meanZH, -1.5999356764972328)
acf1d121a9c16ee3a20fe3e57db38adc8463a807 | 11,082 | py | Python | turdshovel/core/parsing.py | daddycocoaman/turdshovel | 6f9d9b08734028fa819c590e8573ae49481dc769 | [
"MIT"
] | 39 | 2021-10-30T06:34:21.000Z | 2022-03-22T09:04:40.000Z | turdshovel/core/parsing.py | daddycocoaman/turdshovel | 6f9d9b08734028fa819c590e8573ae49481dc769 | [
"MIT"
] | null | null | null | turdshovel/core/parsing.py | daddycocoaman/turdshovel | 6f9d9b08734028fa819c590e8573ae49481dc769 | [
"MIT"
] | 3 | 2021-10-30T03:56:16.000Z | 2021-11-08T01:59:32.000Z | from functools import lru_cache
from typing import Dict
import System
from Microsoft.Diagnostics.Runtime import ClrElementType
from rich import inspect, print
def _remove_backing_field_string(name):
return name[1 : name.index(">")] if "k__backingfield" in name.lower() else name
def _convert_basic_fields(obj):
# Had to convert the type to string here for matching. Wild.
element_type = str(obj.get_Type())
if element_type in ["System.String", "System.Char"]:
return obj.AsString()
elif element_type == "System.Boolean":
return bool(obj)
elif element_type in [
"System.Byte",
"System.Int16",
"System.Int32",
"System.Int64",
"System.UInt16",
"System.UInt32",
"System.UInt64",
]:
return int(obj)
elif element_type in ["System.Decimal", "System.Double"]:
return float(obj)
elif element_type == "System.IntPtr":
return hex(obj.ToInt64())
elif element_type == "System.UIntPtr":
return hex(obj.ToUInt64())
else:
return None
def _check_basic_fields(obj, field, element_type=None):
    """Read a primitive-valued field of *obj*, dispatching on its CLR element type.

    Parameters
    ----------
    obj : ClrObject whose field is read
    field : field metadata exposing ``Name`` and ``get_ElementType()``
    element_type : optional pre-resolved ClrElementType; looked up from *field*
        when not supplied

    Returns
    -------
    The field value converted to a Python type (pointers are rendered as hex
    strings), or None when the element type is not a recognized primitive.
    """
    # Check to see if element_type was passed or if we can get from field object
    if not element_type:
        element_type = field.get_ElementType()
    field_data = None
    if element_type in [ClrElementType.String]:
        field_data = obj.ReadStringField(field.Name)
    elif element_type in [ClrElementType.Char]:
        field_data = obj.ReadField[System.Char](field.Name)
    elif element_type == ClrElementType.Boolean:
        field_data = obj.ReadField[bool](field.Name)
    elif element_type == ClrElementType.Int8:
        # Bug fix: .NET has no System.Int8 — the signed 8-bit type is System.SByte.
        field_data = obj.ReadField[System.SByte](field.Name)
    elif element_type == ClrElementType.Int16:
        field_data = obj.ReadField[System.Int16](field.Name)
    elif element_type == ClrElementType.Int32:
        field_data = obj.ReadField[System.Int32](field.Name)
    elif element_type == ClrElementType.Int64:
        field_data = obj.ReadField[System.Int64](field.Name)
    elif element_type == ClrElementType.UInt8:
        field_data = obj.ReadField[System.Byte](field.Name)
    elif element_type == ClrElementType.UInt16:
        field_data = obj.ReadField[System.UInt16](field.Name)
    elif element_type == ClrElementType.UInt32:
        field_data = obj.ReadField[System.UInt32](field.Name)
    elif element_type == ClrElementType.UInt64:
        field_data = obj.ReadField[System.UInt64](field.Name)
    elif element_type == ClrElementType.Float:
        field_data = obj.ReadField[System.Single](field.Name)
    elif element_type == ClrElementType.Double:
        field_data = obj.ReadField[System.Double](field.Name)
    elif element_type == ClrElementType.Pointer:
        ptr = obj.ReadField[System.IntPtr](field.Name)
        field_data = hex(ptr.ToInt64())
    return field_data
def _iter_field(runtime, obj, field, visited_objects, is_dict=False):
    """Recursively convert a CLR field of *obj* into plain Python data.

    ``visited_objects`` maps "<address>_<field>" keys to already-computed
    results and doubles as a cycle breaker: while a field is being processed
    its entry holds the placeholder "<!>", so a reference cycle yields that
    marker instead of recursing forever.

    When ``is_dict`` is True, *field* is the ``entries`` array of a
    ``System.Collections.Generic.Dictionary`` and is unpacked into a list of
    {"key": ..., "value": ...} records.
    """
    # Array = 20
    # Boolean = 2
    # Char = 3
    # Class = 18
    # Double = 13
    # Float = 12
    # FunctionPointer = 27
    # GenericInstantiation = 21
    # Int16 = 6
    # Int32 = 8
    # Int64 = 10
    # Int8 = 4
    # MVar = 30
    # NativeInt = 24
    # NativeUInt = 25
    # Object = 28
    # Pointer = 15
    # String = 14
    # Struct = 17
    # SZArray = 29
    # UInt16 = 7
    # UInt32 = 9
    # UInt64 = 11
    # UInt8 = 5
    # Unknown = 0
    # Var = 19
    # Void = 1
    # Check to see if we've visited before in this iteration
    # If we have, return the data
    if visited_data := visited_objects.get(f"{obj.Address}_{field}", None):
        return visited_data
    # If we haven't seen this, set the data to obj.Address.
    # Objects that cause recursion will print out an address instead
    visited_objects[f"{obj.Address}_{field}"] = f"<!>"
    if is_dict:
        field_data = []
        # We have to loop this way because ClrArray object is not iterable in Python
        for idx in range(field.Length):
            entry = field.GetStructValue(idx)
            key_data = None
            value_data = None
            ### HANDLE DICT KEY
            # NOTE(review): the bare except presumably means ReadObjectField
            # raises when "key" is stored as an inline value type — the
            # fallback then reads it as a value type instead.
            try:
                key_obj = entry.ReadObjectField("key")
                key_data = _convert_basic_fields(key_obj)
            except:
                key_obj = entry.ReadValueTypeField("key")
            # Non-primitive keys are expanded field-by-field into a dict.
            if not key_data:
                key_data = {}
                if key_type := key_obj.get_Type():
                    for sub_field in key_type.Fields:
                        sub_field_name = _remove_backing_field_string(sub_field.Name)
                        key_data[sub_field_name] = _iter_field(
                            runtime, key_obj, sub_field, visited_objects
                        )
            ### HANDLE DICT VALUE
            try:
                value_obj = entry.ReadObjectField("value")
                value_data = _convert_basic_fields(value_obj)
            except:
                value_obj = entry.ReadValueTypeField("value")
            if not value_data:
                value_data = {}
                if type_ := value_obj.get_Type():
                    for sub_field in type_.Fields:
                        sub_field_name = _remove_backing_field_string(sub_field.Name)
                        value_data[sub_field_name] = _iter_field(
                            runtime, value_obj, sub_field, visited_objects
                        )
            field_data.append(
                {
                    "key": key_data,
                    "value": value_data,
                }
            )
    else:
        # Normal parsing
        element_type = field.get_ElementType()
        field_data = _check_basic_fields(obj, field)
        # If it's not basic, be complex
        if field_data is None:
            if element_type in [ClrElementType.Class, ClrElementType.Object]:
                field_data = {}
                sub_obj = runtime.Heap.GetObject(
                    obj.ReadObjectField(field.Name).Address
                )
                if sub_obj.IsNull:
                    field_data = None
                elif sub_obj.IsValid and not sub_obj.IsFree:
                    if sub_obj.Type:
                        # If a Dictionary, we don't want ALL the fields. Let the _iter_field handle this.
                        if sub_obj.Type.Name.startswith(
                            "System.Collections.Generic.Dictionary"
                        ):
                            # The backing array is named "entries" or "_entries"
                            # depending on the runtime version.
                            try:
                                entries = sub_obj.ReadObjectField("entries")
                            except:
                                try:
                                    entries = sub_obj.ReadObjectField("_entries")
                                except:
                                    # Weird case where sometimes it's referred to as _dictionary and _entries is a level below
                                    # inspect(sub_obj)
                                    # entries = sub_obj.ReadObjectField(
                                    #     "_dictionary"
                                    # ).ReadObjectField("_entries")
                                    entries = None
                                    field_data = {}
                            if entries:
                                if entries.IsNull:
                                    field_data = {}
                                else:
                                    field_data = _iter_field(
                                        runtime,
                                        sub_obj,
                                        entries.AsArray(),
                                        visited_objects,
                                        is_dict=True,
                                    )
                        else:
                            for sub_field in sub_obj.Type.Fields:
                                sub_field_name = _remove_backing_field_string(
                                    sub_field.Name
                                )
                                field_data[sub_field_name] = _iter_field(
                                    runtime, sub_obj, sub_field, visited_objects
                                )
                    # If we get here, try to parse as a basic type. If not, then we need additional logic for element type
                    else:
                        field_data = _convert_basic_fields(sub_obj)
                        if not field_data:
                            inspect(ClrElementType)
                            inspect(sub_obj, all=True)
            elif element_type == ClrElementType.SZArray:
                # Single-dimension arrays: convert each element in order.
                field_data = []
                sub_obj = obj.ReadObjectField(field.Name)
                if sub_obj.IsNull:
                    field_data = None
                elif sub_obj.IsValid and not sub_obj.IsFree:
                    if sub_obj.Type:
                        for sub_field in sub_obj.Type.Fields:
                            field_data.append(
                                _iter_field(
                                    runtime, sub_obj, sub_field, visited_objects
                                )
                            )
                    else:
                        field_data = None
                else:
                    field_data = None
            elif field.IsValueType:
                # Structs: expand their fields into a dict.
                field_data = {}
                value_obj = obj.ReadValueTypeField(field.Name)
                for sub_field in value_obj.get_Type().Fields:
                    sub_field_name = _remove_backing_field_string(sub_field.Name)
                    field_data[sub_field_name] = _iter_field(
                        runtime, value_obj, sub_field, visited_objects
                    )
    # Replace the "<!>" placeholder with the final result for reuse.
    visited_objects[f"{obj.Address}_{field}"] = field_data
    return field_data
@lru_cache
def parse_obj(runtime, obj, console) -> Dict:
    """Parse a CLR heap object into plain Python data.

    Primitive objects are converted directly; otherwise each field of the
    object's type is walked recursively via ``_iter_field``. Results are
    memoized by ``lru_cache``, so the arguments must be hashable.

    Returns the converted primitive value or a dict of field name -> value;
    returns None when parsing fails (errors are reported on *console*).
    """
    output = {}
    # Helps with recursion.
    visited_objects = {}
    # Bug fix: keep `field` bound so the RecursionError handler below cannot
    # itself raise NameError when the failure happens before/outside the loop.
    field = None
    try:
        field_data = _convert_basic_fields(obj)
        if not field_data:
            for field in obj.Type.Fields:
                field_name = _remove_backing_field_string(field.Name)
                output[field_name] = _iter_field(runtime, obj, field, visited_objects)
        return field_data or output
    except RecursionError:
        console.print_exception(show_locals=True)
        console.print(
            f"Recursion error happened in field {getattr(field, 'Name', '<unknown>')}. "
            "Logic needs to be fixed to handle this type"
        )
    except Exception:
        console.print_exception()
acf1d1a673de31b2f3f5467c891de49e34099a9a | 17,846 | py | Python | mvregfus/imaris.py | max-brambach/MVRegFus | 31d7d2c3934b4ea596f29197de009ee8d3dfeb0f | [
"BSD-3-Clause"
] | 1 | 2021-05-11T10:20:38.000Z | 2021-05-11T10:20:38.000Z | mvregfus/imaris.py | max-brambach/MVRegFus | 31d7d2c3934b4ea596f29197de009ee8d3dfeb0f | [
"BSD-3-Clause"
] | null | null | null | mvregfus/imaris.py | max-brambach/MVRegFus | 31d7d2c3934b4ea596f29197de009ee8d3dfeb0f | [
"BSD-3-Clause"
] | null | null | null | """
file taken and modified from https://github.com/tlambert03/imarispy/blob/master/imarispy
(added functions from utils.py for easy single file import)
"""
# from .util import h5str, make_thumbnail, subsample_data
import logging
import os
import re
import h5py
import numpy as np
logger = logging.getLogger(__name__)
def np_to_ims(array, fname='myfile.ims',
              subsamp=((1, 1, 1), (2, 2, 2), (4, 4, 4), (8, 8, 8)),
              chunks=((16, 128, 128), (64, 64, 64), (32, 32, 32), (16, 16, 16)),
              compression='gzip',
              thumbsize=256,
              dx=1, dz=1,
              overwrite=False,
              origin=None,
              ):
    """Write a numpy array to an Imaris .ims (HDF5) file.

    modified by malbert:
    - include nonzero origin

    Parameters
    ----------
    array : numpy array of up to 5 dimensions; reshaped to (t, c, z, y, x)
        by prepending singleton axes
    fname : output file name; '.ims' is appended when missing
    subsamp : per-resolution-level (z, y, x) subsampling factors
    chunks : per-resolution-level HDF5 chunk shapes (same length as subsamp)
    compression : HDF5 compression filter (None, 'gzip', 'lzf' or 'szip')
    thumbsize : edge length of the embedded thumbnail
    dx, dz : lateral and axial pixel sizes used for the image extents
    overwrite : remove an existing file of the same name first
    origin : (x, y, z) world origin of the volume; defaults to (0, 0, 0)

    Returns the (possibly extended) output file name.
    """
    # Avoid a mutable default argument; None stands in for the zero origin.
    if origin is None:
        origin = [0., 0., 0.]
    assert len(subsamp) == len(chunks)
    assert all([len(i) == 3 for i in subsamp]), 'Only deal with 3D chunks'
    assert all([len(i) == len(x) for i, x in zip(subsamp, chunks)])
    assert compression in (None, 'gzip', 'lzf', 'szip'), 'Unknown compression type'
    if not fname.endswith('.ims'):
        fname = fname + '.ims'
    if overwrite:
        if os.path.exists(fname):
            os.remove(fname)
    # force 5D
    if not array.ndim == 5:
        array = array.reshape(tuple([1] * (5 - array.ndim)) + array.shape)
    nt, nc, nz, ny, nx = array.shape
    nr = len(subsamp)
    GROUPS = [
        'DataSetInfo',
        'Thumbnail',
        'DataSetTimes',
        'DataSetInfo/Imaris',
        'DataSetInfo/Image',
        'DataSetInfo/TimeInfo'
    ]
    # (group, (attribute, value)) pairs written as HDF5 attributes below.
    ATTRS = [
        ('/', ('ImarisDataSet', 'ImarisDataSet')),
        ('/', ('ImarisVersion', '5.5.0')),
        ('/', ('DataSetInfoDirectoryName', 'DataSetInfo')),
        ('/', ('ThumbnailDirectoryName', 'Thumbnail')),
        ('/', ('DataSetDirectoryName', 'DataSet')),
        ('DataSetInfo/Imaris', ('Version', '8.0')),
        ('DataSetInfo/Imaris', ('ThumbnailMode', 'thumbnailMIP')),
        ('DataSetInfo/Imaris', ('ThumbnailSize', thumbsize)),
        ('DataSetInfo/Image', ('X', nx)),
        ('DataSetInfo/Image', ('Y', ny)),
        ('DataSetInfo/Image', ('Z', nz)),
        ('DataSetInfo/Image', ('NumberOfChannels', nc)),
        ('DataSetInfo/Image', ('Noc', nc)),
        ('DataSetInfo/Image', ('Unit', 'um')),
        ('DataSetInfo/Image', ('Description', 'description not specified')),
        ('DataSetInfo/Image', ('MicroscopeModality', '',)),
        ('DataSetInfo/Image', ('RecordingDate', '2018-05-24 20:36:07.000')),
        ('DataSetInfo/Image', ('Name', 'name not specified')),
        # World extents: origin plus the pixel-size-scaled image dimensions.
        ('DataSetInfo/Image', ('ExtMin0', origin[0])),
        ('DataSetInfo/Image', ('ExtMin1', origin[1])),
        ('DataSetInfo/Image', ('ExtMin2', origin[2])),
        ('DataSetInfo/Image', ('ExtMax0', origin[0] + nx * dx)),
        ('DataSetInfo/Image', ('ExtMax1', origin[1] + ny * dx)),
        ('DataSetInfo/Image', ('ExtMax2', origin[2] + nz * dz)),
        ('DataSetInfo/Image', ('LensPower', '63x')),
        ('DataSetInfo/TimeInfo', ('DatasetTimePoints', nt)),
        ('DataSetInfo/TimeInfo', ('FileTimePoints', nt)),
    ]
    COLORS = ('0 1 0', '1 0 1', '1 1 0', '0 0 1')
    for c in range(nc):
        grp = 'DataSetInfo/Channel %s' % c
        GROUPS.append(grp)
        ATTRS.append((grp, ('ColorOpacity', 1)))
        ATTRS.append((grp, ('ColorMode', 'BaseColor')))
        ATTRS.append((grp, ('Color', COLORS[c % len(COLORS)])))
        ATTRS.append((grp, ('GammaCorrection', 1)))
        ATTRS.append((grp, ('ColorRange', '0 255')))
        ATTRS.append((grp, ('Name', 'Channel %s' % c)))
        # ATTRS.append(grp, ('LSMEmissionWavelength', 0))
        # ATTRS.append(grp, ('LSMExcitationWavelength', ''))
        # ATTRS.append(grp, ('Description', '(description not specified)'))
    # TODO: create accurate timestamps
    for t in range(nt):
        m, s = divmod(t, 60)
        h, m = divmod(m, 60)
        strr = '2018-05-24 {:02d}:{:02d}:{:02d}.000'.format(h, m, s)
        ATTRS.append(('DataSetInfo/TimeInfo', ('TimePoint{}'.format(t + 1), strr)))
    with h5py.File(fname, 'a') as hf:
        for grp in GROUPS:
            hf.create_group(grp)
        for grp, (key, value) in ATTRS:
            hf[grp].attrs.create(key, h5str(value))
        try:
            thumb = make_thumbnail(array[0], thumbsize)
            hf.create_dataset('Thumbnail/Data', data=thumb, dtype='u1')
        except Exception:
            # logger.warn is deprecated; use the canonical warning() method.
            logger.warning('Failed to generate Imaris thumbnail')
        # add data
        fmt = '/DataSet/ResolutionLevel {r}/TimePoint {t}/Channel {c}/'
        for t in range(nt):
            for c in range(nc):
                data = np.squeeze(array[t, c])
                for r in range(nr):
                    if any([i > 1 for i in subsamp[r]]):
                        data = subsample_data(data, subsamp[r])
                    hist, edges = np.histogram(data, 256)
                    grp = hf.create_group(fmt.format(r=r, t=t, c=c))
                    print("Writing: %s" % grp)
                    grp.create_dataset('Histogram', data=hist.astype(np.uint64))
                    grp.attrs.create('HistogramMin', h5str(edges[0]))
                    grp.attrs.create('HistogramMax', h5str(edges[-1]))
                    grp.create_dataset('Data', data=data,
                                       chunks=tuple(min(*n) for n in zip(chunks[r], data.shape)),
                                       compression=compression)
                    grp.attrs.create('ImageSizeX', h5str(data.shape[2]))
                    grp.attrs.create('ImageSizeY', h5str(data.shape[1]))
                    grp.attrs.create('ImageSizeZ', h5str(data.shape[0]))
    return fname
def im_to_ims(filepattern, channels, tps, fname='myfile.ims', overwrite=True, copy_or_link='link'):
    """
    Build a master Imaris (.ims) file from per-timepoint / per-channel files.

    - take imaris files of individual timepoints and channels and create a
      master file which links to (or copies the data in) the individual files
    - don't recalculate any thumbnails or histograms
    - function added by malbert
    - add rotation attribute

    PROBLEM:
    Fiji's hdf5 cannot load external links
    (https://forum.image.sc/t/does-hdf5-vibez-support-external-links-in-hdf5-files/10318)
    Imaris however should be fine with it

    filepattern example: 'mv_000_%(t)03d_c%(c)02d.ims'

    :param filepattern: printf-style pattern with 't' and 'c' keys locating the
        single-timepoint / single-channel source files
    :param channels: sequence of channel indices to include
    :param tps: sequence of timepoint indices to include
    :param fname: output file name; '.ims' is appended if missing
    :param overwrite: remove an existing output file first
    :param copy_or_link: 'link' to reference source data via HDF5 external
        links, 'copy' to duplicate the data into the master file
    :return: the (possibly adjusted) output file name
    """
    if not fname.endswith('.ims'):
        fname = fname + '.ims'
    if overwrite and os.path.exists(fname):
        os.remove(fname)

    # Read geometry metadata from the first source file; close it immediately
    # (the original left this handle open for the whole function).
    reffilepath = filepattern % {'t': tps[0], 'c': channels[0]}
    with h5py.File(reffilepath, mode='r') as reffile:
        nr = len(reffile['/DataSet'].keys())
        nz, ny, nx = reffile['DataSet/ResolutionLevel 0/TimePoint 0/Channel 0/Data'].shape
    nt = len(tps)
    nc = len(channels)

    # TODO: read thumbnail size and pixel spacing from the reference file
    # instead of hard-coding them (the parsing attempts were never reliable).
    thumbsize = 256
    dx = 1
    dz = 1

    GROUPS = [
        'DataSetInfo',
        'Thumbnail',
        'DataSetTimes',
        'DataSetInfo/Imaris',
        'DataSetInfo/Image',
        'DataSetInfo/TimeInfo'
    ]

    ATTRS = [
        ('/', ('ImarisDataSet', 'ImarisDataSet')),
        ('/', ('ImarisVersion', '5.5.0')),
        ('/', ('DataSetInfoDirectoryName', 'DataSetInfo')),
        ('/', ('ThumbnailDirectoryName', 'Thumbnail')),
        ('/', ('DataSetDirectoryName', 'DataSet')),
        ('DataSetInfo/Imaris', ('Version', '8.0')),
        ('DataSetInfo/Imaris', ('ThumbnailMode', 'thumbnailMIP')),
        ('DataSetInfo/Imaris', ('ThumbnailSize', thumbsize)),
        ('DataSetInfo/Image', ('X', nx)),
        ('DataSetInfo/Image', ('Y', ny)),
        ('DataSetInfo/Image', ('Z', nz)),
        ('DataSetInfo/Image', ('NumberOfChannels', nc)),
        ('DataSetInfo/Image', ('Noc', nc)),
        ('DataSetInfo/Image', ('Unit', 'um')),
        ('DataSetInfo/Image', ('Description', 'description not specified')),
        ('DataSetInfo/Image', ('MicroscopeModality', '',)),
        ('DataSetInfo/Image', ('RecordingDate', '2018-05-24 20:36:07.000')),
        ('DataSetInfo/Image', ('Name', 'name not specified')),
        ('DataSetInfo/Image', ('ExtMin0', '0')),
        ('DataSetInfo/Image', ('ExtMin1', '0')),
        ('DataSetInfo/Image', ('ExtMin2', '0')),
        ('DataSetInfo/Image', ('ExtMax0', nx * dx)),
        ('DataSetInfo/Image', ('ExtMax1', ny * dx)),
        ('DataSetInfo/Image', ('ExtMax2', nz * dz)),
        ('DataSetInfo/Image', ('LensPower', '63x')),
        ('DataSetInfo/TimeInfo', ('DatasetTimePoints', nt)),
        ('DataSetInfo/TimeInfo', ('FileTimePoints', nt)),
    ]

    # One metadata group per channel, colors cycling through a fixed palette.
    COLORS = ('0 1 0', '1 0 1', '1 1 0', '0 0 1')
    for c in range(nc):
        grp = 'DataSetInfo/Channel %s' % c
        GROUPS.append(grp)
        ATTRS.append((grp, ('ColorOpacity', 1)))
        ATTRS.append((grp, ('ColorMode', 'BaseColor')))
        ATTRS.append((grp, ('Color', COLORS[c % len(COLORS)])))
        ATTRS.append((grp, ('GammaCorrection', 1)))
        ATTRS.append((grp, ('ColorRange', '0 255')))
        ATTRS.append((grp, ('Name', 'Channel %s' % c)))

    # TODO: create accurate timestamps; for now timepoints are one second apart.
    for t in range(nt):
        m, s = divmod(t, 60)
        h, m = divmod(m, 60)
        strr = '2018-05-24 {:02d}:{:02d}:{:02d}.000'.format(h, m, s)
        ATTRS.append(('DataSetInfo/TimeInfo', ('TimePoint{}'.format(t + 1), strr)))

    with h5py.File(fname, 'a') as hf:
        for grp in GROUPS:
            hf.create_group(grp)

        for grp, (key, value) in ATTRS:
            hf[grp].attrs.create(key, h5str(value))

        # Create one group per (resolution, timepoint, channel) and either link
        # or copy the corresponding dataset from the single-stack source file
        # (each source file holds its data at TimePoint 0 / Channel 0).
        fmt = '/DataSet/ResolutionLevel {r}/TimePoint {t}/Channel {c}'
        for t in range(nt):
            for c in range(nc):
                for r in range(nr):
                    grppath = fmt.format(r=r, t=t, c=c)
                    # require_group creates parents as needed and is a no-op if
                    # the group exists (replaces the original bare try/except).
                    hf.require_group(os.path.dirname(grppath))
                    # Index through tps/channels so non-contiguous selections
                    # work (the original used the loop counters directly).
                    filepath = filepattern % {'t': tps[t], 'c': channels[c]}
                    if copy_or_link == 'link':
                        print("Linking: %s" % grppath)
                        hf[grppath] = h5py.ExternalLink(filepath, fmt.format(r=r, t=0, c=0))
                    elif copy_or_link == 'copy':
                        print("Copying: %s" % grppath)
                        with h5py.File(filepath, mode='r') as srcfile:
                            srcfile.copy(fmt.format(r=r, t=0, c=0), hf, grppath)
                    else:
                        raise ValueError(
                            "copy_or_link must be 'copy' or 'link', got %r" % (copy_or_link,))

    return fname
def unmap_bdv_from_imaris(hf):
    """Delete BigDataViewer-style members (t00000..., s00...) from *hf*.

    :param hf: an open, writable h5py File/Group (any dict-like mapping works)

    Iterates over a snapshot of the keys: the original iterated *hf* directly
    while deleting from it, which raises RuntimeError (mutation during
    iteration).
    """
    for name in list(hf):
        # BDV timepoint groups are 't' + 5 digits, setup groups 's' + 2 digits.
        if re.match(r'^(t\d{5}|s\d{2})$', name):
            del hf[name]
    return
def make_thumbnail(array, size=256):
    """Build a ``size`` x ``size`` RGBA MIP thumbnail for an Imaris file.

    ``array`` should be a 4D array; a maximum-intensity projection is taken
    along axis 1, the first three axis-0 entries become the R/G/B planes
    (each normalized to 0..255), and a fully opaque alpha plane is appended.

    :return: uint8 array of shape (size, size * 4) as stored in 'Thumbnail/Data'
    """
    # TODO: don't just crop to the upper left corner
    # np.float was removed in NumPy 1.24; use the builtin float (= float64).
    mip = np.array(array).max(1)[:3, :size, :size].astype(float)
    for i in range(mip.shape[0]):
        mip[i] -= np.min(mip[i])
        peak = np.max(mip[i])
        if peak > 0:  # guard against division by zero on constant planes
            mip[i] *= 255 / peak
    # Zero-pad to exactly 3 planes of size x size ...
    mip = np.pad(mip, ((0, 3 - mip.shape[0]),
                       (0, size - mip.shape[1]),
                       (0, size - mip.shape[2])
                       ), 'constant', constant_values=0)
    # ... then add a fully opaque (255) alpha plane.
    mip = np.pad(mip, ((0, 1), (0, 0), (0, 0)), 'constant',
                 constant_values=255).astype('|u1')
    # Interleave the 4 planes per pixel: (4, s, s) -> (s, s, 4) -> (s, 4*s).
    return np.squeeze(mip.T.reshape(1, size, size * 4)).astype('|u1')
def h5str(s, coding='ASCII', dtype='S1'):
    """Encode *s* as an array of single characters, the layout Imaris uses
    for HDF5 attribute values."""
    encoded = str(s).encode(coding)
    return np.frombuffer(encoded, dtype=dtype)
def get_meta_from_ims(filename):
    """
    Read voxel spacing and origin metadata from an Imaris file.

    :param filename: path to an .ims (HDF5) file
    :return: dict with 'spacing' (np.array [dx, dx, dz]) and 'origin'
        (np.array of ExtMin0..2)
    """
    def get_attr_string(file_obj, attr):
        # Imaris stores attributes as arrays of single characters; join the
        # characters and parse the resulting string as a float.
        return float(''.join(i.decode('UTF-8') for i in file_obj['DataSetInfo/Image'].attrs[attr]))

    # Open read-only and make sure the handle is closed (the original opened
    # the file without a mode and never closed it).
    with h5py.File(filename, mode='r') as f:
        ns = np.zeros(3, dtype=np.float32)
        ns[0] = get_attr_string(f, 'X')
        ns[1] = get_attr_string(f, 'Y')
        ns[2] = get_attr_string(f, 'Z')

        extmin = np.zeros(3, dtype=np.float32)
        extmax = np.zeros(3, dtype=np.float32)
        for i in range(3):
            extmin[i] = get_attr_string(f, 'ExtMin%s' % i)
            extmax[i] = get_attr_string(f, 'ExtMax%s' % i)

    # Isotropic in-plane spacing (dx used for both x and y), dz along z.
    dx = (extmax[0] - extmin[0]) / ns[0]
    dz = (extmax[2] - extmin[2]) / ns[2]

    meta_dict = dict()
    meta_dict['spacing'] = np.array([dx, dx, dz])
    meta_dict['origin'] = extmin

    return meta_dict
def subsample_data(data, subsamp):
    """Take every subsamp[d]-th element of 3D *data* along each axis."""
    step0, step1, step2 = (int(s) for s in subsamp)
    return data[::step0, ::step1, ::step2]
if __name__ == "__main__":
    # Demo: write random single-stack .ims files for 30 timepoints x 3
    # channels, then assemble them into one master file by copying.
    # (Dataset-extraction junk that had been appended to the final line was
    # removed; it made the script a syntax error.)
    tps = range(30)
    channels = range(3)
    file_pattern = '/tmp/im_%(t)03d_c%(c)02d.ims'
    for t in tps:
        for c in channels:
            im = np.random.randint(0, 100, (1, 1, 100, 101, 102)).astype(np.float32)
            np_to_ims(im, file_pattern % {'t': t, 'c': c}, overwrite=True)
    im_to_ims(file_pattern, channels, tps, '/tmp/im.ims', overwrite=True, copy_or_link='copy')
acf1d1d0d2e834d5279a44eb0960626bfa0868e5 | 9,206 | py | Python | examples/openai-lander/evolve.py | RobertNiklasBock/neat-python | 845456698bb5c2f4a393676455fa45fcac76966d | [
"BSD-3-Clause"
] | null | null | null | examples/openai-lander/evolve.py | RobertNiklasBock/neat-python | 845456698bb5c2f4a393676455fa45fcac76966d | [
"BSD-3-Clause"
] | null | null | null | examples/openai-lander/evolve.py | RobertNiklasBock/neat-python | 845456698bb5c2f4a393676455fa45fcac76966d | [
"BSD-3-Clause"
] | null | null | null | # Evolve a control/reward estimation network for the OpenAI Gym
# LunarLander-v2 environment (https://gym.openai.com/envs/LunarLander-v2).
# Sample run here: https://gym.openai.com/evaluations/eval_FbKq5MxAS9GlvB7W6ioJkg
from __future__ import print_function
import gym
import gym.wrappers
import matplotlib.pyplot as plt
import multiprocessing
import neat
import numpy as np
import os
import pickle
import random
import time
import visualize
# Number of worker processes used for parallel fitness evaluation.
NUM_CORES = 8

# Module-level environment shared by all evaluations; the Monitor wrapper
# records episode results under ./results (force=True replaces old records).
env = gym.make('LunarLander-v2')

print("action space: {0!r}".format(env.action_space))
print("observation space: {0!r}".format(env.observation_space))

env = gym.wrappers.Monitor(env, 'results', force=True)
class LanderGenome(neat.DefaultGenome):
    """NEAT genome extended with an evolvable reward-discount factor.

    The library is imported as ``neat`` at the top of this file; the previous
    references to ``neatfast`` were undefined names and raised NameError.
    """

    def __init__(self, key):
        super().__init__(key)
        self.discount = None

    def configure_new(self, config):
        """Initialize a fresh genome with a random discount in (0.01, 0.99)."""
        super().configure_new(config)
        self.discount = 0.01 + 0.98 * random.random()

    def configure_crossover(self, genome1, genome2, config):
        """Inherit the discount from one parent, chosen at random."""
        super().configure_crossover(genome1, genome2, config)
        self.discount = random.choice((genome1.discount, genome2.discount))

    def mutate(self, config):
        """Mutate normally, then jitter the discount, clamped to [0.01, 0.99]."""
        super().mutate(config)
        self.discount += random.gauss(0.0, 0.05)
        self.discount = max(0.01, min(0.99, self.discount))

    def distance(self, other, config):
        """Genome distance plus the absolute difference of the discounts."""
        dist = super().distance(other, config)
        disc_diff = abs(self.discount - other.discount)
        return dist + disc_diff

    def __str__(self):
        return "Reward discount: {0}\n{1}".format(self.discount,
                                                  super().__str__())
def compute_fitness(genome, net, episodes, min_reward, max_reward):
    """Return the squared errors between the net's per-step reward estimates
    and the normalized discounted rewards observed over *episodes*."""
    # Horizon after which the genome's discount factor has decayed below 1%.
    horizon = int(round(np.log(0.01) / np.log(genome.discount)))
    kernel = [genome.discount ** (horizon - i) for i in range(horizon + 1)]

    errors = []
    for _score, data in episodes:
        # Discounted future reward per step, rescaled to [-1, 1] and clipped.
        discounted = np.convolve(data[:, -1], kernel)[horizon:]
        discounted = 2 * (discounted - min_reward) / (max_reward - min_reward) - 1.0
        discounted = np.clip(discounted, -1.0, 1.0)

        # Each row is (observation[0:8], action, reward).
        for row, target in zip(data, discounted):
            prediction = net.activate(row[:8])[int(row[8])]
            errors.append(float((prediction - target) ** 2))

    return errors
class PooledErrorCompute(object):
    """Evaluates genome fitness, optionally fanning work out to a process pool.

    Fitness is the (negated) mean squared reward-prediction error over a
    shared, periodically refreshed set of test episodes.
    """

    def __init__(self):
        # Only spin up a pool when it actually buys parallelism.
        self.pool = None if NUM_CORES < 2 else multiprocessing.Pool(NUM_CORES)
        self.test_episodes = []
        self.generation = 0

        # Reward-normalization bounds used when discounting episode rewards.
        self.min_reward = -200
        self.max_reward = 200

        self.episode_score = []
        self.episode_length = []

    def simulate(self, nets):
        """Run one episode per (genome, net) pair and record the trajectories."""
        scores = []
        for genome, net in nets:
            observation = env.reset()
            step = 0
            data = []
            while 1:
                step += 1
                if step < 200 and random.random() < 0.2:
                    # Early in the episode, act randomly 20% of the time for
                    # exploration; afterwards always follow the network.
                    action = env.action_space.sample()
                else:
                    output = net.activate(observation)
                    action = np.argmax(output)

                observation, reward, done, info = env.step(action)
                # Each stored row is (observation[0:8], action, reward).
                data.append(np.hstack((observation, action, reward)))

                if done:
                    break

            data = np.array(data)
            score = np.sum(data[:, -1])
            self.episode_score.append(score)
            scores.append(score)
            self.episode_length.append(step)

            self.test_episodes.append((score, data))

        print("Score range [{:.3f}, {:.3f}]".format(min(scores), max(scores)))

    def evaluate_genomes(self, genomes, config):
        """Assign a fitness to every genome (neat-python eval function hook)."""
        self.generation += 1

        t0 = time.time()
        nets = []
        for gid, g in genomes:
            # Library is imported as ``neat``; ``neatfast`` was undefined.
            nets.append((g, neat.nn.FeedForwardNetwork.create(g, config)))

        print("network creation time {0}".format(time.time() - t0))
        t0 = time.time()

        # Periodically generate a new set of episodes for comparison.
        if 1 == self.generation % 10:
            self.test_episodes = self.test_episodes[-300:]
            self.simulate(nets)
            print("simulation run time {0}".format(time.time() - t0))
            t0 = time.time()

        # Assign a composite fitness to each genome; genomes can make progress either
        # by improving their total reward or by making more accurate reward estimates.
        print("Evaluating {0} test episodes".format(len(self.test_episodes)))
        if self.pool is None:
            for genome, net in nets:
                reward_error = compute_fitness(genome, net, self.test_episodes,
                                               self.min_reward, self.max_reward)
                genome.fitness = -np.sum(reward_error) / len(self.test_episodes)
        else:
            jobs = []
            for genome, net in nets:
                jobs.append(self.pool.apply_async(
                    compute_fitness,
                    (genome, net, self.test_episodes, self.min_reward, self.max_reward)))

            for job, (genome_id, genome) in zip(jobs, genomes):
                reward_error = job.get(timeout=None)
                genome.fitness = -np.sum(reward_error) / len(self.test_episodes)

        print("final fitness compute time {0}\n".format(time.time() - t0))
def run():
    """Evolve lander controllers until an ensemble of the best genomes solves
    the environment (average score >= 200 over 100 episodes) or the user
    interrupts.

    All ``neatfast`` references were undefined (the library is imported as
    ``neat``) and have been corrected.
    """
    # Load the config file, which is assumed to live in
    # the same directory as this script.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'config')
    config = neat.Config(LanderGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)

    pop = neat.Population(config)
    stats = neat.StatisticsReporter()
    pop.add_reporter(stats)
    pop.add_reporter(neat.StdOutReporter(True))
    # Checkpoint every 25 generations or 900 seconds.
    pop.add_reporter(neat.Checkpointer(25, 900))

    # Run until the winner from a generation is able to solve the environment
    # or the user interrupts the process.
    ec = PooledErrorCompute()
    while 1:
        try:
            gen_best = pop.run(ec.evaluate_genomes, 5)

            # print(gen_best)

            visualize.plot_stats(stats, ylog=False, view=False, filename="fitness.svg")

            plt.plot(ec.episode_score, 'g-', label='score')
            plt.plot(ec.episode_length, 'b-', label='length')
            plt.grid()
            plt.legend(loc='best')
            plt.savefig("scores.svg")
            plt.close()

            mfs = sum(stats.get_fitness_mean()[-5:]) / 5.0
            print("Average mean fitness over last 5 generations: {0}".format(mfs))

            mfs = sum(stats.get_fitness_stat(min)[-5:]) / 5.0
            print("Average min fitness over last 5 generations: {0}".format(mfs))

            # Use the best genomes seen so far as an ensemble-ish control system.
            best_genomes = stats.best_unique_genomes(3)
            best_networks = []
            for g in best_genomes:
                best_networks.append(neat.nn.FeedForwardNetwork.create(g, config))

            solved = True
            best_scores = []
            for k in range(100):
                observation = env.reset()
                score = 0
                step = 0
                while 1:
                    step += 1
                    # Let each of the best networks vote on an action; the
                    # action with the most votes is taken.
                    votes = np.zeros((4,))
                    for n in best_networks:
                        output = n.activate(observation)
                        votes[np.argmax(output)] += 1

                    best_action = np.argmax(votes)
                    observation, reward, done, info = env.step(best_action)
                    score += reward
                    env.render()
                    if done:
                        break

                ec.episode_score.append(score)
                ec.episode_length.append(step)

                best_scores.append(score)
                avg_score = sum(best_scores) / len(best_scores)
                print(k, score, avg_score)
                # Bail out early as soon as the running average drops below
                # the "solved" threshold.
                if avg_score < 200:
                    solved = False
                    break

            if solved:
                print("Solved.")

                # Save the winners.
                for n, g in enumerate(best_genomes):
                    name = 'winner-{0}'.format(n)
                    with open(name + '.pickle', 'wb') as f:
                        pickle.dump(g, f)

                    visualize.draw_net(config, g, view=False, filename=name + "-net.gv")
                    visualize.draw_net(config, g, view=False, filename=name + "-net-enabled.gv",
                                       show_disabled=False)
                    visualize.draw_net(config, g, view=False, filename=name + "-net-enabled-pruned.gv",
                                       show_disabled=False, prune_unused=True)

                break
        except KeyboardInterrupt:
            print("User break.")
            break

    env.close()
# Entry point: evolve lander controllers when executed as a script.
if __name__ == '__main__':
    run()
acf1d25527ae9350b94e326cdacd300ab3b03ebc | 177 | py | Python | src/rest_client/config.py | university-my/ultimate-schedule-api | 6dbf2368da8751a8b6105c8d783a4b105f99866d | [
"MIT"
] | 5 | 2020-04-18T16:33:50.000Z | 2021-09-30T09:24:56.000Z | src/rest_client/config.py | university-my/ultimate-schedule-api | 6dbf2368da8751a8b6105c8d783a4b105f99866d | [
"MIT"
] | 15 | 2020-04-18T13:03:26.000Z | 2021-12-13T20:44:54.000Z | src/rest_client/config.py | university-my/ultimate-schedule-api | 6dbf2368da8751a8b6105c8d783a4b105f99866d | [
"MIT"
] | 2 | 2020-05-30T20:51:45.000Z | 2021-09-28T10:32:12.000Z | API_AJAX_FLAG = 701
GROUPS_AJAX_FLAG = 142
TEACHERS_AJAX_FLAG = 141
SCHEDULE_FLAG = 700
DEFAULT_ENCODING = "windows-1251"
STATISTICS_URL = "https://api.amplitude.com/2/httpapi"
| 25.285714 | 54 | 0.79661 |
acf1d27d761c99e8e6c5b91feb15b1fc83da5821 | 1,574 | py | Python | templates/qc_report.py | genomicsITER/NanoRTax | c466dbc1371f597a976d004bc0fb8d4251fe4b8f | [
"MIT"
] | 1 | 2021-08-12T15:22:41.000Z | 2021-08-12T15:22:41.000Z | templates/qc_report.py | genomicsITER/NanoRTax | c466dbc1371f597a976d004bc0fb8d4251fe4b8f | [
"MIT"
] | null | null | null | templates/qc_report.py | genomicsITER/NanoRTax | c466dbc1371f597a976d004bc0fb8d4251fe4b8f | [
"MIT"
] | 2 | 2021-08-04T13:15:08.000Z | 2022-03-16T16:36:58.000Z | #!/usr/bin/env python3
import datetime
import re
import pandas as pd
import skbio
import json
import os
import csv
qc_report_path = "$report_dir"
def load_new_report():
    """Parse the QC JSON report (fastp-style schema — confirm) and return counts.

    "$report" is presumably substituted by the Nextflow template engine.

    :return: [reads before filtering, reads after filtering,
              low-quality reads, reads removed for being too short]
    """
    # with-statement ensures the handle is closed even if json.load raises
    # (the original used open()/close() and leaked the handle on error).
    with open("$report") as f:
        data = json.load(f)
    return [int(data['summary']['before_filtering']['total_reads']),
            int(data['summary']['after_filtering']['total_reads']),
            int(data['filtering_result']['low_quality_reads']),
            int(data['filtering_result']['too_short_reads'])
            # int(data['filtering_result']['too_long_reads'])
            ]
#qc_report_path = "$projectDir/viz_webapp/data/$barcode/qc_report.csv"
# Cumulative QC report path ("$report_dir" is substituted by the Nextflow
# template engine).
qc_report_path = "$report_dir"

# Counts from the current batch; if a cumulative report already exists, add
# the previous totals so the CSV always holds running totals.  (The original
# duplicated the DataFrame-building code in both branches.)
new_report = load_new_report()
if os.path.exists(qc_report_path):
    last_report = pd.read_csv(qc_report_path).values.tolist()[0]
    new_report = [int(x) + int(y) for x, y in zip(last_report, new_report)]

data_dict = {'Reads before QC': [new_report[0]],
             'Reads after QC': [new_report[1]],
             'Low quality reads': [new_report[2]],
             'Filtered short reads': [new_report[3]]}
qc_df = pd.DataFrame.from_dict(data_dict)
qc_df.to_csv("qc_report.csv", index=False)
acf1d299128bff20aa60fbdcf1d109f3e197a0e9 | 6,595 | py | Python | grax/projects/gat/modules.py | jackd/grax | 99baaea786c59c1f5fe4314ba26d04b9a69499d6 | [
"Apache-2.0"
] | 6 | 2021-02-18T08:21:02.000Z | 2021-07-29T09:09:30.000Z | grax/projects/gat/modules.py | jackd/grax | 99baaea786c59c1f5fe4314ba26d04b9a69499d6 | [
"Apache-2.0"
] | null | null | null | grax/projects/gat/modules.py | jackd/grax | 99baaea786c59c1f5fe4314ba26d04b9a69499d6 | [
"Apache-2.0"
] | null | null | null | from functools import partial
import gin
import haiku as hk
import jax
import jax.numpy as jnp
import spax
from huf import initializers
from huf.module_ops import dropout
from jax.experimental.sparse.ops import COO
from grax.projects.gat import ops as gat_ops
configurable = partial(gin.configurable, module="gat")
@configurable
class GATConv(hk.Module):
def __init__(
self,
filters: int,
dropout_rate: float = 0.0,
with_bias: bool = True,
b_init=jnp.zeros,
name=None,
):
super().__init__(name=name)
self.filters = filters
self.dropout_rate = dropout_rate
self.with_bias = with_bias
self.b_init = b_init
def __call__(self, graph: COO, node_features: jnp.ndarray, is_training: bool):
x = node_features
del node_features
x = dropout(x, self.dropout_rate, is_training)
x = hk.Linear(
self.filters,
name="values",
with_bias=False,
w_init=initializers.glorot_uniform,
)(x)
# values = hk.Linear(self.filters + 2, w_init=initializers.lecun_uniform)(x)
# key, query, values = jnp.split(values, (1, 2), axis=1)
query = hk.Linear(
1, name="query", with_bias=False, w_init=initializers.glorot_uniform
)(x)
key = hk.Linear(
1, name="key", with_bias=False, w_init=initializers.glorot_uniform
)(x)
query = jnp.squeeze(query, axis=1)
key = jnp.squeeze(key, axis=1)
row, col = graph.row, graph.col
query = query[row]
key = key[col]
attn = jax.nn.leaky_relu(key + query, negative_slope=0.2)
attn = spax.utils.segment_softmax(attn, row, num_segments=graph.shape[0])
attn = dropout(attn, self.dropout_rate, is_training)
x = dropout(x, self.dropout_rate, is_training)
# x = spax.ops.matmul(spax.ops.with_data(graph, attn), x)
x = gat_ops.graph_conv(graph, attn, x)
if self.with_bias:
x = x + hk.get_parameter(
"b", shape=(self.filters,), dtype=x.dtype, init=self.b_init
)
return x
# class GATConvBlock(hk.Module):
# def __init__(self, filters: int, dropout_rate: float = 0.0, name=None):
# super().__init__(name=name)
# self.filters = filters
# self.dropout_rate = dropout_rate
# def __call__(
# self, graph: COO, node_features: jnp.ndarray, is_training: bool
# ):
# x = dropout(node_features, self.dropout_rate, is_training)
# x = hk.Linear(self.filters)(x)
# return GATConv(self.filters, self.dropout_rate)(graph, x, is_training)
# @configurable
# class MultiHeadGATConv(hk.Module):
# def __init__(
# self, filters: int, num_heads: int, dropout_rate: float = 0.0, name=None,
# ):
# super().__init__(name=name)
# self.filters = filters
# self.num_heads = num_heads
# self.dropout_rate = dropout_rate
# def __call__(
# self,
# graph: JAXSparse,
# node_features: jnp.ndarray,
# is_training: tp.Optional[bool] = None,
# ):
# # no initial dropout / dense
# # coords = spax.ops.to_coo(graph).coords
# row, col = graph.row, graph.col
# values = hk.Linear(
# (self.filters + 2) * self.num_heads, w_init=initializers.lecun_uniform,
# )(node_features)
# values = values.reshape(values.shape[0], self.num_heads, self.filters + 2)
# key, query, values = jnp.split(values, (1, 2), axis=2)
# key = jnp.squeeze(key, axis=2)
# query = jnp.squeeze(query, axis=2)
# key = key[row]
# query = key[col]
# attn = jax.nn.leaky_relu(key + query, negative_slope=0.2)
# attn = spax.utils.segment_softmax(attn, row, num_segments=graph.shape[0])
# attn = dropout(attn, self.dropout_rate, is_training)
# values = dropout(values, self.dropout_rate, is_training)
# out = gat_ops.multi_head_graph_conv(graph, attn, values)
# assert out.shape == (graph.shape[0], self.num_heads, self.filters)
# return out
class MultiHeadGATConv(hk.Module):
    """Runs ``num_heads`` independently parameterized GATConv heads and
    stacks their outputs along axis 1 -> [num_nodes, num_heads, filters]."""

    def __init__(self, *args, num_heads=1, name=None, **kwargs):
        super().__init__(name=name)
        self.num_heads = num_heads
        # Remaining positional/keyword args are forwarded to each GATConv head.
        self._head_fun = partial(GATConv, *args, **kwargs)

    def __call__(self, graph: COO, node_features: jnp.ndarray, is_training: bool):
        heads = [
            self._head_fun(name=f"head{i}")(graph, node_features, is_training)
            for i in range(self.num_heads)
        ]
        return jnp.stack(heads, axis=1)
@configurable
class GAT(hk.Module):
    """Two-layer graph attention network.

    Hidden layer: ``hidden_heads`` heads of ``hidden_filters`` each,
    concatenated and passed through ELU. Output layer: ``final_heads`` heads
    of ``num_classes`` logits each, averaged over heads.
    """

    def __init__(
        self,
        num_classes: int,
        hidden_filters: int = 8,
        hidden_heads: int = 8,
        final_heads: int = 1,
        dropout_rate: float = 0.6,
        name=None,
    ):
        super().__init__(name=name)
        self.num_classes = num_classes
        self.hidden_filters = hidden_filters
        self.hidden_heads = hidden_heads
        self.final_heads = final_heads
        self.dropout_rate = dropout_rate

    def __call__(
        self, graph: COO, node_features: jnp.ndarray, is_training: bool,
    ):
        # x = dropout(node_features, self.dropout_rate, training)
        # x = hk.Linear(self.hidden_filters)(x)
        # x = MultiHeadGATConv(
        #     self.hidden_filters, self.hidden_heads, self.dropout_rate
        # )(graph, x, training)
        # x = x.reshape(graph.shape[0], self.hidden_filters * self.hidden_heads)
        # x = jax.nn.elu(x)
        # x = dropout(x, self.dropout_rate, training)
        # x = MultiHeadGATConv(self.num_classes, self.final_heads, self.dropout_rate)(
        #     graph, x, training
        # )  # [N, heads, cls]
        # x = x.sum(axis=1)  # [N, cls]
        # return x
        x = node_features
        x = MultiHeadGATConv(
            num_heads=self.hidden_heads,
            filters=self.hidden_filters,
            dropout_rate=self.dropout_rate,
        )(graph, x, is_training)
        assert x.shape == (
            node_features.shape[0],
            self.hidden_heads,
            self.hidden_filters,
        )
        # Concatenate heads, apply ELU nonlinearity.
        x = x.reshape(x.shape[0], self.hidden_filters * self.hidden_heads)
        x = jax.nn.elu(x)
        x = MultiHeadGATConv(
            num_heads=self.final_heads,
            filters=self.num_classes,
            dropout_rate=self.dropout_rate,
        )(graph, x, is_training)
        # Average the output heads to produce per-node class logits.
        x = x.mean(axis=1)
        return x
| 33.477157 | 86 | 0.59909 |
acf1d4564b884a25ca161a3d5947bc41b2b5515b | 8,347 | py | Python | test/functional/feature_proxy.py | EcoDollar/EcoDollar | 282e0ac689e589a8c41adca3045014eb66662f68 | [
"MIT"
] | 1 | 2021-05-03T14:38:19.000Z | 2021-05-03T14:38:19.000Z | test/functional/feature_proxy.py | EcoDollar/EcoDollar | 282e0ac689e589a8c41adca3045014eb66662f68 | [
"MIT"
] | null | null | null | test/functional/feature_proxy.py | EcoDollar/EcoDollar | 282e0ac689e589a8c41adca3045014eb66662f68 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start ecodollard's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on ecodollard side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create ecodollards that connect to them
- Manipulate the ecodollards using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import EcodollarTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(EcodollarTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
    """Drive the proxy test: exercise each node's outgoing-connection proxying,
    then verify the proxy settings reported by the getnetworkinfo RPC."""
    # basic -proxy
    self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
    # -proxy plus -onion
    self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
    # -proxy plus -onion, -proxyrandomize
    rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
    # Check that credentials as used for -proxyrandomize connections are unique
    credentials = set((x.username,x.password) for x in rv)
    assert_equal(len(credentials), len(rv))
    if self.have_ipv6:
        # proxy on IPv6 localhost
        self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)

    def networks_dict(d):
        # Index getnetworkinfo's 'networks' list by network name for easy lookup.
        r = {}
        for x in d['networks']:
            r[x['name']] = x
        return r

    # test RPC getnetworkinfo
    # Node 0: one proxy (conf1) for all networks, with randomized credentials.
    n0 = networks_dict(self.nodes[0].getnetworkinfo())
    for net in ['ipv4','ipv6','onion']:
        assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
        assert_equal(n0[net]['proxy_randomize_credentials'], True)
    assert_equal(n0['onion']['reachable'], True)
    # Node 1: conf1 for ipv4/ipv6, separate onion proxy (conf2), no randomization.
    n1 = networks_dict(self.nodes[1].getnetworkinfo())
    for net in ['ipv4','ipv6']:
        assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
        assert_equal(n1[net]['proxy_randomize_credentials'], False)
    assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
    assert_equal(n1['onion']['proxy_randomize_credentials'], False)
    assert_equal(n1['onion']['reachable'], True)
    # Node 2: conf2 for everything, randomized credentials.
    n2 = networks_dict(self.nodes[2].getnetworkinfo())
    for net in ['ipv4','ipv6','onion']:
        assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
        assert_equal(n2[net]['proxy_randomize_credentials'], True)
    assert_equal(n2['onion']['reachable'], True)
    # Node 3: IPv6-localhost proxy (conf3); onion is not reachable through it.
    if self.have_ipv6:
        n3 = networks_dict(self.nodes[3].getnetworkinfo())
        for net in ['ipv4','ipv6']:
            assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
            assert_equal(n3[net]['proxy_randomize_credentials'], False)
        assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
    # Run the proxy functional test when this file is invoked directly.
    ProxyTest().main()
| 41.321782 | 121 | 0.626213 |
acf1d478f34e7b80a51375e4ff5fa48640601d99 | 13,583 | py | Python | driftbase/tests/test_matches.py | dgnorth/drift-base | 9825cb22b26b577b715f2ce95453363bf90ecc7e | [
"MIT"
] | 1 | 2016-09-24T20:48:46.000Z | 2016-09-24T20:48:46.000Z | driftbase/tests/test_matches.py | dgnorth/drift-base | 9825cb22b26b577b715f2ce95453363bf90ecc7e | [
"MIT"
] | 9 | 2017-02-14T05:49:16.000Z | 2022-03-07T08:29:18.000Z | driftbase/tests/test_matches.py | dgnorth/drift-base | 9825cb22b26b577b715f2ce95453363bf90ecc7e | [
"MIT"
] | 2 | 2016-10-31T09:47:41.000Z | 2019-06-06T03:06:21.000Z | from collections import defaultdict
from six.moves import http_client
from drift.systesthelper import uuid_string
from driftbase.utils.test_utils import BaseMatchTest
class MatchesTest(BaseMatchTest):
    """
    Tests for the /matches service endpoints: access control, match/team
    creation, player join/leave semantics, and active-match listings.
    """
    def test_access(self):
        """All /matches endpoints reject a plain (non-service) user session."""
        self.auth()
        resp = self.get("/matches", expected_status_code=http_client.UNAUTHORIZED)
        self.assertIn("You do not have access", resp.json()["error"]["description"])

        resp = self.get("/matches/1", expected_status_code=http_client.UNAUTHORIZED)
        self.assertIn("You do not have access", resp.json()["error"]["description"])

        resp = self.post("/matches", expected_status_code=http_client.UNAUTHORIZED)
        self.assertIn("You do not have access", resp.json()["error"]["description"])

        resp = self.put("/matches/1", expected_status_code=http_client.UNAUTHORIZED)
        self.assertIn("You do not have access", resp.json()["error"]["description"])

    def test_get_matches(self):
        """Listing returns a list; unknown match ids yield 404."""
        self.auth_service()
        resp = self.get("/matches")
        self.assertTrue(isinstance(resp.json(), list))

        resp = self.get("/matches?server_id=1")
        self.assertTrue(isinstance(resp.json(), list))

        resp = self.get("/matches/999999", expected_status_code=http_client.NOT_FOUND)
        resp = self.put("/matches/999999", data={"status": "bla"},
                        expected_status_code=http_client.NOT_FOUND)

    def test_create_match(self):
        """A new match starts idle and empty; num_teams pre-creates teams."""
        self.auth_service()
        match = self._create_match()
        match_url = match["url"]
        server_id = match["server_id"]
        resp = self.get(match_url)
        self.assertEqual(resp.json()["num_players"], 0)
        self.assertEqual(resp.json()["teams"], [])
        self.assertEqual(resp.json()["players"], [])
        self.assertEqual(resp.json()["status"], "idle")
        self.assertEqual(resp.json()["server_id"], server_id)
        self.assertIsNone(resp.json()["start_date"])

        # create a match with some predefined teams
        num_teams = 3
        data = {"server_id": server_id,
                "status": "active",
                "map_name": "map_name",
                "game_mode": "game_mode",
                "num_teams": num_teams
                }
        resp = self.post("/matches", data=data, expected_status_code=http_client.CREATED)
        resp = self.get(resp.json()["url"])
        self.assertEqual(len(resp.json()["teams"]), num_teams)

    def test_create_team(self):
        """Teams can be created, fetched, renamed; unknown team ids 404."""
        self.auth_service()
        match = self._create_match()
        match_id = match["match_id"]
        teams_url = match["teams_url"]
        resp = self.get(teams_url)
        self.assertTrue(isinstance(resp.json(), list))

        resp = self.post(teams_url, data={}, expected_status_code=http_client.CREATED)
        team_url = resp.json()["url"]
        resp = self.get(team_url)
        self.assertTrue(isinstance(resp.json()["players"], list))
        self.assertEqual(len(resp.json()["players"]), 0)

        resp = self.get("/matches/%s/teams/99999" % match_id,
                        expected_status_code=http_client.NOT_FOUND)

        new_name = "new name"
        resp = self.put("/matches/%s/teams/99999" % match_id, data={"name": new_name},
                        expected_status_code=http_client.NOT_FOUND)
        resp = self.put(team_url, data={"name": new_name})
        resp = self.get(team_url)
        self.assertEqual(resp.json()["name"], new_name)

    def test_add_player_to_match(self):
        """Joining a match sets its start date and updates player counts."""
        self.auth()
        player_id = self.player_id
        team_id = 0
        self.auth_service()
        match = self._create_match()
        match_id = match["match_id"]
        match_url = match["url"]
        teams_url = match["teams_url"]
        resp = self.get(match_url)
        matchplayers_url = resp.json()["matchplayers_url"]

        resp = self.post(teams_url, data={}, expected_status_code=http_client.CREATED)
        team_id = resp.json()["team_id"]
        self.get(teams_url)

        data = {"player_id": player_id,
                "team_id": team_id
                }
        self.post(matchplayers_url, data=data, expected_status_code=http_client.CREATED)

        resp = self.get(match_url)
        self.assertEqual(len(resp.json()["teams"]), 1)
        self.assertIsNotNone(resp.json()["start_date"])
        self.assertEqual(resp.json()["num_players"], 1)

        resp = self.get(teams_url)
        team_url = resp.json()[0]["url"]
        self.get(team_url)

        resp = self.get(matchplayers_url)
        matchplayer_url = resp.json()[0]["matchplayer_url"]
        self.get(matchplayer_url)

        self.get("/matches/%s/players/9999999" % match_id, expected_status_code=http_client.NOT_FOUND)

    def test_active_matches(self):
        """Active-match listing shows joined players and filters by player_id."""
        self.auth(username=uuid_string())
        player_id = self.player_id
        team_id = 0
        self.auth(username=uuid_string())
        other_player_id = self.player_id
        team_id = 0

        self.auth_service()
        match = self._create_match(max_players=3)
        match_url = match["url"]
        teams_url = match["teams_url"]
        resp = self.get(match_url)
        matchplayers_url = resp.json()["matchplayers_url"]

        resp = self.post(teams_url, data={}, expected_status_code=http_client.CREATED)
        team_id = resp.json()["team_id"]
        resp = self.get(teams_url)

        data = {"player_id": player_id,
                "team_id": team_id
                }
        self.post(matchplayers_url, data=data, expected_status_code=http_client.CREATED)

        data = {"player_id": other_player_id,
                "team_id": team_id
                }
        self.post(matchplayers_url, data=data, expected_status_code=http_client.CREATED)

        resp = self.get(match_url)
        self.assertEqual(len(resp.json()["teams"]), 1)
        resp = self.get(teams_url)
        team_url = resp.json()[0]["url"]
        resp = self.get(team_url)
        resp = self.get(matchplayers_url)
        matchplayer_url = resp.json()[0]["matchplayer_url"]
        self.get(matchplayer_url)

        resp = self.get(self.endpoints["active_matches"])
        players = resp.json()[0]["players"]
        self.assertEqual(len(players), 2)
        self.assertEqual(players[0]["player_id"], player_id)

        # Filtering by unknown player ids yields no matches...
        resp = self.get(self.endpoints["active_matches"] + "?player_id=9999999&player_id=9999998")
        self.assertEqual(len(resp.json()), 0)

        # ...but including a joined player's id finds the match.
        resp = self.get(self.endpoints["active_matches"] + "?player_id=9999999&player_id=%s" %
                        other_player_id)
        self.assertEqual(len(resp.json()), 1)
        players = resp.json()[0]["players"]
        self.assertEqual(players[1]["player_id"], other_player_id)

    def players_by_status(self, players):
        """Group a list of player dicts by their 'status' field."""
        ret = defaultdict(list)
        for player in players:
            ret[player["status"]].append(player)
        return ret

    def test_remove_player_from_match(self):
        """Quitting marks the player 'quit' (not removed) and can't repeat;
        rejoining restores 'active' status."""
        self.auth()
        player_id = self.player_id
        self.auth_service()
        match = self._create_match()
        match_url = match["url"]
        teams_url = match["teams_url"]
        matchplayers_url = match["matchplayers_url"]

        resp = self.post(teams_url, data={}, expected_status_code=http_client.CREATED)
        team_id = resp.json()["team_id"]

        data = {"player_id": player_id,
                "team_id": team_id
                }
        resp = self.post(matchplayers_url, data=data, expected_status_code=http_client.CREATED)
        matchplayer_url = resp.json()["url"]
        resp = self.get(match_url)
        self.assertEqual(resp.json()["num_players"], 1)

        self.delete(matchplayer_url)
        resp = self.get(match_url)
        self.assertEqual(resp.json()["num_players"], 1)
        pbs = self.players_by_status(resp.json()["players"])
        self.assertEqual(len(pbs["active"]), 0)
        self.assertEqual(len(pbs["quit"]), 1)

        # you cannot quit twice
        self.delete(matchplayer_url, expected_status_code=http_client.BAD_REQUEST)
        resp = self.get(match_url)
        self.assertEqual(resp.json()["num_players"], 1)
        pbs = self.players_by_status(resp.json()["players"])
        self.assertEqual(len(pbs["active"]), 0)
        self.assertEqual(len(pbs["quit"]), 1)

        # join the fight again
        self.post(matchplayers_url, data=data, expected_status_code=http_client.CREATED)
        resp = self.get(match_url)
        self.assertEqual(resp.json()["num_players"], 1)
        pbs = self.players_by_status(resp.json()["players"])
        self.assertEqual(len(pbs["active"]), 1)
        self.assertEqual(len(pbs["quit"]), 0)

        # now you can quit again
        self.delete(matchplayer_url)

    def test_match_start_date_is_set_when_first_player_joins(self):
        """start_date is fixed by the first join and never changes after."""
        self.auth("player_1")
        player1_id = self.player_id
        self.auth("player_2")
        player2_id = self.player_id

        self.auth_service()
        match = self._create_match()
        match_url = match["url"]
        teams_url = match["teams_url"]

        resp = self.get(match_url)
        self.assertEqual(resp.json()["start_date"], None)

        matchplayers_url = match["matchplayers_url"]
        resp = self.post(teams_url, data={}, expected_status_code=http_client.CREATED)
        team_id = resp.json()["team_id"]

        data1 = {
            "player_id": player1_id,
            "team_id": team_id
        }
        resp = self.post(matchplayers_url, data=data1, expected_status_code=http_client.CREATED)
        matchplayer1_url = resp.json()["url"]
        resp = self.get(match_url)
        match_start = resp.json()["start_date"]

        data2 = {
            "player_id": player2_id,
            "team_id": team_id
        }
        resp = self.post(matchplayers_url, data=data2, expected_status_code=http_client.CREATED)
        matchplayer2_url = resp.json()["url"]
        resp = self.get(match_url)
        self.assertEqual(match_start, resp.json()["start_date"])

        self.delete(matchplayer1_url)
        self.delete(matchplayer2_url)
        resp = self.get(match_url)
        self.assertEqual(match_start, resp.json()["start_date"])

        self.post(matchplayers_url, data=data1, expected_status_code=http_client.CREATED)
        resp = self.get(match_url)
        self.assertEqual(match_start, resp.json()["start_date"])

    def test_change_match(self):
        """Status transitions are allowed until 'completed', then rejected."""
        self.auth_service()
        match = self._create_match()
        match_url = match["url"]
        self.put(match_url, data={"status": "new_status"})
        self.put(match_url, data={"status": "started"})
        self.put(match_url, data={"status": "completed"})
        resp = self.put(match_url, data={"status": "active"},
                        expected_status_code=http_client.BAD_REQUEST)
        self.assertIn("already been completed", resp.json()["error"]["description"])

    def test_max_players(self):
        """Joining beyond the match's player capacity is rejected."""
        player_ids = []
        for i in range(3):
            self.auth(username="user_%s" % i)
            player_ids.append(self.player_id)

        self.auth_service()
        match = self._create_match(num_teams=2)
        matchplayers_url = match["matchplayers_url"]
        match_url = match["url"]
        resp = self.get(match_url)
        team_id = resp.json()["teams"][0]["team_id"]
        for player_id in player_ids[0:2]:
            data = {"player_id": player_id,
                    "team_id": team_id
                    }
            resp = self.post(matchplayers_url, data=data, expected_status_code=http_client.CREATED)

        data = {"player_id": player_ids[-1],
                "team_id": team_id
                }
        self.post(matchplayers_url, data=data, expected_status_code=http_client.BAD_REQUEST)

    def test_active_matches_depend_on_match_status(self):
        """Matches whose status is 'ended' or 'completed' leave the active list."""
        self.auth_service()
        match = self._create_match(max_players=4)
        match_url = match["url"]
        match_id = match["match_id"]
        server_id = match["server_id"]
        resp = self.get(self.endpoints["active_matches"])
        self.assertEqual(len(self._filter_matches(resp, [match_id])), 1)
        self.put(match_url, data={"status": "ended"})
        resp = self.get(self.endpoints["active_matches"])
        self.assertEqual(len(self._filter_matches(resp, [match_id])), 0)

        match = self._create_match(max_players=4, server_id=server_id)
        match_url = match["url"]
        self.put(match_url, data={"status": "completed"})
        resp = self.get(self.endpoints["active_matches"])
        self.assertEqual(len(self._filter_matches(resp, [match_id])), 0)

    def test_active_matches_depend_on_server_status(self):
        """A match disappears from the active list when its server quits."""
        self.auth_service()
        match = self._create_match(max_players=4)
        match_id = match["match_id"]
        server_url = match["server_url"]
        resp = self.get(self.endpoints["active_matches"])
        self.assertEqual(len(self._filter_matches(resp, [match_id])), 1)
        self.put(server_url, data={"status": "quit"})
        resp = self.get(self.endpoints["active_matches"])
        self.assertEqual(len(self._filter_matches(resp, [match_id])), 0)
| 38.588068 | 103 | 0.603328 |
acf1d4a7c0b51b9eeafeeaeaa065dc60309a093f | 1,719 | py | Python | tests/items/steps/send_new_items_to_dest_test.py | umd-lib/caia | b6fc6b7ceb1987e67d593d2a19d64f16645f6f7b | [
"Apache-2.0"
] | null | null | null | tests/items/steps/send_new_items_to_dest_test.py | umd-lib/caia | b6fc6b7ceb1987e67d593d2a19d64f16645f6f7b | [
"Apache-2.0"
] | 1 | 2020-06-16T11:22:58.000Z | 2020-06-16T11:22:58.000Z | tests/items/steps/send_new_items_to_dest_test.py | umd-lib/caia | b6fc6b7ceb1987e67d593d2a19d64f16645f6f7b | [
"Apache-2.0"
] | 1 | 2020-05-11T19:13:42.000Z | 2020-05-11T19:13:42.000Z | from hamcrest import assert_that
import tempfile
from mbtest.imposters import Imposter, Predicate, Response, Stub
from mbtest.matchers import had_request
from caia.items.items_job_config import ItemsJobConfig
from caia.items.steps.send_new_items_to_dest import SendNewItemsToDest
def test_send_new_items_to_dest_valid_response(mock_server):
    """Sending new items to the destination succeeds against a mocked endpoint
    and the step result echoes the canned destination response."""
    with open("tests/resources/items/valid_dest_new_items_response.json") as response_file:
        canned_response = response_file.read()

    # Mock destination: answer POST /items/incoming with the canned body.
    stub = Stub(
        Predicate(path="/items/incoming", method="POST"),
        Response(body=canned_response),
    )
    imposter = Imposter(stub)

    with mock_server(imposter) as server:
        job_config = ItemsJobConfig(
            {
                'dest_new_url': f"{imposter.url}/items/incoming",
                'storage_dir': '/tmp',
                'last_success_lookup': 'tests/storage/items/items_last_success.txt',
                'caiasoft_api_key': "SOME_SECRET_KEY",
            },
            'test',
        )
        # Point the request body at a known-good fixture.
        job_config["dest_new_items_request_body_filepath"] = "tests/resources/items/valid_dest_new_items_request.json"

        with tempfile.TemporaryDirectory() as scratch_dir:
            job_config["dest_new_items_response_body_filepath"] = scratch_dir + "/dest_new_items_response.json"
            step = SendNewItemsToDest(job_config)
            outcome = step.execute()

            assert outcome.was_successful() is True
            assert_that(server, had_request().with_path("/items/incoming").and_method("POST"))
            assert canned_response == outcome.get_result()
| 45.236842 | 118 | 0.722513 |
acf1d683005ac3874ad9f5b523e88e6ad54e89ad | 533 | py | Python | python_crash_course/ch11/language_survey.py | tangentspire/Python_Practice | e7f22303230a2ffa4e3f5ae57854bac9c4c3bc34 | [
"Apache-2.0"
] | null | null | null | python_crash_course/ch11/language_survey.py | tangentspire/Python_Practice | e7f22303230a2ffa4e3f5ae57854bac9c4c3bc34 | [
"Apache-2.0"
] | 3 | 2020-02-11T22:58:27.000Z | 2021-06-10T20:30:42.000Z | python_crash_course/ch11/language_survey.py | tangentspire/Python_Practice | e7f22303230a2ffa4e3f5ae57854bac9c4c3bc34 | [
"Apache-2.0"
] | null | null | null | from survey import AnonymousSurvey
# Build the survey around a single question.
question = "What language did you first learn to speak?"
my_survey = AnonymousSurvey(question)

# Present the question, then collect answers until the user enters 'q'.
my_survey.show_question(question)
print("Enter 'q' at any time to quit.\n")
for response in iter(lambda: input("Language: "), 'q'):
    my_survey.store_response(response)

# Report the aggregated responses.
print("\nThank you to everyone who participated in the survey!")
my_survey.show_results()
acf1d6d59c787acf7f6b23c3a61fe5b4bc5befb4 | 2,393 | py | Python | easy_rec/python/model/mmoe.py | xia-huang-411303/EasyRec | 7b2050dddc0bfec9e551e2199a36414a3ee82588 | [
"Apache-2.0"
] | 285 | 2021-10-11T03:39:43.000Z | 2022-03-31T09:12:33.000Z | easy_rec/python/model/mmoe.py | xia-huang-411303/EasyRec | 7b2050dddc0bfec9e551e2199a36414a3ee82588 | [
"Apache-2.0"
] | 84 | 2021-10-15T03:48:58.000Z | 2022-03-31T12:38:53.000Z | easy_rec/python/model/mmoe.py | xia-huang-411303/EasyRec | 7b2050dddc0bfec9e551e2199a36414a3ee82588 | [
"Apache-2.0"
] | 71 | 2021-10-15T03:33:44.000Z | 2022-03-31T08:37:11.000Z | # -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import tensorflow as tf
from easy_rec.python.layers import dnn
from easy_rec.python.layers import mmoe
from easy_rec.python.model.multi_task_model import MultiTaskModel
from easy_rec.python.protos.mmoe_pb2 import MMoE as MMoEConfig
# Run under the TF1 compatibility API when TensorFlow 2.x is installed.
# NOTE(review): this is a lexicographic string compare — fine for 1.x/2.x,
# would misorder a hypothetical '10.x'; confirm if TF versions ever go there.
if tf.__version__ >= '2.0':
    tf = tf.compat.v1
class MMoE(MultiTaskModel):
    """Multi-gate Mixture-of-Experts multi-task model.

    A shared MMoE layer produces one gated expert mixture per task; each task
    then gets its own (optional) tower DNN and a final dense output layer.
    """
    def __init__(self,
                 model_config,
                 feature_configs,
                 features,
                 labels=None,
                 is_training=False):
        """Validate the 'mmoe' model config, build the shared input layer and
        initialize one task tower per configured task."""
        super(MMoE, self).__init__(model_config, feature_configs, features, labels,
                                   is_training)
        assert self._model_config.WhichOneof('model') == 'mmoe', \
            'invalid model config: %s' % self._model_config.WhichOneof('model')
        # Narrow the config handle to the mmoe sub-message.
        self._model_config = self._model_config.mmoe
        assert isinstance(self._model_config, MMoEConfig)

        # All feature groups are concatenated into a single shared input.
        self._features, _ = self._input_layer(self._feature_dict, 'all')
        self._init_towers(self._model_config.task_towers)

    def build_predict_graph(self):
        """Build the MMoE layer, the per-task towers and their output heads.

        Returns the prediction dict populated via _add_to_prediction_dict.
        """
        if self._model_config.HasField('expert_dnn'):
            # Single expert DNN config, replicated num_expert times.
            mmoe_layer = mmoe.MMOE(
                self._model_config.expert_dnn,
                l2_reg=self._l2_reg,
                num_task=self._task_num,
                num_expert=self._model_config.num_expert)
        else:
            # For backward compatibility with original mmoe layer config
            # (one explicit dnn config per expert).
            mmoe_layer = mmoe.MMOE([x.dnn for x in self._model_config.experts],
                                   l2_reg=self._l2_reg,
                                   num_task=self._task_num)
        # One gated mixture output per task.
        task_input_list = mmoe_layer(self._features)

        tower_outputs = {}
        for i, task_tower_cfg in enumerate(self._model_config.task_towers):
            tower_name = task_tower_cfg.tower_name
            if task_tower_cfg.HasField('dnn'):
                tower_dnn = dnn.DNN(
                    task_tower_cfg.dnn,
                    self._l2_reg,
                    name=tower_name,
                    is_training=self._is_training)
                tower_output = tower_dnn(task_input_list[i])
            else:
                # No tower DNN configured: feed the mixture straight to the head.
                tower_output = task_input_list[i]
            # Final per-task logits layer; name is index-based for stable scopes.
            tower_output = tf.layers.dense(
                inputs=tower_output,
                units=task_tower_cfg.num_class,
                kernel_regularizer=self._l2_reg,
                name='dnn_output_%d' % i)

            tower_outputs[tower_name] = tower_output

        self._add_to_prediction_dict(tower_outputs)
        return self._prediction_dict
| 35.191176 | 79 | 0.662349 |
acf1d78bbf4e4392a68912806da9f94dc13d4c76 | 3,488 | py | Python | api/caching/tasks.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | 1 | 2015-10-02T18:35:53.000Z | 2015-10-02T18:35:53.000Z | api/caching/tasks.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | 4 | 2016-05-13T14:24:16.000Z | 2017-03-30T15:28:31.000Z | api/caching/tasks.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | import urlparse
import requests
import logging
from website.project.model import Comment
from website import settings
logger = logging.getLogger(__name__)
def get_varnish_servers():
    """Return the list of varnish server base URLs to send BAN requests to."""
    # TODO: this should get the varnish servers from HAProxy or a setting
    return settings.VARNISH_SERVERS
def get_bannable_urls(instance):
    """Build the varnish BAN URL patterns affected by ``instance``.

    Returns a tuple ``(bannable_urls, hostname)``:
    - ``bannable_urls``: one ``scheme://varnish_host/path.*`` wildcard per
      (varnish server, affected API path) pair,
    - ``hostname``: the host of the instance's API URL, used as the ``Host``
      header when the BAN request is issued.

    Objects without an ``absolute_api_v2_url`` cannot be banned and yield
    ``([], '')``.
    """
    if not hasattr(instance, 'absolute_api_v2_url'):
        logger.warning('Tried to ban {}:{} but it didn\'t have a absolute_api_v2_url method'.format(instance.__class__, instance))
        return [], ''

    # Parse the affected URLs once, outside the per-server loop. This also
    # fixes a latent crash: previously the parse happened only inside the
    # loop, so with zero configured varnish servers the trailing
    # ``parsed_absolute_url.hostname`` was attribute access on a plain dict.
    parsed_absolute_url = urlparse.urlparse(instance.absolute_api_v2_url)
    paths = [parsed_absolute_url.path]

    if isinstance(instance, Comment):
        # A comment also invalidates its target and root-target resources.
        try:
            paths.append(urlparse.urlparse(instance.target.referent.absolute_api_v2_url).path)
        except AttributeError:
            # some referents don't have an absolute_api_v2_url
            # I'm looking at you NodeWikiPage
            pass
        try:
            paths.append(urlparse.urlparse(instance.root_target.referent.absolute_api_v2_url).path)
        except AttributeError:
            # some root_targets don't have an absolute_api_v2_url
            pass

    bannable_urls = []
    for host in get_varnish_servers():
        varnish_parsed_url = urlparse.urlparse(host)
        for path in paths:
            bannable_urls.append('{scheme}://{netloc}{path}.*'.format(
                scheme=varnish_parsed_url.scheme,
                netloc=varnish_parsed_url.netloc,
                path=path))

    return bannable_urls, parsed_absolute_url.hostname
def ban_url(instance):
    """Issue varnish BAN requests for every URL pattern affected by ``instance``.

    Best effort: request failures and non-2xx responses are logged and never
    raised, so cache invalidation problems can't break the triggering request.
    No-op unless ``settings.ENABLE_VARNISH`` is set.
    """
    # TODO: Refactor; Pull url generation into postcommit_task handling so we only ban urls once per request
    timeout = 0.3  # 300ms timeout for bans
    if settings.ENABLE_VARNISH:
        bannable_urls, hostname = get_bannable_urls(instance)

        # set() deduplicates patterns (e.g. comment target == root target).
        for url_to_ban in set(bannable_urls):
            try:
                response = requests.request('BAN', url_to_ban, timeout=timeout, headers=dict(
                    Host=hostname
                ))
            except Exception as ex:
                # Use str(ex) rather than ex.message: the ``message`` attribute
                # is deprecated and absent on many exception types (removed in
                # Python 3), so logging the failure could itself raise.
                logger.error('Banning {} failed: {}'.format(
                    url_to_ban,
                    str(ex)
                ))
            else:
                if not response.ok:
                    logger.error('Banning {} failed: {}'.format(
                        url_to_ban,
                        response.text
                    ))
                else:
                    logger.info('Banning {} succeeded'.format(
                        url_to_ban
                    ))
| 40.55814 | 130 | 0.551319 |
acf1d7930db64bd0df02801e9a94cd5cd7749d8f | 2,368 | py | Python | examples/contour_angelier_data.py | JNDanielson/mplstereonet | 6196e3fd8fff5b2868f50dbcc96eef804024f62e | [
"MIT"
] | 120 | 2015-07-09T21:18:39.000Z | 2022-03-10T14:29:02.000Z | examples/contour_angelier_data.py | JNDanielson/mplstereonet | 6196e3fd8fff5b2868f50dbcc96eef804024f62e | [
"MIT"
] | 32 | 2015-01-09T21:52:30.000Z | 2021-12-15T20:53:37.000Z | examples/contour_angelier_data.py | JNDanielson/mplstereonet | 6196e3fd8fff5b2868f50dbcc96eef804024f62e | [
"MIT"
] | 49 | 2015-02-21T21:55:05.000Z | 2021-09-27T12:13:29.000Z | """
Reproduce Figure 5 from Vollmer, 1995 to illustrate different density contouring
methods.
"""
import matplotlib.pyplot as plt
import mplstereonet
import parse_angelier_data
def plot(ax, strike, dip, rake, **kwargs):
    """Scatter the rake measurements on *ax*, then overlay density contours.

    Extra keyword arguments (e.g. ``method``, ``sigma``, ``levels``,
    ``gridsize``) are forwarded to ``ax.density_contour``.
    """
    ax.rake(strike, dip, rake, 'ko', markersize=2)
    ax.density_contour(
        strike,
        dip,
        rake,
        measurement='rakes',
        linewidths=1,
        cmap='jet',
        **kwargs
    )
# Load data from Angelier, 1979
strike, dip, rake = parse_angelier_data.load()

# Setup a subplot grid: rows are confidence levels (3/2/1 sigma),
# columns are contouring methods.
fig, axes = mplstereonet.subplots(nrows=3, ncols=4)

# Hide azimuth tick labels
for ax in axes.flat:
    ax.set_azimuth_ticks([])

# Contour levels per row (one range per sigma level).
contours = [range(2, 18, 2), range(1, 21, 2), range(1, 22, 2)]

# "Standard" Kamb contouring with different confidence levels.
for sigma, ax, contour in zip([3, 2, 1], axes[:, 0], contours):
    # We're reducing the gridsize to more closely match a traditional
    # hand-contouring grid, similar to Kamb's original work and Vollmer's
    # Figure 5. `gridsize=10` produces a 10x10 grid of density estimates.
    plot(ax, strike, dip, rake, method='kamb', sigma=sigma,
         levels=contour, gridsize=10)

# Kamb contouring with inverse-linear smoothing (after Vollmer, 1995)
for sigma, ax, contour in zip([3, 2, 1], axes[:, 1], contours):
    plot(ax, strike, dip, rake, method='linear_kamb', sigma=sigma,
         levels=contour)
    template = r'$E={}\sigma$ Contours: ${}\sigma,{}\sigma,\ldots$'
    ax.set_xlabel(template.format(sigma, *contour[:2]))

# Kamb contouring with exponential smoothing (after Vollmer, 1995)
for sigma, ax, contour in zip([3, 2, 1], axes[:, 2], contours):
    plot(ax, strike, dip, rake, method='exponential_kamb', sigma=sigma,
         levels=contour)

# Title the different methods
methods = ['Kamb', 'Linear\nSmoothing', 'Exponential\nSmoothing']
for ax, title in zip(axes[0, :], methods):
    ax.set_title(title)

# Hide top-right axis... (Need to implement Diggle & Fisher's method)
axes[0, -1].set_visible(False)

# Schmidt contouring (a.k.a. 1%)
plot(axes[1, -1], strike, dip, rake, method='schmidt', gridsize=25,
     levels=range(3, 20, 3))
axes[1, -1].set_title('Schmidt')
axes[1, -1].set_xlabel(r'Contours: $3\%,6\%,\ldots$')

# Raw data.
axes[-1, -1].set_azimuth_ticks([])
axes[-1, -1].rake(strike, dip, rake, 'ko', markersize=2)
axes[-1, -1].set_xlabel('N={}'.format(len(strike)))

plt.show()
| 34.823529 | 80 | 0.679054 |
acf1d8bd0ba60b3743c954fab4a81dce56ce95cd | 6,142 | py | Python | sample_data.py | integral001/tenhou_haiyama | 65bec663d4ceb35523e587b245b34f20ca03a079 | [
"MIT"
] | 1 | 2020-06-30T15:04:25.000Z | 2020-06-30T15:04:25.000Z | sample_data.py | integral001/tenhou_haiyama | 65bec663d4ceb35523e587b245b34f20ca03a079 | [
"MIT"
] | null | null | null | sample_data.py | integral001/tenhou_haiyama | 65bec663d4ceb35523e587b245b34f20ca03a079 | [
"MIT"
] | null | null | null | ver_test_samples = {
"mt19937ar":{
"shuffle_ver":"mt19937ar",
"seed":[0xBEF2A12D, 0x33A53D54, 0x2E284410, 0xD68656A3, 0xA9B60976, 0x39483C8E, 0x24706954, 0x22661D0E],
"yama":[10, 134, 52, 127, 75, 34, 92, 17, 50, 19, 103, 64, 79, 21, 89, 51, 54, 14, 97, 23, 68, 65, 41, 1, 53, 118, 48, 38, 49, 83, 129, 126, 76, 4, 108, 44, 13, 91, 107, 43, 96, 128, 8, 60, 73, 61, 9, 67, 37, 18, 131, 35, 90, 58, 46, 106, 78, 85, 69, 27, 31, 32, 22, 93, 122, 100, 114, 11, 33, 45, 7, 66, 95, 39, 116, 132, 55, 2, 6, 123, 56, 86, 109, 12, 81, 124, 0, 5, 110, 82, 3, 120, 20, 70, 133, 99, 77, 15, 80, 42, 84, 40, 59, 94, 119, 98, 117, 87, 28, 16, 57, 26, 36, 25, 104, 74, 101, 113, 102, 125, 121, 63, 62, 115, 71, 112, 47, 135, 111, 72, 130, 105, 30, 24, 29, 88],
"dice":[4, 5],
"url":"http://tenhou.net/0/?log=2008092000gm-0009-0000-10db094d"
},
"mt19937ar-sha512-n288":{
"shuffle_ver":"mt19937ar-sha512-n288",
"seed":[0xAFA75809,0x084799AA,0xFEF333B8,0xD22FF70F,0x15C86695,0xDD38FB6D,0x1937EFBC,0x8EB7E0DF,0xA7C60900,0xD33ACE31,0x66E3A306,0xA8E84DF0,0xD74ED188,0xA47CF339,0xC6420DCD,0xCEEB2D9C,0x7DF4CEE4,0xE4320830,0x92B61E6E,0x7A9A3911,0xB2FC0183,0x51448179,0xD397A099,0x0E0ED1F8,0x60DCFEB4,0x87795A79,0x09F0A764,0xCEF67F99,0x9F10212E,0x7CEEC581,0x8DB04598,0x675EF780],
"yama":[122, 99, 92, 116, 11, 115, 128, 63, 107, 98, 129, 10, 12, 1, 75, 34, 90, 6, 85, 24, 55, 131, 77, 110, 102, 65, 7, 0, 38, 78, 22, 60, 93, 105, 51, 117, 48, 70, 100, 42, 111, 56, 8, 106, 52, 108, 113, 18, 31, 3, 104, 19, 66, 53, 94, 17, 40, 49, 109, 118, 13, 96, 2, 112, 135, 87, 130, 69, 5, 133, 68, 27, 126, 121, 73, 114, 132, 16, 71, 45, 76, 61, 91, 39, 28, 37, 41, 26, 119, 101, 43, 21, 79, 46, 9, 86, 35, 50, 89, 95, 33, 4, 124, 47, 72, 44, 62, 123, 127, 59, 25, 29, 134, 15, 20, 64, 80, 36, 81, 103, 83, 84, 32, 57, 120, 67, 82, 14, 54, 88, 74, 97, 125, 23, 58, 30],
"dice":[5, 2],
"url":"http://tenhou.net/0/?log=2009090100gm-00a9-0000-6920f1ac"
},
"mt19937ar-sha512-n288-base64":{
"shuffle_ver":"mt19937ar-sha512-n288-base64",
"seed":"lFMmGcbVp9UtkFOWd6eDLxicuIFw2eWpoxq/3uzaRv3MHQboS6pJPx3LCxBR2Yionfv217Oe2vvC2LCVNnl+8YxCjunLHFb2unMaNzBvHWQzMz+6f3Che7EkazzaI9InRy05MXkqHOLCtVxsjBdIP13evJep6NnEtA79M+qaEHKUOKo+qhJOwBBsHsLVh1X1Qj93Sm6nNcB6Xy3fCTPp4rZLzRQsnia9d6vE0RSM+Mu2Akg5w/QWDbXxFpsVFlElfLJL+OH0vcjICATfV3RVEgKR10037B1I2zDRF3r9AhXnz+2FIdu9qWjI/YNza3Q/6X429oNBXKLSvZb8ePGJAyXabp2IbrQPX2acLhW5FqdLZAWt504fBO6tb7w41iuDh1NoZUodzgw5hhpAZ2UjznTIBiHSfL1T8L2Ho5tHN4SoZJ62xdfzLPU6Rts9pkIgWOgTfN35FhJ+6e7QYhl2x6OXnYDkbcZQFVKWfm9G6gA/gC4DjPAfBdofnJp4M+vi3YctG5ldV88A89CFRhOPP96w6m2mwUjgUmdNnWUyM7LQnYWOBBdZkTUo4eWaNC1R2zVxDSG4TCROlc/CaoHJBxcSWg+8IQb2u/Gaaj8y+9k0G4k5TEeaY3+0r0h9kY6T0p/rEk8v95aElJJU79n3wH24q3jD8oCuTNlC50sAqrnw+/GP5XfmqkVv5O/YYReSay5kg83j8tN+H+YDyuX3q+tsIRvXX5KGOTgjobknkdJcpumbHXJFle9KEQKi93f6SZjCjJvvaz/FJ4qyAeUmzKDhiM3V2zBX8GWP0Kfm9Ovs8TfCSyt6CH3PLFpnV94WDJ/Hd1MPQ3ASWUs78V3yi8XEvMc8g5l9U1MYIqVIbvU7JNY9PAB04xTbm6Orb+7sFiFLzZ4P/Xy4bdyGNmN4LbduYOjsIn4Sjetf/wxqK4tFnaw9aYlo3r6ksvZzFQl6WI1xqZlB10G9rD297A5vn5mc2mqpDnEGnOExMx8HA7MQqfPM5AYDQmOKy9VYkiiLqHk2nj4lqVeo5vvkvM1hBy+rqcabdF6XNYA2W5v0Mu3OaQuPjN75A7vjGd2t9J5t2erSmHT1WI0RCrUiensUha5obn+sZSiA8FFtSiUAtpGC7+jYRKP7EHhDwPvpUvjoQIg/vgFb5FvT4AzGcr4kxhKlaS2eofgC7Q7u/A329Kxpf54Pi7wVNvHtDkmQBFSLcMN50asBtFlg7CO+N1/nmClmfGSmBkI/SsX8WKbr0vKaFSnKmt8a19hOimJ0/G0Lj+yizqWPQ4fuoRzEwv41utfrySrzR3iLJrhk29dzUgSFaGScylepk/+RX3nge2TyqHNqOAUol4/bH4KDyDGP4QxrBYXE1qSPG+/6QECYmZh/c3I7qBSLnJ+XWqUzH0wih7bkjJWYv1gNPp6gDOFDWXimDtcnU5A2sF3vW2ui6scAnRV47DgzWk4d94uFTzXNNTDbGX1k1ZPnOlWwVLP0ojeFCrirccHui7MRov+JTd8j8iAXRykCFcD79+mB7zs/1E69rCxbuu4msBjdBFUs+ACN3D4d14EUgDNDw8lrX23g9orTMtey8/s6XmumvRRUT86wc/E3piUHyUgnELNM1UaXVL/I+zkqISjuSdLqrb+CVZ10s0ttwbEtt1CMEVN9bVLUGZzTAgwEsuYchVrdgjJY4puNJc2DNwiPFc63ek9ZsXLmF1ljVXJPXpNJhX8B0HUCNVvkzeqR5uNcUDdzYJPlZIcmNO8NW9InK0b3z3y0rfTK8jnqDDYmeLFtVonjP5rPgK3g4LvWuTmjisQIceuPjdVSZChx7lfaCopzM83rV3dPOuQOGOvVwLqzvYY5Hj4GUZ7tXtDzKRaHSkniheRU0LOmQ3Na3rUAfRzr4QFC36++FPtHoUKx4ozQB9LWjirQejsjp/Of6FZ+VWionwpT1aP87ks+Sgg0Ubpe8dccJIVLfsbcAB2i0FDWuslcFy2T7NY6
+YJdj8Dcp62ZNRBxl5AANWD51wfmkcxWU+JPoC2zOVetAOEQiA4ntfkF3Xui5a9T/ovuhTzBbI2XN3P2iZStarYMWqj0QyT5tdNdj1UfCI8NN6iIFvUBzsSwX1lhDiC+FSh6c+xDOr8tnVh6PfENwIHhfqC2cCTCLujeYno6xQvWlogN68DtqQhwdiBMe6BHX76o4RYADbiszd3h2+XRpqlc3j7OI5DDUL/GEEq13Q97Eub6VETe5LY4YIF+Y9z4B8rKMEOn15pehYymdovidT7xiZd88VFonXNJmWh9KI4+z5MxEwhT/dsCty+mxpBmOUpCPPMkLuRyd4VjH+eGnUc3BDo4og0D+vEsKbOqAT1da/dgE0XrxTsiliqNyw/6DHUB5jnKYrlcUNJb0QCpBag8b2m2/yH7dFbiK1utbnI6AoELbEDhPhfUr6cjgM07ju6xarzEMse0zN3c0w58l063I2Rf2lefFW7cU0Jc5Rh10+QKQpmiMYySYybGlt9eMMEdNrU+AhTRacGozxFRi+ij9zRoZ+X+4NIARqQJfdhV+w2365XS9bzG92weHlIJgpS0Mq+/KjLpWKh6HTeXmdGCq07/ZBx/zw9lkmQXnw3ydcpyplk8GblKn1H4jdkSIz5E3RSWzb+8C7BVcpaBcHfDejvbGU5zxT8Vq50oS1c7V9tDzhAoyYZPahgO0MSB1zMyBKfDcfHIPdoSMv+a4QL1mpSWa6NuwumWSIghOKam2bFNedHqlbrBglpfabTKSnYIibBrZCNhDtm/vG0DUtjEXx4ixM1NaYuMU7qiCmTkU3pK3BYqNXTlhK8kwZD72UkR4lzB9th5eqDsW2blED8evnujJtlTptYvoHqcNFHjnNvtuaNUWqcBXKFIl+I+PSuDaIO/paWJO0kf5VbVFpZdgvnimHZbY8uJ7s4w9W8XoegGqrVIlAT/PjE/2HdPfy75QatjPr8g0Q88wa5BpkWJeOv42NuEWKaVCK55S/kyVUkxcgNop6jWecsjjdmLoGqcaCiA18aKr6MYCtFCxMqW780AKFSUCXKI5obp1DoSsRn24Gd5ww5S74vT99VcBECDMYlvisIKe07dApsRPOhR7Z4Kt6lSelmjI6vLG0Dri1HjkiAFy8TT6Uoi+JqOBS6tv40dvPknRWyU7MmZugaZ0davAjEbvvlOiKVjkYyh7q+uh4eZ/qN2kAs/n6RyJaL4v+mx1jlQ1HvOOc+meQoXpedLt0aGMt1QU7Jh4EV68Xz6JLge+h+867RmmvkyWc8qU8GiSwbUXqIBPcKZVZgfP6nPtI7AXq1syVdQkEy2Rus1Csuf0uts",
"yama":[22, 91, 36, 115, 56, 19, 60, 16, 124, 35, 59, 43, 107, 9, 5, 11, 57, 73, 18, 41, 42, 20, 25, 30, 103, 100, 126, 130, 77, 109, 17, 15, 67, 46, 72, 65, 131, 118, 102, 61, 113, 123, 89, 122, 92, 3, 129, 81, 97, 28, 24, 76, 37, 69, 31, 26, 66, 78, 51, 54, 112, 64, 94, 38, 88, 128, 13, 133, 87, 21, 27, 114, 105, 50, 10, 29, 1, 4, 48, 70, 32, 14, 86, 33, 23, 84, 93, 12, 117, 47, 75, 96, 44, 111, 95, 62, 74, 39, 116, 63, 53, 6, 2, 58, 79, 71, 108, 68, 121, 8, 49, 55, 34, 135, 82, 125, 90, 98, 83, 45, 132, 106, 0, 101, 134, 40, 7, 85, 110, 99, 52, 80, 120, 104, 119, 127],
"dice":[6, 2],
"url":"http://tenhou.net/0/?log=2016022509gm-0009-0000-b327da61"
}
} | 267.043478 | 3,346 | 0.790948 |
acf1d8fd5133f6bae75bb93c82f2f90704b2b6bf | 1,848 | py | Python | utils.py | zonghan0904/Online-Realtime-Action-Recognition-based-on-OpenPose | 7d81a84a46ab3a0f6aafc4734cd24e2bbc164a97 | [
"Apache-2.0"
] | 552 | 2019-02-14T13:14:56.000Z | 2022-03-28T08:46:51.000Z | utils.py | zsscode/Online-Realtime-Action-Recognition-based-on-OpenPose | 33664be6ae7a26e9875f9771dc43d05a8b071dce | [
"Apache-2.0"
] | 83 | 2019-02-21T10:08:04.000Z | 2022-03-05T19:32:29.000Z | utils.py | zsscode/Online-Realtime-Action-Recognition-based-on-OpenPose | 33664be6ae7a26e9875f9771dc43d05a8b071dce | [
"Apache-2.0"
] | 234 | 2019-02-22T01:29:41.000Z | 2022-03-31T10:35:32.000Z | # -*- coding: UTF-8 -*-
import cv2 as cv
import os
import sys
from pathlib import Path
from Pose.pose_visualizer import TfPoseVisualizer
file_path = Path.cwd()
out_file_path = Path(file_path / "test_out/")
# camera resolution setting
cam_width, cam_height = 1280, 720
# input size to the model
# VGG trained in 656*368; mobilenet_thin trained in 432*368 (from tf-pose-estimation)
input_width, input_height = 656, 368
def choose_run_mode(args):
    """Open the capture source selected by *args* (video file or webcam).

    Side effect: rebinds the module-level ``out_file_path`` to the output
    video path derived from the chosen source.

    :param args: parsed CLI arguments; ``args.video`` is a file path or falsy.
    :return: an opened ``cv.VideoCapture``.
    """
    global out_file_path
    if not args.video:
        # Webcam input: pin the capture resolution to the configured size.
        cap = cv.VideoCapture(0)
        cap.set(cv.CAP_PROP_FRAME_WIDTH, cam_width)
        cap.set(cv.CAP_PROP_FRAME_HEIGHT, cam_height)
        out_file_path = str(out_file_path / 'webcam_tf_out.mp4')
        return cap
    # Video-file input: bail out early when the path does not exist.
    if not os.path.isfile(args.video):
        print("Input video file ", args.video, " doesn't exist")
        sys.exit(1)
    cap = cv.VideoCapture(args.video)
    # NOTE: [:-4] assumes a 3-character extension such as ".mp4".
    out_file_path = str(out_file_path / (args.video[:-4] + '_tf_out.mp4'))
    return cap
def load_pretrain_model(model):
    """Resolve the frozen TF graph for *model* and wrap it in a TfPoseVisualizer.

    :param model: one of ``'VGG_origin'`` or ``'mobilenet_thin'``.
    :raises KeyError: for an unknown model name.
    :raises Exception: when the graph file is missing on disk.
    """
    graph_by_model = {
        'VGG_origin': str(file_path / "Pose/graph_models/VGG_origin/graph_opt.pb"),
        'mobilenet_thin': str(file_path / "Pose/graph_models/mobilenet_thin/graph_opt.pb"),
    }
    chosen_graph = graph_by_model[model]
    if not os.path.isfile(chosen_graph):
        raise Exception('Graph file doesn\'t exist, path=%s' % chosen_graph)
    return TfPoseVisualizer(chosen_graph, target_size=(input_width, input_height))
def set_video_writer(cap, write_fps=15):
    """Create an mp4 ``cv.VideoWriter`` at ``out_file_path``.

    The output frame size mirrors the capture's current frame size.
    """
    frame_size = (round(cap.get(cv.CAP_PROP_FRAME_WIDTH)),
                  round(cap.get(cv.CAP_PROP_FRAME_HEIGHT)))
    fourcc = cv.VideoWriter_fourcc(*'mp4v')
    return cv.VideoWriter(out_file_path, fourcc, write_fps, frame_size)
acf1d92e4b3ecff5b9d1dd18417d0758c2dc0536 | 1,970 | py | Python | glossy-gorillas/tests/integration/core/views/test_listing_create.py | fliepeltje/summer-code-jam-2020 | de1287b643b610d9c5df49778bfbeae5dd115df1 | [
"MIT"
] | 40 | 2020-08-02T07:38:22.000Z | 2021-07-26T01:46:50.000Z | glossy-gorillas/tests/integration/core/views/test_listing_create.py | fliepeltje/summer-code-jam-2020 | de1287b643b610d9c5df49778bfbeae5dd115df1 | [
"MIT"
] | 134 | 2020-07-31T12:15:45.000Z | 2020-12-13T04:42:19.000Z | glossy-gorillas/tests/integration/core/views/test_listing_create.py | fliepeltje/summer-code-jam-2020 | de1287b643b610d9c5df49778bfbeae5dd115df1 | [
"MIT"
] | 101 | 2020-07-31T12:00:47.000Z | 2021-11-01T09:06:58.000Z | import pytest
from django.shortcuts import reverse
from django.contrib.messages import get_messages
from core.models import Listing
from core.factories import InventoryRecordFactory, TraderFactory
@pytest.mark.django_db
def test_get_create_view_with_valid_item_and_looged_in_user_id_is_ok(client):
trader = TraderFactory()
record = InventoryRecordFactory(owner=trader)
user = trader.user
user.set_password("password")
user.save()
client.login(username=user.username, password="password")
response = client.get(reverse("listing-create", kwargs={"item_id": record.id}))
assert response.status_code == 200
@pytest.mark.django_db
def test_get_create_view_with_not_owned_item_redirects_to_dashboard_with_message(
    client,
):
    """Requesting the create view for a record the trader does NOT own
    redirects (302) and flashes an error message.

    The record is created without ``owner=trader``, so the factory assigns
    it to a different trader.
    """
    trader = TraderFactory()
    record = InventoryRecordFactory()
    user = trader.user
    user.set_password("password")
    user.save()
    client.login(username=user.username, password="password")
    response = client.get(reverse("listing-create", kwargs={"item_id": record.id}))
    assert response.status_code == 302
    # Fixed: was `messages = messages = [...]` (accidental double assignment).
    messages = [
        str(message) for message in get_messages(response.wsgi_request)
    ]
    assert messages == ["The record you want to list does not exist"]
@pytest.mark.django_db
def test_valid_post_to_create_view_creates_listing(client):
    """POSTing a valid price for an owned record creates exactly one
    Listing, redirects (302) and flashes a success message."""
    trader = TraderFactory()
    record = InventoryRecordFactory(owner=trader)
    user = trader.user
    user.set_password("password")
    user.save()
    client.login(username=user.username, password="password")
    data = {"silver_price": 30}
    assert Listing.objects.count() == 0
    response = client.post(
        reverse("listing-create", kwargs={"item_id": record.id}), data=data
    )
    assert response.status_code == 302
    # Fixed: was `messages = messages = [...]` (accidental double assignment).
    messages = [
        str(message) for message in get_messages(response.wsgi_request)
    ]
    assert messages == ["Listing created!"]
    assert Listing.objects.count() == 1
acf1d9ba5255c28414dfa3530fc82047f1f2b95a | 111 | py | Python | entity/__init__.py | ryancollingwood/rabbit-herder | f79138d4bef4c35556738d84e36a45a6b349158a | [
"MIT"
] | null | null | null | entity/__init__.py | ryancollingwood/rabbit-herder | f79138d4bef4c35556738d84e36a45a6b349158a | [
"MIT"
] | 1 | 2018-11-03T01:10:10.000Z | 2018-11-03T01:11:24.000Z | entity/__init__.py | ryancollingwood/pyxeltest | f79138d4bef4c35556738d84e36a45a6b349158a | [
"MIT"
] | null | null | null | from .entity import Entity
from .moveable_entity import MovableEntity
from .moveable_entity import MovementType | 37 | 42 | 0.873874 |
acf1db074a09c20839ef60bedd6920f5e9e0654a | 26,797 | py | Python | bot/architecture.py | jstaker7/halite3_pytorch | dabda0f96975e32865e8f44d214b1c1ee7098af8 | [
"MIT"
] | null | null | null | bot/architecture.py | jstaker7/halite3_pytorch | dabda0f96975e32865e8f44d214b1c1ee7098af8 | [
"MIT"
] | null | null | null | bot/architecture.py | jstaker7/halite3_pytorch | dabda0f96975e32865e8f44d214b1c1ee7098af8 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch import nn
from torch.nn import BatchNorm1d as BN1d
from torch.nn import BatchNorm2d as BN2d
import torch.nn.functional as F
# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

DILATION = 1   # NOTE(review): appears unused in this file -- confirm before removing
SIZE = 16      # channel width of the conv encoder/decoder stages
L_SIZE = 32    # channel width of the bottleneck latent
F_SIZE = 16    # width of the player/opponent feature embeddings
class Tower(torch.nn.Module):
    """U-Net-style encoder/decoder over the game board.

    Encodes per-cell frame features down to a small bottleneck, fuses the
    bottleneck with a pooled opponent embedding and the player's scalar
    feature embedding, then decodes back to full resolution with additive
    skip connections.  Per-player heads emit per-cell move logits (6
    classes) and a single spawn ("generate") logit.

    Many alternative layers from earlier experiments were removed here as
    dead commented-out code; see version history if needed.
    """

    def __init__(self, num_players, window_size):
        # num_players: number of per-player output heads to build.
        # window_size: number of stacked history frames in the input.
        super(Tower, self).__init__()

        # Opponent features: 1x1 Conv1d embedding, later summed over opponents.
        self.conv1d_op1, self.bn_op1 = nn.Conv1d(3*window_size, F_SIZE, 1), BN1d(F_SIZE)

        # This player's scalar features: dense embedding.
        self.dense_p1, self.bn_p1 = nn.Linear(12*window_size, F_SIZE), BN1d(F_SIZE)

        # Encoder: a 1x1 input projection followed by six stride-2 3x3 convs,
        # each halving the spatial resolution.  Input channels: 7 map
        # features per frame plus a 6-way one-hot of previous moves for
        # every frame except the last (built by Model.forward).
        self.conv2d_1a, self.bn_1a = nn.Conv2d(7*window_size + 6*(window_size-1), SIZE, 1), BN2d(SIZE)
        self.conv2d_1d, self.bn_1d = nn.Conv2d(SIZE, SIZE, 3, padding=1, stride=2), BN2d(SIZE)
        self.conv2d_2d, self.bn_2d = nn.Conv2d(SIZE, SIZE, 3, padding=1, stride=2), BN2d(SIZE)
        self.conv2d_3d, self.bn_3d = nn.Conv2d(SIZE, SIZE, 3, padding=1, stride=2), BN2d(SIZE)
        self.conv2d_4d, self.bn_4d = nn.Conv2d(SIZE, SIZE, 3, padding=1, stride=2), BN2d(SIZE)
        self.conv2d_5d, self.bn_5d = nn.Conv2d(SIZE, SIZE, 3, padding=1, stride=2), BN2d(SIZE)
        self.conv2d_6d, self.bn_6d = nn.Conv2d(SIZE, SIZE, 3, padding=1, stride=2), BN2d(SIZE)

        # Bottleneck: fuse encoder output with the two feature embeddings
        # (hence SIZE + 2*F_SIZE input channels).
        self.conv2d_l1, self.bn_l1 = nn.Conv2d(SIZE + F_SIZE*2, L_SIZE, 1), BN2d(L_SIZE)

        # Decoder: transposed convs, each doubling the spatial resolution.
        self.conv2dt_2 = torch.nn.ConvTranspose2d(L_SIZE, SIZE, 3, stride=2, padding=1, output_padding=1)
        self.conv2dt_3 = torch.nn.ConvTranspose2d(SIZE, SIZE, 3, stride=2, padding=1, output_padding=1)
        self.conv2dt_4 = torch.nn.ConvTranspose2d(SIZE, SIZE, 3, stride=2, padding=1, output_padding=1)
        self.conv2dt_5 = torch.nn.ConvTranspose2d(SIZE, SIZE, 3, stride=2, padding=1, output_padding=1)
        self.conv2dt_6 = torch.nn.ConvTranspose2d(SIZE, SIZE, 3, stride=2, padding=1, output_padding=1)
        self.conv2dt_7 = torch.nn.ConvTranspose2d(SIZE, SIZE, 3, stride=2, padding=1, output_padding=1)

        # Per-player heads: spawn decision (dense, from the latent) and
        # per-cell move logits.  6 classes; Model.forward treats class 5 as
        # "construct" and 0-4 presumably as still/N/E/S/W -- TODO confirm.
        self.gen_l3s = nn.ModuleList([nn.Linear(L_SIZE, 1) for _ in range(num_players)])
        self.move_l2s = nn.ModuleList([nn.Conv2d(SIZE, 6, 1) for _ in range(num_players)])

    def forward(self, frames, my_player_features, opponent_features,
                train=False, num_players=1,
                valid=False,
                prev_state=None):
        """Run one stacked board through the tower.

        :param frames: (batch, channels, H, W) map features, history window
            already flattened into channels by Model.forward.
        :param my_player_features: (batch, 12*window_size) scalar features.
        :param opponent_features: (batch, opponents, 3*window_size).
        :param train/valid: accepted for interface compatibility;
            NOTE(review): not read in this body.
        :param prev_state: leftover hook for a disabled recurrent cell;
            NOTE(review): unused here -- confirm before removing.
        :return: (stacked move logits, stacked generate logits, move
            softmax probs, flattened latent, per-player move logit list,
            per-player generate logit list).
        """
        # Pool an embedding over all opponents: channels-first for Conv1d,
        # then sum over the opponent axis and broadcast to a 1x1 map.
        opponent_features = opponent_features.permute(0, 2, 1)
        ca = self.bn_op1(torch.sum(F.relu(self.conv1d_op1(opponent_features)), 2))  # TODO: Sum before the relu?
        ca = ca.unsqueeze(-1).unsqueeze(-1)

        # Embed this player's scalar features to a 1x1 spatial map.
        tl = self.bn_p1(F.relu(self.dense_p1(my_player_features)))
        tl = tl.unsqueeze(-1).unsqueeze(-1)

        # NOTE(review): unused leftovers from a removed broadcast-concat path.
        s1 = ca.size()
        s2 = tl.size()
        s3 = frames.size()

        # --- Encoder.  Trailing numbers are the original author's spatial-
        # resolution annotations; NOTE(review): the squeezes on `latent`
        # below imply the bottleneck is 1x1, so these may be stale.
        d_l2_a_1_pre = self.conv2d_1a(frames)  # 128
        d_l2_a_1 = self.bn_1a(F.relu(d_l2_a_1_pre))
        d_l2_p = self.bn_1d(F.relu(self.conv2d_1d(d_l2_a_1)))  # 64
        # The *_pre / *_a aliases are kept as skip-connection taps for the
        # decoder (they used to be extra conv layers).
        d_l3_a_pre = d_l2_p
        d_l3_a = d_l2_p
        d_l3_p = self.bn_2d(F.relu(self.conv2d_2d(d_l3_a)))  # 32
        d_l4_a_pre = d_l3_p
        d_l4_a = d_l3_p
        d_l4_p = self.bn_3d(F.relu(self.conv2d_3d(d_l4_a)))  # 16
        d_l5_a_pre = d_l4_p
        d_l5_a = d_l4_p
        d_l5_p = self.bn_4d(F.relu(self.conv2d_4d(d_l5_a)))  # 8
        d_l6_a_pre = d_l5_p
        d_l6_a = d_l5_p
        d_l6_p = self.bn_5d(F.relu(self.conv2d_5d(d_l6_a)))  # 4
        d_l7_a_pre = d_l6_p
        d_l7_a = d_l6_p
        d_l7_p = self.bn_6d(F.relu(self.conv2d_6d(d_l7_a)))  # 2

        # --- Bottleneck: concat encoder output with the two embeddings
        # along channels (spatial sizes must match, i.e. all 1x1 here).
        final_state = torch.cat([d_l7_p, ca, tl], 1)
        latent = self.bn_l1(F.relu(self.conv2d_l1(final_state)))

        # --- Decoder with additive skip connections from the encoder taps.
        u_l7_a = self.conv2dt_2(latent)  # 4
        u_l7_c = F.relu(u_l7_a + d_l7_a_pre)
        u_l7_s = u_l7_c
        u_l6_a = self.conv2dt_3(u_l7_s)  # 8
        u_l6_c = F.relu(u_l6_a + d_l6_a_pre)
        u_l6_s = u_l6_c
        u_l5_a = self.conv2dt_4(u_l6_s)  # 16
        u_l5_c = F.relu(u_l5_a + d_l5_a_pre)
        u_l5_s = u_l5_c
        u_l4_a = self.conv2dt_5(u_l5_s)  # 32
        u_l4_c = F.relu(u_l4_a + d_l4_a_pre)
        u_l4_s = u_l4_c
        u_l3_a = self.conv2dt_6(u_l4_s)  # 64
        u_l3_c = F.relu(u_l3_a + d_l3_a_pre)
        u_l3_s = u_l3_c
        u_l2_a = self.conv2dt_7(u_l3_s)  # 128
        u_l2_c = F.relu(u_l2_a + d_l2_a_1_pre)
        u_l2_s_2 = u_l2_c

        player_generate_logits = []
        player_move_logits = []
        # NOTE(review): the three lists below are never filled here --
        # leftovers from disabled auxiliary heads (will-have-ship,
        # should-construct, did-win).
        player_will_have_ship_logits = []
        player_should_construct_logits = []
        player_did_win_logits = []

        # Collapse the 1x1 bottleneck to (batch, L_SIZE) for the dense heads.
        latent = torch.squeeze(latent, 2)
        latent = torch.squeeze(latent, 2)

        # One spawn head and one move head per player.
        for i in range(num_players):
            generate_logits = self.gen_l3s[i](latent)
            moves_logits = self.move_l2s[i](u_l2_s_2)
            player_generate_logits.append(generate_logits)
            player_move_logits.append(moves_logits)

        m_logits = torch.stack(player_move_logits)
        g_logits = torch.stack(player_generate_logits)
        # Softmax over the 6 move classes (dim 2 of the stacked tensor).
        m_probs = F.softmax(torch.stack(player_move_logits), dim=2)
        return m_logits, g_logits, m_probs, latent, player_move_logits, player_generate_logits
class Model(torch.nn.Module):
    """Training/inference wrapper around :class:`Tower`.

    Converts raw numpy batches to tensors, flattens the history window into
    input channels (appending one-hot previous moves), runs the tower, and
    in train/valid mode computes the masked per-ship move loss plus the
    spawn ("generate") loss for each player.
    """

    def __init__(self, num_players, window_size):
        super(Model, self).__init__()
        self.tower = Tower(num_players, window_size)

        # Five 3x3 one-hot kernels (centre and the four orthogonal
        # neighbours); NOTE(review): only referenced by commented-out
        # "will have ship" dilation code in forward -- confirm before removing.
        kernel = [[[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                  [[0, 1, 0], [0, 0, 0], [0, 0, 0]],
                  [[0, 0, 0], [0, 0, 0], [0, 1, 0]],
                  [[0, 0, 0], [1, 0, 0], [0, 0, 0]],
                  [[0, 0, 0], [0, 0, 1], [0, 0, 0]]]
        kernel = np.transpose(kernel, (0, 1, 2))  # identity permutation (no-op)
        kernel = np.expand_dims(kernel, 1)  # -> (5, 1, 3, 3) conv weight layout
        self.kernel = torch.from_numpy(kernel).float().to(device)

        # Per-element losses; reduction is done manually with ship masks.
        self.criterion = nn.CrossEntropyLoss(reduction='none')
        self.criterion2 = torch.nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, frames, my_player_features, opponent_features,
                train=False, num_players=1,
                learning_rate=None, my_ships=None, moves=None, generate=None,
                will_have_ship=None, should_construct=None, did_win=None,
                valid=False,
                init_state=None,
                m_weights=None):
        """Run a batch through the tower.

        Inference mode (train=False, valid=False) returns numpy
        (move logits, generate logits, move probs, latent state) for the
        first processed frame.  Train mode returns the scalar loss; valid
        mode returns (loss, per-player gen losses, per-player move losses,
        per-player total losses, per-move-class accuracies).

        NOTE(review): ``learning_rate`` is accepted but never read here.
        Shapes below are inferred from the unpacking in this body:
        frames (batch, window, H, W, 7), moves (batch, window, H, W).
        """
        # Label tensors: only the last frame of the window is supervised.
        if generate is not None:
            generate = generate[:, -1:]
        if will_have_ship is not None:
            will_have_ship = will_have_ship[:, -1:]
        if should_construct is not None:
            should_construct = should_construct[:, -1:]

        frames = torch.tensor(frames, dtype=torch.float, device=device)
        my_player_features = torch.tensor(my_player_features, dtype=torch.float, device=device)
        opponent_features = torch.tensor(opponent_features, dtype=torch.float, device=device)

        if m_weights is not None:
            m_weights = torch.tensor(m_weights, dtype=torch.long, device=device)

        if moves is not None:
            moves = torch.tensor(moves, dtype=torch.long, device=device)
            moves = torch.unsqueeze(moves, -1)
            has_construct = moves == 5  # NOTE(review): computed but unused
        else:
            assert False  # Assumed to have them for now

        # Last frame's moves are the labels; earlier frames become inputs.
        moves, prev_moves = moves[:, -1:], moves[:, :-1]

        # Flatten the history window into channels: (b, n, h, w, f) ->
        # (b, 1, n*f, h, w).  The middle "1" keeps the per-step loop below
        # compatible.
        b, n, h, w, f = frames.size()
        frames = frames.permute(0, 1, 4, 2, 3).contiguous().view(b, 1, n*f, h, w)

        b, n, f = my_player_features.size()
        my_player_features = my_player_features.view(b, 1, n*f)

        b, n, o, f = opponent_features.size()
        opponent_features = opponent_features.permute(0, 2, 3, 1).contiguous().view(b, 1, o, n*f)

        # One-hot encode previous moves (6 classes) and append them as
        # extra input channels; skipped when the window has no history.
        if prev_moves.size()[1] != 0:
            pm_onehot = prev_moves.clone().repeat(1, 1, 1, 1, 6)  # 6 moves
            pm_onehot.zero_()
            pm_onehot.scatter_(4, prev_moves, 1)
            pm_onehot = pm_onehot.permute(0, 1, 4, 2, 3)
            b, n, m, h, w = pm_onehot.size()
            pm_onehot = pm_onehot.contiguous().view(b, 1, n*m, h, w)
            frames = torch.cat([frames, pm_onehot.float()], 2)

        if train or valid:
            generate = torch.tensor(generate, dtype=torch.float, device=device)
            my_ships = torch.tensor(my_ships, dtype=torch.float, device=device)
            will_have_ship = torch.tensor(will_have_ship, dtype=torch.float, device=device)
            should_construct = torch.tensor(should_construct, dtype=torch.float, device=device)
            did_win = torch.tensor(did_win, dtype=torch.float, device=device)
            generate = torch.unsqueeze(generate, -1)
            my_ships = torch.unsqueeze(my_ships, 2)
            will_have_ship = torch.unsqueeze(will_have_ship, -1)
            should_construct = torch.unsqueeze(should_construct, -1)
            did_win = torch.unsqueeze(did_win, -1)
            # Features were concatenated above, so only the last ship mask
            # is needed.
            my_ships = my_ships[:, -1:]

        if init_state is not None:
            prev_state = init_state
        else:
            # Placeholder zero state for the (disabled) recurrent cell.
            prev_state = torch.zeros(1, 2)

        loss_history = []  # NOTE(review): appended to but never read
        player_gen_losses_history = []
        player_average_frame_losses_history = []
        player_total_losses_history = []

        # With the flattening above frames.size()[1] == 1, so this loop
        # runs once per call.
        for i in range(frames.size()[1]):
            m_logits, g_logits, m_probs, new_state, player_move_logits, player_generate_logits = self.tower(frames[:, i], my_player_features[:, i], opponent_features[:, i],
                                       train=train, num_players=num_players,
                                       valid=valid,
                                       prev_state=prev_state)
            prev_state = new_state

            # Inference: hand numpy arrays straight back to the bot.
            if not train and not valid:
                m_logits = m_logits.cpu().data.numpy()
                g_logits = g_logits.cpu().data.numpy()
                m_probs = m_probs.cpu().data.numpy()
                new_state = new_state.cpu().data.numpy()
                return m_logits, g_logits, m_probs, new_state

            # Each head sees the batches of all players stacked; keep only
            # the i-th player's own chunk from the i-th head.
            cs = int(player_move_logits[0].size()[0] / num_players)  # chunk size
            moves_logits = [torch.split(x, cs) for x in player_move_logits]
            generate_logits = [torch.split(x, cs) for x in player_generate_logits]
            moves_logits = [x[i] for x, i in zip(moves_logits, range(num_players))]
            generate_logits = [x[i] for x, i in zip(generate_logits, range(num_players))]
            moves_logits = torch.cat(moves_logits, 0)
            generate_logits = torch.cat(generate_logits, 0)

            frame_moves = torch.squeeze(moves[:, i], 3)  # Too many dimensions
            losses = self.criterion(moves_logits, frame_moves)
            losses = losses * m_weights.float()  # Weights for class balancing
            losses = torch.unsqueeze(losses, -1)

            # Only cells that actually contain one of our ships contribute.
            frame_my_ships = my_ships[:, i]
            frame_my_ships = torch.squeeze(frame_my_ships, 1)
            frame_my_ships = torch.unsqueeze(frame_my_ships, -1)
            masked_loss = losses * frame_my_ships

            # Per-move-class accuracies (classes 0-5; 5 is "construct").
            _, arged = torch.max(moves_logits, 1)
            frame_moves = frame_moves.unsqueeze(-1)
            arged = arged.unsqueeze(-1)
            is_correct = (arged == frame_moves).float()
            o_m = (frame_moves == 0).float() * frame_my_ships
            n_m = (frame_moves == 1).float() * frame_my_ships
            e_m = (frame_moves == 2).float() * frame_my_ships
            s_m = (frame_moves == 3).float() * frame_my_ships
            w_m = (frame_moves == 4).float() * frame_my_ships
            c_m = (frame_moves == 5).float() * frame_my_ships
            o_a = (o_m * is_correct).sum()/o_m.sum()
            n_a = (n_m * is_correct).sum()/n_m.sum()
            e_a = (e_m * is_correct).sum()/e_m.sum()
            s_a = (s_m * is_correct).sum()/s_m.sum()
            w_a = (w_m * is_correct).sum()/w_m.sum()
            c_a = (c_m * is_correct).sum()/c_m.sum()
            # Not storing history here; assuming 1 frame
            accuracies = torch.stack([o_a, n_a, e_a, s_a, w_a, c_a])

            # Average the masked loss over the number of ships; the 1e-13
            # floor guards the first frames, which have no ships yet.
            ships_per_frame = frame_my_ships.sum(2).sum(1)
            frame_loss = masked_loss.sum(2).sum(1)
            average_frame_loss = frame_loss / torch.max(ships_per_frame, 1e-13*torch.ones_like(ships_per_frame))  # First frames have no ship

            generate_losses = self.criterion2(generate_logits, generate[:, i])

            # Per-player breakdowns for validation reporting.
            player_gen_losses = [x.mean() for x in torch.split(generate_losses, cs)]
            player_average_frame_losses = [x.mean() for x in torch.split(average_frame_loss, cs)]
            player_total_losses = [x+0.02*y for x,y in zip(player_average_frame_losses, player_gen_losses)]

            generate_losses = generate_losses.mean()
            # Combined objective: move loss plus a 0.05-weighted spawn loss.
            loss = average_frame_loss.mean() + 0.05 * generate_losses

            player_gen_losses = torch.stack(player_gen_losses)
            player_average_frame_losses = torch.stack(player_average_frame_losses)
            player_total_losses = torch.stack(player_total_losses)

            player_gen_losses_history.append(player_gen_losses)
            player_average_frame_losses_history.append(player_average_frame_losses)
            player_total_losses_history.append(player_total_losses)
            loss_history.append(loss)

        # Average the per-player breakdowns over the processed steps.
        # NOTE(review): `loss` and `accuracies` come from the last loop
        # iteration only (fine while the loop runs once).
        loss = torch.mean(loss)
        player_gen_losses = torch.mean(torch.stack(player_gen_losses_history), 0)
        player_average_frame_losses = torch.mean(torch.stack(player_average_frame_losses_history), 0)
        player_total_losses = torch.mean(torch.stack(player_total_losses_history), 0)

        if train:
            return loss
        else:
            return loss, player_gen_losses, player_average_frame_losses, player_total_losses, accuracies
| 47.012281 | 258 | 0.624025 |
acf1db1dd205ba346cd0eaebca7dc1035baa2985 | 3,767 | py | Python | test/functional/llmq-signing.py | BayerTM/DraftCoinZ | 217db2822a320d278d93dda4d3cd5dc4d01764f2 | [
"MIT"
] | 3 | 2021-03-13T23:51:40.000Z | 2021-07-09T19:15:32.000Z | test/functional/llmq-signing.py | BayerTM/DraftCoinZ | 217db2822a320d278d93dda4d3cd5dc4d01764f2 | [
"MIT"
] | null | null | null | test/functional/llmq-signing.py | BayerTM/DraftCoinZ | 217db2822a320d278d93dda4d3cd5dc4d01764f2 | [
"MIT"
] | 1 | 2021-04-27T21:33:59.000Z | 2021-04-27T21:33:59.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The DFTz Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from test_framework.mininode import *
from test_framework.test_framework import DFTzTestFramework
from test_framework.util import *
'''
llmq-signing.py
Checks LLMQs signing sessions
'''
class LLMQSigningTest(DFTzTestFramework):
    """Exercises LLMQ threshold-signing sessions: recovered-signature
    creation, conflict detection, and signature expiry under mocktime."""

    def set_test_params(self):
        # 6 nodes, 5 of them masternodes; DIP3 enforced immediately.
        self.set_dftz_test_params(6, 5, fast_dip3_enforcement=True)

    def run_test(self):
        # Enable the DKG spork so quorums can form, then mine one quorum.
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()

        self.mine_quorum()

        # Fixed request id plus two message hashes: one that gets signed
        # and one that must then be reported as conflicting.
        id = "0000000000000000000000000000000000000000000000000000000000000001"
        msgHash = "0000000000000000000000000000000000000000000000000000000000000002"
        msgHashConflict = "0000000000000000000000000000000000000000000000000000000000000003"

        def check_sigs(hasrecsigs, isconflicting1, isconflicting2):
            # True only if EVERY masternode matches all three expectations
            # (LLMQ type 100).
            for mn in self.mninfo:
                if mn.node.quorum("hasrecsig", 100, id, msgHash) != hasrecsigs:
                    return False
                if mn.node.quorum("isconflicting", 100, id, msgHash) != isconflicting1:
                    return False
                if mn.node.quorum("isconflicting", 100, id, msgHashConflict) != isconflicting2:
                    return False
            return True

        def wait_for_sigs(hasrecsigs, isconflicting1, isconflicting2, timeout):
            # Poll until the expected state is reached; fail after timeout.
            t = time.time()
            while time.time() - t < timeout:
                if check_sigs(hasrecsigs, isconflicting1, isconflicting2):
                    return
                time.sleep(0.1)
            raise AssertionError("wait_for_sigs timed out")

        def assert_sigs_nochange(hasrecsigs, isconflicting1, isconflicting2, timeout):
            # Assert the state stays unchanged for the whole window.
            t = time.time()
            while time.time() - t < timeout:
                assert(check_sigs(hasrecsigs, isconflicting1, isconflicting2))
                time.sleep(0.1)

        # Initial state: no recovered sig, no conflicts.
        wait_for_sigs(False, False, False, 1)

        # Sign 2 shares, should not result in a recovered sig yet.
        for i in range(2):
            self.mninfo[i].node.quorum("sign", 100, id, msgHash)
        assert_sigs_nochange(False, False, False, 3)

        # A third share reaches the threshold: recovered sig appears and
        # the other hash becomes conflicting.
        self.mninfo[2].node.quorum("sign", 100, id, msgHash)
        wait_for_sigs(True, False, True, 15)

        # Mine one more quorum so two are active; nothing should change.
        self.mine_quorum()
        assert_sigs_nochange(True, False, True, 3)

        # Mine 2 more quorums so the quorum used for the recovered sig
        # becomes inactive; nothing should change.
        self.mine_quorum()
        self.mine_quorum()
        assert_sigs_nochange(True, False, True, 3)

        # Fast forward 6.5 days: the recovered sig should still be valid.
        self.bump_mocktime(int(60 * 60 * 24 * 6.5))
        set_node_times(self.nodes, self.mocktime)
        # Cleanup starts every 5 seconds
        wait_for_sigs(True, False, True, 15)

        # Fast forward 1 more day: the recovered sig should have expired.
        self.bump_mocktime(int(60 * 60 * 24 * 1))
        set_node_times(self.nodes, self.mocktime)
        # Cleanup starts every 5 seconds
        wait_for_sigs(False, False, False, 15)

        # Split the shares: 2 on the conflicting hash, 3 on msgHash.  The
        # msgHash side still reaches the threshold (3 shares sufficed above).
        for i in range(2):
            self.mninfo[i].node.quorum("sign", 100, id, msgHashConflict)
        for i in range(2, 5):
            self.mninfo[i].node.quorum("sign", 100, id, msgHash)
        wait_for_sigs(True, False, True, 15)
# Standard functional-test entry point: run the test directly.
if __name__ == '__main__':
    LLMQSigningTest().main()
| 38.438776 | 123 | 0.650916 |
acf1dca5c3856b67cf19f73ac21ff0117a3be522 | 3,335 | py | Python | huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/create_grant_request.py | githubmilesma/huaweicloud-sdk-python-v3 | 9d9449ed68a609ca65f0aa50b5b2a1c28445bf03 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/create_grant_request.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/create_grant_request.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
import pprint
import re
import six
class CreateGrantRequest:
    """Request model for the KMS ``CreateGrant`` operation.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    sensitive_list = []

    openapi_types = {
        'version_id': 'str',
        'body': 'CreateGrantRequestBody'
    }

    attribute_map = {
        'version_id': 'version_id',
        'body': 'body'
    }

    def __init__(self, version_id=None, body=None):
        """CreateGrantRequest - a model defined in huaweicloud sdk"""
        self._version_id = None
        self._body = None
        self.discriminator = None
        # version_id is always assigned (even when None); body only when given.
        self.version_id = version_id
        if body is not None:
            self.body = body

    @property
    def version_id(self):
        """API version id of this CreateGrantRequest.

        :rtype: str
        """
        return self._version_id

    @version_id.setter
    def version_id(self, version_id):
        """Set the API version id of this CreateGrantRequest.

        :type version_id: str
        """
        self._version_id = version_id

    @property
    def body(self):
        """Request body of this CreateGrantRequest.

        :rtype: CreateGrantRequestBody
        """
        return self._body

    @body.setter
    def body(self, body):
        """Set the request body of this CreateGrantRequest.

        :type body: CreateGrantRequestBody
        """
        self._body = body

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            elif attr in self.sensitive_list:
                # Mask sensitive attributes in the dict representation.
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of ``to_dict()``."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is the same model type with identical attributes."""
        return isinstance(other, CreateGrantRequest) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self.__eq__(other)
| 25.075188 | 74 | 0.546927 |
acf1dd354141506074b5f635f798321e9edf2f47 | 289 | py | Python | server/helper-code/clustering/termslist2json.py | malakhovks/word2cluster | 5d191a6dff1bea9d1e4ad024a004f7a2a4f1f130 | [
"MIT"
] | null | null | null | server/helper-code/clustering/termslist2json.py | malakhovks/word2cluster | 5d191a6dff1bea9d1e4ad024a004f7a2a4f1f130 | [
"MIT"
] | null | null | null | server/helper-code/clustering/termslist2json.py | malakhovks/word2cluster | 5d191a6dff1bea9d1e4ad024a004f7a2a4f1f130 | [
"MIT"
] | null | null | null | import json
import codecs
# Read whitespace-separated terms from termslist.txt and write them out as a
# JSON array in termslist.json.
with codecs.open('./termslist.txt', encoding='UTF-8') as f:
    text = f.read()
words = text.split()
print(words)
# Bug fix: the original also computed `jsonStr = json.dumps(words)` and never
# used it; json.dump below already serializes directly to the output file.
with open('./termslist.json', 'w', encoding='utf-8') as fout:
    json.dump(words, fout, ensure_ascii=False, indent=4)
acf1dd56eb7660c6ad80f0ba46321abc64c95e3d | 2,602 | py | Python | setup.py | patel26jay/CONFIG | 6cd139415f18df3e6e41f12fa0e38d239f14d6b8 | [
"MIT"
] | null | null | null | setup.py | patel26jay/CONFIG | 6cd139415f18df3e6e41f12fa0e38d239f14d6b8 | [
"MIT"
] | null | null | null | setup.py | patel26jay/CONFIG | 6cd139415f18df3e6e41f12fa0e38d239f14d6b8 | [
"MIT"
] | null | null | null | from setuptools import setup
import os
import re
def find_version(*file_paths):
    """Extract ``__version__`` from a module file without importing it.

    Reading the file instead of importing avoids import errors when the
    module pulls in dependencies listed in install_requires. Pattern modeled
    on the Python Packaging User Guide:
    https://packaging.python.org/en/latest/single_source_version.html
    """
    module_path = os.path.join(*file_paths)
    with open(module_path) as handle:
        contents = handle.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match:
        return match.group(1)
    raise RuntimeError("Unable to find version string.")
# Package metadata for netmiko. The version is sourced from
# netmiko/__init__.py via find_version() so it lives in exactly one place.
setup(
    name='netmiko',
    version=find_version('netmiko', '__init__.py'),
    description='Multi-vendor library to simplify Paramiko SSH connections to network devices',
    url='https://github.com/ktbyers/netmiko',
    author='Kirk Byers',
    author_email='ktbyers@twb-tech.com',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    # One sub-package per supported vendor/platform driver.
    packages=['netmiko',
              'netmiko/a10',
              'netmiko/accedian',
              'netmiko/alcatel',
              'netmiko/arista',
              'netmiko/aruba',
              'netmiko/avaya',
              'netmiko/brocade',
              'netmiko/calix',
              'netmiko/ciena',
              'netmiko/cisco',
              'netmiko/dell',
              'netmiko/eltex',
              'netmiko/enterasys',
              'netmiko/extreme',
              'netmiko/f5',
              'netmiko/fortinet',
              'netmiko/checkpoint',
              'netmiko/hp',
              'netmiko/huawei',
              'netmiko/juniper',
              'netmiko/linux',
              'netmiko/mellanox',
              'netmiko/mrv',
              'netmiko/netapp',
              'netmiko/ovs',
              'netmiko/paloalto',
              'netmiko/pluribus',
              'netmiko/quanta',
              'netmiko/ruckus',
              'netmiko/terminal_server',
              'netmiko/ubiquiti',
              'netmiko/vyos'],
    install_requires=['paramiko>=1.13.0', 'scp>=0.10.0', 'pyyaml'],
    # Test-only dependencies: pip install netmiko[test]
    extras_require={
        'test': ['pytest>=2.6.0', ]
    },
)
| 32.936709 | 95 | 0.543812 |
acf1ddaf5240c58f792833e3eb49c46f06c049ed | 1,217 | py | Python | examples/search_for_studies_dicom_qr.py | sjoerdk/dicomtrolley | 9cdc769c5add188a2c93aff9c0d9f37429c477ae | [
"Apache-2.0"
] | 2 | 2021-05-11T10:50:08.000Z | 2022-03-22T20:45:06.000Z | examples/search_for_studies_dicom_qr.py | sjoerdk/dicomtrolley | 9cdc769c5add188a2c93aff9c0d9f37429c477ae | [
"Apache-2.0"
] | 5 | 2021-03-18T14:06:08.000Z | 2022-03-29T13:25:04.000Z | examples/search_for_studies_dicom_qr.py | sjoerdk/dicomtrolley | 9cdc769c5add188a2c93aff9c0d9f37429c477ae | [
"Apache-2.0"
] | 3 | 2021-03-15T02:06:37.000Z | 2021-09-27T12:55:39.000Z | """Finding studies with DICOM-QR
This example reads the following variables from the system environment:
HOST # Server to use for DICOM-QR
PORT # Port to use on host
AET # Application Entity Title - What to call yourself
AEC # Application Entity Called - The name of the server you are calling
Please set these before running this example
"""
from datetime import datetime
from os import environ
from dicomtrolley.dicom_qr import DICOMQR, DICOMQuery, QueryRetrieveLevels
# Build the DICOM query-retrieve client from environment configuration
# (HOST/PORT/AET/AEC must be set; see the module docstring).
print("Setting up DICOM query-retrieve")
dicom_qr = DICOMQR(
    host=environ["HOST"],
    port=int(environ["PORT"]),  # environment values are strings; port must be int
    aet=environ["AET"],
    aec=environ["AEC"],
)
# Query for Thorax studies of patients matching "BAL*" during March 2015,
# asking the server to return the extra fields listed in includeFields.
print("Perform a search")
studies = dicom_qr.find_studies(
    DICOMQuery(
        PatientName="BAL*",  # '*' is a DICOM wildcard match
        ProtocolName="Thorax",
        minStudyDate=datetime(year=2015, month=3, day=1),
        maxStudyDate=datetime(year=2015, month=4, day=1),
        includeFields=[
            "PatientBirthDate",
            "SOPClassesInStudy",
            "Modality",
            "StudyDescription",
            "SeriesDescription",
            "ProtocolName",
        ],
        # SERIES level: results include series information per study.
        QueryRetrieveLevel=QueryRetrieveLevels.SERIES,
    )
)
print(f"Found {len(studies)} studies")
| 26.456522 | 74 | 0.66968 |
acf1dea76c8178c6bbb317dc610c1367a906a553 | 1,714 | py | Python | src/hank/plans.py | npilon/hank | 6efa17e987d9e70fe71b20d483b9c74f96afca17 | [
"Apache-2.0"
] | null | null | null | src/hank/plans.py | npilon/hank | 6efa17e987d9e70fe71b20d483b9c74f96afca17 | [
"Apache-2.0"
] | 2 | 2021-03-25T21:15:05.000Z | 2021-04-27T23:13:08.000Z | src/hank/plans.py | npilon/hank | 6efa17e987d9e70fe71b20d483b9c74f96afca17 | [
"Apache-2.0"
] | 1 | 2021-03-27T00:05:47.000Z | 2021-03-27T00:05:47.000Z | """Plans are decorated Python callables.
Each is a specification of how to do work.
Plans can be used to create tasks,
or can receive tasks from a dispatcher to execute.
"""
from __future__ import annotations
from typing import Any, Callable, Protocol, runtime_checkable, Union
from .task import Task
@runtime_checkable
class Plan(Protocol):
    """Structural interface for a plan.

    Concrete plans are produced by the decorator functions in this module,
    which attach ``task``, ``receive`` and ``plan_path`` attributes onto a
    plain function; ``runtime_checkable`` lets ``isinstance(x, Plan)`` verify
    those attributes exist.
    """

    # Routing path identifying the plan (see _derive_plan_path below).
    plan_path: str

    # NOTE(review): task/receive are declared without a `self` parameter —
    # presumably because on concrete plans they are plain function
    # attributes, not bound methods; confirm before "fixing" the signatures.
    def task(*args, **kwargs) -> Task:
        raise NotImplementedError

    def receive(task: Task):
        raise NotImplementedError
def _derive_plan_path(fn: Callable) -> str:
    """Build the routing path for *fn* from its module and qualified name."""
    if fn.__module__ == "__main__":
        # Scripts get a bare path based on the function name alone.
        return f"/{fn.__name__}"
    dotted = f"{fn.__module__}.{fn.__name__}"
    return "/" + dotted.replace(".", "/")
def plan(fn: Callable) -> Callable:
    """A basic plan. Expects to receive a ``Task`` as an argument."""

    def _make_task(
        params: dict[str, Any],
        queue: str = None,
        store_result: Union[bool, str, None] = False,
    ):
        # Built lazily so fn.plan_path is read at task-creation time.
        return Task(
            plan=fn.plan_path,
            params=params,
            queue=queue,
            store_result=store_result,
        )

    fn.task = _make_task
    # A basic plan's receive handler is the decorated function itself.
    fn.receive = fn
    fn.plan_path = _derive_plan_path(fn)
    return fn
def argument_unpacking_plan(fn: Callable) -> Callable:
    """A plan that follows Python conventions: tasks carry ``*args`` /
    ``**kwargs`` and the function's return value is stored as the result."""

    def _task(*args, **kwargs):
        # Pack positional and keyword arguments into the task's params.
        return Task(plan=fn.plan_path, params={"args": args, "kwargs": kwargs})

    def _receive(task: Task):
        # Unpack the stored arguments, call fn, and store its return value.
        task.worker.store_result(fn(*task.params["args"], **task.params["kwargs"]))

    fn.task = _task
    fn.receive = _receive
    fn.plan_path = _derive_plan_path(fn)
    return fn
| 25.58209 | 84 | 0.670362 |
acf1df343df202df52e9fe8494df8a86a9e5fede | 194 | py | Python | Chapter 12/ch12_1.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | Chapter 12/ch12_1.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | Chapter 12/ch12_1.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | class Human:
    def __init__(self, name, age):
        """Store the given name and age on the instance."""
        self.name = name
        self.age = age
def display(self):
print ("Name : ", name, ", Age: ", age)
# Demo: create a Human and print its fields.
e1 = Human("Sandeep", 30)
e1.display()
| 17.636364 | 42 | 0.57732 |
acf1df69e16b1b984ed39b3e9b16efd1eca21ff1 | 490 | py | Python | backend/heroku/app/__init__.py | singapore19/team-5 | c57f2b604702d2e27caa8c8af3a497e299ed7608 | [
"CC-BY-3.0"
] | null | null | null | backend/heroku/app/__init__.py | singapore19/team-5 | c57f2b604702d2e27caa8c8af3a497e299ed7608 | [
"CC-BY-3.0"
] | null | null | null | backend/heroku/app/__init__.py | singapore19/team-5 | c57f2b604702d2e27caa8c8af3a497e299ed7608 | [
"CC-BY-3.0"
] | 1 | 2020-01-07T15:16:42.000Z | 2020-01-07T15:16:42.000Z | from flask import Flask
from dotenv import load_dotenv
from os.path import join, dirname
import logging
app = Flask(__name__)
# the .env file is one directory before this
env_path = join(dirname(__file__), '..', '.env')
load_dotenv(env_path)
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
# import from the package to avoid namespace conflict with app variable
# if you rename the package, there's no need to do this
from app import routes
| 28.823529 | 77 | 0.740816 |
acf1e11e98a33b229ba58c4a6cee1e0223fc2155 | 767 | py | Python | tests/factories.py | pygabo/renegaitor | a2265f6084a898d206e7e396eb51203c935fd5c2 | [
"MIT"
] | null | null | null | tests/factories.py | pygabo/renegaitor | a2265f6084a898d206e7e396eb51203c935fd5c2 | [
"MIT"
] | null | null | null | tests/factories.py | pygabo/renegaitor | a2265f6084a898d206e7e396eb51203c935fd5c2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Factories to help in tests."""
from factory import PostGenerationMethodCall, Sequence
from factory.alchemy import SQLAlchemyModelFactory
from renegaitor.database import db
from renegaitor.user.models import User
class BaseFactory(SQLAlchemyModelFactory):
    """Base factory."""

    class Meta:
        """Factory configuration."""

        # Abstract: this factory builds no model itself; subclasses set one.
        abstract = True
        # Persist created objects through the app's SQLAlchemy session.
        sqlalchemy_session = db.session
class UserFactory(BaseFactory):
    """User factory."""

    # Unique username/email per generated instance (user0, user1, ...).
    username = Sequence(lambda n: 'user{0}'.format(n))
    email = Sequence(lambda n: 'user{0}@example.com'.format(n))
    # Calls User.set_password('example') after instantiation so the stored
    # value is hashed rather than plain text.
    password = PostGenerationMethodCall('set_password', 'example')
    active = True

    class Meta:
        """Factory configuration."""

        model = User
| 23.96875 | 66 | 0.680574 |
acf1e281cf92a437a2cb97d2011168f116513e64 | 95,970 | py | Python | tensorflow/python/keras/engine/base_layer.py | dbuades/tensorflow | d18186f2e05cd8ce92351c4647c549f7b52d550f | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/engine/base_layer.py | dbuades/tensorflow | d18186f2e05cd8ce92351c4647c549f7b52d550f | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/engine/base_layer.py | dbuades/tensorflow | d18186f2e05cd8ce92351c4647c549f7b52d550f | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect # Necessary supplement to tf_inspect to deal with variadic args.
import itertools
import numpy as np
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.core.framework import node_def_pb2
from tensorflow.python import autograph
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import values as distribute_values
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import function
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.mixed_precision.experimental import autocast_variable
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
# A module that only depends on `keras.layers` import these from here.
from tensorflow.python.keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
# Prefix that is added to the TF op layer names.
_TF_OP_LAYER_NAME_PREFIX = 'tf_op_layer_'
@keras_export('keras.layers.Layer')
class Layer(module.Module):
"""Base layer class.
This is the class from which all layers inherit.
A layer is a class implementing common neural networks operations, such
as convolution, batch norm, etc. These operations require managing weights,
losses, updates, and inter-layer connectivity.
Users will just instantiate a layer and then treat it as a callable.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Save configuration in member variables
* `build()`: Called once from `__call__`, when we know the shapes of inputs
and `dtype`. Should have the calls to `add_weight()`, and then
call the super's `build()` (which sets `self.built = True`, which is
nice in case the user wants to call `build()` manually before the
first `__call__`).
* `call()`: Called in `__call__` after making sure `build()` has been called
once. Should actually perform the logic of applying the layer to the
input tensors (which should be passed in as the first argument).
Arguments:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
dynamic: Set this to `True` if your layer should only be run eagerly, and
should not be used to generate a static computation graph.
This would be the case for a Tree-RNN or a recursive network,
for example, or generally for any layer that manipulates tensors
using Python control flow. If `False`, we assume that the layer can
safely be used to generate a static computation graph.
Read-only properties:
name: The name of the layer (string).
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
updates: List of update ops of this layer.
losses: List of losses added by this layer.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
Mutable properties:
trainable: Whether the layer should be trained (boolean).
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
"""
# See tf.Module for the usage of this property.
# The key for _obj_reference_counts_dict is a Trackable, which could be a
# variable or layer etc. tf.Module._flatten will fail to flatten the key
# since it is trying to convert Trackable to a string. This attribute can be
# ignored even after the fix of nest lib, since the trackable object should
# already been available as individual attributes. _obj_reference_counts_dict
# just contains a copy of them.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_obj_reference_counts_dict',),
module.Module._TF_MODULE_IGNORED_PROPERTIES
))
  @trackable.no_automatic_dependency_tracking
  def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,
               **kwargs):
    """Initializes the layer's bookkeeping state; no variables are created.

    Arguments:
      trainable: Boolean, whether the layer's variables should be trainable.
      name: String name of the layer (auto-generated when None).
      dtype: Default dtype of the layer's weights (None means use the type
        of the first input).
      dynamic: Whether the layer must only be run eagerly (cannot be used
        to generate a static computation graph).
      **kwargs: Only the keys listed in `allowed_kwargs` below are accepted.
    """
    # These properties should be set by the user via keyword arguments.
    # note that 'dtype', 'input_shape' and 'batch_input_shape'
    # are only applicable to input layers: do not pass these keywords
    # to non-input layers.
    allowed_kwargs = {
        'input_shape',
        'batch_input_shape',
        'batch_size',
        'weights',
        'activity_regularizer',
    }
    # Validate optional keyword arguments.
    generic_utils.validate_kwargs(kwargs, allowed_kwargs)
    # Mutable properties
    # Indicates whether the layer's weights are updated during training
    # and whether the layer's updates are run during training.
    self._trainable = trainable
    # A stateful layer is a layer whose updates are run during inference too,
    # for instance stateful RNNs.
    self.stateful = False
    # Indicates whether `build` needs to be called upon layer call, to create
    # the layer's weights.
    self.built = False
    # Provides information about which inputs are compatible with the layer.
    self.input_spec = None
    self.supports_masking = False
    self._init_set_name(name)
    self._activity_regularizer = kwargs.pop('activity_regularizer', None)
    self._maybe_create_attribute('_trainable_weights', [])
    self._maybe_create_attribute('_non_trainable_weights', [])
    self._updates = []
    # A list of zero-argument lambdas which return Tensors, used for variable
    # regularizers.
    self._callable_losses = []
    # A list of symbolic Tensors containing activity regularizers and losses
    # manually added through `add_loss` in graph-building mode.
    self._losses = []
    # A list of loss values containing activity regularizers and losses
    # manually added through `add_loss` during eager execution. It is cleared
    # after every batch.
    # Because we plan on eventually allowing a same model instance to be trained
    # in eager mode or graph mode alternatively, we need to keep track of
    # eager losses and symbolic losses via separate attributes.
    self._eager_losses = []
    # A list of metric instances corresponding to the symbolic metric tensors
    # added using the `add_metric` API.
    self._metrics = []
    # TODO(psv): Remove this property.
    # A dictionary that maps metric names to metric result tensors. The results
    # are the running averages of metric values over an epoch.
    self._metrics_tensors = {}
    self._set_dtype_and_policy(dtype)
    self._call_convention = (base_layer_utils
                             .CallConvention.EXPLICIT_INPUTS_ARGUMENT)
    # Dependencies tracked via attribute assignment.
    self._maybe_create_attribute('_layers', [])
    # These lists will be filled via successive calls
    # to self._add_inbound_node().
    self._inbound_nodes = []
    self._outbound_nodes = []
    # Inspect `call` once here so __call__ knows whether to forward a
    # `training` argument.
    call_argspec = tf_inspect.getfullargspec(self.call)
    self._expects_training_arg = 'training' in call_argspec.args
    # Whether the `call` method can be used to build a TF graph without issues.
    self._dynamic = dynamic
    # Manage input shape information if passed.
    if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
      # In this case we will later create an input layer
      # to insert before the current layer
      if 'batch_input_shape' in kwargs:
        batch_input_shape = tuple(kwargs['batch_input_shape'])
      elif 'input_shape' in kwargs:
        if 'batch_size' in kwargs:
          batch_size = kwargs['batch_size']
        else:
          batch_size = None
        batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
      self._batch_input_shape = batch_input_shape
    # Manage initial weight values if passed.
    if 'weights' in kwargs:
      self._initial_weights = kwargs['weights']
    else:
      self._initial_weights = None
  def build(self, input_shape):
    """Creates the variables of the layer (optional, for subclass implementers).

    This is a method that implementers of subclasses of `Layer` or `Model`
    can override if they need a state-creation step in-between
    layer instantiation and layer call.

    This is typically used to create the weights of `Layer` subclasses.

    Arguments:
      input_shape: Instance of `TensorShape`, or list of instances of
        `TensorShape` if the layer expects a list of inputs
        (one instance per input).
    """
    # Marking the layer as built prevents __call__ from invoking build again.
    self.built = True
  @doc_controls.for_subclass_implementers
  def call(self, inputs, **kwargs):  # pylint: disable=unused-argument
    """This is where the layer's logic lives.

    Arguments:
      inputs: Input tensor, or list/tuple of input tensors.
      **kwargs: Additional keyword arguments.

    Returns:
      A tensor or list/tuple of tensors.
    """
    # Identity by default; subclasses override this with the actual
    # computation.
    return inputs
  @doc_controls.for_subclass_implementers
  def add_weight(self,
                 name=None,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 constraint=None,
                 partitioner=None,
                 use_resource=None,
                 synchronization=tf_variables.VariableSynchronization.AUTO,
                 aggregation=tf_variables.VariableAggregation.NONE,
                 **kwargs):
    """Adds a new variable to the layer.

    Arguments:
      name: Variable name.
      shape: Variable shape. Defaults to scalar if unspecified.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      initializer: initializer instance (callable).
      regularizer: regularizer instance (callable).
      trainable: whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
        Note, if the current variable scope is marked as non-trainable
        then this parameter is ignored and any added variables are also
        marked as non-trainable. `trainable` defaults to `True` unless
        `synchronization` is set to `ON_READ`.
      constraint: constraint instance (callable).
      partitioner: Partitioner to be passed to the `Trackable` API.
      use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      **kwargs: Additional keyword arguments. Accepted values are `getter` and
        `collections`.

    Returns:
      The created variable. Usually either a `Variable` or `ResourceVariable`
      instance. If `partitioner` is not `None`, a `PartitionedVariable`
      instance is returned.

    Raises:
      RuntimeError: If called with partitioned variable regularization and
        eager execution is enabled.
      ValueError: When giving unsupported dtype and no initializer or when
        trainable has been set to True with synchronization set as `ON_READ`.
    """
    if shape is None:
      shape = ()
    # Validate optional keyword arguments.
    for kwarg in kwargs:
      if kwarg not in ['getter', 'collections', 'experimental_autocast']:
        raise TypeError('Unknown keyword argument:', kwarg)
    getter = kwargs.pop('getter', None)
    collections_arg = kwargs.pop('collections', None)
    # 'experimental_autocast' can be set to False by the caller to indicate an
    # AutoCastVariable should never be created.
    autocast = kwargs.pop('experimental_autocast', True)
    if dtype is None:
      dtype = self.dtype or backend.floatx()
    dtype = dtypes.as_dtype(dtype)
    # The first weight created pins the layer's default dtype.
    if self._dtype is None:
      self._dtype = dtype.base_dtype.name
    initializer = initializers.get(initializer)
    regularizer = regularizers.get(regularizer)
    constraint = constraints.get(constraint)
    if synchronization == tf_variables.VariableSynchronization.ON_READ:
      if trainable:
        raise ValueError(
            'Synchronization value can be set to '
            'VariableSynchronization.ON_READ only for non-trainable variables. '
            'You have specified trainable=True and '
            'synchronization=VariableSynchronization.ON_READ.')
      else:
        # Set trainable to be false when variable is to be synced on read.
        trainable = False
    elif trainable is None:
      trainable = True
    # Initialize variable when no initializer provided
    if initializer is None:
      # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
      if dtype.is_floating:
        initializer = initializers.glorot_uniform()
      # If dtype is DT_INT/DT_UINT, provide a default value `zero`
      # If dtype is DT_BOOL, provide a default value `FALSE`
      elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
        initializer = initializers.zeros()
      # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
      else:
        raise ValueError('An initializer for variable %s of type %s is required'
                         ' for layer %s' % (name, dtype.base_dtype, self.name))
    variable = self._add_variable_with_custom_getter(
        name=name,
        shape=shape,
        # TODO(allenl): a `make_variable` equivalent should be added as a
        # `Trackable` method.
        getter=getter or base_layer_utils.make_variable,
        # Manage errors in Layer rather than Trackable.
        overwrite=True,
        initializer=initializer,
        dtype=dtype,
        constraint=constraint,
        trainable=trainable,
        partitioner=partitioner,
        use_resource=use_resource,
        collections=collections_arg,
        synchronization=synchronization,
        aggregation=aggregation)
    # Register the variable with the Keras backend so it is initialized.
    backend.track_variable(variable)
    if autocast and self._mixed_precision_policy.should_cast_variables:
      if isinstance(variable, distribute_values.DistributedVariable):
        variable = autocast_variable.AutoCastDistributedVariable(variable)
      else:
        variable = autocast_variable.AutoCastVariable(variable)
    if regularizer is not None:
      # TODO(fchollet): in the future, this should be handled at the
      # level of variable creation, and weight regularization losses
      # should be variable attributes.
      name_in_scope = variable.name[:variable.name.find(':')]
      self._handle_weight_regularization(name_in_scope,
                                         variable,
                                         regularizer)
    if trainable:
      self._trainable_weights.append(variable)
    else:
      self._non_trainable_weights.append(variable)
    return variable
  def get_config(self):
    """Returns the config of the layer.

    A layer config is a Python dictionary (serializable)
    containing the configuration of a layer.
    The same layer can be reinstantiated later
    (without its trained weights) from this configuration.

    The config of a layer does not include connectivity
    information, nor the layer class name. These are handled
    by `Network` (one layer of abstraction above).

    Returns:
      Python dictionary.
    """
    config = {'name': self.name, 'trainable': self.trainable}
    # Only input-like layers carry a static batch input shape.
    if hasattr(self, '_batch_input_shape'):
      config['batch_input_shape'] = self._batch_input_shape
    if hasattr(self, 'dtype'):
      config['dtype'] = self.dtype
    # TODO(reedwm): Handle serializing self._mixed_precision_policy.
    return config
  @classmethod
  def from_config(cls, config):
    """Creates a layer from its config.

    This method is the reverse of `get_config`,
    capable of instantiating the same layer from the config
    dictionary. It does not handle layer connectivity
    (handled by Network), nor weights (handled by `set_weights`).

    Arguments:
      config: A Python dictionary, typically the
        output of get_config.

    Returns:
      A layer instance.
    """
    # Config keys are expected to match the constructor's keyword arguments.
    return cls(**config)
  def compute_output_shape(self, input_shape):
    """Computes the output shape of the layer.

    Assumes that the layer will be built
    to match that input shape provided.

    Arguments:
      input_shape: Shape tuple (tuple of integers)
        or list of shape tuples (one per output tensor of the layer).
        Shape tuples can include None for free dimensions,
        instead of an integer.

    Returns:
      An input shape tuple.
    """
    if context.executing_eagerly():
      # In this case we build the model first in order to do shape inference.
      # This is acceptable because the framework only calls
      # `compute_output_shape` on shape values that the layer would later be
      # built for. It would however cause issues in case a user attempts to
      # use `compute_output_shape` manually (these users will have to
      # implement `compute_output_shape` themselves).
      self.build(input_shape)
      with context.graph_mode():
        # Trace the layer in a scratch graph with placeholder inputs, then
        # read the static shapes off the resulting symbolic outputs.
        graph = func_graph.FuncGraph('graph')
        with graph.as_default():
          input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
          inputs = nest.map_structure(
              base_layer_utils.generate_placeholders_from_shape, input_shape)
          try:
            if self._expects_training_arg:
              outputs = self(inputs, training=False)
            else:
              outputs = self(inputs)
          except TypeError:
            raise NotImplementedError('We could not automatically infer '
                                      'the static shape of the layer\'s output.'
                                      ' Please implement the '
                                      '`compute_output_shape` method on your '
                                      'layer (%s).' % self.__class__.__name__)
      return nest.map_structure(lambda t: t.shape, outputs)
    raise NotImplementedError
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
"""Computes an output mask tensor.
Arguments:
inputs: Tensor or list of tensors.
mask: Tensor or list of tensors.
Returns:
None or a tensor (or list of tensors,
one per output tensor of the layer).
"""
if not self.supports_masking:
if any(m is not None for m in nest.flatten(mask)):
raise TypeError('Layer ' + self.name + ' does not support masking, '
'but was passed an input_mask: ' + str(mask))
# masking not explicitly supported: return None as mask.
return None
# if masking is explicitly supported, by default
# carry over the input mask
return mask
  def __call__(self, inputs, *args, **kwargs):
    """Wraps `call`, applying pre- and post-processing steps.

    Arguments:
      inputs: input tensor(s).
      *args: additional positional arguments to be passed to `self.call`.
      **kwargs: additional keyword arguments to be passed to `self.call`.

    Returns:
      Output tensor(s).

    Note:
      - The following optional keyword arguments are reserved for specific
        uses:
        * `training`: Boolean scalar tensor of Python boolean indicating
          whether the `call` is meant for training or inference.
        * `mask`: Boolean input mask.
      - If the layer's `call` method takes a `mask` argument (as some Keras
        layers do), its default value will be set to the mask generated
        for `inputs` by the previous layer (if `inputs` did come from
        a layer that generated a corresponding mask, i.e. if it came from
        a Keras layer with masking support).

    Raises:
      ValueError: if the layer's `call` method returns None (an invalid
        value).
    """
    input_list = nest.flatten(inputs)
    # Accept NumPy inputs by converting to Tensors.
    if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):
      # Don't call `ops.convert_to_tensor` on all `inputs` because
      # `SparseTensors` can't be converted to `Tensor`.
      def _convert_non_tensor(x):
        if isinstance(x, (np.ndarray, float, int)):
          return ops.convert_to_tensor(x)
        return x
      inputs = nest.map_structure(_convert_non_tensor, inputs)
      input_list = nest.flatten(inputs)
    # We will attempt to build a TF graph if & only if all inputs are symbolic.
    # This is always the case in graph mode. It can also be the case in eager
    # mode when all inputs can be traced back to `keras.Input()` (when building
    # models using the functional API).
    build_graph = tf_utils.are_all_symbolic_tensors(input_list)
    if build_graph:
      # Only create Keras history if at least one tensor originates from a
      # `keras.Input`. Otherwise this Layer may be being used outside the Keras
      # framework.
      if base_layer_utils.needs_keras_history(inputs):
        base_layer_utils.create_keras_history(inputs)
    # Handle Keras mask propagation from previous layer to current layer.
    previous_mask = None
    if self._should_compute_mask:
      previous_mask = base_layer_utils.collect_previous_mask(inputs)
      if ('mask' in self._call_fn_args and 'mask' not in kwargs and
          not generic_utils.is_all_none(previous_mask)):
        # The previous layer generated a mask, and mask was not explicitly
        # passed to __call__, hence we set previous_mask as the default value.
        kwargs['mask'] = previous_mask
    # Clear eager losses on top level model call.
    # We are clearing the losses only on the top level model call and not on
    # every layer/model call because layer/model may be reused.
    if (base_layer_utils.is_in_eager_or_tf_function() and
        not base_layer_utils.call_context().in_call):
      self._clear_losses()
    with base_layer_utils.call_context().enter(self, inputs, build_graph):
      # Check input assumptions set after layer building, e.g. input shape.
      if build_graph:
        # Symbolic execution on symbolic tensors. We will attempt to build
        # the corresponding TF subgraph inside `backend.get_graph()`.
        input_spec.assert_input_compatibility(self.input_spec, inputs,
                                              self.name)
        graph = backend.get_graph()
        with graph.as_default(), backend.name_scope(self._name_scope()):
          # Build layer if applicable (if the `build` method has been
          # overridden).
          self._maybe_build(inputs)
          # Wrapping `call` function in autograph to allow for dynamic control
          # dependencies in call. We are limiting this to subclassed layers as
          # autograph is strictly needed only for subclassed layers.
          # As an additional optimization, we avoid calling autograph if the
          # function is already converted or marked for no conversion. The
          # effect is largely cosmetic - it avoids four extra frames in the
          # call stack.
          if (base_layer_utils.is_subclassed(self)
              and not hasattr(self.call, '__ag_compiled')):
            decorators, original_func = tf_decorator.unwrap(self.call)
            converted_func = autograph.convert(recursive=True)(original_func)
            if decorators:
              # Re-attach any decorators that wrapped the original `call`.
              call_fn = tf_decorator.rewrap(self.call, original_func,
                                            converted_func)
            else:
              call_fn = converted_func
          else:
            call_fn = self.call
          # Explicitly pass the learning phase placeholder to `call` if
          # the `training` argument was left unspecified by the user.
          # This behavior is restricted to the managed Keras FuncGraph.
          # TODO(omalleyt): Reconcile this with new `trainable` behavior
          # when available.
          learning_phase_passed_by_framework = False
          if (self._expects_training_arg and
              not base_layer_utils.training_arg_passed_to_call(
                  tf_inspect.getfullargspec(self.call), args, kwargs) and
              base_layer_utils.is_in_keras_graph()):
            learning_phase_passed_by_framework = True
            kwargs['training'] = backend.learning_phase()
          if not self.dynamic:
            try:
              with base_layer_utils.autocast_context_manager(
                  input_list,
                  self._mixed_precision_policy.should_cast_variables):
                # Add auto_control_deps in V2 when they are not already added by
                # a `tf.function`.
                if (ops.executing_eagerly_outside_functions() and
                    not base_layer_utils.is_in_eager_or_tf_function()):
                  with auto_control_deps.AutomaticControlDependencies() as acd:
                    outputs = call_fn(inputs, *args, **kwargs)
                    # Wrap Tensors in `outputs` in `tf.identity` to avoid
                    # circular dependencies.
                    outputs = base_layer_utils.mark_as_return(outputs, acd)
                else:
                  outputs = call_fn(inputs, *args, **kwargs)
            except TypeError as e:
              # A common failure mode: iterating over a symbolic Tensor in a
              # layer that was not declared dynamic. Re-raise with guidance.
              exception_str = str(e)
              exception_msg = 'Tensor objects are only iterable when eager'
              if exception_msg in exception_str:
                raise TypeError('You are attempting to use Python control '
                                'flow in a layer that was not declared to be '
                                'dynamic. Pass `dynamic=True` to the class '
                                'constructor.\nEncountered error:\n"""\n' +
                                exception_str + '\n"""')
              raise
          else:
            # We will use static shape inference to return symbolic tensors
            # matching the specifications of the layer outputs.
            # Since `self.dynamic` is True, we will never attempt to
            # run the underlying TF graph (which is disconnected).
            # TODO(fchollet): consider py_func as an alternative, which
            # would enable us to run the underlying graph if needed.
            outputs = self._symbolic_call(inputs)
          if outputs is None:
            raise ValueError('A layer\'s `call` method should return a '
                             'Tensor or a list of Tensors, not None '
                             '(layer: ' + self.name + ').')
          if base_layer_utils.have_all_keras_metadata(inputs):
            if learning_phase_passed_by_framework:
              # Don't record a framework-injected `training` arg in metadata.
              kwargs.pop('training')
            inputs, outputs = self._set_connectivity_metadata_(
                inputs, outputs, args, kwargs)
          self._handle_activity_regularization(inputs, outputs)
          self._set_mask_metadata(inputs, outputs, previous_mask)
          if hasattr(self, '_set_inputs') and not self.inputs:
            # Subclassed network: explicitly set metadata normally set by
            # a call to self._set_inputs().
            # TODO(b/120997007): This should be done in Eager as well, but
            # causes garbage collection issues because of the placeholders
            # created on the default Keras graph.
            self._set_inputs(inputs, outputs)
      else:
        # Eager execution on data tensors.
        with backend.name_scope(self._name_scope()):
          self._maybe_build(inputs)
          with base_layer_utils.autocast_context_manager(
              input_list, self._mixed_precision_policy.should_cast_variables):
            outputs = self.call(inputs, *args, **kwargs)
          self._handle_activity_regularization(inputs, outputs)
          self._set_mask_metadata(inputs, outputs, previous_mask)
    return outputs
@property
def dtype(self):
return self._dtype
@property
def name(self):
return self._name
@property
def dynamic(self):
return self._dynamic
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
self._trainable = value
for layer in getattr(self, '_layers', []):
layer.trainable = value
@property
def activity_regularizer(self):
"""Optional regularizer function for the output of this layer."""
return self._activity_regularizer
@activity_regularizer.setter
def activity_regularizer(self, regularizer):
"""Optional regularizer function for the output of this layer."""
self._activity_regularizer = regularizer
@property
def trainable_weights(self):
if self.trainable:
nested = self._gather_children_attribute('trainable_weights')
return self._trainable_weights + nested
else:
return []
@property
def non_trainable_weights(self):
if self.trainable:
nested = self._gather_children_attribute('non_trainable_weights')
return self._non_trainable_weights + nested
else:
nested = self._gather_children_attribute('weights')
return self._trainable_weights + self._non_trainable_weights + nested
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.trainable_weights + self.non_trainable_weights
  @property
  def updates(self):
    """Update ops of this layer and its children, filtered for relevance."""
    # A layer that is neither trainable nor stateful has no updates to run.
    if not self.trainable and not self.stateful:
      return []
    with backend.get_graph().as_default():
      updates = []
      for u in self._updates:
        # Filter out updates created in a cross-replica context when in a
        # replica context and vice versa.
        if (getattr(u, '_in_cross_replica_context', False) !=
            ds_context.in_cross_replica_context()):
          continue
        if callable(u):
          # Callable updates are evaluated lazily, here, on access.
          try:
            u = u()
          except ValueError as e:
            if 'Trying to capture a tensor from an inner function' in str(e):
              base_layer_utils.check_graph_consistency(
                  method='add_update', force_raise=True)
            raise
        base_layer_utils.check_graph_consistency(u, method='add_update')
        updates.append(u)
      return updates + self._gather_children_attribute('updates')
@property
def losses(self):
"""Losses which are associated with this `Layer`.
Variable regularization tensors are created when this property is accessed,
so it is eager safe: accessing `losses` under a `tf.GradientTape` will
propagate gradients back to the corresponding variables.
Returns:
A list of tensors.
"""
collected_losses = []
# If any eager losses are present, we assume the model to be part of an
# eager training loop (either a custom one or the one used when
# `run_eagerly=True`), and so we always return just the eager losses in that
# case.
if self._eager_losses:
collected_losses.extend(self._eager_losses)
else:
collected_losses.extend(self._losses)
for regularizer in self._callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
collected_losses.append(loss_tensor)
return collected_losses + self._gather_children_attribute('losses')
  @doc_controls.for_subclass_implementers
  def add_loss(self, losses, inputs=None):
    """Add loss tensor(s), potentially dependent on layer inputs.

    Some losses (for instance, activity regularization losses) may be
    dependent on the inputs passed when calling a layer. Hence, when reusing
    the same layer on different inputs `a` and `b`, some entries in
    `layer.losses` may be dependent on `a` and some on `b`. This method
    automatically keeps track of dependencies.

    This method can be used inside a subclassed layer or model's `call`
    function, in which case `losses` should be a Tensor or list of Tensors.

    Example:

    ```python
    class MyLayer(tf.keras.layers.Layer):
      def call(inputs, self):
        self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True)
        return inputs
    ```

    This method can also be called directly on a Functional Model during
    construction. In this case, any loss Tensors passed to this Model must
    be symbolic and be able to be traced back to the model's `Input`s. These
    losses become part of the model's topology and are tracked in
    `get_config`.

    Example:

    ```python
    inputs = tf.keras.Input(shape=(10,))
    x = tf.keras.layers.Dense(10)(inputs)
    outputs = tf.keras.layers.Dense(1)(x)
    model = tf.keras.Model(inputs, outputs)
    # Activity regularization.
    model.add_loss(tf.abs(tf.reduce_mean(x)))
    ```

    If this is not the case for your loss (if, for example, your loss
    references a `Variable` of one of the model's layers), you can wrap your
    loss in a zero-argument lambda. These losses are not tracked as part of
    the model's topology since they can't be serialized.

    Example:

    ```python
    inputs = tf.keras.Input(shape=(10,))
    x = tf.keras.layers.Dense(10)(inputs)
    outputs = tf.keras.layers.Dense(1)(x)
    model = tf.keras.Model(inputs, outputs)
    # Weight regularization.
    model.add_loss(lambda: tf.reduce_mean(x.kernel))
    ```

    The `get_losses_for` method allows to retrieve the losses relevant to a
    specific set of inputs.

    Arguments:
      losses: Loss tensor, or list/tuple of tensors. Rather than tensors,
        losses may also be zero-argument callables which create a loss
        tensor.
      inputs: Ignored when executing eagerly. If anything other than None is
        passed, it signals the losses are conditional on some of the layer's
        inputs, and thus they should only be run where these inputs are
        available. This is the case for activity regularization losses, for
        instance. If `None` is passed, the losses are assumed
        to be unconditional, and will apply across all dataflows of the layer
        (e.g. weight regularization losses).
    """
    def _tag_unconditional(loss):
      # Evaluates callables and marks the resulting tensor as conditional or
      # unconditional based on whether `inputs` was given.
      if callable(loss):
        loss = loss()
      if loss is None:
        return None  # Will be filtered out when computing the .losses property
      if not tensor_util.is_tensor(loss):
        loss = ops.convert_to_tensor(loss, dtype=backend.floatx())
      loss._unconditional_loss = (inputs is None)  # pylint: disable=protected-access
      return loss

    # Partition the incoming losses into callables (evaluated lazily on
    # `.losses` access), eager tensors, and symbolic tensors.
    losses = nest.flatten(losses)
    callable_losses = []
    eager_losses = []
    symbolic_losses = []
    for loss in losses:
      if callable(loss):
        callable_losses.append(functools.partial(_tag_unconditional, loss))
        continue
      if loss is None:
        continue
      if not tensor_util.is_tensor(loss):
        loss = ops.convert_to_tensor(loss, dtype=backend.floatx())
      # TF Functions should take the eager path.
      if (tf_utils.is_symbolic_tensor(loss) and
          not base_layer_utils.is_in_tf_function()):
        symbolic_losses.append(_tag_unconditional(loss))
        base_layer_utils.check_graph_consistency(loss, method='add_loss')
      elif tensor_util.is_tensor(loss):
        eager_losses.append(_tag_unconditional(loss))
    self._callable_losses += callable_losses
    in_call_context = base_layer_utils.call_context().in_call
    if eager_losses and not in_call_context:
      raise ValueError(
          'Expected a symbolic Tensors or a callable for the loss value. '
          'Please wrap your loss computation in a zero argument `lambda`.')
    self._eager_losses += eager_losses
    if in_call_context:
      for symbolic_loss in symbolic_losses:
        self._losses.append(symbolic_loss)
    else:
      for symbolic_loss in symbolic_losses:
        if getattr(self, '_is_graph_network', False):
          new_layers = base_layer_utils.create_keras_history(symbolic_loss)
          # Losses must be keyed on inputs no matter what in order to
          # be supported in DistributionStrategy.
          add_loss_layer = AddLoss(unconditional=False)
          add_loss_layer(symbolic_loss)
          new_layers.append(add_loss_layer)
          self._insert_layers(new_layers)
        else:
          # Possibly a loss was added in a Layer's `build`.
          self._losses.append(symbolic_loss)
  @trackable.no_automatic_dependency_tracking
  def _clear_losses(self):
    """Used every step in eager to reset losses."""
    # The reset must recurse: tracked sub-layers accumulate their own eager
    # losses independently of this layer.
    self._eager_losses = []
    if hasattr(self, '_layers'):
      for layer in trackable_layer_utils.filter_empty_layer_containers(
          self._layers):
        layer._clear_losses()
@property
def metrics(self):
return self._metrics + self._gather_children_attribute('metrics')
  @doc_controls.for_subclass_implementers
  def add_metric(self, value, aggregation=None, name=None):
    """Adds metric tensor to the layer.

    Args:
      value: Metric tensor.
      aggregation: Sample-wise metric reduction function. If
        `aggregation=None`, it indicates that the metric tensor provided has
        been aggregated already. eg, `bin_acc = BinaryAccuracy(name='acc')`
        followed by `model.add_metric(bin_acc(y_true, y_pred))`. If
        aggregation='mean', the given metric tensor will be sample-wise
        reduced using `mean` function. eg,
        `model.add_metric(tf.reduce_sum(outputs), name='output_mean',
        aggregation='mean')`.
      name: String metric name.

    Raises:
      ValueError: If `aggregation` is anything other than None or `mean`.
    """
    if aggregation is not None and aggregation != 'mean':
      raise ValueError(
          'We currently support only `mean` sample-wise metric aggregation. '
          'You provided aggregation=`%s`' % aggregation)
    from_metric_obj = hasattr(value, '_metric_obj')
    is_symbolic = tf_utils.is_symbolic_tensor(value)
    in_call_context = base_layer_utils.call_context().in_call
    if name is None and not from_metric_obj:
      # Eg. `self.add_metric(math_ops.reduce_sum(x), aggregation='mean')`
      # In eager mode, we use metric name to lookup a metric. Without a name,
      # a new Mean metric wrapper will be created on every model/layer call.
      # So, we raise an error when no name is provided.
      # We will do the same for symbolic mode for consistency although a name
      # will be generated if no name is provided.
      # We will not raise this error in the following use case for the sake of
      # consistency as name is provided in the metric constructor.
      # mean = metrics.Mean(name='my_metric')
      # model.add_metric(mean(outputs))
      raise ValueError('Please provide a name for your metric like '
                       '`self.add_metric(tf.reduce_sum(inputs), '
                       'name=\'mean_activation\', aggregation=\'mean\')`')
    if in_call_context:
      # TF Function path should take the eager path.
      if is_symbolic and not base_layer_utils.is_in_tf_function():
        self._symbolic_add_metric(value, aggregation, name)
      else:
        self._eager_add_metric(value, aggregation, name)
    else:
      if not is_symbolic:
        raise ValueError('Expected a symbolic Tensor for the metric value, '
                         'received: ' + str(value))
      # Possibly a metric was added in a Layer's `build`.
      if not getattr(self, '_is_graph_network', False):
        with backend.get_graph().as_default():
          self._symbolic_add_metric(value, aggregation, name)
        return
      if from_metric_obj:
        raise ValueError('Using the result of calling a `Metric` object '
                         'when calling `add_metric` on a Functional '
                         'Model is not supported. Please pass the '
                         'Tensor to monitor directly.')
      # Insert layers into the Keras Graph Network.
      new_layers = base_layer_utils.create_keras_history(value)
      add_metric_layer = AddMetric(aggregation, name)
      add_metric_layer(value)
      new_layers.append(add_metric_layer)
      self._insert_layers(new_layers)
  @deprecation.deprecated_args(None, '`inputs` is now automatically inferred',
                               'inputs')
  @doc_controls.for_subclass_implementers
  def add_update(self, updates, inputs=None):
    """Add update op(s), potentially dependent on layer inputs.

    Weight updates (for instance, the updates of the moving mean and variance
    in a BatchNormalization layer) may be dependent on the inputs passed
    when calling a layer. Hence, when reusing the same layer on
    different inputs `a` and `b`, some entries in `layer.updates` may be
    dependent on `a` and some on `b`. This method automatically keeps track
    of dependencies.

    The `get_updates_for` method allows to retrieve the updates relevant to a
    specific set of inputs.

    This call is ignored when eager execution is enabled (in that case,
    variable updates are run on the fly and thus do not need to be tracked
    for later execution).

    Arguments:
      updates: Update op, or list/tuple of update ops, or zero-arg callable
        that returns an update op. A zero-arg callable should be passed in
        order to disable running the updates by setting `trainable=False`
        on this Layer, when executing in Eager mode.
      inputs: If anything other than None is passed, it signals the updates
        are conditional on some of the layer's inputs,
        and thus they should only be run where these inputs are available.
        This is the case for BatchNormalization updates, for instance.
        If None, the updates will be taken into account unconditionally,
        and you are responsible for making sure that any dependency they
        might have is available at runtime.
        A step counter might fall into this category.
    """
    updates = generic_utils.to_list(updates)
    call_context = base_layer_utils.call_context()
    # All updates can be run immediately in Eager or in a tf.function.
    if base_layer_utils.is_in_eager_or_tf_function():
      if not call_context.frozen:
        for update in updates:
          if callable(update):
            update()
      return
    # Determine which inputs the updates may depend on: the inputs of the
    # current call, or all previously-seen inbound tensors.
    if call_context.in_call:
      relevant_inputs = call_context.inputs
    else:
      inbound_nodes = getattr(self, '_inbound_nodes', [])
      relevant_inputs = [node.input_tensors for node in inbound_nodes]

    def process_update(x):
      """Standardize update ops.

      Arguments:
        x: Tensor, op, or callable.

      Returns:
        An update op.
      """
      if callable(x):
        update = lambda: process_update(x())
        if not ops.executing_eagerly_outside_functions():
          # In V1 mode, call the callable right away and process. This is
          # needed for TPU strategy.
          return update()
      elif isinstance(x, ops.Operation):
        update = x
      elif hasattr(x, 'op'):
        update = x.op
      else:
        update = ops.convert_to_tensor(x)
      # Tag the update as conditional/unconditional and record the replica
      # context it was created in, for later filtering in `updates`.
      reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])
      update._unconditional_update = update not in reachable
      update._in_cross_replica_context = (
          ds_context.has_strategy() and ds_context.in_cross_replica_context())
      return update

    updates = [process_update(x) for x in updates]
    # Non-callable Updates are run automatically inside `call` in V2, so
    # they do not need to be tracked later.
    if ops.executing_eagerly_outside_functions() and call_context.in_call:
      updates = [u for u in updates if callable(u)]
    self._updates += updates
def set_weights(self, weights):
"""Sets the weights of the layer, from Numpy arrays.
Arguments:
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the layer (i.e. it should match the
output of `get_weights`).
Raises:
ValueError: If the provided weights list does not match the
layer's specifications.
"""
params = self.weights
if len(params) != len(weights):
raise ValueError('You called `set_weights(weights)` on layer "' +
self.name + '" with a weight list of length ' +
str(len(weights)) + ', but the layer was expecting ' +
str(len(params)) + ' weights. Provided weights: ' +
str(weights)[:50] + '...')
if not params:
return
weight_value_tuples = []
for p, w in zip(params, weights):
ref_shape = p.shape
if not ref_shape.is_compatible_with(w.shape):
raise ValueError('Layer weight shape ' + str(ref_shape) +
' not compatible with '
'provided weight shape ' + str(w.shape))
weight_value_tuples.append((p, w))
backend.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current weights of the layer.
Returns:
Weights values as a list of numpy arrays.
"""
params = self.weights
return backend.batch_get_value(params)
def get_updates_for(self, inputs):
"""Retrieves updates relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of update ops of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional updates.
return [u for u in self.updates if u._unconditional_update]
# Requesting input-conditional updates.
updates = [u for u in self.updates if not u._unconditional_update]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, updates)
return [u for u in updates if u in reachable]
def get_losses_for(self, inputs):
"""Retrieves losses relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of loss tensors of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional losses.
return [l for l in self.losses if l._unconditional_loss]
# Requesting input-conditional losses.
losses = [l for l in self.losses if not l._unconditional_loss]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, losses)
return [l for l in losses if l in reachable]
def get_input_mask_at(self, node_index):
"""Retrieves the input mask tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple inputs).
"""
inputs = self.get_input_at(node_index)
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
def get_output_mask_at(self, node_index):
"""Retrieves the output mask tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple outputs).
"""
output = self.get_output_at(node_index)
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
@property
def input_mask(self):
"""Retrieves the input mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Input mask tensor (potentially None) or list of input
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layers.
"""
inputs = self.input
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
@property
def output_mask(self):
"""Retrieves the output mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Output mask tensor (potentially None) or list of output
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layers.
"""
output = self.output
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
def get_input_shape_at(self, node_index):
"""Retrieves the input shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_shapes',
'input shape')
def get_output_shape_at(self, node_index):
"""Retrieves the output shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_shapes',
'output shape')
def get_input_at(self, node_index):
"""Retrieves the input tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_tensors',
'input')
def get_output_at(self, node_index):
"""Retrieves the output tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_tensors',
'output')
@property
def input(self):
"""Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layers.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name +
' is not connected, no input to return.')
return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
@property
def output(self):
"""Retrieves the output tensor(s) of a layer.
Only applicable if the layer has exactly one output,
i.e. if it is connected to one incoming layer.
Returns:
Output tensor or list of output tensors.
Raises:
AttributeError: if the layer is connected to more than one incoming
layers.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
@property
def input_shape(self):
"""Retrieves the input shape(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer, or if all inputs
have the same shape.
Returns:
Input shape, as an integer shape tuple
(or list of shape tuples, one tuple per input tensor).
Raises:
AttributeError: if the layer has no defined input_shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined input shape.')
all_input_shapes = set(
[str(node.input_shapes) for node in self._inbound_nodes])
if len(all_input_shapes) == 1:
return self._inbound_nodes[0].input_shapes
else:
raise AttributeError('The layer "' + str(self.name) +
' has multiple inbound nodes, '
'with different input shapes. Hence '
'the notion of "input shape" is '
'ill-defined for the layer. '
'Use `get_input_shape_at(node_index)` '
'instead.')
def count_params(self):
"""Count the total number of scalars composing the weights.
Returns:
An integer count.
Raises:
ValueError: if the layer isn't yet built
(in which case its weights aren't yet defined).
"""
if not self.built:
if self.__class__.__name__ == 'Sequential':
with tf_utils.maybe_init_scope(self):
self.build() # pylint: disable=no-value-for-parameter
else:
raise ValueError('You tried to call `count_params` on ' + self.name +
', but the layer isn\'t built. '
'You can build it manually via: `' + self.name +
'.build(batch_input_shape)`.')
return int(sum(np.prod(w.shape.as_list()) for w in self.weights))
@property
def output_shape(self):
"""Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined output shape.')
all_output_shapes = set(
[str(node.output_shapes) for node in self._inbound_nodes])
if len(all_output_shapes) == 1:
return self._inbound_nodes[0].output_shapes
else:
raise AttributeError('The layer "%s"'
' has multiple inbound nodes, '
'with different output shapes. Hence '
'the notion of "output shape" is '
'ill-defined for the layer. '
'Use `get_output_shape_at(node_index)` '
'instead.' % self.name)
@property
@doc_controls.do_not_doc_inheritable
def inbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._inbound_nodes
@property
@doc_controls.do_not_doc_inheritable
def outbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._outbound_nodes
##############################################################################
# Methods & attributes below are public aliases of other methods. #
##############################################################################
def apply(self, inputs, *args, **kwargs):
"""Apply the layer on a input.
This is an alias of `self.__call__`.
Arguments:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
return self.__call__(inputs, *args, **kwargs)
@doc_controls.for_subclass_implementers
def add_variable(self, *args, **kwargs):
"""Alias for `add_weight`."""
return self.add_weight(*args, **kwargs)
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Alias of `self.weights`.
Returns:
A list of variables.
"""
return self.weights
@property
def trainable_variables(self):
return self.trainable_weights
  @property
  def non_trainable_variables(self):
    """Alias of `self.non_trainable_weights` (list of frozen variables)."""
    return self.non_trainable_weights
##############################################################################
# Methods & attributes below are all private and only used by the framework. #
##############################################################################
  def _set_dtype_and_policy(self, dtype):
    """Sets self._dtype and self._mixed_precision_policy.
    Args:
      dtype: A `policy.Policy`, anything accepted by `dtypes.as_dtype`, or a
        falsy value meaning "fall back to the global policy".
    """
    if dtype:
      if isinstance(dtype, policy.Policy):
        # An explicit policy supplies the variable dtype directly.
        self._mixed_precision_policy = dtype
        self._dtype = self._mixed_precision_policy.default_variable_dtype
      else:
        # If a non-policy dtype is passed, no casting should be done. So we use
        # the "infer" policy, which does no casting.
        self._mixed_precision_policy = policy.Policy('infer')
        self._dtype = dtypes.as_dtype(dtype).name
    else:
      self._mixed_precision_policy = policy.global_policy()
      # If the global policy has not been set, it will be an "infer" policy
      # without a default variable dtype, and so self._dtype will be None. In
      # that case, self._dtype will be set when the layer is built or called.
      self._dtype = self._mixed_precision_policy.default_variable_dtype
  def _name_scope(self):
    """Returns the name scope to use for ops created by this layer."""
    return self.name
def _init_set_name(self, name, zero_based=True):
if not name:
self._name = backend.unique_object_name(
generic_utils.to_snake_case(self.__class__.__name__),
zero_based=zero_based)
else:
self._name = name
def _get_existing_metric(self, name=None):
match = [m for m in self._metrics if m.name == name]
if not match:
return
if len(match) > 1:
raise ValueError(
'Please provide different names for the metrics you have added. '
'We found {} metrics with the name: "{}"'.format(len(match), name))
return match[0]
  def _eager_add_metric(self, value, aggregation=None, name=None):
    """Records a metric `value` while executing eagerly.
    Args:
      value: Metric result, possibly carrying a `_metric_obj` attribute when
        produced by a Metric object.
      aggregation: Aggregation mode; must be provided when `value` does not
        come from a Metric object.
      name: Optional string name for the metric.
    """
    # If the given metric is available in `metrics` list we just update state
    # on it, otherwise we create a new metric instance and
    # add it to the `metrics` list.
    metric_obj = getattr(value, '_metric_obj', None)
    if metric_obj:
      name = metric_obj.name
    match = self._get_existing_metric(name)
    if match:
      # Tensors that come from a Metric object already updated the Metric state.
      if not metric_obj:
        match(value)
      return
    if not metric_obj:
      assert aggregation is not None
      metric_obj, _ = base_layer_utils.create_mean_metric(value, name)
    self._metrics.append(metric_obj)
  def _symbolic_add_metric(self, value, aggregation=None, name=None):
    """Records a metric `value` during symbolic (graph) execution.
    Args:
      value: Metric result tensor (already aggregated) or a raw value tensor.
      aggregation: None when `value` is an aggregated Metric result; 'mean'
        to wrap a raw tensor in a Mean metric.
      name: Optional string name for the metric.
    Raises:
      ValueError: If an aggregated tensor is passed that did not come from a
        `tf.keras.metrics.Metric` instance.
    """
    base_layer_utils.check_graph_consistency(value, method='add_metric')
    match = self._get_existing_metric(name)
    if aggregation is None:
      # Iterate over the metrics and check if the given metric exists already.
      # This can happen when a metric instance is created in subclassed model
      # layer `__init__` and we have tracked that instance already in
      # model.__setattr__.
      if match:
        result_tensor = value
        metric_obj = match
      elif hasattr(value, '_metric_obj'):
        # We track the instance using the metadata on the result tensor.
        result_tensor = value
        metric_obj = result_tensor._metric_obj
        self._metrics.append(metric_obj)
      else:
        raise ValueError(
            'We do not support adding an aggregated metric result tensor that '
            'is not the output of a `tf.keras.metrics.Metric` metric instance. '
            'Without having access to the metric instance we cannot reset the '
            'state of a metric after every epoch during training. You can '
            'create a `tf.keras.metrics.Metric` instance and pass the result '
            'here or pass an un-aggregated result with `aggregation` parameter '
            'set as `mean`. For example: `self.add_metric(tf.reduce_sum(inputs)'
            ', name=\'mean_activation\', aggregation=\'mean\')`')
    else:
      # If a non-aggregated tensor is given as input (ie. `aggregation` is
      # explicitly set to `mean`), we wrap the tensor in `Mean` metric.
      if match:
        result_tensor = match(value)
        metric_obj = match
      else:
        metric_obj, result_tensor = base_layer_utils.create_mean_metric(
            value, name)
        self._metrics.append(metric_obj)
    self._metrics_tensors[metric_obj.name] = result_tensor
def _handle_weight_regularization(self, name, variable, regularizer):
"""Create lambdas which compute regularization losses."""
def _loss_for_variable(v):
"""Creates a regularization loss `Tensor` for variable `v`."""
with backend.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
return regularization
if isinstance(variable, tf_variables.PartitionedVariable):
for v in variable:
self.add_loss(functools.partial(_loss_for_variable, v))
else:
self.add_loss(functools.partial(_loss_for_variable, variable))
  def _handle_activity_regularization(self, inputs, outputs):
    """Adds an activity-regularization loss for each output tensor."""
    # Apply activity regularization.
    # Note that it should be applied every time the layer creates a new
    # output, since it is output-specific.
    if self._activity_regularizer:
      output_list = nest.flatten(outputs)
      with backend.name_scope('ActivityRegularizer'):
        for output in output_list:
          activity_loss = self._activity_regularizer(output)
          # Dimension 0 of the output shape is treated as the batch size here.
          batch_size = math_ops.cast(
              array_ops.shape(output)[0], activity_loss.dtype)
          # Make activity regularization strength batch-agnostic.
          mean_activity_loss = activity_loss / batch_size
          base_layer_utils.check_graph_consistency(
              mean_activity_loss, method='activity_regularizer')
          self.add_loss(mean_activity_loss, inputs=inputs)
  def _set_mask_metadata(self, inputs, outputs, previous_mask):
    """Computes and attaches `_keras_mask` metadata to each output tensor."""
    flat_outputs = nest.flatten(outputs)
    # Masks may already be present when `call` computed them jointly with the
    # outputs, or when every output already carries `_keras_mask`.
    mask_already_computed = (
        getattr(self, '_compute_output_and_mask_jointly', False) or
        all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))
    if not mask_already_computed:
      if hasattr(self, 'compute_mask'):
        output_masks = self.compute_mask(inputs, previous_mask)
        # `compute_mask` can return a single `None` even when a Layer
        # has multiple outputs.
        if output_masks is None:
          flat_masks = [None for _ in flat_outputs]
        else:
          flat_masks = nest.flatten(output_masks)
      else:
        flat_masks = [None for _ in flat_outputs]
      for output, mask in zip(flat_outputs, flat_masks):
        try:
          output._keras_mask = mask
        except AttributeError:
          # C Type such as np.ndarray.
          pass
    if tf_utils.are_all_symbolic_tensors(flat_outputs):
      for output in flat_outputs:
        if getattr(output, '_keras_mask', None) is not None:
          # Do not track masks for `TensorFlowOpLayer` construction.
          output._keras_mask._keras_history_checked = True
  def _set_connectivity_metadata_(self, inputs, outputs, args, kwargs):
    """Records this call as a `Node` connecting `inputs` to `outputs`.
    Args:
      inputs: Input tensor(s) of the call.
      outputs: Output tensor(s) produced by the call.
      args: Extra positional arguments passed to `__call__`.
      kwargs: Keyword arguments passed to `__call__`.
    Returns:
      Tuple of (inputs, outputs); pass-through outputs are replaced with
      identity copies so tensor metadata is not shared with the inputs.
    Raises:
      TypeError: If positional arguments beyond `inputs` are supplied under a
        call convention that forbids them.
    """
    call_convention = getattr(
        self, '_call_convention',
        base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
    if args:
      if call_convention == (base_layer_utils
                             .CallConvention.EXPLICIT_INPUTS_ARGUMENT):
        raise TypeError(
            'This layer ("{}") takes an `inputs` argument in `call()`, '
            'and only the `inputs` argument may be specified as a positional '
            'argument. Pass everything else as a keyword argument '
            '(those arguments will not be tracked '
            'as inputs to the layer).'.format(self.name))
      elif call_convention == (base_layer_utils
                               .CallConvention.SINGLE_POSITIONAL_ARGUMENT):
        raise TypeError(
            'This layer ("{}") takes a single positional argument in `call()`,'
            ' which is by convention the `inputs` argument, '
            'and only this argument may be specified as a positional argument. '
            'Pass everything else as a keyword argument '
            '(those arguments will not be tracked '
            'as inputs to the layer).'.format(self.name))
    # If the layer returns tensors from its inputs, unmodified,
    # we copy them to avoid loss of tensor metadata.
    output_ls = nest.flatten(outputs)
    inputs_ls = nest.flatten(inputs)
    output_ls_copy = []
    for x in output_ls:
      if x in inputs_ls:
        with backend.name_scope(self.name):
          x = array_ops.identity(x)
      output_ls_copy.append(x)
    outputs = nest.pack_sequence_as(outputs, output_ls_copy)
    inputs, kwargs = self._inputs_from_call_args(
        call_args=(inputs,) + args, call_kwargs=kwargs)
    # Add an inbound node to the layer, so it can keep track of this call.
    # This updates the layer history of the output tensor(s).
    kwargs.pop('mask', None)  # `mask` should not be serialized.
    self._add_inbound_node(
        input_tensors=inputs, output_tensors=outputs, arguments=kwargs)
    return inputs, outputs
  def _inputs_from_call_args(self, call_args, call_kwargs):
    """Get Layer inputs from __call__ *args and **kwargs.
    Args:
      call_args: The positional arguments passed to __call__.
      call_kwargs: The keyword argument dict passed to __call__.
    Returns:
      A tuple of (inputs, non_input_kwargs). These may be the same objects as
      were passed in (call_args and call_kwargs).
    """
    call_convention = getattr(
        self, '_call_convention',
        base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
    if (call_convention in (
        base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT,
        base_layer_utils.CallConvention.SINGLE_POSITIONAL_ARGUMENT)):
      assert len(call_args) == 1  # TypeError raised earlier in __call__.
      return call_args[0], call_kwargs
    else:
      call_arg_spec = tf_inspect.getfullargspec(self.call)
      # There is no explicit "inputs" argument expected or provided to
      # call(). Arguments which have default values are considered non-inputs,
      # and arguments without are considered inputs.
      if call_arg_spec.defaults:
        if call_arg_spec.varargs is not None:
          raise TypeError(
              'Layers may not accept both positional arguments and '
              'arguments with default values (unable to determine which '
              'are inputs to the layer). '
              'Issue occurred with layer "%s"' % (self.name))
        # Defaults align with the trailing entries of `args`.
        keyword_arg_names = set(
            call_arg_spec.args[-len(call_arg_spec.defaults):])
      else:
        keyword_arg_names = set()
      # Training is never an input argument name, to allow signatures like
      # call(x, training).
      keyword_arg_names.add('training')
      _, unwrapped_call = tf_decorator.unwrap(self.call)
      bound_args = inspect.getcallargs(
          unwrapped_call, *call_args, **call_kwargs)
      if call_arg_spec.varkw is not None:
        # Flatten **kwargs into the bound-args mapping; all of them are
        # treated as non-input keyword arguments.
        var_kwargs = bound_args.pop(call_arg_spec.varkw)
        bound_args.update(var_kwargs)
        keyword_arg_names = keyword_arg_names.union(var_kwargs.keys())
      all_args = call_arg_spec.args
      if all_args and bound_args[all_args[0]] is self:
        # Ignore the 'self' argument of methods
        bound_args.pop(call_arg_spec.args[0])
        all_args = all_args[1:]
      non_input_arg_values = {}
      input_arg_values = []
      remaining_args_are_keyword = False
      for argument_name in all_args:
        if argument_name in keyword_arg_names:
          remaining_args_are_keyword = True
        else:
          if remaining_args_are_keyword:
            raise TypeError(
                'Found a positional argument in a layer call after a non-input '
                'argument. All arguments after "training" must be keyword '
                'arguments, and are not tracked as inputs to the layer. '
                'Issue occurred with layer "%s"' % (self.name))
        if remaining_args_are_keyword:
          non_input_arg_values[argument_name] = bound_args[argument_name]
        else:
          input_arg_values.append(bound_args[argument_name])
      if call_arg_spec.varargs is not None:
        input_arg_values.extend(bound_args[call_arg_spec.varargs])
      return input_arg_values, non_input_arg_values
  def _add_inbound_node(self,
                        input_tensors,
                        output_tensors,
                        arguments=None):
    """Internal method to create an inbound node for the layer.
    Arguments:
      input_tensors: list of input tensors.
      output_tensors: list of output tensors.
      arguments: dictionary of keyword arguments that were passed to the
        `call` method of the layer at the call that created the node.
    """
    inbound_layers = nest.map_structure(lambda t: t._keras_history.layer,
                                        input_tensors)
    node_indices = nest.map_structure(lambda t: t._keras_history.node_index,
                                      input_tensors)
    tensor_indices = nest.map_structure(lambda t: t._keras_history.tensor_index,
                                        input_tensors)
    # Create node, add it to inbound nodes.
    # (`Node.__init__` appends itself to this layer's `_inbound_nodes` and to
    # each inbound layer's `_outbound_nodes`, so the instance is not kept.)
    Node(
        self,
        inbound_layers=inbound_layers,
        node_indices=node_indices,
        tensor_indices=tensor_indices,
        input_tensors=input_tensors,
        output_tensors=output_tensors,
        arguments=arguments)
    # Update tensor history metadata.
    # The metadata attribute consists of
    # 1) a layer instance
    # 2) a node index for the layer
    # 3) a tensor index for the node.
    # The allows layer reuse (multiple nodes per layer) and multi-output
    # or multi-input layers (e.g. a layer can return multiple tensors,
    # and each can be sent to a different layer).
    for i, tensor in enumerate(nest.flatten(output_tensors)):
      tensor._keras_history = KerasHistory(self,
                                           len(self._inbound_nodes) - 1, i)  # pylint: disable=protected-access
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
"""Private utility to retrieves an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Arguments:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
"""
if not self._inbound_nodes:
raise RuntimeError('The layer has never been called '
'and thus has no defined ' + attr_name + '.')
if not len(self._inbound_nodes) > node_index:
raise ValueError('Asked to get ' + attr_name + ' at node ' +
str(node_index) + ', but the layer has only ' +
str(len(self._inbound_nodes)) + ' inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if isinstance(values, list) and len(values) == 1:
return values[0]
else:
return values
  def _maybe_build(self, inputs):
    """Builds the layer on first use, inferring dtype and shapes from `inputs`."""
    # Check input assumptions set before layer building, e.g. input rank.
    if not self.built:
      input_spec.assert_input_compatibility(
          self.input_spec, inputs, self.name)
      input_list = nest.flatten(inputs)
      if input_list and self._dtype is None:
        try:
          # Adopt the dtype of the first input when none was configured.
          self._dtype = input_list[0].dtype.base_dtype.name
        except AttributeError:
          pass
      input_shapes = None
      if all(hasattr(x, 'shape') for x in input_list):
        input_shapes = nest.map_structure(lambda x: x.shape, inputs)
      # Only call `build` if the user has manually overridden the build method.
      if not hasattr(self.build, '_is_default'):
        # Any setup work performed only once should happen in an `init_scope`
        # to avoid creating symbolic Tensors that will later pollute any eager
        # operations.
        with tf_utils.maybe_init_scope(self):
          self.build(input_shapes)
      # We must set self.built since user defined build functions are not
      # constrained to set self.built.
      self.built = True
      # Optionally load weight values specified at layer instantiation.
      if getattr(self, '_initial_weights', None) is not None:
        self.set_weights(self._initial_weights)
        self._initial_weights = None
  def _symbolic_call(self, inputs):
    """Builds placeholder outputs via `compute_output_shape`; `call` is not run."""
    input_shapes = nest.map_structure(lambda x: x.shape, inputs)
    output_shapes = self.compute_output_shape(input_shapes)
    def _make_placeholder_like(shape):
      ph = backend.placeholder(shape=shape, dtype=self.dtype)
      # A None mask signals "no mask" to downstream masking logic.
      ph._keras_mask = None
      return ph
    return nest.map_structure(_make_placeholder_like, output_shapes)
  @property
  def _obj_reference_counts(self):
    """A dictionary counting the number of attributes referencing an object."""
    # Created lazily so subclasses that assign attributes before invoking
    # `Layer.__init__` still work (see `_maybe_create_attribute`).
    self._maybe_create_attribute('_obj_reference_counts_dict',
                                 object_identity.ObjectIdentityDictionary())
    return self._obj_reference_counts_dict
  def _maybe_create_attribute(self, name, default_value):
    """Create the attribute with the default value if it hasn't been created.
    This is useful for fields that are used for tracking purposes, such as
    `_trainable_weights` or `_layers`. Note that a user could create a layer
    subclass and assign an internal field before invoking `Layer.__init__()`;
    `__setattr__()` needs to create the tracking fields and `__init__()` must
    not override them.
    Args:
      name: String, the name of the attribute.
      default_value: Object, the default value of the attribute.
    """
    if not hasattr(self, name):
      super(Layer, self).__setattr__(name, default_value)
  def __delattr__(self, name):
    """Deletes attribute `name`, untracking any Layer/Variable it referenced."""
    # For any super.__delattr__() call, we will directly use the implementation
    # in Trackable and skip the behavior in AutoTrackable. The Layer was
    # originally use Trackable as base class, the change of using Module as base
    # class forced us to have AutoTrackable in the class hierarchy. Skipping
    # the __delattr__ and __setattr__ in AutoTrackable will keep the status quo.
    existing_value = getattr(self, name, None)
    # If this value is replacing an existing object assigned to an attribute, we
    # should clean it out to avoid leaking memory. First we check if there are
    # other attributes referencing it.
    reference_counts = self._obj_reference_counts
    if existing_value not in reference_counts:
      super(tracking.AutoTrackable, self).__delattr__(name)
      return
    reference_count = reference_counts[existing_value]
    if reference_count > 1:
      # There are other remaining references. We can't remove this object from
      # _layers etc.
      reference_counts[existing_value] = reference_count - 1
      super(tracking.AutoTrackable, self).__delattr__(name)
      return
    else:
      # This is the last remaining reference.
      del reference_counts[existing_value]
    super(tracking.AutoTrackable, self).__delattr__(name)
    # Rebuild the tracking lists without the removed object; identity (`is`)
    # comparison avoids dropping distinct-but-equal objects.
    if (isinstance(existing_value, Layer)
        or trackable_layer_utils.has_weights(existing_value)):
      super(tracking.AutoTrackable, self).__setattr__(
          '_layers',
          [l for l in self._layers if l is not existing_value])
    if isinstance(existing_value, tf_variables.Variable):
      super(tracking.AutoTrackable, self).__setattr__(
          '_trainable_weights',
          [w for w in self._trainable_weights if w is not existing_value])
      super(tracking.AutoTrackable, self).__setattr__(
          '_non_trainable_weights',
          [w for w in self._non_trainable_weights if w is not existing_value])
  def __setattr__(self, name, value):
    """Assigns `value` to `name`, tracking nested Layers and Variables."""
    if (name == '_self_setattr_tracking' or
        not getattr(self, '_self_setattr_tracking', True) or
        getattr(self, '_is_graph_network', False) or
        # Exclude @property.setters from tracking
        hasattr(self.__class__, name)):
      try:
        super(tracking.AutoTrackable, self).__setattr__(name, value)
      except AttributeError:
        raise AttributeError(
            ('Can\'t set the attribute "{}", likely because it conflicts with '
             'an existing read-only @property of the object. Please choose a '
             'different name.').format(name))
      return
    # Keep track of trackable objects, for the needs of `Network.save_weights`.
    value = data_structures.sticky_attribute_assignment(
        trackable=self, value=value, name=name)
    reference_counts = self._obj_reference_counts
    reference_counts[value] = reference_counts.get(value, 0) + 1
    # Clean out the old attribute, which clears _layers and _trainable_weights
    # if necessary.
    try:
      self.__delattr__(name)
    except AttributeError:
      pass
    # TODO(scottzhu): Need to track Module object as well for weight tracking.
    # Be careful about metric if it becomes a Module in future.
    # Append value to self._layers if relevant
    if (isinstance(value, Layer) or
        trackable_layer_utils.has_weights(value)):
      self._maybe_create_attribute('_layers', [])
      # We need to check object identity to avoid de-duplicating empty
      # container types which compare equal.
      if not any((layer is value for layer in self._layers)):
        self._layers.append(value)
        if hasattr(value, '_use_resource_variables'):
          # Legacy layers (V1 tf.layers) must always use
          # resource variables.
          value._use_resource_variables = True
    # Append value to list of trainable / non-trainable weights if relevant
    # TODO(b/125122625): This won't pick up on any variables added to a
    # list/dict after creation.
    for val in nest.flatten(value):
      # TODO(b/126450014): Remove `_UnreadVariable` check here when assign ops
      # no longer return True for isinstance Variable checks.
      if (isinstance(val, tf_variables.Variable) and
          not isinstance(val, resource_variable_ops._UnreadVariable)):  # pylint: disable=protected-access
        # Users may add extra weights/variables
        # simply by assigning them to attributes (invalid for graph networks)
        self._maybe_create_attribute('_trainable_weights', [])
        self._maybe_create_attribute('_non_trainable_weights', [])
        if val not in self._trainable_weights + self._non_trainable_weights:
          if val.trainable:
            self._trainable_weights.append(val)
          else:
            self._non_trainable_weights.append(val)
          backend.track_variable(val)
    # Skip the auto trackable from tf.Module to keep status quo. See the comment
    # at __delattr__.
    super(tracking.AutoTrackable, self).__setattr__(name, value)
def _gather_children_attribute(self, attribute):
assert attribute in {
'weights', 'trainable_weights', 'non_trainable_weights', 'updates',
'losses', 'metrics'
}
if hasattr(self, '_layers'):
nested_layers = trackable_layer_utils.filter_empty_layer_containers(
self._layers)
return list(
itertools.chain.from_iterable(
getattr(layer, attribute) for layer in nested_layers))
return []
  # This is a hack so that the is_layer (within
  # training/trackable/layer_utils.py) check doesn't get the weights attr.
  # TODO(b/110718070): Remove when fixed.
  def _is_layer(self):
    """Marker used by trackable layer_utils to identify Layer instances."""
    return True
  @property
  @tracking.cached_per_instance
  def _call_fn_args(self):
    """Argument names of `self.call` (cached per instance)."""
    return function_utils.fn_args(self.call)
  @property
  @tracking.cached_per_instance
  def _should_compute_mask(self):
    """True when `call` accepts `mask` or the layer defines `compute_mask`."""
    return ('mask' in self._call_fn_args or
            getattr(self, 'compute_mask', None) is not None)
class Node(object):
  """A `Node` describes the connectivity between two layers.
  Each time a layer is connected to some new input,
  a node is added to `layer._inbound_nodes`.
  Each time the output of a layer is used by another layer,
  a node is added to `layer._outbound_nodes`.
  Arguments:
    outbound_layer: the layer that takes
      `input_tensors` and turns them into `output_tensors`
      (the node gets created when the `call`
      method of the layer was called).
    inbound_layers: a list of layers, the same length as `input_tensors`,
      the layers from where `input_tensors` originate.
    node_indices: a list of integers, the same length as `inbound_layers`.
      `node_indices[i]` is the origin node of `input_tensors[i]`
      (necessary since each inbound layer might have several nodes,
      e.g. if the layer is being shared with a different data stream).
    tensor_indices: a list of integers,
      the same length as `inbound_layers`.
      `tensor_indices[i]` is the index of `input_tensors[i]` within the
      output of the inbound layer
      (necessary since each inbound layer might
      have multiple tensor outputs, with each one being
      independently manipulable).
    input_tensors: list of input tensors.
    output_tensors: list of output tensors.
    arguments: dictionary of keyword arguments that were passed to the
      `call` method of the layer at the call that created the node.
  `node_indices` and `tensor_indices` are basically fine-grained coordinates
  describing the origin of the `input_tensors`.
  A node from layer A to layer B is added to:
  - A._outbound_nodes
  - B._inbound_nodes
  """
  def __init__(self,
               outbound_layer,
               inbound_layers,
               node_indices,
               tensor_indices,
               input_tensors,
               output_tensors,
               arguments=None):
    """Creates the node and registers it on all layers involved."""
    # Layer instance (NOT a sequence)
    if isinstance(outbound_layer, (list, tuple, dict)):
      raise ValueError('`outbound_layer` should be a layer instance, '
                       'not a list, tuple, or, dict.')
    # this is the layer that takes a nested structure of input tensors
    # and turns them into a nested structure of output tensors.
    # the current node will be added to
    # the inbound_nodes of outbound_layer.
    self.outbound_layer = outbound_layer
    # The following 3 properties describe where
    # the input tensors come from: which layers,
    # and for each layer, which node and which
    # tensor output of each node.
    # Nested structure of layer instances.
    self.inbound_layers = inbound_layers
    # Nested structure of integers, 1:1 mapping with inbound_layers.
    self.node_indices = node_indices
    # Nested of integers, 1:1 mapping with inbound_layers.
    self.tensor_indices = tensor_indices
    # Following 2 properties:
    # tensor inputs and outputs of outbound_layer.
    # Nested structure of tensors. 1:1 mapping with inbound_layers.
    self.input_tensors = input_tensors
    # Nested structure of tensors, created by outbound_layer.call().
    self.output_tensors = output_tensors
    # Following 2 properties: input and output shapes.
    # Nested structure of shape tuples, shapes of input_tensors.
    self.input_shapes = nest.map_structure(backend.int_shape, input_tensors)
    # Nested structure of shape tuples, shapes of output_tensors.
    self.output_shapes = nest.map_structure(backend.int_shape, output_tensors)
    # Optional keyword arguments to layer's `call`.
    self.arguments = arguments
    # Add nodes to all layers involved.
    for layer in nest.flatten(inbound_layers):
      if layer is not None:
        # For compatibility with external Keras, we use the deprecated
        # accessor here.
        layer.outbound_nodes.append(self)
    # For compatibility with external Keras, we use the deprecated
    # accessor here.
    outbound_layer.inbound_nodes.append(self)
  def iterate_inbound(self):
    """Yields tuples representing the inbound data.
    Returns:
      An iterable of tuples (inbound_layer, node_index, tensor_index, tensor).
      Note: under Python 3 this is a `zip` iterator, not a materialized list.
    """
    return zip(
        nest.flatten(self.inbound_layers), nest.flatten(self.node_indices),
        nest.flatten(self.tensor_indices), nest.flatten(self.input_tensors))
  def get_config(self):
    """Returns a serializable dict describing this node's connectivity."""
    inbound_names = nest.map_structure(
        lambda layer: layer.name if layer else None, self.inbound_layers)
    return {
        'outbound_layer': self.outbound_layer.name,
        'inbound_layers': inbound_names,
        'node_indices': self.node_indices,
        'tensor_indices': self.tensor_indices
    }
class TensorFlowOpLayer(Layer):
  """Wraps a TensorFlow Operation in a Layer.
  This class is used internally by the Functional API. When a user
  uses a raw TensorFlow Operation on symbolic tensors originating
  from an `Input` Layer, the resultant operation will be wrapped
  with this Layer object in order to make the operation compatible
  with the Keras API.
  This Layer will create a new, identical operation (except for inputs
  and outputs) every time it is called. If `run_eagerly` is `True`,
  the op creation and calculation will happen inside an Eager function.
  Instances of this Layer are created when `autolambda` is called, which
  is whenever a Layer's `__call__` encounters symbolic inputs that do
  not have Keras metadata, or when a Network's `__init__` encounters
  outputs that do not have Keras metadata.
  Attributes:
    node_def: String, the serialized NodeDef of the Op this layer will wrap.
    constants: Dict of NumPy arrays, the values of any Tensors needed for this
      Operation that do not originate from a Keras `Input` Layer. Since all
      placeholders must come from Keras `Input` Layers, these Tensors must be
      treated as constant in the Functional API.
    name: String, the name of the Layer.
    trainable: Bool, whether this Layer is trainable. Currently Variables are
      not supported, and so this parameter has no effect.
    dtype: The default dtype of this Layer. Inherited from `Layer` and has no
      effect on this class, however is used in `get_config`.
  """
  def __init__(self,
               node_def,
               constants=None,
               name=None,
               trainable=True,
               dtype=None):
    super(TensorFlowOpLayer, self).__init__(
        name=_TF_OP_LAYER_NAME_PREFIX + name, trainable=trainable, dtype=dtype)
    self.node_def = node_def_pb2.NodeDef.FromString(node_def)
    self.constants = constants or {}
    # Layer uses original op unless it is called on new inputs.
    # This means `built` is not set in `__call__`.
    self.built = True
  def call(self, inputs):
    """Re-creates the wrapped op; wrapped in a defun when executing eagerly."""
    if context.executing_eagerly():
      return self._defun_call(inputs)
    return self._make_op(inputs)
  def _make_op(self, inputs):
    """Builds a fresh copy of the wrapped operation in `inputs[0].graph`."""
    inputs = nest.flatten(inputs)
    graph = inputs[0].graph
    with graph.as_default():
      # Constant inputs are spliced back into their recorded positions.
      for index, constant in self.constants.items():
        constant = ops.convert_to_tensor(constant)
        inputs.insert(index, constant)
      # A graph-unique name avoids clashing with the original op's name.
      self.node_def.name = graph.unique_name(self.node_def.name)
      # Check for case where first input should be a list of Tensors.
      if 'N' in self.node_def.attr:
        num_tensors = self.node_def.attr['N'].i
        inputs = [inputs[:num_tensors]] + inputs[num_tensors:]
      c_op = ops._create_c_op(graph, self.node_def, inputs, control_inputs=[])
      op = graph._create_op_from_tf_operation(c_op)
      # Record the gradient because custom-made ops don't go through the
      # code-gen'd eager call path
      op_type = compat.as_str(op.op_def.name)
      attr_names = [compat.as_str(attr.name) for attr in op.op_def.attr]
      # Attrs are recorded as a flat (name, value, name, value, ...) tuple.
      attrs = []
      for attr_name in attr_names:
        attrs.append(attr_name)
        attrs.append(op.get_attr(attr_name))
      attrs = tuple(attrs)
      execute.record_gradient(op_type, op.inputs, attrs, op.outputs,
                              op.name)
      if len(op.outputs) == 1:
        return op.outputs[0]
      return op.outputs
  @function.defun
  def _defun_call(self, inputs):
    """Wraps the op creation method in an Eager function for `run_eagerly`."""
    return self._make_op(inputs)
  def get_config(self):
    """Returns the layer config (serialized NodeDef plus constant values)."""
    config = super(TensorFlowOpLayer, self).get_config()
    config.update({
        'node_def': self.node_def.SerializeToString(),
        'constants': {
            i: backend.get_value(c) for i, c in self.constants.items()
        }
    })
    return config
class AddLoss(Layer):
  """Layer that registers its inputs as a loss.

  Attributes:
    unconditional: Whether or not the loss should be conditioned on the inputs.
  """

  def __init__(self, unconditional, **kwargs):
    super(AddLoss, self).__init__(**kwargs)
    self.unconditional = unconditional

  def call(self, inputs):
    # Conditional losses are tied to the tensors that produced them;
    # unconditional losses pass inputs=False.
    self.add_loss(inputs, inputs=(not self.unconditional))
    return inputs

  def get_config(self):
    base_config = super(AddLoss, self).get_config()
    base_config.update({'unconditional': self.unconditional})
    return base_config
class AddMetric(Layer):
  """Layer that registers its inputs as a metric.

  Attributes:
    aggregation: 'mean' or None. How the inputs should be aggregated.
    metric_name: The name to use for this metric.
  """

  def __init__(self, aggregation=None, metric_name=None, **kwargs):
    super(AddMetric, self).__init__(**kwargs)
    self.aggregation = aggregation
    self.metric_name = metric_name

  def call(self, inputs):
    # Forward the configured aggregation mode and name to `add_metric`.
    self.add_metric(inputs, self.aggregation, self.metric_name)
    return inputs

  def get_config(self):
    base_config = super(AddMetric, self).get_config()
    base_config.update({
        'aggregation': self.aggregation,
        'metric_name': self.metric_name
    })
    return base_config
class KerasHistory(
    collections.namedtuple('KerasHistory',
                           ['layer', 'node_index', 'tensor_index'])):
  """Provenance record for a Tensor produced by a Layer call.

  While a Keras Graph Network is being constructed, every output Tensor is
  tagged with one of these records, starting with the `InputLayer`s. The
  `keras.engine.Network` machinery later walks the records backwards to
  reconstruct the graph of Layer calls.

  Attributes:
    layer: The Layer that produced the Tensor.
    node_index: Index of the specific call to `layer` that produced this
      Tensor; layers called multiple times (e.g. for weight sharing) get a
      new node per call.
    tensor_index: Position of this Tensor among the call's outputs. Always
      zero for single-output layers; nested output structures are numbered
      deterministically in `nest.flatten` order.
  """

  # Keep the namedtuple's memory/performance profile: no per-instance dict.
  __slots__ = ()
def default(method):
  """Marks `method` as a default implementation so overrides are detectable.

  Code elsewhere checks for the `_is_default` marker (e.g. to decide whether
  a subclass has overridden `build`).

  Args:
    method: The function to tag.

  Returns:
    The same function, with `_is_default` set to True.
  """
  setattr(method, '_is_default', True)
  return method
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
# (Deprecated re-export of `input_spec.InputSpec`.)
InputSpec = input_spec.InputSpec  # pylint:disable=invalid-name
| 40.476592 | 111 | 0.673231 |
acf1e2f1bc6e221b93dab9e00fe59f1891ac80ee | 128 | py | Python | internship/admin.py | nandit123/UniIntern-Django | 6154dcfc16536b4309dd6f6df18c9b6ebc102f54 | [
"MIT"
] | null | null | null | internship/admin.py | nandit123/UniIntern-Django | 6154dcfc16536b4309dd6f6df18c9b6ebc102f54 | [
"MIT"
] | 1 | 2018-06-15T12:04:09.000Z | 2018-06-15T12:04:09.000Z | internship/admin.py | nandit123/UniIntern-Django | 6154dcfc16536b4309dd6f6df18c9b6ebc102f54 | [
"MIT"
] | 1 | 2021-06-04T07:57:31.000Z | 2021-06-04T07:57:31.000Z | from django.contrib import admin
from .models import Company, Opening
admin.site.register(Company)
admin.site.register(Opening) | 25.6 | 36 | 0.828125 |
acf1e3910c50043281d519dcb34ef78bfc4420b2 | 7,225 | py | Python | emonitor/modules/messages/messages.py | Durburz/eMonitor | 56f3b1fe39b9da3a12b49bdd60d0cfca51c23351 | [
"BSD-3-Clause"
] | 21 | 2015-03-04T11:36:47.000Z | 2021-04-20T07:51:53.000Z | emonitor/modules/messages/messages.py | Durburz/eMonitor | 56f3b1fe39b9da3a12b49bdd60d0cfca51c23351 | [
"BSD-3-Clause"
] | 79 | 2015-01-04T21:35:49.000Z | 2020-03-05T07:22:10.000Z | emonitor/modules/messages/messages.py | Durburz/eMonitor | 56f3b1fe39b9da3a12b49bdd60d0cfca51c23351 | [
"BSD-3-Clause"
] | 27 | 2015-03-04T11:36:48.000Z | 2021-09-20T08:15:17.000Z | import yaml
import datetime
import pytz
import logging
from sqlalchemy.exc import OperationalError
from emonitor.extensions import db, scheduler, monitorserver
from messageutils import calcNextStateChange, MessageTrigger
from emonitor.modules.messages.message_text import TextWidget # MessageText
from emonitor.modules.messages.messagetype import MessageType
from emonitor.modules.settings.settings import Settings
logger = logging.getLogger(__name__)
class Messages(db.Model):
    """ORM model for timed informational messages shown on monitor displays.

    A message has a visibility window (``startdate``/``enddate``), an integer
    ``state`` flag, a renderer type (stored as a type-name string in the
    ``type`` column) and a free-form attribute store serialized as YAML in
    the ``attributes`` column.
    """
    __tablename__ = 'messages'
    __table_args__ = {'extend_existing': True}

    ACTIVE_MESSAGES = []  # class-level cache of active messages (filled elsewhere)

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    remark = db.Column(db.TEXT)
    startdate = db.Column(db.DATETIME)
    enddate = db.Column(db.DATETIME)
    state = db.Column(db.Integer)
    _monitors = db.Column('monitors', db.String(32))   # comma-separated monitor ids
    _type = db.Column('type', db.String(32))           # message-type name
    _attributes = db.Column('attributes', db.TEXT)     # YAML-encoded dict

    def __init__(self, name, remark, startdate, enddate, state, mtype=TextWidget('dd')):
        # NOTE: the TextWidget default is evaluated once at import time; that is
        # harmless here because only its type name (a string) is stored.
        self.name = name
        self.remark = remark
        self.startdate = startdate
        self.enddate = enddate
        self.state = state
        self.type = mtype

    def __str__(self):
        return "<Message %s> %s: %s-%s, visible: %s, on monitor: %s" % (self.id, self.name, self.startdate, self.enddate, self.currentState, self.monitors)

    @property
    def attributes(self):
        """Deserialize the YAML attribute column into a dict.

        :return: attributes as dict; empty dict when the column is unset
        """
        try:
            # safe_load instead of yaml.load: the column only ever holds
            # yaml.safe_dump output, and safe_load cannot instantiate
            # arbitrary Python objects from (potentially tampered) DB text.
            return yaml.safe_load(self._attributes)
        except AttributeError:
            # _attributes is None -> column never written
            return {}

    @attributes.setter
    def attributes(self, val):
        """Serialize the given dict to YAML and store it in the column.

        :param val: attribute dict to persist
        """
        self._attributes = yaml.safe_dump(val, encoding='utf-8')

    def get(self, attribute, default=""):
        """Look up an attribute value, supporting one level of dotted keys.

        :param attribute: attribute name, optionally dotted ("a.b")
        :param default: value returned when the attribute is not found
        :return: stored value or *default*
        """
        if attribute in self.attributes:
            return self.attributes[attribute]
        attr = attribute.split('.')
        if attr[0] in self.attributes:
            if len(attr) == 2:
                if attr[1] in self.attributes[attr[0]]:
                    return self.attributes[attr[0]][attr[1]]
            else:
                return self.attributes[attr[0]]
        return default

    def set(self, attribute, value):
        """Set *attribute* to *value* and persist it.

        Fix: the previous implementation mutated the temporary dict returned
        by the ``attributes`` property, so the change was silently discarded.
        Write the updated dict back through the property setter instead.

        :param attribute: attribute name
        :param value: value to store
        """
        attrs = self.attributes
        attrs[attribute] = value
        self.attributes = attrs

    @property
    def type(self):
        """Resolve the stored type name to a MessageType implementation.

        :return: matching :py:class:`emonitor.modules.messages.messagetype.MessageType`
                 implementation, or None when not exactly one match is found
        """
        if self._type == '':
            self._type = Settings.get('messages.base.type')
        # materialize as a list so len() works on Python 3 as well,
        # where filter() is lazy
        impl = [x for x in MessageType.getMessageTypes() if x[0].split('.')[0] == self._type]
        if len(impl) == 1:
            return impl[0][1]
        return None

    @type.setter
    def type(self, messageType):
        """Store the type name (text before the first '.') in the type column.

        :param messageType: messagetype object or its name
        """
        self._type = str(messageType).split('.')[0]

    @property
    def monitors(self):
        """Monitor ids this message is assigned to.

        :return: list of monitor ids as ints; empty list when unset/invalid
        """
        try:
            return [int(m) for m in self._monitors.split(',')]
        except (ValueError, AttributeError):
            return []

    @monitors.setter
    def monitors(self, monitors):
        """Store monitor ids as a comma-separated string.

        Fix: str() each entry so integer ids work as well as strings
        (``','.join`` raises TypeError on non-string items).

        :param monitors: iterable of monitor ids
        """
        self._monitors = ','.join(str(m) for m in monitors)

    @property
    def currentState(self):
        """Current visibility of the message.

        Uses the 'cron' attribute (when present) to compute whether the
        message is currently shown.  The former ``timestamp`` parameter was
        removed: a property getter can never receive extra arguments, so it
        was dead code.

        :return: boolean
        """
        if self.get('cron', None):
            timestamp = datetime.datetime.now(tz=pytz.timezone('CET'))
            return not calcNextStateChange(timestamp, self.get('cron'))[1]
        return True

    @staticmethod
    def getMessages(id=0, state=-1):
        """Fetch messages filtered by id and/or state.

        :param id: id of a single message; 0 for all
        :param state: -1 for all states, else only messages with given state
        :return: list of :py:class:`Messages` (or a single instance when id given)
        """
        if id == 0:
            if state == -1:
                return Messages.query.order_by('messages.startdate').all()
            else:
                return Messages.query.filter(Messages.state == state).order_by('messages.startdate').all()
        else:
            return Messages.query.filter(Messages.id == int(id)).first()

    @staticmethod
    def getActiveMessages():
        """Fetch messages with state > 0 whose visibility window contains now.

        :return: list of :py:class:`Messages`; empty list on database errors
        """
        now = datetime.datetime.now()  # hoisted: use one consistent timestamp for both bounds
        try:
            return Messages.query.filter(Messages.state > 0).filter(Messages.startdate <= now).filter(Messages.enddate >= now).order_by(Messages.startdate.asc()).all()
        except OperationalError:
            return []

    @staticmethod
    def initMessageTrigger():
        """Register the scheduler job driving message state changes."""
        job = scheduler.add_job(Messages.doMessageTrigger, name="messages", id="messages", trigger=MessageTrigger(Messages.getActiveMessages(), minutes=60))
        if len(job.trigger.messagelist) == 0:  # pause job if no active messages
            job.pause()

    @staticmethod
    def updateMessageTrigger():
        """Refresh the trigger's message list and recompute the next fire time."""
        job = scheduler.get_job(job_id="messages")
        job.trigger.messagelist = Messages.getActiveMessages()  # update message list for new firetime
        if len(job.trigger.messagelist) == 0:
            job.pause()  # pause job if no active messages
        else:
            job.resume()  # reactivate if active messages
        scheduler.modify_job(job_id="messages", next_run_time=job.trigger.get_next_fire_time('', ''))
        scheduler.app.logger.info('message trigger: update message trigger, next run %s' % job.next_run_time)
        monitorserver.sendMessage('0', 'reset')  # refresh monitor layout

    @staticmethod
    def doMessageTrigger():
        """Scheduler callback: run at every message state change and re-arm."""
        scheduler.app.logger.info('message trigger: run state changes at %s' % datetime.datetime.now())
        Messages.updateMessageTrigger()  # update trigger and calculate next run
| 35.416667 | 207 | 0.628789 |
acf1e446e74b70e9c28c818a584f844fa70cd178 | 235 | py | Python | mindhome_alpha/erpnext/accounts/doctype/item_tax_template/test_item_tax_template.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/accounts/doctype/item_tax_template/test_item_tax_template.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/accounts/doctype/item_tax_template/test_item_tax_template.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestItemTaxTemplate(unittest.TestCase):
    """Placeholder test case for the Item Tax Template doctype.

    Auto-generated scaffold with no assertions yet; kept so the test
    runner discovers the doctype's test module.
    """
    pass
| 21.363636 | 68 | 0.774468 |
acf1e44a44328241dc90951c7700aa7802b436ef | 78,018 | py | Python | torch/overrides.py | mleshen/pytorch | 314a578154d9f0981bc08397aaaeaf50d8233730 | [
"Intel"
] | 1 | 2021-06-23T20:14:19.000Z | 2021-06-23T20:14:19.000Z | torch/overrides.py | mleshen/pytorch | 314a578154d9f0981bc08397aaaeaf50d8233730 | [
"Intel"
] | null | null | null | torch/overrides.py | mleshen/pytorch | 314a578154d9f0981bc08397aaaeaf50d8233730 | [
"Intel"
] | null | null | null | """
Python implementation of ``__torch_function__``
While most of the torch API and handling for ``__torch_function__`` happens
at the C++ level, some of the torch API is written in Python so we need
python-level handling for ``__torch_function__`` overrides as well. The main
developer-facing functionality in this file are handle_torch_function and
has_torch_function. See torch/functional.py and test/test_overrides.py
for usage examples.
Note
----
heavily inspired by NumPy's ``__array_function__`` (see:
https://github.com/pytorch/pytorch/issues/24015 and
https://www.numpy.org/neps/nep-0018-array-function-protocol.html
)
If changing this file in a way that can affect ``__torch_function__`` overhead,
please report the benchmarks in ``benchmarks/overrides_benchmark``. See the
instructions in the ``README.md`` in that directory.
"""
import __future__
import collections
import functools
import types
from typing import Dict, Set, List, Any, Callable, Iterable, Type
import torch
from torch._C import (
_has_torch_function, _has_torch_function_unary,
_has_torch_function_variadic, _add_docstr)
__all__ = [
"get_ignored_functions",
"get_overridable_functions",
"get_testing_overrides",
"handle_torch_function",
"has_torch_function",
"is_tensor_like",
"is_tensor_method_or_property",
"wrap_torch_function",
]
@functools.lru_cache(None)
def get_ignored_functions() -> Set[Callable]:
    """
    Return public functions that cannot be overridden by ``__torch_function__``.

    Returns
    -------
    Set[Callable]
        A tuple of functions that are publicly available in the torch API but cannot
        be overridden with ``__torch_function__``. Mostly this is because none of the
        arguments of these functions are tensors or tensor-likes.

    Examples
    --------
    >>> torch.Tensor.as_subclass in torch.overrides.get_ignored_functions()
    True
    >>> torch.add in torch.overrides.get_ignored_functions()
    False
    """
    Tensor = torch.Tensor
    return {
        # Introspection / global-state helpers: no tensor arguments.
        torch.typename,
        torch.is_tensor,
        torch.is_storage,
        torch.set_default_tensor_type,
        torch.set_rng_state,
        torch.get_rng_state,
        torch.manual_seed,
        torch.initial_seed,
        torch.seed,
        torch.save,
        torch.load,
        torch.set_printoptions,
        torch.fork,
        torch.get_default_dtype,
        torch.get_num_interop_threads,
        torch.get_num_threads,
        torch.init_num_threads,
        torch.import_ir_module,
        torch.import_ir_module_from_buffer,
        torch.is_anomaly_enabled,
        torch.is_grad_enabled,
        torch.merge_type_from_type_comment,
        torch.parse_ir,
        torch.parse_schema,
        torch.parse_type_comment,
        torch.set_anomaly_enabled,
        torch.set_flush_denormal,
        torch.set_num_interop_threads,
        torch.set_num_threads,
        torch.wait,
        torch.as_tensor,
        torch.from_numpy,
        torch.get_device,
        torch.tensor,
        torch.default_generator,
        torch.has_cuda,
        torch.has_cudnn,
        torch.has_lapack,
        torch.device,
        torch.dtype,
        torch.finfo,
        torch.has_mkl,
        torch.has_mkldnn,
        torch.has_openmp,
        torch.iinfo,
        torch.memory_format,
        torch.qscheme,
        torch.set_grad_enabled,
        torch.no_grad,
        torch.enable_grad,
        torch.inference_mode,
        torch.is_inference_mode_enabled,
        torch.layout,
        torch.align_tensors,
        # Factory / window / backend-specific functions: they create tensors
        # (or operate on non-tensor inputs) rather than taking tensor-likes.
        torch.arange,
        torch.as_strided,
        torch.bartlett_window,
        torch.blackman_window,
        torch.broadcast_shapes,
        torch.can_cast,
        torch.cudnn_affine_grid_generator,
        torch.cudnn_batch_norm,
        torch.cudnn_convolution,
        torch.cudnn_convolution_transpose,
        torch.cudnn_convolution_relu,
        torch.cudnn_convolution_add_relu,
        torch.cudnn_grid_sampler,
        torch.cudnn_is_acceptable,
        torch.empty,
        torch.empty_strided,
        torch.empty_quantized,
        torch.eye,
        torch.fft.fftfreq,
        torch.fft.rfftfreq,
        torch.from_file,
        torch.full,
        torch.hamming_window,
        torch.hann_window,
        torch.kaiser_window,
        torch.linspace,
        torch.logspace,
        torch.mkldnn_adaptive_avg_pool2d,
        torch.mkldnn_convolution,
        torch.mkldnn_convolution_backward_weights,
        torch.mkldnn_max_pool2d,
        torch.mkldnn_max_pool3d,
        torch.mkldnn_linear_backward_weights,
        torch.normal,
        torch.ones,
        torch.promote_types,
        torch.rand,
        torch.randn,
        torch.randint,
        torch.randperm,
        torch.range,
        torch.result_type,
        torch.scalar_tensor,
        torch.sparse_coo_tensor,
        torch.sparse_csr_tensor,
        torch.tril_indices,
        torch.triu_indices,
        torch.vander,
        torch.zeros,
        torch._jit_internal.boolean_dispatch,
        torch.nn.functional.assert_int_or_pair,
        torch.nn.functional.upsample,
        torch.nn.functional.upsample_bilinear,
        torch.nn.functional.upsample_nearest,
        torch.nn.functional.has_torch_function,
        torch.nn.functional.has_torch_function_unary,
        torch.nn.functional.has_torch_function_variadic,
        torch.nn.functional.handle_torch_function,
        torch.nn.functional.sigmoid,
        torch.nn.functional.hardsigmoid,
        torch.nn.functional.tanh,
        # The override machinery itself must not be overridable.
        has_torch_function,
        handle_torch_function,
        torch.set_autocast_enabled,
        torch.is_autocast_enabled,
        torch.clear_autocast_cache,
        torch.autocast_increment_nesting,
        torch.autocast_decrement_nesting,
        torch.nn.functional.hardswish,
        torch.is_vulkan_available,
        torch.is_deterministic,
        torch.are_deterministic_algorithms_enabled,
        torch.use_deterministic_algorithms,
        torch.set_deterministic,
        torch.unify_type_list,
        torch.is_warn_always_enabled,
        torch.set_warn_always,
        torch.vitals_enabled,
        torch.set_vital,
        # Tensor dunders / constructors handled specially by the dispatcher.
        Tensor.__delitem__,
        Tensor.__dir__,
        Tensor.__getattribute__,
        Tensor.__init__,
        Tensor.__iter__,
        Tensor.__init_subclass__,
        Tensor.__delattr__,
        Tensor.__setattr__,
        Tensor.__torch_function__,
        Tensor.__new__,
        Tensor.__class__,
        Tensor.__subclasshook__,
        Tensor.as_subclass,
        Tensor.reinforce,
        Tensor.new,
        Tensor.new_tensor,
        Tensor.new_empty,
        Tensor.new_empty_strided,
        Tensor.new_zeros,
        Tensor.new_ones,
        Tensor.new_full,
        Tensor._make_subclass,
        Tensor.stride,
        Tensor.unflatten,
        Tensor.to_sparse_csr,
        Tensor._reduce_ex_internal,
    }
@functools.lru_cache(None)
def get_testing_overrides() -> Dict[Callable, Callable]:
"""Return a dict containing dummy overrides for all overridable functions
Returns
-------
Dict[Callable, Callable]
A dictionary that maps overridable functions in the PyTorch API to
lambda functions that have the same signature as the real function
and unconditionally return -1. These lambda functions are useful
for testing API coverage for a type that defines ``__torch_function__``.
Examples
--------
>>> import inspect
>>> my_add = torch.overrides.get_testing_overrides()[torch.add]
>>> inspect.signature(my_add)
<Signature (input, other, out=None)>
"""
# Every function in the PyTorchAPI that can be overriden needs an entry
# in this dict.
#
# Optimally we would use inspect to get the function signature and define
# the lambda function procedurally but that is blocked by generating
# function signatures for native kernels that can be consumed by inspect.
# See Issue #28233.
Tensor = torch.Tensor
ret: Dict[Callable, Callable] = {
torch.abs: lambda input, out=None: -1,
torch.absolute: lambda input, out=None: -1,
torch.adaptive_avg_pool1d: lambda input, output_size: -1,
torch.adaptive_max_pool1d: lambda inputs, output_size: -1,
torch.acos: lambda input, out=None: -1,
torch.arccos: lambda input, out=None: -1,
torch.acosh: lambda input, out=None: -1,
torch.arccosh: lambda input, out=None: -1,
torch.add: lambda input, other, out=None: -1,
torch.addbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
torch.addcdiv: lambda input, tensor1, tensor2, value=1, out=None: -1,
torch.addcmul: lambda input, tensor1, tensor2, value=1, out=None: -1,
torch.addmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.addmv: lambda input, mat, vec, beta=1, alpha=1, out=None: -1,
torch.addr: lambda input, vec1, vec2, beta=1, alpha=1, out=None: -1,
torch.affine_grid_generator: lambda theta, size, align_corners: -1,
torch.all: lambda input, dim=None: -1,
torch.allclose: lambda input, other, trol=1e-05, atol=1e-08, equal_nan=False: -1,
torch.alpha_dropout: lambda input, p, train, inplace=False: -1,
torch.amax: lambda input, dim=None: -1,
torch.amin: lambda input, dim=None: -1,
torch.angle: lambda input, out=None: -1,
torch.any: lambda input, dim=None, keepdim=False, out=None: -1,
torch.argmax: lambda input: -1,
torch.argmin: lambda input: -1,
torch.argsort: lambda input, dim=None: -1,
torch.asin: lambda input, out=None: -1,
torch._assert_async: lambda input: -1,
torch.arcsin: lambda input, out=None: -1,
torch.asinh: lambda input, out=None: -1,
torch.arcsinh: lambda input, out=None: -1,
torch.atan: lambda input, out=None: -1,
torch.arctan: lambda input, out=None: -1,
torch.atan2: lambda input, other, out=None: -1,
torch.atanh: lambda input, out=None: -1,
torch.arctanh: lambda input, out=None: -1,
torch.atleast_1d: lambda *tensors: -1,
torch.atleast_2d: lambda *tensors: -1,
torch.atleast_3d: lambda *tensors: -1,
torch.avg_pool1d: lambda input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True: -1,
torch.baddbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
torch.batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled: -1,
torch.batch_norm_backward_elemt: lambda grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count_tensor: -1,
torch.batch_norm_backward_reduce: lambda grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g: -1,
torch.batch_norm_elemt: lambda input, weight, bias, mean, invstd, eps: -1,
torch.batch_norm_gather_stats: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
torch.batch_norm_gather_stats_with_counts: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
torch.batch_norm_stats: lambda input, eps: -1,
torch.batch_norm_update_stats: lambda input, running_mean, running_var, momentum: -1,
torch.bernoulli: lambda input, generator=None, out=None: -1,
torch.bilinear: lambda input1, input2, weight, bias: -1,
torch.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None, reduce=None,
reduction='mean', pos_weight=None: -1),
torch.bincount: lambda input, weights=None, minlength=0: -1,
torch.binomial: lambda count, prob, generator=None: -1,
torch.bitwise_and: lambda input, other, out=None: -1,
torch.bitwise_not: lambda input, out=None: -1,
torch.bitwise_or: lambda input, other, out=None: -1,
torch.bitwise_xor: lambda input, other, out=None: -1,
torch.block_diag: lambda *tensors: -1,
torch.bmm: lambda input, mat2, out=None: -1,
torch.broadcast_tensors: lambda *tensors: -1,
torch.broadcast_to: lambda self, size: -1,
torch.bucketize: lambda input, boundaries, out_int32=False, right=False, out=None: -1,
torch.cartesian_prod: lambda *tensors: -1,
torch.cat: lambda tensors, dim=0, out=None: -1,
torch.cdist: lambda x1, x2, p=2.0, compute_mode='use_mm_for_euclid_dist_if_necessary': -1,
torch.ceil: lambda input, out=None: -1,
torch.celu: lambda input, alhpa=1., inplace=False: -1,
torch.chain_matmul: lambda *matrices, out=None: -1,
torch.channel_shuffle: lambda input, groups : -1,
torch.cholesky: lambda input, upper=False, out=None: -1,
torch.linalg.cholesky: lambda input, out=None: -1,
torch.linalg.cholesky_ex: lambda input, check_errors=False, out=None: -1,
torch.cholesky_inverse: lambda input, upper=False, out=None: -1,
torch.cholesky_solve: lambda input1, input2, upper=False, out=None: -1,
torch.choose_qparams_optimized: lambda input, numel, n_bins, ratio, bit_width: -1,
torch.chunk: lambda input, chunks, dim=0: -1,
torch.clamp: lambda input, min=None, max=None, out=None: -1,
torch.clip: lambda input, min=None, max=None, out=None: -1,
torch.clamp_min: lambda input, min, out=None: -1,
torch.clamp_max: lambda input, max, out=None: -1,
torch.column_stack: lambda tensors, out=None: -1,
torch.clone: lambda input: -1,
torch.combinations: lambda input, r=2, with_replacement=False: -1,
torch.complex: lambda real, imag: -1,
torch.copysign: lambda input, other, out=None: -1,
torch.polar: lambda abs, ang: -1,
torch.linalg.cond: lambda input, ord=None: -1,
torch.conj: lambda input, out=None: -1,
torch.constant_pad_nd: lambda input, pad, value=0: -1,
torch.conv1d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
torch.conv2d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
torch.conv3d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
torch.convolution: lambda input, weight, bias, stride, padding, dilation, transposed, output_adding, groups: -1,
torch.conv_tbc: lambda input, weight, bias, pad=0: -1,
torch.conv_transpose1d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.conv_transpose2d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.conv_transpose3d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.cos: lambda input, out=None: -1,
torch.cosine_embedding_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1,
torch.cosh: lambda input, out=None: -1,
torch.cosine_similarity: lambda x1, x2, dim=1, eps=1e-8: -1,
torch.count_nonzero: lambda input: -1,
torch.cross: lambda input, other, dim=-1, out=None: -1,
torch.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean',
zero_infinity=False: -1),
torch.cummax: lambda input, dim, out=None: -1,
torch.cummin: lambda input, dim, out=None: -1,
torch.cumprod: lambda input, dim, out=None, dtype=None: -1,
torch.cumsum: lambda input, dim, out=None, dtype=None: -1,
torch.logcumsumexp: lambda input, dim, out=None: -1,
torch.deg2rad: lambda input, out=None: -1,
torch.dequantize: lambda input: -1,
torch.det: lambda input: -1,
torch.linalg.det: lambda input: -1, # alias for torch.det # type: ignore[attr-defined]
torch.detach: lambda input: -1,
torch.diag: lambda input, diagonal=0, out=None: -1,
torch.diag_embed: lambda input, diagonal=0, out=None: -1,
torch.diagflat: lambda input, offset=0: -1,
torch.diff: lambda input, n=1, dim=-1, prepend=None, append=None, out=None: -1,
torch.diagonal: lambda input, offset=0, dim1=0, dim2=1: -1,
torch.digamma: lambda input, out=None: -1,
torch.dist: lambda input, other, p=2: -1,
torch.div: lambda input, other, rounding_mode=None, out=None: -1,
torch.divide: lambda input, other, rounding_mode=None, out=None: -1,
torch.dot: lambda input, other, out=None: -1,
torch.dropout: lambda input, p, train, inplace=False: -1,
torch.dsmm: lambda input, mat2: -1,
torch.hsmm: lambda mat1, mat2: -1,
torch.dsplit: lambda input, indices_or_sections: -1,
torch.dstack: lambda tensors, out=None: -1,
torch.eig: lambda input, eigenvectors=False, out=None: -1,
torch.linalg.eig: lambda input, out=None: -1,
torch.linalg.eigvals: lambda input, out=None: -1,
torch.linalg.eigh: lambda input, UPLO="L", out=None: -1,
torch.linalg.eigvalsh: lambda input, UPLO="L", out=None: -1,
torch.einsum: lambda equation, *operands: -1,
torch.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False,
sparse=False: -1),
torch.embedding_bag: (lambda input, weight, offsets, max_norm=None, norm_type=2, scale_grad_by_freq=False,
mode='mean', sparse=False, per_sample_weights=None, padding_idx=None: -1),
torch.empty_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.eq: lambda input, other, out=None: -1,
torch.equal: lambda input, other: -1,
torch.erf: lambda input, out=None: -1,
torch.erfc: lambda input, out=None: -1,
torch.erfinv: lambda input, out=None: -1,
torch.exp: lambda input, out=None: -1,
torch.exp2: lambda input, out=None: -1,
torch.expm1: lambda input, out=None: -1,
torch.fake_quantize_per_channel_affine: lambda input, scale, zero_point, axis, quant_min, quant_max: -1,
torch.fake_quantize_per_tensor_affine: lambda input, scale, zero_point, quant_min, quant_max: -1,
torch.fbgemm_linear_fp16_weight: lambda input, packed_weight, bias: -1,
torch.fbgemm_linear_fp16_weight_fp32_activation: lambda input, packed_weight, bias: -1,
torch.fbgemm_linear_int8_weight: lambda input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias: -1,
torch.fbgemm_linear_int8_weight_fp32_activation: (lambda input, weight, packed, col_offsets, weight_scale,
weight_zero_point, bias: -1),
torch.fbgemm_linear_quantize_weight: lambda input: -1,
torch.fbgemm_pack_gemm_matrix_fp16: lambda input: -1,
torch.fbgemm_pack_quantized_matrix: lambda input, a, b: -1,
torch.feature_alpha_dropout: lambda input, p, train: -1,
torch.feature_dropout: lambda input, p, train: -1,
torch.fft.fft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.ifft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.rfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.irfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.hfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.ihfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.fftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.ifftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.rfftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.irfftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.fft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.ifft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.rfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.irfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.fftshift: lambda input, dim=None: -1,
torch.fft.ifftshift: lambda input, dim=None: -1,
torch.fft.fft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fix: lambda input, out=None: -1,
torch.flatten: lambda input, start_dim=0, end_dim=-1: -1,
torch.flip: lambda input, dims: -1,
torch.fliplr: lambda input: -1,
torch.flipud: lambda input: -1,
torch.frobenius_norm: lambda input, dim=None, keepdim=False, out=None: -1,
torch.floor: lambda input, out=None: -1,
torch.floor_divide: lambda input, other: -1,
torch.float_power: lambda input, exponent, out=None: -1,
torch.fmod: lambda input, other, out=None: -1,
torch.frac: lambda input, out=None: -1,
torch.frexp: lambda input, out=None: -1,
torch.full_like: lambda input, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
torch.lu_unpack: lambda LU_data, LU_pivots, unpack_data=True, unpack_pivots=True: -1,
torch.gather: lambda input, dim, index, out=None, sparse_grad=False: -1,
torch.gcd: lambda input, other, out=None: -1,
torch.ge: lambda input, other, out=None: -1,
torch.greater_equal: lambda input, other, out=None: -1,
torch.geqrf: lambda input, out=None: -1,
torch.i0: lambda input, out=None: -1,
torch.inner: lambda input, other, out=None: -1,
torch.outer: lambda input, vec2, out=None: -1, # alias for torch.ger
torch.ger: lambda input, vec2, out=None: -1,
torch.gradient: lambda input, spacing=None, dim=None, edge_order=1: -1,
torch.grid_sampler: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.grid_sampler_2d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.grid_sampler_3d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05, cudnn_enabled=True: -1,
torch.gru: lambda input, hx, params, has_biases, num_layers, gropout, train, bidirectional, batch_first: -1,
torch.gru_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.gt: lambda input, other, out=None: -1,
torch.greater: lambda input, other, out=None: -1,
torch.hardshrink: lambda input, lambd=0.5: -1,
torch.heaviside: lambda input, values, out=None: -1,
torch.hinge_embedding_loss: lambda input, target, margin=1.0, size_average=None, reduce=None, reduction='mean': -1,
torch.histc: lambda input, bins=100, min=0, max=0, out=None: -1,
torch.linalg.householder_product: lambda input, tau: -1,
torch.hspmm: lambda mat1, mat2, out=None: -1,
torch.hsplit: lambda input, indices_or_sections: -1,
torch.hstack: lambda tensors, out=None: -1,
torch.hypot: lambda input, other, out=None: -1,
torch.igamma: lambda input, other, out=None: -1,
torch.igammac: lambda input, other, out=None: -1,
torch.imag: lambda input, out=None: -1,
torch.index_add: lambda input, dim, index, source: -1,
torch.index_copy: lambda input, dim, index, source: -1,
torch.index_put: lambda input, indices, values, accumulate=False: -1,
torch.index_select: lambda input, dim, index, out=None: -1,
torch.index_fill: lambda input, dim, index, value: -1,
torch.isfinite: lambda tensor: -1,
torch.isinf: lambda tensor: -1,
torch.isreal: lambda tensor: -1,
torch.isposinf: lambda input, out=None: -1,
torch.isneginf: lambda input, out=None: -1,
torch.instance_norm: (lambda input, running_mean, running_var, weight, bias, use_input_stats, momentum, eps,
cudnn_enabled: -1),
torch.int_repr: lambda input: -1,
torch.inverse: lambda input, out=None: -1,
torch.linalg.inv: lambda input, out=None: -1,
torch.linalg.inv_ex: lambda input, check_errors=False, out=None: -1,
torch.is_complex: lambda input: -1,
torch.is_distributed: lambda input: -1,
torch.is_floating_point: lambda input: -1,
torch.is_nonzero: lambda input: -1,
torch.is_same_size: lambda input, other: -1,
torch.is_signed: lambda input: -1,
torch.isclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
torch.isnan: lambda input: -1,
torch.istft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
normalized=False, onesided=None, length=None, return_complex=False: -1),
torch.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
torch.kron: lambda input, other: -1,
torch.kthvalue: lambda input, k, dim=None, keepdim=False, out=None: -1,
torch.layer_norm: lambda input, normalized_shape, weight=None, bias=None, esp=1e-05, cudnn_enabled=True: -1,
torch.lcm: lambda input, other, out=None: -1,
torch.ldexp: lambda input, other, out=None: -1,
torch.le: lambda input, other, out=None: -1,
torch.less_equal: lambda input, other, out=None: -1,
torch.lerp: lambda input, end, weight, out=None: -1,
torch.lgamma: lambda input, out=None: -1,
torch.lobpcg: lambda input, k=None, B=None, X=None, n=None, iK=None, niter=None, tol=None, largest=None, method=None,
tracker=None, ortho_iparams=None, ortho_fparams=None, ortho_bparams=None: -1,
torch.log: lambda input, out=None: -1,
torch.log_softmax: lambda input, dim, dtype=None: -1,
torch.log10: lambda input, out=None: -1,
torch.log1p: lambda input, out=None: -1,
torch.log2: lambda input, out=None: -1,
torch.logaddexp: lambda input, other, out=None: -1,
torch.logaddexp2: lambda input, other, out=None: -1,
torch.logdet: lambda input: -1,
torch.xlogy: lambda x, y: -1,
torch.logical_and: lambda input, other, out=None: -1,
torch.logical_not: lambda input, out=None: -1,
torch.logical_or: lambda input, other, out=None: -1,
torch.logical_xor: lambda input, other, out=None: -1,
torch.logsumexp: lambda input, names, keepdim=False, out=None: -1,
torch.logit: lambda input, eps=None: -1,
torch.logsumexp: lambda input, names, keepdim=False, out=None: -1,
torch.lstm: lambda data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional: -1,
torch.lstm_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.lstsq: lambda input, A, out=None: -1,
torch.lt: lambda input, other, out=None: -1,
torch.less: lambda input, other, out=None: -1,
torch.lu: lambda A, pivot=True, get_infos=False, out=None: -1,
torch.lu_solve: lambda b, LU_data, LU_pivots, out=None: -1,
torch.margin_ranking_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1, # type: ignore[attr-defined] # noqa: B950
torch.masked_fill: lambda input, mask, value: -1,
torch.masked_scatter: lambda input, mask, source: -1,
torch.masked_select: lambda input, mask, out=None: -1,
torch.matmul: lambda input, other, out=None: -1,
torch.matrix_power: lambda input, n: -1,
torch.linalg.matrix_power: lambda input, n, out=None: -1,
torch.matrix_rank: lambda input, tol=None, symmetric=False: -1,
torch.linalg.matrix_rank: lambda input, tol=None, hermitian=False: -1,
torch.linalg.multi_dot: lambda tensors, out=None: -1,
torch.matrix_exp: lambda input: -1,
torch.max: lambda input, out=None: -1,
torch.maximum: lambda input, other, out=None: -1,
torch.fmax: lambda input, other, out=None: -1,
torch.max_pool1d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool2d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool3d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.mean: lambda input, dim=None: -1,
torch.median: lambda input, dim=None: -1,
torch.nanmedian: lambda input, dim=None: -1,
torch.meshgrid: lambda *tensors, **kwargs: -1,
torch.min: lambda input, out=None: -1,
torch.minimum: lambda input, other, out=None: -1,
torch.fmin: lambda input, other, out=None: -1,
torch.miopen_batch_norm: (lambda input, weight, bias, running_mean, running_var, training,
exponential_average_factor, epsilon: -1),
torch.miopen_convolution: lambda input, weight, bias, padding, stride, dilation, groups, benchmark, deterministic: -1,
torch.miopen_convolution_transpose: (lambda input, weight, bias, padding, output_padding, stride, dilation,
groups, benchmark, deterministic: -1),
torch.miopen_depthwise_convolution: (lambda input, weight, bias, padding, stride, dilation, groups, benchmark,
deterministic: -1),
torch.miopen_rnn: (lambda input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first,
dropout, train, bidirectional, batch_sizes, dropout_state: -1),
torch.mm: lambda input, mat2, out=None: -1,
torch.mode: lambda input, dim=-1, keepdim=False, out=None: -1,
torch.movedim: lambda input, source, destination: -1,
torch.moveaxis: lambda input, source, destination: -1,
torch.msort: lambda input, descending=False, out=None: -1,
torch.mul: lambda input, other, out=None: -1,
torch.multiply: lambda input, other, out=None: -1,
torch.multinomial: lambda input, num_samples, replacement=False, out=None: -1,
torch.mv: lambda input, vec, out=None: -1,
torch.mvlgamma: lambda input, p: -1,
torch.narrow: lambda input, dim, start, length: -1,
torch.narrow_copy: lambda input, dim, start, length: -1,
torch.nan_to_num: lambda input, nan=0.0, posinf=None, neginf=None, out=None: -1,
torch.native_batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps: -1,
torch.native_layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
torch.native_group_norm: lambda input, weight, bias, N, C, HxW, group, eps: -1,
torch.native_norm: lambda input, p=2: -1,
torch.native_norm: lambda input, p=2: -1,
torch.native_norm: lambda input, p=2, dim=None, keepdim=False, dtype=None: -1,
torch.ne: lambda input, other, out=None: -1,
torch.not_equal: lambda input, other, out=None: -1,
torch.neg: lambda input, out=None: -1,
torch.negative: lambda input, out=None: -1,
torch.nextafter: lambda input, other, out=None: -1,
torch.nn.functional.adaptive_avg_pool2d: lambda input, output_size: -1,
torch.nn.functional.adaptive_avg_pool3d: lambda input, output_size: -1,
torch.nn.functional.adaptive_max_pool1d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool1d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool2d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool2d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool3d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool3d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.affine_grid: lambda theta, size, align_corners=None: -1,
torch.nn.functional.alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
torch.nn.functional.avg_pool2d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None: -1),
torch.nn.functional.avg_pool3d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None: -1),
torch.nn.functional.batch_norm: (lambda input, running_mean, running_var, weight=None, bias=None, training=False,
momentum=0.1, eps=1e-05: -1),
torch.nn.functional.bilinear: lambda input1, input2, weight, bias=None: -1,
torch.nn.functional.binary_cross_entropy: (lambda input, target, weight=None, size_average=None, reduce=None,
reduction="mean": -1),
torch.nn.functional.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None,
reduce=None, reduction="mean", pos_weight=None: -1),
torch.nn.functional.celu: lambda input, alpha=1.0, inplace=False: -1,
torch.nn.functional.cosine_embedding_loss: (lambda input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.cross_entropy: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
reduce=None, reduction="mean": -1),
torch.nn.functional.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0,
reduction='mean', zero_infinity=False: -1),
torch.nn.functional.dropout: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.dropout2d: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.dropout3d: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.elu: lambda input, alpha=1.0, inplace=False: -1,
torch.nn.functional.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0,
scale_grad_by_freq=False, sparse=False: -1),
torch.nn.functional.embedding_bag: (lambda input, weight, offsets=None, max_norm=None, norm_type=2,
scale_grad_by_freq=False, mode='mean', sparse=False, per_sample_weights=None,
include_last_offset=False, padding_idx=None: -1),
torch.nn.functional.feature_alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
torch.nn.functional.fold: lambda input, output_size, kernel_size, dilation=1, padding=0, stride=1: -1,
torch.nn.functional.fractional_max_pool2d: (lambda input, kernel_size, output_size=None, output_ratio=None,
return_indices=False, _random_samples=None: -1),
torch.nn.functional.fractional_max_pool2d_with_indices: (
lambda input, kernel_size, output_size=None, output_ratio=None, return_indices=False,
_random_samples=None: -1),
torch.nn.functional.fractional_max_pool3d: (lambda input, kernel_size, output_size=None, output_ratio=None,
return_indices=False, _random_samples=None: -1),
torch.nn.functional.fractional_max_pool3d_with_indices: (
lambda input, kernel_size, output_size=None, output_ratio=None, return_indices=False,
_random_samples=None: -1),
torch.nn.functional.gaussian_nll_loss: lambda input, target, var, full=False, eps=1e-06, reduction='mean': -1,
torch.nn.functional.gelu: lambda input: -1,
torch.nn.functional.glu: lambda input, dim=-1: -1,
torch.nn.functional.grid_sample: lambda input, grid, mode='bilinear', padding_mode='zeros', align_corners=None: -1,
torch.nn.functional.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05: -1,
torch.nn.functional.gumbel_softmax: lambda logits, tau=1, hard=False, eps=1e-10, dim=-1: -1,
torch.nn.functional.hardshrink: lambda input, lambd=0.5: -1,
torch.nn.functional.hardtanh: lambda input, min_val=-1., max_val=1., inplace=False: -1,
torch.nn.functional.hinge_embedding_loss: (lambda input, target, margin=1.0, size_average=None, reduce=None,
reduction='mean': -1),
torch.nn.functional.instance_norm: (lambda input, running_mean=None, running_var=None, weight=None, bias=None,
use_input_stats=True, momentum=0.1, eps=1e-05: -1),
torch.nn.functional.interpolate: (lambda input, size=None, scale_factor=None, mode='nearest', align_corners=None,
recompute_scale_factor=None: -1),
torch.nn.functional.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
torch.nn.functional.l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
torch.nn.functional.leaky_relu: lambda input, negative_slope=0.01, inplace=False: -1,
torch.nn.functional.linear: lambda input, weight, bias=None: -1,
torch.nn.functional.local_response_norm: lambda input, size, alpha=0.0001, beta=0.75, k=1.0: -1,
torch.nn.functional.log_softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.logsigmoid: lambda input: -1,
torch.nn.functional.lp_pool1d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
torch.nn.functional.lp_pool2d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
torch.nn.functional.margin_ranking_loss: (lambda input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.max_pool1d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False: -1),
torch.nn.functional.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool2d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False: -1),
torch.nn.functional.max_pool2d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool3d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool3d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_unpool1d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.max_unpool2d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.max_unpool3d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.mse_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.multi_head_attention_forward: (
lambda query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v,
add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training=True, key_padding_mask=None,
need_weights=True, attn_mask=None, use_separate_proj_weight=False, q_proj_weight=None, k_proj_weight=None,
v_proj_weight=None, static_k=None, static_v=None: -1),
torch.nn.functional.multi_margin_loss: (lambda input, target, p=1, margin=1.0, weight=None, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.multilabel_margin_loss: (lambda input, target, size_average=None, reduce=None,
reduction='mean': -1),
torch.nn.functional.multilabel_soft_margin_loss: (lambda input, target, weight=None, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.nll_loss: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
reduce=None, reduction='mean': -1),
torch.nn.functional.normalize: lambda input, p=2, dim=1, eps=1e-12, out=None: -1,
torch.nn.functional.one_hot: lambda tensor, num_classes=-1: -1,
torch.nn.functional.pad: lambda input, pad, mode='constant', value=0: -1,
torch.nn.functional.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
torch.nn.functional.poisson_nll_loss: (lambda input, target, log_input=True, full=False, size_average=None,
eps=1e-08, reduce=None, reduction='mean': -1),
torch.nn.functional.prelu: lambda input, weight: -1,
torch.nn.functional.relu: lambda input, inplace=False: -1,
torch.nn.functional.relu6: lambda input, inplace=False: -1,
torch.nn.functional.rrelu: lambda input, lower=0.125, upper=0.3333333333333333, training=False, inplace=False: -1,
torch.nn.functional.selu: lambda input, inplace=False: -1,
torch.nn.functional.silu: lambda input, inplace=False: -1,
torch.nn.functional.smooth_l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean', beta=1.: -1,
torch.nn.functional.huber_loss: lambda input, target, reduction='mean', delta=1.: -1,
torch.nn.functional.soft_margin_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.softmin: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.softplus: lambda input, beta=1, threshold=20: -1,
torch.nn.functional.softshrink: lambda input, lambd=0.5: -1,
torch.nn.functional.softsign: lambda input: -1,
torch.nn.functional.tanhshrink: lambda input: -1,
torch.nn.functional.threshold: lambda input, threshold, value, inplace=False: -1,
torch.nn.functional.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06,
swap=False, size_average=None, reduce=None, reduction='mean': -1),
torch.nn.functional.triplet_margin_with_distance_loss: (lambda anchor, positive, negative, *,
distance_function=None, margin=1.0,
swap=False, reduction='mean': -1),
torch.nn.functional.unfold: lambda input, kernel_size, dilation=1, padding=0, stride=1: -1,
torch.nonzero: lambda input, as_tuple=False: -1,
torch.norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.norm: lambda input, ord=None, dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.vector_norm: lambda input, ord=2, dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.matrix_norm: lambda input, ord='fro', dim=(-2, -1), keepdim=False, out=None, dtype=None: -1,
torch.norm_except_dim: lambda v, pow=2, dim=0: -1,
torch.nuclear_norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
torch.numel: lambda input: -1,
torch.orgqr: lambda input, tau: -1,
torch.ormqr: lambda input, input2, input3, left=True, transpose=False: -1,
torch.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
torch.permute: lambda self, dim: -1,
torch.pca_lowrank: lambda input, q=None, center=True, niter=2: -1,
torch.pdist: lambda input, p=2: -1,
torch.pinverse: lambda input, rcond=1e-15: -1,
torch.linalg.pinv: lambda input, rcond=1e-15, hermitian=False: -1,
torch.pixel_shuffle: lambda input, upscale_factor: -1,
torch.pixel_unshuffle: lambda input, downscale_factor: -1,
torch.poisson: lambda input, generator=None: -1,
torch.poisson_nll_loss: lambda input, target, log_input, full, eps, reduction: -1,
torch.polygamma: lambda input, n, out=None: -1,
torch.positive: lambda input, out=None: -1,
torch.prelu: lambda input, weight: -1,
torch.ones_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.pow: lambda input, exponent, out=None: -1,
torch.prod: lambda input, dtype=None: -1,
torch.put: lambda input, index, source, accumulate=False: -1,
torch.q_per_channel_axis: lambda input: -1,
torch.q_per_channel_scales: lambda input: -1,
torch.q_per_channel_zero_points: lambda input: -1,
torch.q_scale: lambda input: -1,
torch.q_zero_point: lambda input: -1,
torch.qr: lambda input, some=True, out=None: -1,
torch.linalg.qr: lambda input, mode='reduced', out=None: -1,
torch.quantile: lambda input, q, dim=None, keepdim=False, out=None: -1,
torch.nanquantile: lambda input, q, dim=None, keepdim=False, out=None: -1,
torch.quantize_per_channel: lambda input, scales, zero_points, axis, dtype: -1,
torch.quantize_per_tensor: lambda input, scale, zero_point, dtype: -1,
torch.quantized_batch_norm: lambda input, weight, bias, mean, var, eps, output_scale, output_zero_point: -1,
torch.quantized_gru_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_lstm_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_max_pool1d: (lambda input, kernel_size, stride=tuple(), padding=(0,),
dilation=(1,), ceil_mode=False: -1),
torch.quantized_max_pool2d: (lambda input, kernel_size, stride=tuple(), padding=(0, 0),
dilation=(1, 1), ceil_mode=False: -1),
torch.quantized_rnn_relu_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_rnn_tanh_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.rad2deg: lambda input, out=None: -1,
torch.rand_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.randint_like: lambda input, high, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
torch.randn_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.ravel: lambda input: -1,
torch.real: lambda input, out=None: -1,
torch.vdot: lambda input, other, out=None: -1,
torch.view_as_real: lambda input: -1,
torch.view_as_complex: lambda input: -1,
torch.reciprocal: lambda input, out=None: -1,
torch.relu: lambda input, inplace=False: -1,
torch.remainder: lambda input, other, out=None: -1,
torch.renorm: lambda input, p, dim, maxnorm, out=None: -1,
torch.repeat_interleave: lambda input, dim=None: -1,
torch.reshape: lambda input, shape: -1,
torch.rnn_relu: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
torch.rnn_relu_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.rnn_tanh: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
torch.rnn_tanh_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.roll: lambda input, shifts, dims=None: -1,
torch.rot90: lambda input, k=1, dims=(0, 1): -1,
torch.round: lambda input, out=None: -1,
torch.row_stack: lambda tensors, out=None: -1, # alias for torch.vstack
torch._rowwise_prune: (lambda weight, mask, compressed_indices_dtype: -1),
torch.rrelu: lambda input, lower=1. / 8, upper=1. / 3, training=False, inplace=False: -1,
torch.rsqrt: lambda input, out=None: -1,
torch.rsub: lambda input, other, alpha=1: -1,
torch.saddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.scatter: lambda input, dim, index, src: -1,
torch.scatter_add: lambda input, dim, index, src: -1,
torch.searchsorted: lambda sorted_sequence, input, out_int32=False, right=False, out=None: -1,
torch.segment_reduce: lambda data, reduce="max", lengths=None, indices=None, axis=0, unsafe=False: -1,
torch.select: lambda input, dim, index: -1,
torch.selu: lambda input, inplace=False: -1,
torch.sigmoid: lambda input, out=None: -1,
torch.sign: lambda input, out=None: -1,
torch.signbit: lambda input, out=None: -1,
torch.sgn: lambda input, out=None: -1,
torch.sin: lambda input, out=None: -1,
torch.sinc: lambda input, out=None: -1,
torch.sinh: lambda input, out=None: -1,
torch.slogdet: lambda input: -1,
torch.linalg.slogdet: lambda input: -1,
torch.smm: lambda input, mat2: -1,
torch.spmm: lambda input, mat2: -1,
torch.softmax: lambda input, dim, dtype=None: -1,
torch.solve: lambda input, A, out=None: -1,
torch.linalg.solve: lambda input, other, out=None: -1,
torch.sort: lambda input, dim=-1, descending=False, *, stable=False, out=None: -1,
torch.split: lambda tensor, split_size_or_sections, dim=0: -1,
torch.split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
torch.sqrt: lambda input, out=None: -1,
torch.square: lambda input, out=None: -1,
torch.squeeze: lambda input, dim=None, out=None: -1,
torch.sspaddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.stack: lambda tensors, dim=0, out=None: -1,
torch.std: lambda input, dim=None: -1,
torch.std_mean: lambda input, dim=None: -1,
torch.stft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
pad_mode='reflect', normalized=False, onesided=True, return_complex=None: -1),
torch.sub: lambda input, other, out=None: -1,
torch.subtract: lambda input, other, out=None: -1,
torch.sum: lambda input, dim=None: -1,
torch.nansum: lambda input, dim=None: -1,
torch.svd: lambda input, some=True, compute_uv=True, out=None: -1,
torch.svd_lowrank: lambda input, q=6, niter=2, M=None: -1,
torch.linalg.svd: lambda input, full_matrices=True, out=None: -1,
torch.linalg.svdvals: lambda input, out=None: -1,
torch.symeig: lambda input, eigenvectors=False, upper=True, out=None: -1,
torch.swapaxes: lambda input, dim0, dim1: -1,
torch.swapdims: lambda input, axis0, axis1: -1,
torch.special.entr: lambda input: -1,
torch.special.erf: lambda input: -1,
torch.special.erfc: lambda input: -1,
torch.special.erfinv: lambda input: -1,
torch.special.exp2: lambda input: -1,
torch.special.expm1: lambda input: -1,
torch.special.expit: lambda input: -1,
torch.special.gammaln: lambda input: -1,
torch.special.i0e: lambda input: -1,
torch.special.logit: lambda input: -1,
torch.special.xlog1py: lambda input, other, out=None: -1,
torch.t: lambda input: -1,
torch.take: lambda input, index: -1,
torch.take_along_dim: lambda input, indices, dim=None, out=None: -1,
torch.tan: lambda input, out=None: -1,
torch.tanh: lambda input, out=None: -1,
torch.linalg.tensorinv: lambda a, ind=2: -1,
torch.linalg.tensorsolve: lambda a, b, dims=None: -1,
torch.tensordot: lambda a, b, dims=2, out=None: -1,
torch.tensor_split: lambda input, indices_or_sections, dim=0: -1,
torch.threshold: lambda input, threshold, value, inplace=False: -1,
torch.tile: lambda input, dims: -1,
torch.topk: lambda input, k, dim=-1, descending=False, out=None: -1,
torch.trace: lambda input: -1,
torch.transpose: lambda input, dim0, dim1: -1,
torch.trapz: lambda y, x=None, dim=-1: -1,
torch.triangular_solve: lambda input, A, upper=True, transpose=False, unitriangular=False: -1,
torch.tril: lambda input, diagonal=0, out=None: -1,
torch.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06, swap=False,
size_average=None, reduce=None, reduction='mean': -1),
torch.triu: lambda input, diagonal=0, out=None: -1,
torch.true_divide: lambda input, other: -1,
torch.trunc: lambda input, out=None: -1,
torch.unbind: lambda input, dim=0: -1,
torch.unique: lambda input, sorted=True, return_inverse=False, return_counts=False, dim=None: -1,
torch.unique_consecutive: lambda input, return_inverse=False, return_counts=False, dim=None: -1,
torch.unsafe_chunk: lambda input, chunks, dim=0: -1,
torch.unsafe_split: lambda tensor, split_size_or_sections, dim=0: -1,
torch.unsafe_split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
torch.unsqueeze: lambda input, dim, out=None: -1,
torch.var: lambda input, dim=None: -1,
torch.var_mean: lambda input, dim=None: -1,
torch.vsplit: lambda input, indices_or_sections: -1,
torch.vstack: lambda tensors, out=None: -1,
torch.where: lambda condition, x=None, y=None: -1,
torch.zeros_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
Tensor.__floordiv__: lambda self, other: -1,
Tensor.__rfloordiv__: lambda self, other: -1,
Tensor.__ifloordiv__: lambda self, other: -1,
Tensor.__truediv__: lambda self, other: -1,
Tensor.__rtruediv__: lambda self, other: -1,
Tensor.__itruediv__: lambda self, other: -1,
Tensor.__lshift__: lambda self, other: -1,
Tensor.__ilshift__: lambda self, other: -1,
Tensor.__rshift__: lambda self, other: -1,
Tensor.__irshift__: lambda self, other: -1,
Tensor.__float__: lambda self: -1,
Tensor.__complex__: lambda self: -1,
Tensor.__array__: lambda self, dtype: -1,
Tensor.__bool__: lambda self: -1,
Tensor.__contains__: lambda self, other: -1,
Tensor.__neg__: lambda self: -1,
Tensor.__invert__: lambda self: -1,
Tensor.__mod__: lambda self, other: -1,
Tensor.__imod__: lambda self, other: -1,
Tensor.__array_wrap__: lambda self, array: -1,
Tensor.__getitem__: lambda self, idx: -1,
Tensor.__deepcopy__: lambda self, memo: -1,
Tensor.__int__: lambda self: -1,
Tensor.__long__: lambda self: -1,
Tensor.__hash__: lambda self: -1,
Tensor.__index__: lambda self: -1,
Tensor.__len__: lambda self: -1,
Tensor.__format__: lambda self, format_spec: -1,
Tensor.__reduce_ex__: lambda self, proto: -1,
Tensor.__reversed__: lambda self: -1,
Tensor.__repr__: lambda self: -1,
Tensor.__setitem__: lambda self, k, v: -1,
Tensor.__setstate__: lambda self, d: -1,
Tensor.T.__get__: lambda self: -1,
Tensor._backward_hooks.__get__: lambda self: -1,
Tensor._base.__get__: lambda self: -1,
Tensor._cdata.__get__: lambda self: -1,
Tensor.grad.__get__: lambda self: -1,
Tensor._grad.__get__: lambda self: -1,
Tensor._grad_fn.__get__: lambda self: -1,
Tensor.grad_fn.__get__: lambda self: -1,
Tensor._version.__get__: lambda self: -1,
Tensor.data.__get__: lambda self: -1,
Tensor.device.__get__: lambda self: -1,
Tensor.dtype.__get__: lambda self: -1,
Tensor.is_cuda.__get__: lambda self: -1,
Tensor.is_xpu.__get__: lambda self: -1,
Tensor.is_leaf.__get__: lambda self: -1,
Tensor.is_meta.__get__: lambda self: -1,
Tensor.is_mlc.__get__: lambda self: -1,
Tensor.is_mkldnn.__get__: lambda self: -1,
Tensor.is_quantized.__get__: lambda self: -1,
Tensor.is_sparse.__get__: lambda self: -1,
Tensor.is_sparse_csr.__get__: lambda self: -1,
Tensor.is_vulkan.__get__: lambda self: -1,
Tensor.layout.__get__: lambda self: -1,
Tensor.name.__get__: lambda self: -1,
Tensor.names.__get__: lambda self: -1,
Tensor.ndim.__get__: lambda self: -1,
Tensor.output_nr.__get__: lambda self: -1,
Tensor.requires_grad.__get__: lambda self: -1,
Tensor.shape.__get__: lambda self: -1,
Tensor.volatile.__get__: lambda self: -1,
Tensor.real.__get__: lambda self: -1,
Tensor.imag.__get__: lambda self: -1,
Tensor.__cuda_array_interface__.__get__: lambda self: -1,
Tensor.type: lambda self, dtype=None, non_blocking=False, **kwargs: -1,
Tensor._coalesced_: lambda self: -1,
Tensor._dimI: lambda self: -1,
Tensor._dimV: lambda self: -1,
Tensor._indices: lambda self: -1,
Tensor._is_view: lambda self: -1,
Tensor._nnz: lambda self: -1,
Tensor.crow_indices: lambda self: -1,
Tensor.col_indices: lambda self: -1,
Tensor._update_names: lambda self, names, inplace: -1,
Tensor._values: lambda self: -1,
Tensor.align_as: lambda self, other: -1,
Tensor.align_to: lambda self, order, ellipsis_idx: -1,
Tensor.apply_: lambda self, callable: -1,
Tensor.as_strided: lambda self, size, stride: -1,
Tensor.as_strided_: lambda self, size, stride: -1,
Tensor.backward: lambda self, gradient=None, retain_graph=None, create_graph=False, inputs=None: -1,
Tensor.bfloat16: lambda self, memory_format=torch.preserve_format: -1,
Tensor.bool: lambda self, memory_format=torch.preserve_format: -1,
Tensor.byte: lambda self, memory_format=torch.preserve_format: -1,
Tensor.char: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cauchy_: lambda self, median=0, sigma=1, *, generator=None: -1,
Tensor.coalesce: lambda self: -1,
Tensor._coalesced_: lambda self, coalesced: -1,
Tensor.contiguous: lambda self, memory_format=torch.contiguous_format: -1,
Tensor.copy_: lambda self, src, non_blocking=False: -1,
Tensor.cpu: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cuda: lambda self, memory_format=torch.preserve_format: -1,
Tensor.xpu: lambda self, memory_format=torch.preserve_format: -1,
Tensor.data_ptr: lambda self: -1,
Tensor.dense_dim: lambda self: -1,
Tensor.dim: lambda self: -1,
Tensor.double: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cdouble: lambda self, memory_format=torch.preserve_format: -1,
Tensor.element_size: lambda self: -1,
Tensor.expand: lambda self, size: -1,
Tensor.expand_as: lambda self, other: -1,
Tensor.exponential_: lambda self, lambd=1, *, generator=None: -1,
Tensor.fill_: lambda self, value: -1,
Tensor.fill_diagonal_: lambda self, value: -1,
Tensor.float: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cfloat: lambda self, memory_format=torch.preserve_format: -1,
Tensor.geometric_: lambda self, p, *, generator=None: -1,
Tensor.get_device: lambda self: -1,
Tensor.half: lambda self, memory_format=torch.preserve_format: -1,
Tensor.has_names: lambda self: -1,
Tensor.indices: lambda self: -1,
Tensor.int: lambda self, memory_format=torch.preserve_format: -1,
Tensor.is_coalesced: lambda self: -1,
Tensor.is_contiguous: lambda self: -1,
Tensor.is_pinned: lambda self: -1,
Tensor.is_set_to: lambda self, tensor: -1,
Tensor.is_shared: lambda self: -1,
Tensor.item: lambda self: -1,
Tensor.log_normal_: lambda self, mean=1, std=2, *, generator=None: -1,
Tensor.log_softmax: lambda self, dim: -1,
Tensor.long: lambda self, memory_format=torch.preserve_format: -1,
Tensor.map_: lambda self, tensor, callable: -1,
Tensor.map2_: lambda self, x, y, callable: -1,
Tensor.mm: lambda self, mat2: -1,
Tensor.narrow_copy: lambda self, dimension, start, length: -1,
Tensor.ndimension: lambda self: -1,
Tensor.nelement: lambda self: -1,
Tensor.normal_: lambda self: -1,
Tensor.numpy: lambda self: -1,
Tensor.permute: lambda self, dim: -1,
Tensor.pin_memory: lambda self: -1,
Tensor.put_: lambda self, indices, tensor, accumulate=False: -1,
Tensor.qscheme: lambda self: -1,
Tensor.random_: lambda self, from_=0, to=None, *, generator=None: -1,
Tensor.record_stream: lambda self, stream: -1,
Tensor.refine_names: lambda self, names: -1,
Tensor.register_hook: lambda self, hook: -1,
Tensor.rename: lambda self, name: -1,
Tensor.repeat: lambda self, *size: -1,
Tensor.requires_grad_: lambda self, requires_grad=True: -1,
Tensor.reshape_as: lambda self, other: -1,
Tensor.resize: lambda self, *size: -1,
Tensor.resize_: lambda self, size: -1,
Tensor.resize_as: lambda self, other: -1,
Tensor.retain_grad: lambda self: -1,
Tensor.set_: lambda self, source=None, storage_offset=0, size=None, stride=None: -1,
Tensor.share_memory_: lambda self: -1,
Tensor.short: lambda self, memory_format=torch.preserve_format: -1,
Tensor.size: lambda self: -1,
Tensor.sparse_dim: lambda self: -1,
Tensor.sparse_mask: lambda self, mask: -1,
Tensor.sparse_resize_: lambda self, size1, size2, dense_dim: -1,
Tensor.sparse_resize_and_clear_: lambda self, size1, size2, dense_dim: -1,
Tensor.sspaddmm: lambda self, mat1, mat2, beta=1, alpha=1, out=None: -1,
Tensor.storage: lambda self: -1,
Tensor.storage_offset: lambda self: -1,
Tensor.storage_type: lambda self: -1,
Tensor.sum_to_size: lambda self, size: -1,
Tensor.tile: lambda self, *reps: -1,
Tensor.to: lambda self, dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format: -1,
Tensor.to_dense: lambda self: -1,
Tensor.to_sparse: lambda self: -1,
Tensor.tolist: lambda self: -1,
Tensor.to_mkldnn: lambda self: -1,
Tensor.type_as: lambda self, other: -1,
Tensor.unfold: lambda self, dimension, size, step: -1,
Tensor.uniform_: lambda self, from_=0, to=1: -1,
Tensor.values: lambda self: -1,
Tensor.view: lambda self, shape: -1,
Tensor.view_as: lambda self, other: -1,
Tensor.zero_: lambda self: -1,
torch.linalg.lstsq: lambda self, b, cond=None, driver=None: -1,
}
ret2 = {}
ignored = get_ignored_functions()
for k, v in ret.items():
# Generate methods like __add__ and add_ by default from add
names = [
k.__name__, # Default method
k.__name__ + "_", # Inplace variant
"__" + k.__name__ + "__", # Dunder method
"__i" + k.__name__ + "__", # Inplace dunder method
"__r" + k.__name__ + "__", # Reverse dunder method
]
if k.__name__.startswith("bitwise_"):
# bitwise_<op> have dunder methods of the form __<op>__
# And so on.
subname = k.__name__[len("bitwise_"):]
names.extend([
"__" + subname + "__",
"__i" + subname + "__",
"__r" + subname + "__"
])
for name in names:
func = getattr(Tensor, name, None)
if callable(func) and func not in ret and func not in ignored:
ret2[func] = v
ret.update(ret2)
return ret
def wrap_torch_function(dispatcher: Callable):
    """Wraps a given function with ``__torch_function__`` -related functionality.

    Parameters
    ----------
    dispatcher: Callable
        A callable that returns an iterable of Tensor-likes passed into the function.

    Note
    ----
    This decorator may reduce the performance of your code. Generally, it's enough to express
    your code as a series of functions that, themselves, support __torch_function__. If you
    find yourself in the rare situation where this is not the case, e.g. if you're wrapping a
    low-level library and you also need it to work for Tensor-likes, then this function is available.

    Examples
    --------
    >>> def dispatcher(a):  # Must have the same signature as func
    ...     return (a,)
    >>> @torch.overrides.wrap_torch_function(dispatcher)
    ... def func(a):  # This will make func dispatchable by __torch_function__
    ...     return a + 0
    """
    def inner(func):
        # functools.wraps preserves func's metadata (__name__, __doc__, ...)
        # on the returned wrapper.
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # The dispatcher mirrors func's signature and extracts the
            # arguments that may carry a ``__torch_function__`` override.
            relevant_args = dispatcher(*args, **kwargs)
            if has_torch_function(relevant_args):
                # Delegate to the overriding type's __torch_function__.
                return handle_torch_function(func, relevant_args, *args, **kwargs)

            # No override present: call the original function directly.
            return func(*args, **kwargs)

        return wrapped

    return inner
def _get_overloaded_args(relevant_args: Iterable[Any]) -> List[Any]:
"""Returns a list of arguments on which to call __torch_function__.
Checks arguments in relevant_args for __torch_function__ implementations,
storing references to the arguments and their types in overloaded_args and
overloaded_types in order of calling precedence. Only distinct types are
considered. If a type is a subclass of another type it will have higher
precedence, otherwise the precedence order is the same as the order of
arguments in relevant_args, that is, from left-to-right in the argument list.
The precedence-determining algorithm implemented in this function is
described in `NEP-0018`_.
See torch::append_overloaded_arg for the equivalent function in the C++
implementation.
Parameters
----------
relevant_args : iterable of array-like
Iterable of array-like arguments to check for __torch_function__
methods.
Returns
-------
overloaded_args : list
Arguments from relevant_args on which to call __torch_function__
methods, in the order in which they should be called.
.. _NEP-0018:
https://numpy.org/neps/nep-0018-array-function-protocol.html
"""
# Runtime is O(num_arguments * num_unique_types)
overloaded_types: Set[Type] = set()
overloaded_args: List[Any] = []
for arg in relevant_args:
arg_type = type(arg)
# We only collect arguments if they have a unique type, which ensures
# reasonable performance even with a long list of possibly overloaded
# arguments.
if (arg_type not in overloaded_types and hasattr(arg_type, '__torch_function__')):
# Create lists explicitly for the first type (usually the only one
# done) to avoid setting up the iterator for overloaded_args.
if overloaded_types:
overloaded_types.add(arg_type)
# By default, insert argument at the end, but if it is
# subclass of another argument, insert it before that argument.
# This ensures "subclasses before superclasses".
index = len(overloaded_args)
for i, old_arg in enumerate(overloaded_args):
if issubclass(arg_type, type(old_arg)):
index = i
break
overloaded_args.insert(index, arg)
else:
overloaded_types = {arg_type}
overloaded_args = [arg]
return overloaded_args
def handle_torch_function(
        public_api: Callable, relevant_args: Iterable[Any], *args, **kwargs) -> Any:
    """Implement a function with checks for ``__torch_function__`` overrides.

    See torch::autograd::handle_torch_function for the equivalent of this
    function in the C++ implementation.

    Arguments
    ---------
    public_api : function
        Function exposed by the public torch API originally called like
        ``public_api(*args, **kwargs)`` on which arguments are now being
        checked.
    relevant_args : iterable
        Iterable of arguments to check for __torch_function__ methods.
    args : tuple
        Arbitrary positional arguments originally passed into ``public_api``.
    kwargs : tuple
        Arbitrary keyword arguments originally passed into ``public_api``.

    Returns
    -------
    object
        Result from the first ``__torch_function__`` implementation that does
        not return ``NotImplemented``.

    Raises
    ------
    TypeError : if no implementation is found.
    """
    # Collect one representative per distinct overriding type, in precedence
    # order; the type tuple derived from it is therefore duplicate-free.
    overloaded_args = _get_overloaded_args(relevant_args)
    overloaded_types = tuple(type(arg) for arg in overloaded_args)
    for overloaded_arg in overloaded_args:
        # Pass `public_api` (not an internal implementation) so that
        # __torch_function__ bodies can do equality/identity comparisons.
        result = overloaded_arg.__torch_function__(public_api, overloaded_types, args, kwargs)
        if result is not NotImplemented:
            return result
    func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
    raise TypeError("no implementation found for '{}' on types that implement "
                    '__torch_function__: {}'
                    .format(func_name, [type(arg) for arg in overloaded_args]))
# ``_has_torch_function`` is implemented in C (torch._C); only its public
# docstring is attached here.
has_torch_function = _add_docstr(
    _has_torch_function,
    r"""Check for __torch_function__ implementations in the elements of an iterable.
Considers exact ``Tensor`` s and ``Parameter`` s non-dispatchable.
Arguments
---------
relevant_args : iterable
    Iterable of arguments to check for __torch_function__ methods.
Returns
-------
bool
    True if any of the elements of relevant_args have __torch_function__
    implementations, False otherwise.
See Also
--------
torch.is_tensor_like
    Checks if something is a Tensor-like, including an exact ``Tensor``.
"""
)
# Fast path of ``has_torch_function`` for the common single-argument case:
# avoids allocating and unpacking a one-element tuple on every call.
has_torch_function_unary = _add_docstr(
    _has_torch_function_unary,
    r"""Special case of `has_torch_function` for single inputs.
Instead of:
`has_torch_function((t,))`
call:
`has_torch_function_unary(t)`
which skips unnecessary packing and unpacking work.
"""
)
# Variadic fast path of ``has_torch_function``: arguments are passed
# positionally, so no argument tuple is ever created by the caller.
has_torch_function_variadic = _add_docstr(
    _has_torch_function_variadic,
    r"""Special case of `has_torch_function` that skips tuple creation.
This uses the METH_FASTCALL protocol introduced in Python 3.7; for 3.6
and before it has roughly equivalent performance compared to
`has_torch_function`.
Instead of:
`has_torch_function((a, b))`
call:
`has_torch_function_variadic(a, b)`
which skips unnecessary packing and unpacking work.
"""
)
@functools.lru_cache(None)
def get_overridable_functions() -> Dict[Any, List[Callable]]:
    """List functions that are overridable via __torch_function__

    Returns
    -------
    Dict[Any, List[Callable]]
        A dictionary that maps namespaces that contain overridable functions
        to functions in that namespace that can be overridden.
    """
    overridable_funcs = collections.defaultdict(list)
    # (namespace, candidate attribute names) pairs to scan for overridable
    # callables.
    tested_namespaces = [
        (torch, torch.__all__ + dir(torch._C._VariableFunctions)),
        (torch.functional, torch.functional.__all__),
        (torch.nn.functional, dir(torch.nn.functional)),
        (torch.Tensor, dir(torch.Tensor)),
        (torch.linalg, dir(torch.linalg)),
        (torch.fft, dir(torch.fft)),
        (torch.special, dir(torch.special)),
    ]
    for namespace, ns_funcs in tested_namespaces:
        for func_name in ns_funcs:
            # ignore private functions or functions that are deleted in torch.__init__
            if namespace is not torch.Tensor:
                if func_name.startswith('_'):
                    continue
                elif func_name.endswith('_'):
                    # in-place variants are excluded for non-Tensor namespaces
                    continue
                elif not func_name[0].islower():
                    # names not starting lowercase (e.g. classes/constants)
                    continue
                elif func_name == 'unique_dim':
                    continue
            else:
                # For torch.Tensor: drop attributes inherited unchanged from
                # ``object`` and the __weakref__ slot.
                func = getattr(namespace, func_name)
                if getattr(object, func_name, None) == func:
                    continue
                if func_name == '__weakref__':
                    continue
            func = getattr(namespace, func_name)
            # NOTE(review): for torch.Tensor this getattr and the equality
            # check below repeat work already done in the else-branch above --
            # redundant but harmless.
            if namespace is torch.Tensor and getattr(object, func_name, None) == func:
                continue
            # ignore re-exported modules
            if isinstance(func, types.ModuleType):
                continue
            # ignore __future__ imports
            if isinstance(func, __future__._Feature):
                continue
            # Non-callable descriptors (e.g. properties): record their
            # ``__get__`` accessor, which is what __torch_function__ receives.
            if not callable(func) and hasattr(func, "__get__"):
                overridable_funcs[func].append(func.__get__)
                continue
            if not callable(func):
                continue
            # cannot be overridden by __torch_function__
            if func in get_ignored_functions():
                msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
                       "but still has an explicit override")
                assert func not in get_testing_overrides(), msg.format(namespace, func.__name__)
                continue
            overridable_funcs[namespace].append(func)
    return overridable_funcs
@functools.lru_cache(None)
def _get_tensor_methods() -> Set[Callable]:
    """Return the (cached) set of overridable methods on ``torch.Tensor``."""
    return set(get_overridable_functions()[torch.Tensor])
def is_tensor_method_or_property(func: Callable) -> bool:
    """
    Returns True if the function passed in is a handler for a
    method or property belonging to ``torch.Tensor``, as passed
    into ``__torch_function__``.

    .. note::
       For properties, their ``__get__`` method must be passed in.

    This check is needed because methods/properties sometimes lack a
    ``__module__`` slot and require a ``torch.Tensor`` first argument.

    Examples
    --------
    >>> is_tensor_method_or_property(torch.Tensor.add)
    True
    >>> is_tensor_method_or_property(torch.add)
    False
    """
    # Membership is tested first (as before) so objects without a __name__
    # attribute that are in the method set never raise.
    if func in _get_tensor_methods():
        return True
    # Property accessors are dispatched through ``__get__``.
    return func.__name__ == "__get__"
def is_tensor_like(inp):
    """
    Returns ``True`` if the passed-in input is a Tensor-like.

    A value is Tensor-like when it is an exact ``torch.Tensor`` or when its
    *type* carries a ``__torch_function__`` attribute. Subclasses of Tensor
    therefore qualify automatically, and arbitrary user types qualify once
    they implement ``__torch_function__``.

    Examples
    --------
    >>> class SubTensor(torch.Tensor): ...
    >>> is_tensor_like(SubTensor([0]))
    True
    >>> is_tensor_like(6)
    False
    >>> is_tensor_like(None)
    False
    >>> class TensorLike:
    ...     def __torch_function__(self, func, types, args, kwargs):
    ...         return -1
    >>> is_tensor_like(TensorLike())
    True
    """
    inp_type = type(inp)
    if inp_type is torch.Tensor:
        return True
    return hasattr(inp_type, "__torch_function__")
| 54.292276 | 173 | 0.644338 |
acf1e47c47e333f5db85c1b9c260abd2c507d556 | 2,492 | py | Python | docs/conf.py | aneeshdurg/katana | 713ac5a12e884c691e619d50f8ea16a5a048bfad | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | aneeshdurg/katana | 713ac5a12e884c691e619d50f8ea16a5a048bfad | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | aneeshdurg/katana | 713ac5a12e884c691e619d50f8ea16a5a048bfad | [
"BSD-3-Clause"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here.
import pathlib
import os
# ``katana`` is imported for its __version__, used in the Sphinx version
# fields near the bottom of this file.
import katana
# Breathe takes a minute or two to parse Doxygen output. If you aren't editing
# C++ documents, set this environment variable for faster edit-render loops.
# NOTE(review): os.environ.get returns a *string* when the variable is set, so
# even KATANA_DOCS_DISABLE_CXX=0 counts as "disabled" -- confirm intended.
cxx_disabled = os.environ.get("KATANA_DOCS_DISABLE_CXX", False)
# Required: location of the Doxygen XML output consumed by Breathe.
doxygen_path = os.environ["KATANA_DOXYGEN_PATH"]
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "breathe",
    "sphinx.ext.intersphinx",
    #'sphinx.ext.autosummary',
    "sphinx.ext.autodoc",
    # 'sphinx_autodoc_typehints',
    "sphinx.ext.doctest",
    "sphinx.ext.todo",
    "sphinx.ext.mathjax",
    "sphinx.ext.viewcode",
    "sphinx_tabs.tabs"
]
# With C++ docs disabled, give Breathe no projects so it skips Doxygen parsing.
if cxx_disabled:
    breathe_projects = {}
else:
    breathe_default_project = "katana"
    breathe_projects = {"katana": str(pathlib.Path(doxygen_path))}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "pydata_sphinx_theme"
html_logo = "_static/logo.png"
html_title = "Katana"
html_theme_options = {"show_prev_next": False}
# html_theme_path = []
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
autodoc_preserve_defaults = True
autodoc_member_order = "groupwise"
# Standard Sphinx values
project = "Katana Graph"
version = katana.__version__
release = katana.__version__
author = "Katana Graph"
# TODO(ddn): Get this from katana.libgalois.version
# ``copyright`` shadows the builtin, but Sphinx requires this exact name.
copyright = "Katana Graph, Inc. 2021"
| 31.544304 | 79 | 0.696629 |
acf1e518deed138a321976d07646b1a279ec5106 | 4,154 | py | Python | reference/tutorial_korean/04 - Autoencoder, GAN/02 - GAN.py | KangByungWook/tensorflow | 10e553637f9837779352f64deb5986c194bb7bfc | [
"MIT"
] | null | null | null | reference/tutorial_korean/04 - Autoencoder, GAN/02 - GAN.py | KangByungWook/tensorflow | 10e553637f9837779352f64deb5986c194bb7bfc | [
"MIT"
] | null | null | null | reference/tutorial_korean/04 - Autoencoder, GAN/02 - GAN.py | KangByungWook/tensorflow | 10e553637f9837779352f64deb5986c194bb7bfc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Implements the Generative Adversarial Network (GAN), one of the most
# talked-about unsupervised learning methods of 2016.
# Paper: https://arxiv.org/abs/1406.2661
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)
#########
# Hyper-parameter options
######
total_epoch = 20
batch_size = 100
learning_rate = 0.0002
# Network layer-size options
n_hidden = 256
n_input = 28 * 28
n_noise = 128 # size of the noise vector used as the generator's input
#########
# Build the network model
######
# GAN is also unsupervised learning, so like the autoencoder it does not use Y.
X = tf.placeholder(tf.float32, [None, n_input])
# The noise Z is used as the generator's input.
Z = tf.placeholder(tf.float32, [None, n_noise])
# Variables used by the generator network.
G_W1 = tf.Variable(tf.random_normal([n_noise, n_hidden], stddev=0.01))
G_b1 = tf.Variable(tf.zeros([n_hidden]))
G_W2 = tf.Variable(tf.random_normal([n_hidden, n_input], stddev=0.01))
G_b2 = tf.Variable(tf.zeros([n_input]))
# Variables used by the discriminator network.
D_W1 = tf.Variable(tf.random_normal([n_input, n_hidden], stddev=0.01))
D_b1 = tf.Variable(tf.zeros([n_hidden]))
# The discriminator's final output is a single scalar judging how close the
# input is to a real image.
D_W2 = tf.Variable(tf.random_normal([n_hidden, 1], stddev=0.01))
D_b2 = tf.Variable(tf.zeros([1]))
# Builds the generator (G) network.
def generator(noise_z):
    hidden_layer = tf.nn.relu(tf.matmul(noise_z, G_W1) + G_b1)
    generated_outputs = tf.sigmoid(tf.matmul(hidden_layer, G_W2) + G_b2)
    return generated_outputs
# Builds the discriminator (D) network.
def discriminator(inputs):
    hidden_layer = tf.nn.relu(tf.matmul(inputs, D_W1) + D_b1)
    discrimination = tf.sigmoid(tf.matmul(hidden_layer, D_W2) + D_b2)
    return discrimination
# Creates random noise (Z).
def get_noise(batch_size):
    return np.random.normal(size=(batch_size, n_noise))
# Generate random images from the noise.
G = generator(Z)
# Score telling how "real" the generated images look to the discriminator.
D_gene = discriminator(G)
# Score obtained by showing real images to the discriminator.
D_real = discriminator(X)
# Per the paper, optimizing a GAN means maximizing both loss_G and loss_D.
# In the formula below, maximizing loss_D pushes D_gene down: the network is
# trained so that tf.log(D_real) is maximal on real images and
# tf.log(1 - D_gene) is maximal on generated ones.
# That is, the discriminator learns to judge the generator's images as fake.
loss_D = tf.reduce_mean(tf.log(D_real) + tf.log(1 - D_gene))
# Conversely, maximizing loss_G pushes D_gene up: the generator is trained so
# that the discriminator judges its fake images to be as real as possible.
# The paper minimizes the same expression used for loss_D, but since that
# reduces to maximizing D_gene the simpler form below can be used.
loss_G = tf.reduce_mean(tf.log(D_gene))
# loss_D is optimized using only the discriminator's variables, and loss_G
# using only the generator's variables.
D_var_list = [D_W1, D_b1, D_W2, D_b2]
G_var_list = [G_W1, G_b1, G_W2, G_b2]
# The paper's formulas maximize the losses, but because we use a minimizing
# optimizer we negate loss_D and loss_G.
train_D = tf.train.AdamOptimizer(learning_rate).minimize(-loss_D, var_list=D_var_list)
train_G = tf.train.AdamOptimizer(learning_rate).minimize(-loss_G, var_list=G_var_list)
#########
# Train the network model
######
sess = tf.Session()
sess.run(tf.global_variables_initializer())
total_batch = int(mnist.train.num_examples/batch_size)
loss_val_D, loss_val_G = 0, 0
for epoch in range(total_epoch):
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        noise = get_noise(batch_size)
        # Train the discriminator and the generator networks separately.
        _, loss_val_D = sess.run([train_D, loss_D], feed_dict={X: batch_xs, Z: noise})
        _, loss_val_G = sess.run([train_G, loss_G], feed_dict={Z: noise})
    print 'Epoch:', '%04d' % (epoch + 1), \
        'D loss: {:.4}'.format(loss_val_D), \
        'G loss: {:.4}'.format(loss_val_G)
#########
# Generate and save sample images to watch how training went.
# NOTE(review): the original comment said "periodically", but this code sits
# after the training loop and runs only once (using the final value of
# ``epoch``) -- confirm whether it was meant to be indented inside the loop.
######
sample_size = 10
noise = get_noise(sample_size)
samples = sess.run(G, feed_dict={Z: noise})
fig, ax = plt.subplots(1, sample_size, figsize=(sample_size, 1))
for i in range(sample_size):
    ax[i].set_axis_off()
    ax[i].imshow(np.reshape(samples[i], (28, 28)))
plt.savefig('samples/{}.png'.format(str(epoch).zfill(3)), bbox_inches='tight')
plt.close(fig)
print '최적화 완료!'
| 29.884892 | 86 | 0.686808 |
acf1e53fd682a3f7c665b61a720902bef67cd64f | 2,258 | py | Python | src/documents/urls.py | Talengi/phase | 60ff6f37778971ae356c5b2b20e0d174a8288bfe | [
"MIT"
] | 8 | 2016-01-29T11:53:40.000Z | 2020-03-02T22:42:02.000Z | src/documents/urls.py | Talengi/phase | 60ff6f37778971ae356c5b2b20e0d174a8288bfe | [
"MIT"
] | 289 | 2015-03-23T07:42:52.000Z | 2022-03-11T23:26:10.000Z | src/documents/urls.py | Talengi/phase | 60ff6f37778971ae356c5b2b20e0d174a8288bfe | [
"MIT"
] | 7 | 2015-12-08T09:03:20.000Z | 2020-05-11T15:36:51.000Z | from django.conf.urls import url
from documents.views import (
    DocumentList, DocumentCreate, DocumentDetail, DocumentEdit,
    DocumentDownload, DocumentRedirect, DocumentRevise, DocumentDelete,
    DocumentRevisionDelete, RevisionFileDownload, DocumentFileDownload
)
# URL routes for the documents app.
# Order matters: Django resolves patterns top to bottom, so specific suffixes
# (create/edit/revise/delete/...) are declared before the generic
# detail and file-download catch-alls.
urlpatterns = [
    # Document short url
    url(r'^documents/(?P<document_key>[\w-]+)/$',
        DocumentRedirect.as_view(),
        name='document_short_url'),
    # Downloads
    url(r'^(?P<organisation>[\w-]+)/(?P<category>[\w-]+)/download/$',
        DocumentDownload.as_view(),
        name="document_download"),
    # Documents
    url(r'^(?P<organisation>[\w-]+)/(?P<category>[\w-]+)/$',
        DocumentList.as_view(),
        name="category_document_list"),
    url(r'^(?P<organisation>[\w-]+)/(?P<category>[\w-]+)/create/$',
        DocumentCreate.as_view(),
        name="document_create"),
    url(r'^(?P<organisation>[\w-]+)/(?P<category>[\w-]+)/(?P<document_key>[\w-]+)/$',
        DocumentDetail.as_view(),
        name="document_detail"),
    url(r'^(?P<organisation>[\w-]+)/(?P<category>[\w-]+)/(?P<document_key>[\w-]+)/edit/$',
        DocumentEdit.as_view(),
        name="document_edit"),
    # Same view/name as above, with an explicit revision number in the URL.
    url(r'^(?P<organisation>[\w-]+)/(?P<category>[\w-]+)/(?P<document_key>[\w-]+)/edit/(?P<revision>\d+)/$',
        DocumentEdit.as_view(),
        name="document_edit"),
    url(r'^(?P<organisation>[\w-]+)/(?P<category>[\w-]+)/(?P<document_key>[\w-]+)/revise/$',
        DocumentRevise.as_view(),
        name="document_revise"),
    url(r'^(?P<organisation>[\w-]+)/(?P<category>[\w-]+)/(?P<document_key>[\w-]+)/delete/$',
        DocumentDelete.as_view(),
        name="document_delete"),
    url(r'^(?P<organisation>[\w-]+)/(?P<category>[\w-]+)/(?P<document_key>[\w-]+)/revision_delete/$',
        DocumentRevisionDelete.as_view(),
        name="document_revision_delete"),
    # File downloads: the revision-specific route must precede the generic
    # document file route, since the latter pattern would also match it.
    url(r'^(?P<organisation>[\w-]+)/(?P<category>[\w-]+)/(?P<document_key>[\w-]+)/(?P<revision>\d+)/(?P<field_name>\w+)/$',
        RevisionFileDownload.as_view(),
        name="revision_file_download"),
    url(r'^(?P<organisation>[\w-]+)/(?P<category>[\w-]+)/(?P<document_key>[\w-]+)/(?P<field_name>\w+)/$',
        DocumentFileDownload.as_view(),
        name="document_file_download"),
]
| 42.603774 | 123 | 0.593003 |
acf1e59ae53948f5c46eca2015422b0a9506e9b6 | 469 | py | Python | Scripts/python/scripts mundo 1/jogo do dodo/Desafio048.py | BrenoNAlmeida/Scripts-Escola | 20d886d0401ef7f40a4a46e307eadbf5b1c0a5eb | [
"Apache-2.0"
] | null | null | null | Scripts/python/scripts mundo 1/jogo do dodo/Desafio048.py | BrenoNAlmeida/Scripts-Escola | 20d886d0401ef7f40a4a46e307eadbf5b1c0a5eb | [
"Apache-2.0"
] | null | null | null | Scripts/python/scripts mundo 1/jogo do dodo/Desafio048.py | BrenoNAlmeida/Scripts-Escola | 20d886d0401ef7f40a4a46e307eadbf5b1c0a5eb | [
"Apache-2.0"
] | null | null | null | print('o somatorio de todos os numeros impares divisiveis por três entre 0 e 500 é')
for c in range(0,500):
if c % 3 == 0 :
algo = (c)
soma = algo + c
print(soma)
# Replay prompt: keep asking until the user answers exactly 'sim' or 'nao'.
escolha4 = ''
while escolha4 != 'sim' and escolha4 != 'nao':
    escolha4 = str(input('você deseja executar novamente [sim/nao]?')).lower()
    if escolha4 == 'sim':
        # Re-running relies on the import's side effects.
        # NOTE(review): Python caches modules, so if this module was imported
        # before, answering 'sim' will not actually re-run it -- confirm.
        import jogo_do_tio_Dodo
    if escolha4 == 'nao':
        print('obrigado por ultilizar nossos serviços')
        break
k | 31.266667 | 84 | 0.626866 |
acf1e5caf26d3cb7c50249310275af83c332f8d4 | 3,388 | py | Python | authlib/little_auth/migrations/0001_initial.py | matthiask/django-authlib | 6589316159bb0c2dace29d35334e4f930353eb88 | [
"MIT"
] | 35 | 2017-05-17T14:47:37.000Z | 2022-01-20T10:58:43.000Z | authlib/little_auth/migrations/0001_initial.py | matthiask/django-authlib | 6589316159bb0c2dace29d35334e4f930353eb88 | [
"MIT"
] | 7 | 2017-01-13T15:41:49.000Z | 2021-12-16T14:54:47.000Z | authlib/little_auth/migrations/0001_initial.py | matthiask/django-authlib | 6589316159bb0c2dace29d35334e4f930353eb88 | [
"MIT"
] | 9 | 2017-12-26T19:50:43.000Z | 2021-12-15T18:33:58.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-11 07:09
from __future__ import unicode_literals
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [("auth", "__first__")]
operations = [
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"email",
models.EmailField(
max_length=254, unique=True, verbose_name="email"
),
),
(
"is_active",
models.BooleanField(default=True, verbose_name="is active"),
),
(
"is_staff",
models.BooleanField(default=False, verbose_name="is staff"),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="date joined"
),
),
(
"full_name",
models.CharField(max_length=200, verbose_name="full name"),
),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={
"abstract": False,
"verbose_name_plural": "users",
"ordering": ["full_name"],
"verbose_name": "user",
},
)
]
| 34.571429 | 134 | 0.399941 |
acf1e627ab053dbe64cf7488204f336f9c80dc72 | 7,292 | py | Python | plugin/core/protocol.py | scriptis/LSP | 4844d70ee51a107ba768569127149a7fbe1fa2f9 | [
"MIT"
] | null | null | null | plugin/core/protocol.py | scriptis/LSP | 4844d70ee51a107ba768569127149a7fbe1fa2f9 | [
"MIT"
] | null | null | null | plugin/core/protocol.py | scriptis/LSP | 4844d70ee51a107ba768569127149a7fbe1fa2f9 | [
"MIT"
] | null | null | null | try:
from typing import Any, List, Dict, Tuple, Callable, Optional
assert Any and List and Dict and Tuple and Callable and Optional
except ImportError:
pass
# LSP ``TextDocumentSyncKind``: how the server wants document changes synced
# (not at all, full document on every change, or incremental deltas).
TextDocumentSyncKindNone = 0
TextDocumentSyncKindFull = 1
TextDocumentSyncKindIncremental = 2
class DiagnosticSeverity(object):
    """Integer constants mirroring the LSP ``DiagnosticSeverity`` enumeration."""
    Error = 1
    Warning = 2
    Information = 3
    Hint = 4
class SymbolKind(object):
    """Integer constants mirroring the LSP ``SymbolKind`` enumeration.

    The original class stopped at ``Array`` (18); the LSP 3.x specification
    defines kinds up to ``TypeParameter`` (26), so the missing values are
    added here (backward-compatible: existing values are unchanged).
    """
    File = 1
    Module = 2
    Namespace = 3
    Package = 4
    Class = 5
    Method = 6
    Property = 7
    Field = 8
    Constructor = 9
    Enum = 10
    Interface = 11
    Function = 12
    Variable = 13
    Constant = 14
    String = 15
    Number = 16
    Boolean = 17
    Array = 18
    # Kinds 19-26 completing the LSP 3.x enumeration.
    Object = 19
    Key = 20
    Null = 21
    EnumMember = 22
    Struct = 23
    Event = 24
    Operator = 25
    TypeParameter = 26
class CompletionItemKind(object):
    """Integer constants mirroring the LSP ``CompletionItemKind`` enumeration."""
    Text = 1
    Method = 2
    Function = 3
    Constructor = 4
    Field = 5
    Variable = 6
    Class = 7
    Interface = 8
    Module = 9
    Property = 10
    Unit = 11
    Value = 12
    Enum = 13
    Keyword = 14
    Snippet = 15
    Color = 16
    File = 17
    Reference = 18
    Folder = 19
    EnumMember = 20
    Constant = 21
    Struct = 22
    Event = 23
    Operator = 24
    TypeParameter = 25
class DocumentHighlightKind(object):
    """Integer constants mirroring the LSP ``DocumentHighlightKind`` enumeration.

    NOTE(review): ``Unknown`` (0) is not part of the LSP specification (which
    defines only Text/Read/Write); it appears to be a local sentinel -- confirm.
    """
    Unknown = 0
    Text = 1
    Read = 2
    Write = 3
class Request:
    """A JSON-RPC 2.0 request expecting a response from the language server.

    The classmethod factories below map one-to-one onto LSP request methods;
    ``to_payload`` produces the wire-format dict (the request id is assigned
    by the transport layer at send time).
    """
    def __init__(self, method: str, params: 'Optional[dict]') -> None:
        self.method = method
        self.params = params
        self.jsonrpc = "2.0"
    @classmethod
    def initialize(cls, params: dict) -> 'Request':
        return Request("initialize", params)
    @classmethod
    def hover(cls, params: dict) -> 'Request':
        return Request("textDocument/hover", params)
    @classmethod
    def complete(cls, params: dict) -> 'Request':
        return Request("textDocument/completion", params)
    @classmethod
    def signatureHelp(cls, params: dict) -> 'Request':
        return Request("textDocument/signatureHelp", params)
    @classmethod
    def references(cls, params: dict) -> 'Request':
        return Request("textDocument/references", params)
    @classmethod
    def definition(cls, params: dict) -> 'Request':
        return Request("textDocument/definition", params)
    @classmethod
    def rename(cls, params: dict) -> 'Request':
        return Request("textDocument/rename", params)
    @classmethod
    def codeAction(cls, params: dict) -> 'Request':
        return Request("textDocument/codeAction", params)
    @classmethod
    def executeCommand(cls, params: dict) -> 'Request':
        return Request("workspace/executeCommand", params)
    @classmethod
    def formatting(cls, params: dict) -> 'Request':
        return Request("textDocument/formatting", params)
    @classmethod
    def rangeFormatting(cls, params: dict) -> 'Request':
        return Request("textDocument/rangeFormatting", params)
    @classmethod
    def documentSymbols(cls, params: dict) -> 'Request':
        return Request("textDocument/documentSymbol", params)
    @classmethod
    def documentHighlight(cls, params: dict) -> 'Request':
        return Request("textDocument/documentHighlight", params)
    @classmethod
    def resolveCompletionItem(cls, params: dict) -> 'Request':
        return Request("completionItem/resolve", params)
    @classmethod
    def shutdown(cls) -> 'Request':
        # shutdown is the only request sent without params.
        return Request("shutdown", None)
    def __repr__(self) -> str:
        return self.method + " " + str(self.params)
    def to_payload(self, id) -> 'Dict[str, Any]':
        """Serialize to a JSON-RPC request dict with the given request ``id``.

        ``params`` is omitted entirely when None (e.g. for shutdown).
        Note: the ``id`` parameter intentionally shadows the builtin; renaming
        it would break keyword callers.
        """
        r = {
            "jsonrpc": "2.0",
            "id": id,
            "method": self.method
        }  # type: Dict[str, Any]
        if self.params is not None:
            r["params"] = self.params
        return r
class Response:
    """A JSON-RPC 2.0 response carrying the result for an earlier request."""
    def __init__(self, request_id: int, result: 'Dict[str, Any]') -> None:
        self.request_id = request_id
        self.result = result
        self.jsonrpc = "2.0"
    def to_payload(self) -> 'Dict[str, Any]':
        """Serialize into a wire-format JSON-RPC response dict."""
        return {
            "id": self.request_id,
            "jsonrpc": self.jsonrpc,
            "result": self.result,
        }
class Notification:
    """A JSON-RPC 2.0 notification: a one-way message with no response."""
    def __init__(self, method: str, params: 'Optional[dict]' = None) -> None:
        """Create a notification for ``method``.

        ``params`` defaults to a fresh empty dict per instance. (The previous
        default ``params: dict = {}`` was a single shared mutable object, so
        mutating one notification's ``params`` silently leaked into every
        later notification created without explicit params.)
        """
        self.method = method
        # Build a new dict per instance instead of sharing one default.
        self.params = {} if params is None else params
        self.jsonrpc = "2.0"
    @classmethod
    def initialized(cls) -> 'Notification':
        return Notification("initialized")
    @classmethod
    def didOpen(cls, params: dict) -> 'Notification':
        return Notification("textDocument/didOpen", params)
    @classmethod
    def didChange(cls, params: dict) -> 'Notification':
        return Notification("textDocument/didChange", params)
    @classmethod
    def didSave(cls, params: dict) -> 'Notification':
        return Notification("textDocument/didSave", params)
    @classmethod
    def didClose(cls, params: dict) -> 'Notification':
        return Notification("textDocument/didClose", params)
    @classmethod
    def didChangeConfiguration(cls, params: dict) -> 'Notification':
        return Notification("workspace/didChangeConfiguration", params)
    @classmethod
    def exit(cls) -> 'Notification':
        return Notification("exit")
    def __repr__(self) -> str:
        return self.method + " " + str(self.params)
    def to_payload(self) -> 'Dict[str, Any]':
        """Serialize to a JSON-RPC dict; ``params`` is always emitted."""
        r = {
            "jsonrpc": "2.0",
            "method": self.method
        }  # type: Dict[str, Any]
        # ``params`` can only be None if a caller reset the attribute after
        # construction; keep emitting an empty dict in that case.
        if self.params is not None:
            r["params"] = self.params
        else:
            r["params"] = dict()
        return r
class Point(object):
    """A zero-based text position: ``row`` (line) and ``col`` (character)."""
    def __init__(self, row: int, col: int) -> None:
        # Coerce eagerly so formatting and comparisons can rely on ints.
        self.row = int(row)
        self.col = int(col)
    def __repr__(self) -> str:
        return "%d:%d" % (self.row, self.col)
    @classmethod
    def from_lsp(cls, point: dict) -> 'Point':
        """Build a Point from an LSP ``Position`` payload."""
        return Point(point['line'], point['character'])
    def to_lsp(self) -> 'Dict[str, Any]':
        """Serialize into an LSP ``Position`` payload."""
        return {"line": self.row, "character": self.col}
class Range(object):
    """A contiguous span of text bounded by ``start`` and ``end`` Points."""
    def __init__(self, start: Point, end: Point) -> None:
        self.start = start
        self.end = end
    def __repr__(self) -> str:
        return "(%s %s)" % (self.start, self.end)
    @classmethod
    def from_lsp(cls, range: dict) -> 'Range':
        """Deserialize an LSP ``Range`` payload."""
        start = Point.from_lsp(range['start'])
        end = Point.from_lsp(range['end'])
        return Range(start, end)
    def to_lsp(self) -> 'Dict[str, Any]':
        """Serialize into an LSP ``Range`` payload."""
        return {'start': self.start.to_lsp(), 'end': self.end.to_lsp()}
class Diagnostic(object):
    """A diagnostic (lint/compile message) attached to a text range."""
    def __init__(self, message: str, range: Range, severity: int,
                 source: 'Optional[str]', lsp_diagnostic: dict) -> None:
        self.message = message
        self.range = range
        self.severity = severity
        self.source = source
        # Keep the raw payload so serialization stays lossless even for
        # fields this class does not model explicitly.
        self._lsp_diagnostic = lsp_diagnostic
    @classmethod
    def from_lsp(cls, lsp_diagnostic: dict) -> 'Diagnostic':
        """Deserialize an LSP ``Diagnostic``; severity defaults to Error."""
        diag_range = Range.from_lsp(lsp_diagnostic['range'])
        severity = lsp_diagnostic.get('severity', DiagnosticSeverity.Error)
        source = lsp_diagnostic.get('source')
        return Diagnostic(lsp_diagnostic['message'], diag_range, severity,
                          source, lsp_diagnostic)
    def to_lsp(self) -> 'Dict[str, Any]':
        """Return the original wire payload unchanged."""
        return self._lsp_diagnostic
| 25.950178 | 82 | 0.597778 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.