| content (string, lengths 5–1.05M) |
|---|
#!/usr/bin/python3
#Read file $1 and convert to $2
from tabulate import tabulate
from sys import argv
data = []
f1 = open(argv[1], "r")
for line in f1:
nw = line.split()
if len(nw) != 4:
continue
data.append(nw)
f1.close()
rst = tabulate(data[1:], headers=data[0],tablefmt='rst')
f2 = open(argv[2], "w")
f2.write("Statement Coverage of pysumo and pySUMOQt\n======================================================\n")
f2.write(rst)
f2.write("\n")
f2.close()
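# Usage sketch (the script and file names below are hypothetical): the input is
# whitespace-separated text whose four-column rows are kept, the first of which
# is treated as the header row, e.g.
#     ./coverage_to_rst.py coverage.txt coverage.rst
# The output file then contains the hard-coded title followed by an
# RST-formatted table produced by tabulate.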
|
from lpd.enums import Phase, State
from lpd.callbacks.callback_base import CallbackBase
from typing import Union, List, Optional
class LossOptimizerHandlerBase(CallbackBase):
"""
In case LossOptimizerHandler is not suitable for your needs, create your own
custom callback by deriving from this class and implementing __call__.
There you have full control over handling the loss and the optimizer.
Args:
apply_on_phase - see in CallbackBase
apply_on_states - see in CallbackBase
round_values_on_print_to - see in CallbackBase
"""
def __init__(self, apply_on_phase: Phase,
apply_on_states: Union[State, List[State]],
round_values_on_print_to: Optional[int]=None):
super(LossOptimizerHandlerBase, self).__init__(apply_on_phase, apply_on_states, round_values_on_print_to)
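# Editor's sketch of the pattern the docstring above describes: derive from
# LossOptimizerHandlerBase and implement __call__. The callback-context
# attribute names used here (trainer, last loss, optimizer) are illustrative
# assumptions, not lpd's confirmed API.
class MyLossOptimizerHandler(LossOptimizerHandlerBase):
    def __call__(self, callback_context):
        # Full control over the loss and the optimizer happens here.
        loss = callback_context.trainer.get_last_loss()   # assumed accessor
        optimizer = callback_context.trainer.optimizer    # assumed attribute
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()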
|
import math
import time
from utils.defines import *
from utils.interface import *
kb = KeyBoard(r'/dev/hidg0')
mouse = Mouse(r'/dev/hidg1')
def makeCircle(r):
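"""Return a list of (dx, dy) offsets that, applied step by step, trace an approximate circle of radius r using r points."""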
points = []
for i in range(r):
x = r * math.cos(i * 2 * math.pi / r)
y = r * math.sin(i * 2 * math.pi / r)
points.append((int(x), int(y)))
offsets = []
for i in range(len(points)-1):
x = points[i+1][0] - points[i][0]
y = points[i+1][1] - points[i][1]
offsets.append((x, y))
return offsets
mouse.btn_press(MOUSE_BTN_LEFT)
mouse.btn_release(MOUSE_BTN_LEFT)
time.sleep(0.5)
for (x,y) in makeCircle(200):
mouse.move(x=x,y=y)
time.sleep(0.01)
for i in range(10):
mouse.wheel_move(wh=-1)
time.sleep(0.5)
for key in [KEY_A,KEY_B,KEY_C,KEY_D]:
kb.key_press(key)
time.sleep(0.1)
kb.key_release(key)
time.sleep(0.1)
for key in [KEY_LEFT_CTRL,
KEY_LEFT_SHIFT,
KEY_LEFT_ALT,
KEY_RIGHT_CTRL,
KEY_RIGHT_SHIFT,
KEY_RIGHT_ALT,
]:
kb.key_press(key)
time.sleep(0.1)
kb.key_release(key)
time.sleep(0.5)
|
from flask import Flask, render_template
from flask_socketio import SocketIO
from config import app_config
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# app.config['SECRET_KEY'] = 'secret!'
app.config.from_pyfile('config.py', silent=True)
app.config.from_object(app_config['development'])
socketio = SocketIO(app)
db = SQLAlchemy()
db.init_app(app)
@socketio.on('new photo')
def handle_new_photo(data):
pass
@app.route('/')
def index():
return 'INDEX'
@app.route('/images/<int:index>')
def fetch_image(index):
pass
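# Illustrative sketch (not part of the original app): the handlers above are
# stubs. One minimal way to flesh them out, assuming an in-memory `photos`
# list (a placeholder, not a real storage layer), is to rebroadcast each
# upload to all connected clients. Note that Flask-SocketIO apps are started
# with socketio.run(app) rather than app.run().
from flask_socketio import emit

photos = []  # hypothetical in-memory store, for demonstration only


@socketio.on('photo uploaded')  # separate event name, to avoid clashing with the stub above
def handle_photo_uploaded(data):
    photos.append(data)
    emit('photo added', data, broadcast=True)


if __name__ == '__main__':
    socketio.run(app)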
|
"""
File: boggle.py
Name: Jeffrey Lin 2020/12
----------------------------------------
This program aims to find words that can be constructed from sequentially
adjacent letters on the 4×4 grid. “Adjacent” letters are those neighbouring horizontally, vertically,
or diagonally. Words must be at least four letters long and may not use the same letter cube more than once.
"""
# This is the file name of the dictionary txt file
# we will be checking if a word exists by searching through it
FILE = 'dictionary.txt'
# Global Variable
dictionary = []
d = {}
row_list = []
num = 0
check = 0
ans_list = []
def main():
"""
Find words that can be constructed from sequentially adjacent letters
on the 4×4 grid. Words must be at least four letters long and may not use the same letter cube more than once.
"""
global d, num
read_dictionary()
input_boggle_letter()
if check == 0:
current_index = []
for row, col in d:
current_index.append((row, col))
find_word(row, col, d, d[(row, col)], current_index)
current_index.pop()
print(f'There are {num} words in total.')
def input_boggle_letter():
global row_list, check
for i in range(4):
row = input(str(i+1)+' row of letters: ').lower()
if len(row) == 7:
row_list = row.split()
if len(row_list) == 4:
for j in range(len(row_list)):
d[(i, j)] = row_list[j]
else:
print('Illegal Input')
check += 1
break
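# Example input (illustrative): each row is typed as four letters separated by
# single spaces, e.g. "f y c l", which is why a valid row string is 7
# characters long before splitting.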
def find_word(row, col, d, answer, current_index):
"""
:param row: int, the number of row
:param col: int, the number of column
:param d: dict{(int,int):str(row_character)}
:param answer: the current answer string
:param current_index: list[(int, int)], the grid positions already used on the current path
"""
global dictionary, num, ans_list
if len(answer) >= 4:
if answer in dictionary and answer not in ans_list:
print(f'Found: "{answer}" ')
ans_list.append(answer)
num += 1
if has_prefix(answer):
for i in range(-1, 2, 1):
for j in range(-1, 2, 1):
if 0 <= row + i < 4 and 0 <= col + j < 4:
if (row+i, col+j) not in current_index:
# choose
current_index.append((row+i, col+j))
find_word(row+i, col+j, d, answer+d[(row+i, col+j)], current_index)
# un-choose
current_index.pop()
def read_dictionary():
"""
This function reads file "dictionary.txt" stored in FILE
and appends words in each line into a Python list
"""
global dictionary
with open(FILE, 'r') as f:
for line in f:
word = line.strip()
dictionary.append(word)
def has_prefix(sub_s):
"""
:param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
:return: (bool) True if any word in the dictionary starts with sub_s
"""
global dictionary
for voc in dictionary:
if voc.startswith(sub_s):
return True
return False
if __name__ == '__main__':
main()
|
from exponent_server_sdk import (
DeviceNotRegisteredError,
PushClient,
PushMessage,
# PushResponseError,
)
from app.services.user import UserService
class Notification:
def __init__(self, user_svc: UserService) -> None:
self.user_svc = user_svc
# Basic arguments. You should extend this function with the push features you
# want to use, or simply pass in a `PushMessage` object.
def send_push_message(self, token, message, extra=None):
response = PushClient().publish(PushMessage(to=token, body=message, data=extra))
try:
# We got a response back, but we don't know whether it's an error yet.
# This call raises errors so we can handle them with normal exception
# flows.
response.validate_response()
except DeviceNotRegisteredError:
# self.user_svc.edit_user()
# Mark the push token as inactive
# PushToken.objects.filter(token=token).update(active=False)
print("device not registered")
except Exception as exc:
# Encountered some other per-notification error.
raise exc
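# Hypothetical usage sketch: how UserService is constructed is not shown in the
# original, so UserService() below is an assumption, and the token is a
# placeholder in the usual "ExponentPushToken[...]" format.
def _example_send():
    notifier = Notification(user_svc=UserService())
    notifier.send_push_message(
        token="ExponentPushToken[xxxxxxxxxxxxxxxxxxxxxx]",
        message="Hello from the notification service",
        extra={"type": "test"},
    )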
|
import os
from os.path import join, dirname
from dotenv import load_dotenv # need to `pip install -U python-dotenv`
# Create .env file path.
dotenv_path = join(dirname(__file__), '.env')
print(dotenv_path)
# Load file from the path.
load_dotenv(dotenv_path)
# Accessing variables.
ACCOUNT_SID = os.getenv('TWILIO_ACCOUNT_SID')
AUTH_TOKEN = os.getenv('TWILIO_AUTH_TOKEN')
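# Editor's sketch (an assumption, not in the original): the TWILIO_* variable
# names suggest these credentials feed a Twilio REST client. A matching .env
# file would contain lines like
#     TWILIO_ACCOUNT_SID=ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#     TWILIO_AUTH_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def make_twilio_client():
    from twilio.rest import Client  # requires `pip install twilio`
    return Client(ACCOUNT_SID, AUTH_TOKEN)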
|
from joblib import load
import pandas as pd
import numpy as np
class models():
def __init__(self):
self.model_status = 'Loading'
self.bmi_model = self.load_model(model_path='assets/bmi_model.joblib')
self.general_health_model = self.load_model(model_path='assets/gen_health_model.joblib')
def load_model(self, model_path):
try:
model = load(model_path)
self.model_status = 'Models Ready'
print('Model Loaded Successfully')
return model
except FileNotFoundError:
print('Model not found. Please contact administrator.')
except NameError:
print('joblib Load Module not defined')
class data():
def __init__(self):
self.default_data= None
self.model_data = None
self.user_data = None
self.data_status = None
self.set_default_data() #'default' or 'user' for each variable
def get_data(self, data_name):
return np.array([list(self.model_data[data_name].values())])
def update_user_data(self, data_to_update):
pass
def get_model_names_if_key(self, data_to_check):
assert type(data_to_check) == dict
valid_keys = []
if list(data_to_check.keys())[0] in self.model_data['bmi_data']:
valid_keys.append('bmi_data')
if list(data_to_check.keys())[0] in self.model_data['general_health_data']:
valid_keys.append('general_health_data')
return valid_keys
def update_model_data(self, data_to_update):
assert type(data_to_update) == dict
model_keys = self.get_model_names_if_key(data_to_update)
for model_key in model_keys:
for model_var, new_val in data_to_update.items():
self.model_data[model_key][model_var] = new_val
def set_default_data(self, models='all'):
self.default_data = {}
default_bmi_data_path = 'assets/default_bmi_data.csv'
default_general_health_data_path = 'assets/default_gen_health_data.csv'
self.default_data['bmi_data'] = self.load_data(path=default_bmi_data_path)
self.default_data['general_health_data'] = self.load_data(path=default_general_health_data_path)
self.model_data = self.default_data
self.strip_target(self.model_data['bmi_data'], 'bmi')
self.strip_target(self.model_data['general_health_data'], 'general_health')
def strip_target(self, data, target):
try:
del data[target]
except KeyError:
print('Could not strip target [{}] from model data'.format(target))
def load_data(self, path):
df = pd.read_csv(path, header=None, names=['variable', 'median_value'])
default_dict = {}
for i in range(len(df)):
default_dict[df.iloc[i][0]] = df.iloc[i][1]
return default_dict
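# Hedged usage sketch: the joblib and CSV asset paths are hard-coded above, so
# this only runs where those files exist. The 'weight' key is a hypothetical
# variable name; real keys must match the variable column of the default CSVs,
# and predict() assumes the loaded objects are scikit-learn-style estimators.
def example_bmi_prediction():
    m = models()
    d = data()
    d.update_model_data({'weight': 80})   # hypothetical feature update
    features = d.get_data('bmi_data')     # shaped (1, n_features)
    return m.bmi_model.predict(features)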
|
from numbers import Number
from typing import List
class Skin:
def get_index(self, x: Number, y: Number):
raise NotImplementedError
def get_bounds(self):
raise NotImplementedError
class DefiniteSkin(Skin, object):
__slots__ = ("_skin",)
def __init__(self, skin: List[List[str]]):
self._skin = skin
self._skin.reverse()
def get_bounds(self):
return (range(len(self._skin[0])), range(len(self._skin)))
def get_index(self, x: Number, y: Number):
bounds = self.get_bounds()
if y not in bounds[1] or x not in bounds[0]:
return None
return self._skin[int(y)][int(x)]
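# Minimal usage sketch for DefiniteSkin as defined above: the constructor
# reverses the row order, so y == 0 refers to the last row passed in, and
# out-of-range coordinates return None.
skin = DefiniteSkin([list("abc"), list("def")])
assert skin.get_index(0, 0) == "d"   # first column of the last supplied row
assert skin.get_index(2, 1) == "c"   # last column of the first supplied row
assert skin.get_index(5, 0) is None  # x out of bounds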
|
import random
import hashlib
import os
import errno
import base64
from datetime import datetime
import calendar
import time
import flask
from flask import jsonify
from flask_restful import Resource
from sqlalchemy import Column, String, DateTime, SmallInteger, ForeignKey, exc, Integer, not_, exists, BLOB, UnicodeText
from sqlalchemy.orm import relationship
from sqlalchemy.ext.hybrid import hybrid_property
import pytz
import database
from database import Base
from app import app
import authorized
import email_handling
import string_constants
from boto.s3.connection import S3Connection, Bucket, Key
class UserModel(Base):
__tablename__ = 'users'
user_id = Column(Integer, primary_key=True)
username = Column(String(app.config["COLUMN_MAX_LENGTH"]))
password = Column(String(app.config["COLUMN_MAX_LENGTH"]))
password_salt = Column(String(app.config["COLUMN_MAX_LENGTH"]))
profile_image = Column(String(app.config["COLUMN_MAX_LENGTH"]))
_display_name = Column('display_name', UnicodeText())
deactivated = Column(SmallInteger)
password_reset = Column(SmallInteger)
@hybrid_property
def display_name(self):
__display_name = self._display_name
if type(self) is UserModel:
__display_name = __display_name.decode('unicode_escape')
return __display_name
@display_name.setter
def display_name(self, value):
_value = value.encode('unicode_escape')
self._display_name = _value
def __init__(self, user_id, profile_image, username, password, password_salt, display_name, deactivated=False,
password_reset=False):
self.user_id = user_id
self.username = username
self.password = password
self.password_salt = password_salt
self.profile_image = profile_image
self.display_name = display_name
self.deactivated = deactivated
self.password_reset = password_reset
def __repr__(self):
return "<User('%s', '%s', '%s', '%s', '%s', '%i', '%i')>" % (self.user_id, self.profile_image, self.username, self.password, self.display_name, self.deactivated, self.password_reset)
class ActiveUser(Base):
__tablename__ = 'active_users'
user_id = Column(String(app.config["COLUMN_MAX_LENGTH"]), primary_key=True)
access_token = Column(String(app.config["COLUMN_MAX_LENGTH"]))
expiry_date = Column(DateTime)
active = Column(SmallInteger)
def __init__(self, user_id, access_token, expiry_date, active):
self.user_id = user_id
self.access_token = access_token
self.expiry_date = expiry_date
self.active = active
def __repr__(self):
return "<ActiveUser('%s', '%s', '%s', '%i')>" % (self.user_id, self.access_token, self.expiry_date, self.active)
class Connection(Base):
__tablename__ = 'connections'
connection_id = Column(String(app.config["COLUMN_MAX_LENGTH"]), primary_key=True)
user1 = Column(Integer, ForeignKey(UserModel.user_id))
user2 = Column(Integer, ForeignKey(UserModel.user_id))
user1_model = relationship('UserModel', foreign_keys='Connection.user1')
user2_model = relationship('UserModel', foreign_keys='Connection.user2')
start_date = Column(DateTime)
approved = Column(SmallInteger)
disabled = Column(SmallInteger)
def __init__(self, connection_id, user1, user2, start_date, approved):
self.connection_id = connection_id
self.user1 = user1
self.user2 = user2
self.start_date = start_date
self.approved = approved
self.disabled = 0
def __repr__(self):
return "<Connection('%s', '%i', '%i', '%s', '%i', '%i')>" % (self.connection_id, self.user1, self.user2, self.start_date, self.approved, self.disabled)
class Receipt(Base):
__tablename__ = 'receipts'
receipt_id = Column(Integer, primary_key=True, autoincrement=True)
receipt_data = Column(BLOB)
receipt_date = Column(Integer)
receipt_user_id = Column(Integer)
receipt_product_id = Column(String(app.config["COLUMN_MAX_LENGTH"]))
def __init__(self, receipt_data, receipt_date, receipt_user_id, receipt_product_id):
self.receipt_id = None
self.receipt_data = receipt_data
self.receipt_date = receipt_date
self.receipt_user_id = receipt_user_id
self.receipt_product_id = receipt_product_id
def __repr__(self):
return "<Receipt('%i', '%s', '%i', '%i', '%s')>" % (self.receipt_id, self.receipt_data, self.receipt_date, self.receipt_user_id, self.receipt_product_id)
class User(Resource):
# Creating a user
# Required Params:
# username - string (Email)
# display_name - string
# password - string
# profile_image - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/', methods=["POST"])
def createUser():
req = flask.request.get_json()['params']
username = req['username']
display_name = req['display_name']
password = req['password']
profile_image = None
if "profile_image" in req:
profile_image = base64.b64decode(req['profile_image'])
if username is None or password is None or display_name is None:
return authorized.wrongParams()
session = database.DBSession()
userCheck = session.query(UserModel).filter(UserModel.username == username).first()
if userCheck is not None:
response = jsonify(message=string_constants.kServerUserAlreadyExistsError,
status=False,
HTTP_CODE=200
)
response.status_code = 200
return response
# Create User object
salt = hashlib.sha256(str(time.time() * random.randint(1, 9999999))).hexdigest()
password_hash = hashlib.sha256(password + salt).hexdigest()
new_User = UserModel(None, 'profileImage', username, password_hash, salt, display_name, False)
user_id = ""
try:
session.add(new_User)
session.commit()
user_id = session.query(UserModel.user_id).filter(UserModel.username == username).first()[0]
if app.config["AWS_S3"]:
if profile_image is not None:
aws_s3_connection = S3Connection(app.config['AWS_ACCESS_KEY'], app.config['AWS_SECRET_KEY'])
aws_s3_bucket = Bucket(aws_s3_connection, app.config['AWS_BUCKET_NAME'])
aws_s3_profile_image_key = Key(aws_s3_bucket)
aws_s3_profile_image_key.key = User.getProfileImage(user_id)
aws_s3_profile_image_key.content_type = app.config['AWS_KEY_CONTENT_TYPE']
aws_s3_profile_image_key.set_contents_from_string(profile_image, replace=True)
# Create Notification User object
from notification import RegisteredNotificationUserModel
notification_user = RegisteredNotificationUserModel(user_id, "")
session.add(notification_user)
session.commit()
except exc.SQLAlchemyError:
response = jsonify(message=string_constants.kServerUserCreationError,
status=False,
HTTP_CODE=404
)
response.status_code = 404
session.close()
return response
# Send welcome email to new user
email_handling.send_email(username, (string_constants.kWelcomeEmail % display_name.encode('unicode_escape')), string_constants.kWelcomeEmailSubject)
session.close()
response = jsonify(message=string_constants.kServerUserSignUpSuccess,
status=True,
HTTP_CODE=200
)
response.status_code = 200
return response
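# Example request body for the endpoint above (illustrative values only):
#   POST /api/<API_VERSION>/user/
#   {"params": {"username": "jane@example.com",
#               "display_name": "Jane",
#               "password": "hunter2",
#               "profile_image": "<base64-encoded image, optional>"}}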
# Getting user information
# Required Params:
# user_id - string
# user - string
# access_token - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/info', methods=["GET"])
def getUserInfo():
user_id = flask.request.args.get('user_id')
other_user_id = flask.request.args.get('user')
access_token = flask.request.args.get('access_token')
session = database.DBSession()
if user_id is None or access_token is None or other_user_id is None:
session.close()
return authorized.wrongParams()
# check if they are None and check the access_token against the active_users table
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
user = session.query(UserModel).filter(UserModel.user_id == other_user_id).first()
if user is None:
response = jsonify(message=string_constants.kServerUserNotFoundError,
status=False,
HTTP_CODE=200,
User=None
)
response.status_code = 200
session.close()
return response
else:
response = jsonify(message=string_constants.kServerUserInfoResponseSuccess,
status=True,
HTTP_CODE=200,
User={'user_id': user.user_id,
'profile_image': User.getProfileImage(user.user_id),
'connections': None,
'display_name': user.display_name,
'deactivated': user.deactivated
}
)
response.status_code = 200
session.close()
return response
# Deleting a user
# Required Params:
# user_id - string
# password - string
# access_token - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/removeUser/', methods=["POST"])
def removeUser():
req = flask.request.get_json()['params']
user_id = req['user_id']
password = req['password']
access_token = req['access_token']
session = database.DBSession()
if user_id is None and access_token is None and password is None:
session.close()
return authorized.wrongParams()
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
user = session.query(UserModel).filter(UserModel.user_id == user_id).first()
if user.password != hashlib.sha256(password + user.password_salt).hexdigest():
response = jsonify(message=string_constants.kServerDeactivatedNotAuthorizedError,
status=False,
HTTP_CODE=200
)
response.status_code = 200
session.close()
return response
if user.deactivated == 1:
response = jsonify(message=string_constants.kServerUserAlreadyDeactivatedError,
status=False,
HTTP_CODE=200
)
response.status_code = 200
session.close()
return response
#Deactivate the user's account
session.query(UserModel).filter(UserModel.user_id == user_id).update({'deactivated': 1}, synchronize_session='fetch')
#Remove users access token
active_user = session.query(ActiveUser).filter(ActiveUser.user_id == user_id).filter(
ActiveUser.access_token == access_token).first()
if active_user is not None:
session.delete(active_user)
#Remove user from registered notifications users
from notification import RegisteredNotificationUserModel
notification_user = session.query(RegisteredNotificationUserModel).filter(RegisteredNotificationUserModel.user_id == user_id).first()
if notification_user is not None:
session.delete(notification_user)
session.commit()
session.close()
response = jsonify(message=string_constants.kServerUserDeactivatedSuccess,
status=True,
HTTP_CODE=200
)
response.status_code = 200
return response
# Updating a user
# Required Params:
# user_id - string
# changes - dictionary <List of items to change>
# access_token - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/updateUser/', methods=["POST"])
def updateUser():
req = flask.request.get_json()['params']
user_id = req['user_id']
access_token = req['access_token']
session = database.DBSession()
changes = {}
if 'changes' in req:
changes = req['changes']
if user_id is None or access_token is None:
session.close()
return authorized.wrongParams()
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
if (len(changes) == 0):
response = jsonify(message=string_constants.kServerUserUpdateNoInfoToUpdateError,
status=False,
HTTP_CODE=200
)
response.status_code = 200
session.close()
return response
# Loop through changes dictionary and extract the white listed keys that we allow the user to change
allowed_keys = ['display_name', 'password', 'username', 'profile_image']
allowed_changes = {}
for key in changes:
if key in allowed_keys:
if key == 'display_name':
changes[key] = changes[key].encode('unicode_escape')
allowed_changes.update({key: changes[key]})
if 'profile_image' in allowed_changes:
try:
if app.config["AWS_S3"]:
profile_image = base64.b64decode(allowed_changes['profile_image'])
aws_s3_connection = S3Connection(app.config['AWS_ACCESS_KEY'], app.config['AWS_SECRET_KEY'])
aws_s3_bucket = Bucket(aws_s3_connection, app.config['AWS_BUCKET_NAME'])
aws_s3_profile_image_key = Key(aws_s3_bucket)
aws_s3_profile_image_key.key = User.getProfileImage(user_id)
aws_s3_profile_image_key.content_type = app.config['AWS_KEY_CONTENT_TYPE']
aws_s3_profile_image_key.set_contents_from_string(profile_image, replace=True)
except:
pass
del allowed_changes['profile_image']
if 'password' in allowed_changes:
salt = hashlib.sha256(str(time.time() * random.randint(1, 9999999))).hexdigest()
password_hash = hashlib.sha256(allowed_changes['password'] + salt).hexdigest()
allowed_changes['password'] = password_hash
allowed_changes['password_reset'] = False
allowed_changes['password_salt'] = salt
# Create an expiry date 7 days from today
expiry_date = datetime.today()
expiry_date = User.updateTokenExpiryDate(expiry_date)
try:
activeUser = session.query(ActiveUser).filter(ActiveUser.user_id == user_id).filter(
ActiveUser.access_token == access_token).first()
# Create a hash of the users information and save it as their access_token
access_token = hashlib.sha256(str(user_id) + password_hash + expiry_date.strftime(
string_constants.kDateFormatMinimalDate)).hexdigest()
activeUser.access_token = access_token
activeUser.expiry_date = expiry_date
activeUser.active = True
except exc.SQLAlchemyError:
response = jsonify(message=string_constants.kServerGeneric500Error,
status=False,
HTTP_CODE=500
)
response.status_code = 500
session.close()
return response
else:
activeUser = None
# Update each property of the user
if len(allowed_changes) > 0:
try:
session.query(UserModel).filter(UserModel.user_id == user_id). \
update(allowed_changes, synchronize_session='fetch')
session.commit()
except exc.SQLAlchemyError:
response = jsonify(message=string_constants.kServerGeneric500Error,
status=False,
HTTP_CODE=500
)
response.status_code = 500
session.close()
return response
# Get updated user to return
user = session.query(UserModel).filter(UserModel.user_id == user_id).first()
response = jsonify(message=string_constants.kServerUserInfoUpdatedSuccess,
status=True,
HTTP_CODE=200,
User={'username': user.username,
'user_id': user.user_id,
'connections': None,
'display_name': user.display_name,
'deactivated': user.deactivated,
'profile_image': User.getProfileImage(user_id)
},
access_token=activeUser is not None and activeUser.access_token or "",
expiry_date=activeUser is not None and activeUser.expiry_date.strftime(
string_constants.kDateFormatFullDate) or ""
)
response.status_code = 200
session.close()
return response
# Update Token for user
# Required Params:
# user_id - string
# access_token - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/update_token', methods=["POST"])
def updateToken():
session = database.DBSession()
req = flask.request.get_json()['params']
user_id = req['user_id']
access_token = req['access_token']
if user_id is None and access_token is None:
return authorized.wrongParams()
activeUser = session.query(ActiveUser).filter(ActiveUser.user_id == user_id).filter(
ActiveUser.access_token == access_token).first()
if activeUser is None:
return authorized.notActiveResponse()
else:
user = session.query(UserModel).filter(UserModel.user_id == user_id).first()
if user is None:
response = jsonify(message=string_constants.kServerGeneric500Error,
status=False,
HTTP_CODE=500
)
response.status_code = 500
return response
else:
expiry_date = datetime.today()
expiry_date = User.updateTokenExpiryDate(expiry_date)
# Create a hash of the users information and save it as their access_token
try:
access_token = hashlib.sha256(str(user.user_id) + user.password + expiry_date.strftime(
string_constants.kDateFormatMinimalDate)).hexdigest()
activeUser.access_token = access_token
activeUser.expiry_date = expiry_date
activeUser.active = True
session.commit()
except exc.SQLAlchemyError:
response = jsonify(message=string_constants.kServerGeneric500Error,
status=False,
HTTP_CODE=500
)
response.status_code = 500
session.close()
return response
response = jsonify(message=string_constants.kServerUserTokenUpdatedSuccess,
status=True,
HTTP_CODE=200,
access_token=access_token,
expiry_date=expiry_date
)
response.status_code = 200
return response
# Check username
# Required Params:
# username - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/check_username', methods=["GET"])
def checkUsername():
session = database.DBSession()
req = flask.request.args
username = req['username']
username_check = session.query(UserModel).filter(UserModel.username == username).first()
response = jsonify(
message=username_check is None and string_constants.kServerUserUsernameAvailableSuccess or string_constants.kServerUserUsernameAvailableError,
status=username_check is None and True or False,
HTTP_CODE=200
)
response.status_code = 200
session.close()
return response
# Logout a user
# Required Params:
# user_id - string
# access_token = string
@app.route('/api/'+app.config["API_VERSION"]+'/user/logout/', methods=["POST"])
def logout():
req = flask.request.get_json()['params']
user_id = req['user_id']
access_token = req['access_token']
session = database.DBSession()
if user_id is None and access_token is None:
session.close()
return authorized.wrongParams()
active_user = session.query(ActiveUser).filter(ActiveUser.user_id == user_id).filter(
ActiveUser.access_token == access_token).first()
if active_user is not None:
session.delete(active_user)
session.commit()
response = jsonify(message=string_constants.kServerUserLoggedOutSuccess,
status=True,
HTTP_CODE=200
)
response.status_code = 200
session.close()
return response
else:
response = jsonify(message=string_constants.kServerUserLoggedOutError,
status=False,
HTTP_CODE=200
)
response.status_code = 200
session.close()
return response
# Login a user
# Required Params:
# username - string
# password - string (can be nil)
# access_token = string (can be nil)
@app.route('/api/'+app.config["API_VERSION"]+'/user/login/', methods=["POST"])
def login():
req = flask.request.get_json()['params']
username = req['username']
if req['password'] != '':
password = req['password']
else:
password = ''
access_token = None
if 'access_token' in req:
access_token = req['access_token']
session = database.DBSession()
if username is None:
session.close()
return authorized.wrongParams()
def loginWithAccess_tokenAndUsername(username, access_token):
user = checkUserWithNoPassword(username)
if user is not None:
access_check = checkActiveUser(user.user_id, access_token)
if access_check is True:
return setActiveUser(user)
else:
return access_check
else:
return incorrectUser()
def checkUserWithNoPassword(username):
user = session.query(UserModel).filter(UserModel.username == username).first()
if (user is None):
return None
else:
return user
def checkUser(username, password):
user = session.query(UserModel).filter(UserModel.username == username).first()
if user is None or user.password != hashlib.sha256(str(password) + user.password_salt).hexdigest():
return None
else:
return user
def checkActiveUser(user_id, access_token):
activeUser = session.query(ActiveUser).filter(ActiveUser.user_id == user_id).filter(access_token == ActiveUser.access_token).first()
if activeUser is None:
return incorrectUser()
else:
return True
def setActiveUser(user):
activeUser = session.query(ActiveUser).filter(ActiveUser.user_id == user.user_id).first()
expiry_date = None
if activeUser is None:
# User exists but isn't logged in already, add user to active user and return access_token
# Create an expiry date 7 days from today
expiry_date = datetime.today()
expiry_date = User.updateTokenExpiryDate(expiry_date)
# Create a hash of the users information and save it as their access_token
access_token = hashlib.sha256(str(user.user_id) + user.password + expiry_date.strftime(
string_constants.kDateFormatMinimalDate)).hexdigest()
activeUser = ActiveUser(str(user.user_id), access_token, expiry_date, True)
try:
session.add(activeUser)
session.commit()
except exc.SQLAlchemyError:
response = jsonify(message=string_constants.kServerGeneric500Error,
status=False,
HTTP_CODE=500
)
response.status_code = 500
session.close()
return response
try:
if user.deactivated == 1:
user.deactivated = 0
activeUser.expiry_date = User.updateTokenExpiryDate(activeUser.expiry_date)
session.commit()
except exc.SQLAlchemyError:
response = jsonify(message=string_constants.kServerGeneric500Error,
status=False,
HTTP_CODE=500
)
response.status_code = 500
session.close()
return response
return returnUserInfo(user, activeUser.access_token, activeUser.expiry_date)
def returnUserInfo(user, access_token, expiry_date):
UTC_tz = pytz.timezone('UTC')
expiry_date = UTC_tz.localize(expiry_date).astimezone(pytz.utc)
response = jsonify(message=string_constants.kServerUserLoggedInSuccess,
status=True,
HTTP_CODE=200,
User={'username': user.username,
'user_id': user.user_id,
'profile_image': User.getProfileImage(user.user_id),
'connections': None,
'display_name': user.display_name,
'deactivated': user.deactivated
},
access_token=access_token,
expiry_date=expiry_date.strftime(string_constants.kDateFormatFullDate)
)
response.status_code = 200
session.commit()
session.close()
return response
def incorrectUser():
response = jsonify(message=string_constants.kServerUserLoggedInError,
status=False,
HTTP_CODE=401,
User=None,
access_token=None,
expiry_date=None
)
response.status_code = 401
session.close()
return response
# Check if username/password match if not then use access token and username, if that fails then return 401
if ((username is not None and password is not None) or username != '' and password != '') and access_token is None or access_token == '':
user = checkUser(username, password)
if user is not None:
return setActiveUser(user)
else:
return incorrectUser()
elif (username is not None and access_token is not None) or (username != '' and access_token != ''):
return loginWithAccess_tokenAndUsername(username, access_token)
return incorrectUser()
# Create a connection between two users
# Required Params:
# user_id - string
# user2 - string
# access_token - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/connection/', methods=["POST"])
def connection():
req = flask.request.get_json()['params']
user_id = req['user_id']
user2 = req['user2']
access_token = req['access_token']
session = database.DBSession()
if user_id is None and access_token is None and user2 is None:
return authorized.wrongParams()
# Check if the user is allowed to access this method
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
connection_check_query_a = session.query(Connection).filter(Connection.user1 == user_id).filter(
Connection.user2 != user_id).filter(Connection.user2 == user2).filter(Connection.user1 != user2)
connection_check_query_b = session.query(Connection).filter(Connection.user1 == user2).filter(
Connection.user1 != user_id).filter(Connection.user2 == user_id).filter(Connection.user2 != user2)
connection_check = connection_check_query_a.union(connection_check_query_b).first()
if connection_check is not None:
if connection_check.disabled == 0:
response = jsonify(message=string_constants.kServerUserConnectionRequestExistsError,
status=False,
HTTP_CODE=200
)
response.status_code = 200
session.close()
return response
try:
if connection_check is not None:
if connection_check.disabled == 1:
connection_check.disabled = 0
connection_check.user2 = (connection_check.user1 == int(user_id)) and connection_check.user2 or connection_check.user1
connection_check.user1 = int(user_id)
connection_id = connection_check.connection_id
else:
connection_id = hashlib.sha256(str(user_id) + str(user2)).hexdigest()
connection = Connection(connection_id, int(user_id), user2, datetime.utcnow(), False)
session.add(connection)
userDisplayName = session.query(UserModel.display_name).filter(UserModel.user_id == int(user_id)).first()
userDisplayName = userDisplayName[0]
# Add the notification for the connection request
from notification import NotificationModel, RegisteredNotificationUserModel
notification = NotificationModel(user_id, user2, {
string_constants.kServerNotificationsType: string_constants.kServerNotificationsTypeConnectionsRequest,
string_constants.kServerNotificationsUser_idKey: user_id,
string_constants.kServerNotificationsConnection_idKey: connection_id,
string_constants.kServerNotificationsUser_NameKey: userDisplayName
}, calendar.timegm(datetime.utcnow().timetuple()))
session.add(notification)
session.commit()
response = jsonify(message=string_constants.kServerUserConnectionRequestSentSuccess,
status=True,
HTTP_CODE=200
)
response.status_code = 200
session.close()
return response
except exc.SQLAlchemyError as e:
response = jsonify(message=string_constants.kServerUserConnectionRequestSentError,
status=False,
HTTP_CODE=200
)
response.status_code = 200
session.close()
return response
# Accept or decline connection request
# Required Params:
# user_id - string
# connection_id - string
# status - bool
# access_token - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/connection_status_change', methods=["POST"])
def connection_status_change():
req = flask.request.get_json()['params']
user_id = req['user_id']
connection_id = req['connection_id']
status = req['status']
access_token = req['access_token']
session = database.DBSession()
if user_id is None and access_token is None and connection_id is None and status is None:
return authorized.wrongParams()
# Check if the user is allowed to access this method
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
connection = session.query(Connection).filter(Connection.connection_id == connection_id).filter(Connection.user2 == int(user_id)).first()
if connection is not None:
from video import Timeline, Video
timeline_check = session.query(Timeline).filter(Timeline.connection_id == connection.connection_id).first()
if status is False or status == 0:
if timeline_check is None:
session.delete(connection)
session.commit()
session.close()
else:
try:
if timeline_check is None:
timeline_id = Video.createTimeline(session, user_id, connection_id)
else:
timeline_id = timeline_check.timeline_id
userDisplayName = connection.user2_model.display_name
# Add the notification for the connection request confirmation
from notification import NotificationModel, RegisteredNotificationUserModel
notification = NotificationModel(user_id, connection.user1, {
string_constants.kServerNotificationsType: string_constants.kServerNotificationsTypeConnectionsRequestConfirmation,
string_constants.kServerNotificationsUser_idKey: connection.user2,
string_constants.kServerNotificationsTimeline_idKey: timeline_id,
string_constants.kServerNotificationsUser_NameKey: userDisplayName
}, calendar.timegm(datetime.utcnow().timetuple()))
session.add(notification)
session.query(Connection).filter(Connection.connection_id == connection_id).filter(Connection.user2 == user_id).update({'approved': 1})
if timeline_id is not None:
session.commit()
session.close()
except exc.SQLAlchemyError as e:
response = jsonify(message=string_constants.kServerUserAcceptConnectionRequestError,
status=False,
HTTP_CODE=200
)
response.status_code = 200
session.close()
return response
response = jsonify(message=string_constants.kServerUserAcceptConnectionRequestSuccess,
status=True,
HTTP_CODE=200
)
response.status_code = 200
return response
else:
session.close()
response = jsonify(message=string_constants.kServerUserAcceptConnectionRequestError,
status=False,
HTTP_CODE=200
)
response.status_code = 200
return response
# Get a list of connections for a user
# Required Params:
# user_id - string
# access_token - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/connections/', methods=["GET"])
def connections():
req = flask.request.args
user_id = req['user_id']
access_token = req['access_token']
session = database.DBSession()
if user_id is None and access_token is None:
return authorized.wrongParams()
# Check if the user is allowed to access this method
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
connection_list_query_a = session.query(Connection).filter(Connection.user1 == user_id).filter(
Connection.approved == 1)
connection_list_query_b = session.query(Connection).filter(Connection.user2 == user_id).filter(
Connection.approved == 1)
from notification import NotificationModel
notification_list = session.query(NotificationModel).filter(NotificationModel.notification_receiver_id == user_id).filter(NotificationModel.notification_sent == 1).all()
connection_list = connection_list_query_a.union(connection_list_query_b).all()
connection_id_list = []
for c in connection_list:
connection_id_list.append(c.connection_id)
from video import Timeline
timelines = None
if len(connection_id_list) > 0:
timelines = session.query(Timeline).filter(Timeline.connection_id.in_(connection_id_list)).all()
connection_list_r = {'connections': [], 'requests': []}
if connection_list is not None and len(connection_list) > 0:
for timeline in timelines:
for connection in connection_list:
if connection.connection_id == timeline.connection_id:
new_connection = False
video_count = 0
if notification_list is not None and len(notification_list) > 0:
for notification in notification_list:
if notification.notification_payload[string_constants.kServerNotificationsType] == string_constants.kServerNotificationsTypeNewVideo:
if notification.notification_payload[string_constants.kServerNotificationsTimeline_idKey] == timeline.timeline_id:
video_count += 1
if notification.notification_payload[string_constants.kServerNotificationsType] == string_constants.kServerNotificationsTypeConnectionsRequestConfirmation:
if notification.notification_payload[string_constants.kServerNotificationsTimeline_idKey] == timeline.timeline_id:
new_connection = True
connection_list_r['connections'].append(User.getConnectionModelForReturn(connection, user_id, timeline.timeline_id, video_count, new_connection))
if notification_list is not None and len(notification_list) > 0:
request_ids = []
for notification in notification_list:
if notification.notification_payload['NotificationType'] == string_constants.kServerNotificationsTypeConnectionsRequest:
request_ids.append(notification.notification_sender)
if len(request_ids) > 0:
friend_requests = session.query(Connection).filter(Connection.user1.in_(request_ids)).filter(Connection.approved == 0).filter(Connection.user2 == int(user_id)).all()
for request_model in friend_requests:
connection_list_r['requests'].append({
'user': User.getSerializedUserModel(request_model.user1_model),
'connection_id': request_model.connection_id
})
session.close()
response = jsonify(message=string_constants.kServerUserConnectionListSuccess,
connections=connection_list_r['connections'],
requests=connection_list_r['requests'],
status=True,
HTTP_CODE=200
)
response.status_code = 200
return response
# Connections Profile with Timelines
# Required Params:
# user_id - string
# timeline_id - string
# access_token - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/connection/timeline/', methods=["GET"])
def connectionFromTimeline():
from video import Timeline, Video
req = flask.request.args
user_id = int(req['user_id'])
timeline_id = req['timeline_id']
access_token = req['access_token']
session = database.DBSession()
if user_id is None and access_token is None and timeline_id is None:
return authorized.wrongParams()
# Check if the user is allowed to access this method
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
timeline = session.query(Timeline).filter(Timeline.timeline_id == timeline_id).first()
# _response = {
# "message": string_constants.kServerUserConnectionProfileSuccess,
# "status": True,
# "HTTP_CODE": 200
# }
# if connection is not None:
# _response["connection"] = connection
if timeline is not None:
connection = User.getConnectionModelForReturn(timeline.connection, user_id, timeline_id, timeline.video_count, False)
if connection is not None:
session.close()
response = jsonify(message=string_constants.kServerUserConnectionProfileSuccess,
status=True,
connection=connection,
HTTP_CODE=200
)
response.status_code = 200
return response
session.close()
response = jsonify(message=string_constants.kServerUserConnectionProfileError,
status=False,
HTTP_CODE=200
)
response.status_code = 200
return response
# Get a limited profile for a user
# Required Params:
# user_id - string
# user - string
# access_token - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/remove_connection/', methods=["POST"])
def removeConnection():
req = flask.request.get_json()['params']
user_id = req['user_id']
connection_id = req['connection_id']
access_token = req['access_token']
session = database.DBSession()
if user_id is None and access_token is None and connection_id is None:
return authorized.wrongParams()
# Check if the user is allowed to access this method
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
connection_exists = session.query(exists().where(Connection.connection_id == connection_id)).scalar()
if connection_exists is not None:
try:
connection = session.query(Connection).filter(Connection.connection_id == connection_id).first()
connection.approved = 0
connection.disabled = 1
session.commit()
except exc.SQLAlchemyError as e:
session.close()
response = jsonify( message=string_constants.kServerGeneric500Error,
status=True,
HTTP_CODE=500
)
response.status_code = 500
return response
session.close()
response = jsonify( message=string_constants.kServerUserConnectionRemoveProfileSuccess,
status=True,
HTTP_CODE=200
)
response.status_code = 200
return response
else:
session.close()
response = jsonify( message=string_constants.kServerUserConnectionRemoveProfileFailure,
status=False,
HTTP_CODE=200
)
response.status_code = 200
return response
@app.route('/api/'+app.config["API_VERSION"]+'/user/limited/', methods=["GET"])
def getLimitedProfile():
import video
req = flask.request.args
user_id = req['user_id']
access_token = req['access_token']
user = req['user']
session = database.DBSession()
if user_id is None and access_token is None or user is None:
return authorized.wrongParams()
# Check if the user is allowed to access this method
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
limited_user = session.query(UserModel).filter(UserModel.user_id == user).first()
# Get the count of the users video list
limited_users_video_count = session.query(video.VideoModel).filter(video.VideoModel.user == user).count()
# Get the count of the users connection list
connection_list_query_a = session.query(Connection).filter(Connection.user1 == user).filter(
Connection.approved == 1)
connection_list_query_b = session.query(Connection).filter(Connection.user2 == user).filter(
Connection.approved == 1)
limited_users_connection_list_count = connection_list_query_a.union(connection_list_query_b).count()
if limited_user is not None:
session.close()
response = jsonify(message=string_constants.kServerUserLimitedProfileSuccess,
status=True,
HTTP_CODE=200,
user=User.getSerializedLimitedUserModel(limited_user, limited_users_video_count,
limited_users_connection_list_count)
)
response.status_code = 200
return response
else:
session.close()
response = jsonify(message=string_constants.kServerUserLimitedProfileError,
user=None,
status=False,
HTTP_CODE=200
)
response.status_code = 200
return response
# Search endpoint for finding users by display_name
# Required Params:
# user_id - string
# search_query - string
# access_token - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/search/', methods=["GET"])
def searchUsers():
req = flask.request.args
user_id = req['user_id']
access_token = req['access_token']
search_query = req['search_query'].strip().replace("%", "").replace("_", "").replace("?", "").replace("*", "")
session = database.DBSession()
if user_id is None and access_token is None or search_query is None or search_query == '':
return authorized.wrongParams()
# Check if the user is allowed to access this method
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
connection_list_query_a = session.query(Connection.user2).filter(Connection.user1 == user_id).filter(
Connection.approved == 1).filter(Connection.disabled == 0)
connection_list_query_b = session.query(Connection.user1).filter(Connection.user2 == user_id).filter(
Connection.approved == 1).filter(Connection.disabled == 0)
connection_list_query_c = session.query(Connection.user1).filter(Connection.user1 != user_id).filter(Connection.user2 == user_id).filter(
Connection.approved == 0).filter(Connection.disabled == 0)
connection_list = connection_list_query_a.union(connection_list_query_b).union(connection_list_query_c).all()
friends = []
for friend in connection_list:
friends.append(friend[0])
if len(friends) <= 0:
friends.append(-1)
filter_search = UserModel.display_name.startswith(search_query)
if len(search_query) > 1:
search_query = '%{0}%'.format(search_query)
filter_search = UserModel.display_name.ilike(search_query)
search_results = session.query(UserModel).filter(filter_search).filter(UserModel.deactivated == 0).filter(not_(UserModel.user_id.in_(friends))).filter(UserModel.user_id != user_id).all() #TODO maybe add pagination support
search_results_r = []
if search_results is not None:
for returned_user in search_results:
search_results_r.append(User.getSerializedLimitedUserModel(returned_user))
session.close()
response = jsonify(message=string_constants.kServerUserSearchResponse,
users=search_results_r,
status=True,
HTTP_CODE=200
)
response.status_code = 200
return response
# Allow user to select a new password
# Required Params:
# username - username
# token - string
# new_password - string
@app.route('/api/'+app.config["API_VERSION"]+'/user/reset_password/confirm', methods=["GET"])
def resetPasswordConfirmation():
req = flask.request.args
username = req['username']
token = req['token']
new_password = req['new_password']
device_token = req['device_token']
request_timestamp = req['request_timestamp']
version = app.config["API_VERSION"]
if username is None and token is None:
return authorized.wrongParams()
session = database.DBSession()
user = session.query(UserModel).filter(UserModel.username == username).first()
if new_password == '':
session.close()
return flask.render_template(string_constants.kResetPasswordTemplateName, username=username, token=token, confirm=False, date=datetime.utcnow().strftime('%Y'), device_token=device_token, request_timestamp=request_timestamp, version=version)
elif user is None:
session.close()
return flask.render_template(string_constants.kResetPasswordTemplateName, username=username, token=token, confirm=False,
confirm_message=string_constants.kResetPasswordError, date=datetime.utcnow().strftime('%Y'), device_token=device_token, request_timestamp=request_timestamp, version=version)
elif token != hashlib.sha256(user.username + str(0) + user.password_salt).hexdigest():
session.close()
return flask.render_template(string_constants.kResetPasswordTemplateName, username=username, token=token, confirm=False,
confirm_message=string_constants.kResetPasswordError, date=datetime.utcnow().strftime('%Y'), device_token=device_token, request_timestamp=request_timestamp, version=version)
else:
new_password = req['new_password']
reset_check = session.query(UserModel).filter(UserModel.username == username).first().password_reset
if user is not None and reset_check == 1:
if token == hashlib.sha256(user.username + str(0) + user.password_salt).hexdigest():
salt = hashlib.sha256(str(time.time() * random.randint(1, 9999999))).hexdigest()
password_hash = hashlib.sha256(str(new_password) + str(salt)).hexdigest()
session.query(UserModel).filter(UserModel.username == username).update({'password_reset': 0,
'password': password_hash,
'password_salt': salt},
synchronize_session='fetch')
active_user = session.query(ActiveUser).filter(ActiveUser.user_id == user.user_id).first()
if active_user is not None:
session.delete(active_user)
session.commit()
session.close()
return flask.render_template(string_constants.kResetPasswordTemplateName, confirm=True,
confirm_message=string_constants.kResetPasswordConfirmation, date=datetime.utcnow().strftime('%Y'), device_token=device_token, request_timestamp=request_timestamp, version=version)
else:
session.close()
return flask.render_template(string_constants.kResetPasswordTemplateName, username=username, token=token, confirm=False,
confirm_message=string_constants.kResetPasswordError, date=datetime.utcnow().strftime('%Y'), device_token=device_token, request_timestamp=request_timestamp, version=version)
# Reset password using email address and username
# Required Params:
# username - username
@app.route('/api/'+app.config["API_VERSION"]+'/user/reset_password/', methods=["POST"])
def resetPassword():
req = flask.request.get_json()['params']
username = req['username']
device_token = req["device_token"]
request_timestamp = req["request_timestamp"]
session = database.DBSession()
if username is None:
return authorized.wrongParams()
user = session.query(UserModel).filter(UserModel.username == username).first()
if user is None:
response = jsonify(message=string_constants.kServerUserPasswordResetUserDoesNotExistRequestError,
status=False,
HTTP_CODE=200
)
response.status_code = 200
return response
if user.password_reset == 1:
session.close()
response = jsonify(message=string_constants.kServerUserPasswordResetRequestAlreadyPresent,
status=False,
HTTP_CODE=200
)
response.status_code = 200
return response
else:
email_handling.send_email(username,
string_constants.kEmailResetResponseMessage % string_constants.kResetPasswordLink % (
app.config['API_ADDRESS'], username,
hashlib.sha256(username + str(user.password_reset) + user.password_salt).hexdigest(),
device_token,
request_timestamp),
string_constants.kResetPasswordSubject,
string_constants.kEmailTypeReset)
session.query(UserModel).filter(UserModel.username == username).update({'password_reset': 1},
synchronize_session='fetch')
session.commit()
session.close()
# temp putting password in response
response = jsonify(message=string_constants.kServerUserPasswordRequestSentSuccess,
status=True,
HTTP_CODE=200
)
response.status_code = 200
return response
# Creating a Receipt
# Required Params:
# user_id - string
# access_token - string
# receipt_timestamp - int
# receipt_data - string
# receipt_product_id - string
@app.route('/api/'+app.config["API_VERSION"]+'/receipts/', methods=["POST"])
def createReceipt():
req = flask.request.get_json()['params']
user_id = req['user_id']
access_token = req['access_token']
receipt_data = req['receiptData']
receipt_date = req['receiptDate']
receipt_product_id = req['receiptProductID']
session = database.DBSession()
if user_id is None and access_token is None and receipt_data is None and receipt_date is None and receipt_product_id is None:
return authorized.wrongParams()
# Check if the user is allowed to access this method
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
receipt = Receipt(receipt_data, receipt_date, user_id, receipt_product_id)
try:
session.add(receipt)
session.commit()
except exc.SQLAlchemyError:
session.close()
response = jsonify( message=string_constants.kServerGeneric500Error,
status=True,
HTTP_CODE=500
)
response.status_code = 500
return response
session.close()
response = jsonify( message=string_constants.kServerReceiptsPostResponse,
status=True,
HTTP_CODE=200
)
response.status_code = 200
return response
# Receipts for User
# Required Params:
# user_id - string
# access_token - string
@app.route('/api/'+app.config["API_VERSION"]+'/receipts/retrieve', methods=["GET"])
def receiptsForUser():
req = flask.request.args
user_id = req['user_id']
access_token = req['access_token']
session = database.DBSession()
if user_id is None and access_token is None:
return authorized.wrongParams()
# Check if the user is allowed to access this method
allowed = authorized.authorized(user_id, access_token, session)
if allowed is not True:
session.close()
return allowed
receipts = session.query(Receipt).filter(Receipt.receipt_user_id == int(user_id)).all()
receipts_return = []
if len(receipts) > 0:
for receipt in receipts:
receipts_return.append({
'receipt_product_id': receipt.receipt_product_id,
'receipt_date': receipt.receipt_date,
'receipt_data': receipt.receipt_data
})
session.close()
response = jsonify(message=string_constants.kServerReceiptsResponse,
receipts=receipts_return,
status=True,
HTTP_CODE=200
)
response.status_code = 200
return response
# Get limited userModel ready for JSON for user
# Required Params:
# user - UserModel
@classmethod
def getSerializedLimitedUserModel(cls, user, videoCount=None, connectionCount=None):
return {'user_id': user.user_id, # **
'profile_image': User.getProfileImage(user.user_id),
'connections': None, # *
'display_name': user.display_name,
'deactivated': user.deactivated, # *
'video_count': videoCount, # *
'connection_count': connectionCount # *
# * = #No need for this as this is a limited profile
# ** = Not shown in search results but used to grab full profile if the user searching clicks on this user
}
@classmethod
def getConnectionModelForReturn(cls, connection, username, timeline_id, video_count, new_connection):
if connection.approved == 0 or connection.disabled == 1:
return None
user_model = int(connection.user1) != int(username) and connection.user1_model or connection.user2_model
connection_r = {
'friend': User.getSerializedUserModel(user_model),
'connection_id': connection.connection_id,
'start_date': connection.start_date,
'timeline_id': timeline_id,
'video_count': video_count,
'new_connection': new_connection
}
return connection_r
# Get userModel ready for JSON for user
# Required Params:
# user - UserModel
@classmethod
def getSerializedUserModel(cls, user):
return {'user_id': user.user_id,
'profile_image': User.getProfileImage(user.user_id),
'connections': None, # *
'display_name': user.display_name,
'deactivated': user.deactivated
# * = #No need as this is only ever called from the connection endpoint
}
# Get userModel for the given user_id
# Required Params:
# user_id - string
# session - DBSession
@classmethod
def userObjectForUsername(cls, user_id, session, forJSON):
if user_id is not None and user_id != '':
user = session.query(UserModel).filter(UserModel.user_id == user_id).first()
if user is not None:
if forJSON is True:
return User.getSerializedUserModel(user)
else:
return user
else:
return None
else:
return None
@classmethod
def getUserImagePath(cls):
return app.config["STATIC_FILES_FOLDER"] + "%s"
@classmethod
def getUserPath(cls):
return app.config["STATIC_FILES_FOLDER"] + "/users/%s/"
# Get profile picture for user
# Required Params:
# profile_image_id - string
# user_id - string
@classmethod
def getProfileImage(cls, user_id):
path = User.getProfileImageDirectory(user_id)+'profileImage'
return path + '.jpg'
@classmethod
def getProfileImageDirectory(cls, user_id):
path = app.config["STATIC_FILES_FOLDER"] + '/users/' + str(user_id) + '/profileImage/'
return path
# Updates the Expiry Date of an access_token to be 7 days
# If the user doesn't use the app at least once every 7 days they will have to re-login
@classmethod
def updateTokenExpiryDate(cls, expiryDate):
belowNextMonth = True
if expiryDate.month == 2:
if expiryDate.day + 7 > 28:
belowNextMonth = False
expiryDate = expiryDate.replace(month=3, day=1)
if expiryDate.month == 4 or expiryDate.month == 6 or expiryDate.month == 9 or expiryDate.month == 11:
if expiryDate.day + 7 > 30:
belowNextMonth = False
expiryDate = expiryDate.replace(month=expiryDate.month + 1, day=1)
if expiryDate.month == 1 or expiryDate.month == 3 or expiryDate.month == 5 or expiryDate.month == 7 or expiryDate.month == 8 or expiryDate.month == 10 or expiryDate.month == 12:
if expiryDate.day + 7 > 31:
if expiryDate.month == 12:
belowNextMonth = False
expiryDate = expiryDate.replace(year=expiryDate.year + 1, month=1, day=1)
else:
belowNextMonth = False
expiryDate = expiryDate.replace(month=expiryDate.month + 1, day=1)
if belowNextMonth == True:
expiryDate = expiryDate.replace(day=expiryDate.day + 7)
return expiryDate
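    # Illustrative alternative (hypothetical helper, not wired up anywhere): the
    # manual rollover above resets the day to 1 when the 7-day bump crosses a
    # month boundary and ignores leap years; datetime.timedelta handles both.
    @classmethod
    def _updateTokenExpiryDateSketch(cls, expiryDate):
        from datetime import timedelta  # local import so the sketch stays self-contained
        return expiryDate + timedelta(days=7)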
|
from onegov.election_day import ElectionDayApp
from onegov.election_day.layouts import ManageLayout
from onegov.election_day.models import Principal
@ElectionDayApp.manage_html(
model=Principal,
name='provoke_error',
template='manage/provoke_error.pt'
)
def view_provoke_error(self, request):
""" Provokes a JavaScript Error for testing.
This view is not linked anywhere.
"""
return {
'layout': ManageLayout(self, request)
}
|
from abc import ABC, abstractmethod
class Motorcycle(ABC):
@abstractmethod
def useful_function_b(self) -> str:
pass
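# Minimal concrete implementation sketch (illustrative; the subclass name and
# return value are assumptions, not part of the original interface):
class SportBike(Motorcycle):
    def useful_function_b(self) -> str:
        return "SportBike: useful_function_b"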
|
"""
In order to facilitate data exchange between nodes written in potentially
different languages we use these intermediate data types to encode
to/from the data types used inside of the nodes.
This submodule contains the following user-facing parts:
- Primitive data types:
:py:class:`Bool`
:py:class:`Char`
:py:class:`Int`
:py:class:`UInt`
:py:class:`Float`
:py:class:`Complex`
- Compound data types:
:py:class:`Array`
:py:class:`Str`
:py:class:`Tuple`
:py:class:`Record`
- Special data types:
:py:class:`Top`
:py:class:`Raw`
:py:class:`Union`
:py:class:`Size`
:py:class:`Optional`
:py:class:`Void`
- Auxiliary functions:
:py:func:`as_delta_type`
:py:func:`delta_type`
For convenience the classes and functions listed above are also exposed to
the top level module and can be used like this:
.. code-block:: python
import deltalanguage as dl
print(dl.Bool())
print(dl.delta_type(5))
More information about this submodule as well as examples of use can be
found in `Deltaflow Data Types <tutorials/data_types.html>`_.
"""
from ._delta_types import (BaseDeltaType,
PrimitiveDeltaType,
CompoundDeltaType,
Top,
Bool,
Char,
Complex,
Float,
Int,
Array,
Optional,
Raw,
Record,
Str,
Tuple,
UInt,
Union,
as_delta_type,
delta_type)
from ._exceptions import DeltaIOError, DeltaTypeError
from ._special import Size, Void
# user-facing classes
__all__ = ["Top",
"Bool",
"Char",
"Int",
"UInt",
"Float",
"Complex",
"Array",
"Str",
"Tuple",
"Record",
"Raw",
"Top",
"Size",
"Union",
"Optional",
"Void",
"as_delta_type",
"delta_type"]
|
class Solution:
def subarrayBitwiseORs(self, A):
"""
:type A: List[int]
:rtype: int
"""
        # `cur` holds the bitwise ORs of all subarrays ending at the current
        # element; `res` accumulates every distinct OR value seen so far.
        res, cur = set(), set()
        for x in A:
            # extend each previous subarray's OR with x, plus the single-element subarray [x]
            cur = {x | y for y in cur} | {x}
            res |= cur
return len(res)
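# Quick check (illustrative): for A = [1, 1, 2] the distinct subarray ORs are
# {1, 2, 3}, so the method returns 3.
if __name__ == "__main__":
    assert Solution().subarrayBitwiseORs([1, 1, 2]) == 3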
|
from .main import parent_child
|
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Metadata:
"""
Default metadata for an object.
Attributes:
- object_id
- object_name
- object_reference
- object_reference_versioned
- type_string
- save_date
- version
- saved_by
- workspace_id
- workspace_name
- object_checksum
- object_size
- object_metadata
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'object_id', None, None, ), # 1
(2, TType.STRING, 'object_name', None, None, ), # 2
(3, TType.STRING, 'object_reference', None, None, ), # 3
(4, TType.STRING, 'object_reference_versioned', None, None, ), # 4
(5, TType.STRING, 'type_string', None, None, ), # 5
(6, TType.STRING, 'save_date', None, None, ), # 6
(7, TType.STRING, 'version', None, None, ), # 7
(8, TType.STRING, 'saved_by', None, None, ), # 8
(9, TType.I64, 'workspace_id', None, None, ), # 9
(10, TType.STRING, 'workspace_name', None, None, ), # 10
(11, TType.STRING, 'object_checksum', None, None, ), # 11
(12, TType.I64, 'object_size', None, None, ), # 12
(13, TType.STRING, 'object_metadata', None, None, ), # 13
)
def __init__(self, object_id=None, object_name=None, object_reference=None, object_reference_versioned=None, type_string=None, save_date=None, version=None, saved_by=None, workspace_id=None, workspace_name=None, object_checksum=None, object_size=None, object_metadata=None,):
self.object_id = object_id
self.object_name = object_name
self.object_reference = object_reference
self.object_reference_versioned = object_reference_versioned
self.type_string = type_string
self.save_date = save_date
self.version = version
self.saved_by = saved_by
self.workspace_id = workspace_id
self.workspace_name = workspace_name
self.object_checksum = object_checksum
self.object_size = object_size
self.object_metadata = object_metadata
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.object_id = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.object_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.object_reference = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.object_reference_versioned = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.type_string = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.save_date = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.version = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.saved_by = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.I64:
self.workspace_id = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.STRING:
self.workspace_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.STRING:
self.object_checksum = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.I64:
self.object_size = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 13:
if ftype == TType.STRING:
self.object_metadata = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Metadata')
if self.object_id is not None:
oprot.writeFieldBegin('object_id', TType.STRING, 1)
oprot.writeString(self.object_id)
oprot.writeFieldEnd()
if self.object_name is not None:
oprot.writeFieldBegin('object_name', TType.STRING, 2)
oprot.writeString(self.object_name)
oprot.writeFieldEnd()
if self.object_reference is not None:
oprot.writeFieldBegin('object_reference', TType.STRING, 3)
oprot.writeString(self.object_reference)
oprot.writeFieldEnd()
if self.object_reference_versioned is not None:
oprot.writeFieldBegin('object_reference_versioned', TType.STRING, 4)
oprot.writeString(self.object_reference_versioned)
oprot.writeFieldEnd()
if self.type_string is not None:
oprot.writeFieldBegin('type_string', TType.STRING, 5)
oprot.writeString(self.type_string)
oprot.writeFieldEnd()
if self.save_date is not None:
oprot.writeFieldBegin('save_date', TType.STRING, 6)
oprot.writeString(self.save_date)
oprot.writeFieldEnd()
if self.version is not None:
oprot.writeFieldBegin('version', TType.STRING, 7)
oprot.writeString(self.version)
oprot.writeFieldEnd()
if self.saved_by is not None:
oprot.writeFieldBegin('saved_by', TType.STRING, 8)
oprot.writeString(self.saved_by)
oprot.writeFieldEnd()
if self.workspace_id is not None:
oprot.writeFieldBegin('workspace_id', TType.I64, 9)
oprot.writeI64(self.workspace_id)
oprot.writeFieldEnd()
if self.workspace_name is not None:
oprot.writeFieldBegin('workspace_name', TType.STRING, 10)
oprot.writeString(self.workspace_name)
oprot.writeFieldEnd()
if self.object_checksum is not None:
oprot.writeFieldBegin('object_checksum', TType.STRING, 11)
oprot.writeString(self.object_checksum)
oprot.writeFieldEnd()
if self.object_size is not None:
oprot.writeFieldBegin('object_size', TType.I64, 12)
oprot.writeI64(self.object_size)
oprot.writeFieldEnd()
if self.object_metadata is not None:
oprot.writeFieldBegin('object_metadata', TType.STRING, 13)
oprot.writeString(self.object_metadata)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.object_id)
value = (value * 31) ^ hash(self.object_name)
value = (value * 31) ^ hash(self.object_reference)
value = (value * 31) ^ hash(self.object_reference_versioned)
value = (value * 31) ^ hash(self.type_string)
value = (value * 31) ^ hash(self.save_date)
value = (value * 31) ^ hash(self.version)
value = (value * 31) ^ hash(self.saved_by)
value = (value * 31) ^ hash(self.workspace_id)
value = (value * 31) ^ hash(self.workspace_name)
value = (value * 31) ^ hash(self.object_checksum)
value = (value * 31) ^ hash(self.object_size)
value = (value * 31) ^ hash(self.object_metadata)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class History:
"""
Object history.
Attributes:
- events
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'events', (TType.STRING,None), None, ), # 1
)
def __init__(self, events=None,):
self.events = events
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.events = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = iprot.readString();
self.events.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('History')
if self.events is not None:
oprot.writeFieldBegin('events', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.events))
for iter6 in self.events:
oprot.writeString(iter6)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.events)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Provenance:
"""
Object provenance.
Attributes:
- where_i_came_from
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'where_i_came_from', None, None, ), # 1
)
def __init__(self, where_i_came_from=None,):
self.where_i_came_from = where_i_came_from
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.where_i_came_from = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Provenance')
if self.where_i_came_from is not None:
oprot.writeFieldBegin('where_i_came_from', TType.STRING, 1)
oprot.writeString(self.where_i_came_from)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.where_i_came_from)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AuthInfo:
"""
Authorization info
Attributes:
- token
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'token', None, None, ), # 1
)
def __init__(self, token=None,):
self.token = token
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.token = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AuthInfo')
if self.token is not None:
oprot.writeFieldBegin('token', TType.STRING, 1)
oprot.writeString(self.token)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.token)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
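# Illustrative usage (not produced by the Thrift compiler; field values are
# placeholders): round-trip a Metadata struct through TBinaryProtocol over an
# in-memory transport.
if __name__ == '__main__':
    _buf = TTransport.TMemoryBuffer()
    _meta = Metadata(object_id='1', object_name='example', workspace_id=7)
    _meta.write(TBinaryProtocol.TBinaryProtocol(_buf))
    _decoded = Metadata()
    _decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(_buf.getvalue())))
    assert _decoded == _meta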
|
command = input()
all_language = {}
submissions = {}
max_points = {}
while command != "exam finished":
command = command.split("-")
# check for no banned
if len(command) == 3:
language = command[1]
username = command[0]
points = int(command[2])
# collect all submissions
submissions.setdefault(language, []).append(username)
# collect data
if language not in all_language:
all_language[language] = {}
all_language[language][username] = points
# add new name + points in existing language
if language in all_language and username not in all_language[language]:
all_language[language][username] = points
# separate only high scores
if points > all_language[language][username]:
all_language[language][username] = points
# check for banned
elif len(command) == 2:
username = command[0]
banned = command[1]
# remove banned
for key in all_language.keys():
if username in all_language[key]:
del all_language[key][username]
command = input()
# result with name + max points without language
for name_p in all_language.values():
for current_name, current_points in name_p.items():
max_points[current_name] = current_points
sorted_max_points = dict(sorted(max_points.items(), key=lambda x: (-x[1], x[0])))
print("Results:")
print("\n".join(f"{k} | {v}" for k, v in sorted_max_points.items()))
print("Submissions:")
print("\n".join(f"{lang} - {len(attempts)}" for lang, attempts in sorted(submissions.items())))
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#things to do
#1 comment and review
#imports
import numpy as np
import matplotlib.pyplot as plt
import lightkurve as lk
import tqdm as tq
from scipy.interpolate import interp1d
# In[ ]:
# In[2]:
#downloading the lightcurve file for our example star KIC 10685175
lcfc = lk.search_lightcurvefile("KIC 10685175",mission="Kepler").download_all()
lc = lcfc.PDCSAP_FLUX.stitch().remove_nans()
# In[3]:
# noise threshold helper: estimates the fraction of injection-recovery runs in which the injected
# frequency is correctly recovered at a given noise level (breaks out early once more than
# fap*max_runs runs have failed)
def noise_threshold(time,noise ,frequency , max_frequency, min_frequency = 0, fap = .01, max_runs= 1000):
#creating sinusoidal light curve
flux = np.sin(2*np.pi*frequency*time)
lc = lk.LightCurve(time,flux)
#lightcurve to frequency periodogram
per = lc.to_periodogram(nyquist_factor = 0.01)
nyquist = per.nyquist.value
frequency_candidates = []
min_factor = int(np.floor(min_frequency/nyquist))
max_factor = int(np.ceil(max_frequency/nyquist))
#picking out which peaks to sample in each nyquist 'zone'
for i in range(min_factor, max_factor):
per = lc.to_periodogram(minimum_frequency=i*nyquist,maximum_frequency=(i+1)*nyquist)
frequency_candidates.append(per.frequency_at_max_power.value)
frequency_candidates = np.array(frequency_candidates)
frequency_candidates = frequency_candidates[(frequency_candidates > min_frequency)&(frequency_candidates < max_frequency)]
    # sample only near the candidate peaks to improve efficiency
frequency_resolution = 1/(time[-1]-time[0])
n_samples = 41
local_sampling = np.linspace(-.1*frequency_resolution,.1*frequency_resolution,n_samples)
frequency_sample = np.zeros(n_samples*len(frequency_candidates))
for i in range(len(frequency_candidates)):
frequency_sample[i*n_samples:(i+1)*n_samples] = local_sampling + frequency_candidates[i]
results = np.zeros(max_runs)
max_incorrect_runs = (fap*max_runs)
incorrect_results = 0
percentage = 0
for i in tq.tqdm(range(max_runs)):
flux = np.sin(2*np.pi*(frequency*time + np.random.rand())) + np.random.randn(len(time))*noise
lc = lk.LightCurve(time,flux)
per = lc.to_periodogram(frequency=frequency_sample,ls_method="slow")
frequency_dif = np.abs(per.frequency_at_max_power.value - frequency)
results[i] = (frequency_dif < frequency_resolution)
if(frequency_dif > frequency_resolution):
incorrect_results += 1
if(incorrect_results > max_incorrect_runs):
break
percentage = np.sum(results)/(i+1)
return percentage
# In[4]:
lc = lcfc.PDCSAP_FLUX.stitch().remove_nans()
time = lc.time
print(time)
print(noise_threshold(time,60.3, 289.39094, 300, min_frequency = 50, max_runs=100))
# In[ ]:
|
from datetime import datetime, timedelta, timezone
import json
from .test_envelope import generate_transaction_item
TEST_CONFIG = {
"aggregator": {"bucket_interval": 1, "initial_delay": 0, "debounce_delay": 0,}
}
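# The payloads below use a statsd-like line format, "<name>[@<unit>]:<value>|<type>":
# e.g. "foo:42|c" is a counter named "foo" with value 42, and "bar@s:17|c" additionally
# carries the unit "s" (interpretation inferred from the assertions in these tests).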
def test_metrics(mini_sentry, relay):
relay = relay(mini_sentry, options=TEST_CONFIG)
project_id = 42
mini_sentry.add_basic_project_config(project_id)
timestamp = int(datetime.now(tz=timezone.utc).timestamp())
metrics_payload = f"foo:42|c\nbar:17|c"
relay.send_metrics(project_id, metrics_payload, timestamp)
envelope = mini_sentry.captured_events.get(timeout=2)
assert len(envelope.items) == 1
metrics_item = envelope.items[0]
assert metrics_item.type == "metric_buckets"
received_metrics = metrics_item.get_bytes()
assert json.loads(received_metrics.decode()) == [
{"timestamp": timestamp, "name": "foo", "value": 42.0, "type": "c"},
{"timestamp": timestamp, "name": "bar", "value": 17.0, "type": "c"},
]
def test_metrics_backdated(mini_sentry, relay):
relay = relay(mini_sentry, options=TEST_CONFIG)
project_id = 42
mini_sentry.add_basic_project_config(project_id)
timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - 24 * 60 * 60
metrics_payload = f"foo:42|c"
relay.send_metrics(project_id, metrics_payload, timestamp)
envelope = mini_sentry.captured_events.get(timeout=2)
assert len(envelope.items) == 1
metrics_item = envelope.items[0]
assert metrics_item.type == "metric_buckets"
received_metrics = metrics_item.get_bytes()
assert json.loads(received_metrics.decode()) == [
{"timestamp": timestamp, "name": "foo", "value": 42.0, "type": "c"},
]
def test_metrics_with_processing(mini_sentry, relay_with_processing, metrics_consumer):
relay = relay_with_processing(options=TEST_CONFIG)
metrics_consumer = metrics_consumer()
project_id = 42
mini_sentry.add_full_project_config(project_id)
timestamp = int(datetime.now(tz=timezone.utc).timestamp())
metrics_payload = f"foo:42|c\nbar@s:17|c"
relay.send_metrics(project_id, metrics_payload, timestamp)
metric = metrics_consumer.get_metric()
assert metric == {
"org_id": 1,
"project_id": project_id,
"name": "foo",
"unit": "",
"value": 42.0,
"type": "c",
"timestamp": timestamp,
}
metric = metrics_consumer.get_metric()
assert metric == {
"org_id": 1,
"project_id": project_id,
"name": "bar",
"unit": "s",
"value": 17.0,
"type": "c",
"timestamp": timestamp,
}
metrics_consumer.assert_empty()
def test_metrics_full(mini_sentry, relay, relay_with_processing, metrics_consumer):
metrics_consumer = metrics_consumer()
upstream_config = {
"aggregator": {
"bucket_interval": 1,
"initial_delay": 2, # Give upstream some time to process downstream entries:
"debounce_delay": 0,
}
}
upstream = relay_with_processing(options=upstream_config)
downstream = relay(upstream, options=TEST_CONFIG)
# Create project config
project_id = 42
mini_sentry.add_full_project_config(project_id)
# Send two events to downstream and one to upstream
timestamp = int(datetime.now(tz=timezone.utc).timestamp())
downstream.send_metrics(project_id, f"foo:7|c", timestamp)
downstream.send_metrics(project_id, f"foo:5|c", timestamp)
upstream.send_metrics(project_id, f"foo:3|c", timestamp)
metric = metrics_consumer.get_metric(timeout=4)
metric.pop("timestamp")
assert metric == {
"org_id": 1,
"project_id": project_id,
"name": "foo",
"unit": "",
"value": 15.0,
"type": "c",
}
metrics_consumer.assert_empty()
def test_session_metrics_feature_disabled(mini_sentry, relay):
relay = relay(mini_sentry, options=TEST_CONFIG)
project_id = 42
mini_sentry.add_basic_project_config(project_id)
timestamp = datetime.now(tz=timezone.utc)
started = timestamp - timedelta(hours=1)
session_payload = {
"sid": "8333339f-5675-4f89-a9a0-1c935255ab58",
"did": "foobarbaz",
"seq": 42,
"init": True,
"timestamp": timestamp.isoformat(),
"started": started.isoformat(),
"duration": 1947.49,
"status": "exited",
"errors": 0,
"attrs": {"release": "sentry-test@1.0.0", "environment": "production",},
}
relay.send_session(project_id, session_payload)
# Get session envelope
mini_sentry.captured_events.get(timeout=2)
# Get metrics envelope
assert mini_sentry.captured_events.empty()
def test_session_metrics(mini_sentry, relay_with_processing, metrics_consumer):
relay = relay_with_processing(options=TEST_CONFIG)
project_id = 42
mini_sentry.add_full_project_config(project_id)
metrics_consumer = metrics_consumer()
mini_sentry.project_configs[project_id]["config"]["features"] = [
"organizations:metrics-extraction"
]
timestamp = datetime.now(tz=timezone.utc)
started = timestamp - timedelta(hours=1)
session_payload = {
"sid": "8333339f-5675-4f89-a9a0-1c935255ab58",
"did": "foobarbaz",
"seq": 42,
"init": True,
"timestamp": timestamp.isoformat(),
"started": started.isoformat(),
"duration": 1947.49,
"status": "exited",
"errors": 0,
"attrs": {"release": "sentry-test@1.0.0", "environment": "production",},
}
relay.send_session(project_id, session_payload)
metric = metrics_consumer.get_metric()
assert metric == {
"org_id": 1,
"project_id": 42,
"timestamp": int(timestamp.timestamp()),
"name": "session",
"type": "c",
"unit": "",
"value": 1.0,
"tags": {
"environment": "production",
"release": "sentry-test@1.0.0",
"session.status": "init",
},
}
metric = metrics_consumer.get_metric()
assert metric == {
"org_id": 1,
"project_id": 42,
"timestamp": int(timestamp.timestamp()),
"name": "user",
"type": "s",
"unit": "",
"value": [1617781333],
"tags": {
"environment": "production",
"release": "sentry-test@1.0.0",
"session.status": "init",
},
}
metric = metrics_consumer.get_metric()
assert metric == {
"org_id": 1,
"project_id": 42,
"timestamp": int(timestamp.timestamp()),
"name": "session.duration",
"type": "d",
"unit": "s",
"value": [1947.49],
"tags": {"environment": "production", "release": "sentry-test@1.0.0",},
}
metrics_consumer.assert_empty()
def test_transaction_metrics(mini_sentry, relay_with_processing, metrics_consumer):
metrics_consumer = metrics_consumer()
for feature_enabled in (True, False):
relay = relay_with_processing(options=TEST_CONFIG)
project_id = 42
mini_sentry.add_full_project_config(project_id)
timestamp = datetime.now(tz=timezone.utc)
mini_sentry.project_configs[project_id]["config"]["features"] = (
["organizations:metrics-extraction"] if feature_enabled else []
)
transaction = generate_transaction_item()
transaction["timestamp"] = timestamp.isoformat()
transaction["measurements"] = {
"foo": {"value": 1.2},
"bar": {"value": 1.3},
}
transaction["breakdowns"] = {"breakdown1": {"baz": {"value": 1.4},}}
relay.send_event(42, transaction)
# Send another transaction:
transaction["measurements"] = {
"foo": {"value": 2.2},
}
transaction["breakdowns"] = {"breakdown1": {"baz": {"value": 2.4},}}
relay.send_event(42, transaction)
if not feature_enabled:
message = metrics_consumer.poll(timeout=None)
assert message is None, message.value()
continue
metrics = {
metric["name"]: metric
for metric in [metrics_consumer.get_metric() for _ in range(3)]
}
metrics_consumer.assert_empty()
assert "measurement.foo" in metrics
assert metrics["measurement.foo"] == {
"org_id": 1,
"project_id": 42,
"timestamp": int(timestamp.timestamp()),
"name": "measurement.foo",
"type": "d",
"unit": "",
"value": [1.2, 2.2],
}
assert metrics["measurement.bar"] == {
"org_id": 1,
"project_id": 42,
"timestamp": int(timestamp.timestamp()),
"name": "measurement.bar",
"type": "d",
"unit": "",
"value": [1.3],
}
assert metrics["breakdown.breakdown1.baz"] == {
"org_id": 1,
"project_id": 42,
"timestamp": int(timestamp.timestamp()),
"name": "breakdown.breakdown1.baz",
"type": "d",
"unit": "",
"value": [1.4, 2.4],
}
|
from .ahk_binding import AhkBinding
|
#!/usr/bin/env python3
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GObject
import array
DBusGMainLoop(set_as_default=True)
def get_ble():
return dbus.Interface(dbus.SystemBus().get_object("com.devicehive.bluetooth", '/com/devicehive/bluetooth'), "com.devicehive.bluetooth")
ble = get_ble()
def device_discovered(mac, name, rssi):
if (name == 'SATECHILED-0'):
print("Discovered %s (%s) " % (mac, name))
ble.ScanStop(ignore_reply=True)
ble.Disconnect(mac, ignore_reply=True)
ble.Connect(mac, False, ignore_reply=True)
def device_connected(mac):
print("Connected to %s" % (mac))
try:
ble.GattWrite(mac, "fff3", "0f0d0300ffffff0000c800c8000091ffff", ignore_reply=True)
# ble.GattWrite(mac, "fff3", "0f0d0300ffffffc800c800c8000059ffff", ignore_reply=True)
except dbus.DBusException as e:
print(e)
def main():
print('Scanning ...')
ble.connect_to_signal("PeripheralDiscovered", device_discovered)
ble.connect_to_signal("PeripheralConnected", device_connected)
ble.ScanStart(ignore_reply=True)
GObject.MainLoop().run()
if __name__ == '__main__':
main()
|
from django.contrib import messages
from django.contrib.auth import get_user_model, login, logout
from django.contrib.auth.views import LoginView
from django.shortcuts import HttpResponseRedirect
from django.urls import reverse_lazy
from django.views.generic import TemplateView, RedirectView
from .forms import UserRegistrationForm, UserAddressForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import CreateView, ListView
from django.http import HttpResponse
from django.shortcuts import render
from django.template import Context, Template
User = get_user_model()
class UserRegistrationView(TemplateView):
model = User
form_class = UserRegistrationForm
template_name = 'accounts/user_registration.html'
def dispatch(self, request, *args, **kwargs):
if self.request.user.is_authenticated:
return HttpResponseRedirect(
reverse_lazy('transactions:transaction_report')
)
return super().dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
registration_form = UserRegistrationForm(self.request.POST)
address_form = UserAddressForm(self.request.POST)
if registration_form.is_valid() and address_form.is_valid():
user = registration_form.save()
address = address_form.save(commit=False)
address.user = user
address.save()
login(self.request, user)
messages.success(
self.request,
(
f'Thank You For Creating A Bank Account. '
f'Your Account Number is {user.account.account_no}. '
)
)
return HttpResponseRedirect(
reverse_lazy('transactions:deposit_money')
)
return self.render_to_response(
self.get_context_data(
registration_form=registration_form,
address_form=address_form
)
)
def get_context_data(self, **kwargs):
if 'registration_form' not in kwargs:
kwargs['registration_form'] = UserRegistrationForm()
if 'address_form' not in kwargs:
kwargs['address_form'] = UserAddressForm()
return super().get_context_data(**kwargs)
class UserLoginView(LoginView):
template_name='accounts/user_login.html'
redirect_authenticated_user = True
class LogoutView(RedirectView):
pattern_name = 'home'
def get_redirect_url(self, *args, **kwargs):
if self.request.user.is_authenticated:
logout(self.request)
return super().get_redirect_url(*args, **kwargs)
class UserDetails(LoginRequiredMixin, ListView):
template_name = 'accounts/user_details.html'
form_data = {}
def get(self, request, *args, **kwargs):
# import pdb
# pdb.set_trace()
c_user = self.request.user
# form = TransactionDateRangeForm(request.GET or None)
# if form.is_valid():
# self.form_data = form.cleaned_data
args = {}
args['name'] = c_user.first_name + ' ' + c_user.last_name
args['email'] = c_user
args['phone'] = "20111435465"
args['bdate'] = c_user.account.birth_date
args['acc_no'] = c_user.account.account_no
args['card'] = "22344532212"
args['balance'] = c_user.account.balance
return render(request, 'accounts/user_details.html', args)
# def get_queryset(self):
# queryset = super().get_queryset().filter(
# account=self.request.user.account
# )
# daterange = self.form_data.get("daterange")
# if daterange:
# queryset = queryset.filter(timestamp__date__range=daterange)
# return queryset.distinct()
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# context.update({
# 'account': self.request.user.account,
# 'form': TransactionDateRangeForm(self.request.GET or None)
# })
|
# Copyright (c) 2021 Jana Darulova
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import logging
from nanotune.device.device_channel import DeviceChannel
from typing import Mapping, Optional, Sequence
from nanotune.device.device import Device
from nanotune.device_tuner.tuner import (Tuner, set_back_voltages,
DataSettings, SetpointSettings, Classifiers)
from nanotune.device_tuner.tuningresult import MeasurementHistory
logger = logging.getLogger(__name__)
class Characterizer(Tuner):
"""Tuner sub-class specializing on device characterization.
Parameters:
        classifiers (Classifiers): a settings.Classifiers instance
            holding all required classifiers, e.g. pinchoff.
data_settings (DataSettings): A settings.DataSettings instance with
data related information such as `db_name` and
`normalization_constants`.
setpoint_settings (SetpointSettings): A settings.SetpointSettings
instance with setpoint related information such as
`voltage_precision`.
tuning_history (TuningHistory): A TuningHistory instance holding all
tuning results.
"""
def __init__(
self,
name: str,
data_settings: DataSettings,
classifiers: Classifiers,
setpoint_settings: SetpointSettings,
) -> None:
super().__init__(
name,
data_settings,
classifiers,
setpoint_settings,
)
def characterize(
self,
device: Device,
skip_gates: Optional[Sequence[DeviceChannel]] = None,
gate_configurations: Optional[
Mapping[int, Mapping[int, float]]] = None,
) -> MeasurementHistory:
"""Characterizes a device by characterizing each gate individually.
Specific gates can be skipped, eg. the top barrier of a 2DEG device.
Args:
device (nt.Device): device to tune.
skip_gates (Sequence[DeviceChannel]): optional list of gates which
should not be characterized.
gate_configurations (Dict[int, Dict[int, float]]): optional gate
voltage combinations at which gates should be characterized.
                Maps gate IDs of gates to characterize onto dictionaries, which
in turn map gate IDs of gates to set to their respective
voltages.
Returns:
MeasurementHistory: Collection of all tuning results.
"""
if gate_configurations is None:
gate_configurations = {}
if skip_gates is None:
skip_gates = []
measurement_result = MeasurementHistory(device.name)
for gate in device.gates:
if gate not in skip_gates:
with set_back_voltages(device.gates):
gate_id = gate.gate_id
assert gate_id is not None
if gate_id in gate_configurations.keys():
gate_conf = gate_configurations[gate_id]
for other_id, voltage in gate_conf.items():
device.gates[other_id].voltage(voltage)
sub_result = self.characterize_gate(
device,
gate,
use_safety_voltage_ranges=True,
)
measurement_result.add_result(sub_result)
return measurement_result
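# Minimal usage sketch (illustrative; the device, settings instances and the
# chosen gate IDs/voltages are assumptions, not part of this module):
#
#   characterizer = Characterizer(
#       "characterizer", data_settings, classifiers, setpoint_settings)
#   result = characterizer.characterize(
#       device,
#       skip_gates=[device.gates[0]],
#       gate_configurations={2: {0: -0.3}},  # characterize gate 2 with gate 0 at -0.3 V
#   )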
|
# Import libraries
import pandas  # pandas library
import time  # time library
import datetime  # date and time library
from datetime import datetime  # datetime class (shadows the module import above)
import os  # os library
from termcolor import colored  # termcolor library
import sqlite3  # sqlite3 library
os.system('CLS')  # clear the terminal
# Section: load data from the SQLite database
"""-----------------------------------------------------------------------------------------------------------------------"""
conn = sqlite3.connect('./database.db')  # connect to the database
matrixpandas = pandas.read_sql_query("SELECT * FROM productos", conn)  # load the stock table from the database
matriz = matrixpandas.values.tolist()  # convert the dataframe to a list of lists
registros = pandas.read_sql_query("SELECT * FROM registros", conn)  # load the records table from the database
registros = registros.values.tolist()  # convert the dataframe to a list of lists
"""-----------------------------------------------------------------------------------------------------------------------"""
# Section: functions
"""-----------------------------------------------------------------------------------------------------------------------"""
# function to print the product matrix
def print_data(matriz):
os.system('CLS')
print_matriz = pandas.DataFrame(
matriz, columns=["code", "name", "type", "stock", "repos", "price", "last_update"]) # generar la matriz en formato pandas
print("Imprimiendo matriz de datos...") # mensaje de impresion
time.sleep(1) # esperar 1 segundo
print(print_matriz) # imprimir la matriz de stock
print(" ")
decition = input(
"Cuando desee regresar al menú principal ingrese cualquier tecla: ") # volver al menu principal
os.system('CLS') # limpiar la terminal
time.sleep(1) # esperar 1 segundo
# function to print the records matrix
def print_registros(registros):
print_registros = pandas.DataFrame(
registros, columns=["code", "variacion", "motivo", "timestamp"]) # generar la matriz en formato pandas
print("Imprimiendo matriz de datos...") # mensaje de impresion
time.sleep(1) # esperar 1 segundo
print(print_registros) # imprimir la matriz de registros
print(" ")
decition = input(
"Cuando desee regresar al menú principal ingrese cualquier tecla: ") # volver al menu principal
os.system('CLS') # limpiar la terminal
time.sleep(1) # esperar 1 segundo
# function to look up the stock of a product
def product_stock(matriz):
os.system("CLS") # limpiar la terminal
founded = False # variable para saber si se encontro el producto
stock = (input("Ingrese el código del producto a consultar stock: ")).upper() # capturar el codigo del producto a buscar
os.system('CLS') # limpiar la terminal
for i in range(len(matriz)): # recorrer la matriz
if stock == matriz[i][0]: # si se encontró el codigo del producto en la matriz
print("El stock actual del producto ", stock, "es: ", matriz[i][3]) # imprimir el stock del producto
founded = True # cambiar la variable a True
input("Ingrese cualquier tecla cuando desee volver al menu principal: ") # volver al menu principal
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
if founded == False: # si no se encontró el codigo del producto en la matriz
print("No se encontro el codigo") # mensaje de error
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
print(colored("- 1.", "blue", attrs=["bold"]), "Volver a intentar ") # mensaje de volver a intentar
print(colored("- 2.", "blue",
attrs=["bold"]), "Volver al menú principal") # mensaje de volver al menu principal
choose = (input("Ingrese una opción: ")).upper() # capturar la opcion
if choose == "1": # si la opcion es 1
product_stock(matriz) # volver a intentar
elif choose == "2": # si la opcion es 2
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
# function to filter products by category
def product_type(matriz):
type_product = input(
"Ingrese la categoria de producto por el que desea filtrar: ") # capturar el tipo de producto para filtrar
a = len(matriz) # obtener la longitud de la matriz
lista = list() # crear una lista
for i in range(a): # recorrer la matriz
if (matriz[i][2]).upper() == (type_product).upper(): # si el tipo de producto es igual al tipo de producto capturado
lista.append(matriz[i]) # agregar el producto a la lista
if len(lista) != 0:
c = pandas.DataFrame(
lista, columns=["code", "name", "type", "stock", "repos", "price", "last_update"]) # generar la matriz en formato pandas
os.system('CLS') # limpiar la terminal
print(c) # imprimir la matriz de productos
print(" ")
decition = input(
"Cuando desee regresar al menú principal ingrese cualquier tecla: ") # volver al menu principal
os.system('CLS') # limpiar la terminal
time.sleep(1) # esperar 1 segundo
else:
print("No se encontraron productos con ese tipo") # mensaje de error
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
print(colored("- 1.", "blue", attrs=["bold"]), "Volver a intentar ") # mensaje de volver a intentar
print(colored("- 2.", "blue",
attrs=["bold"]), "Volver al menú principal") # mensaje de volver al menu principal
choose = (input("Ingrese una opción: ")).upper() # capturar la opcion
if choose == "1": # si la opcion es 1
product_type(matriz) # volver a intentar
elif choose == "2": # si la opcion es 2
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
# function to get the current date and time
def get_current_time():
time_update = datetime.now() # obtener la fecha y hora actual
now = time_update.strftime("%d/%m/%Y %H:%M:%S") # formatear la fecha y hora actual
return now # retornar fecha
# function to alert when products need restocking
def alert(matriz):
time.sleep(0.2) # esperar 0.2 segundos
os.system("CLS") # limpiar la terminal
to_repos = list() # crear una lista para los productos a reponer
codes_to_repos = list() # crear una lista para los codigos de los productos a reponer
for i in range(len(matriz)): # recorrer la matriz
if int(matriz[i][3]) <= int(matriz[i][4]): # si el stock es menor o igual al reposicion
to_repos.append(matriz[i]) # agregar el producto a la lista
codes_to_repos.append(matriz[i][0]) # agregar el codigo del producto a la lista
to_repos = pandas.DataFrame(to_repos, columns=["code", "name", "type", "stock", "repos", "price", "last_update"]) # generar la matriz en formato pandas
if len(codes_to_repos) > 0: # si hay productos a reponer
print("Los codigos a reponer son: ") # mensaje de los codigos a reponer
for i in codes_to_repos: # recorrer la lista de codigos a reponer
print(i, end=" ") # imprimir los codigos a reponer
print("")
print("-----------------------------")
print(" ")
print(to_repos) # imprimir la matriz de productos a reponer
print(" ")
a = input("Ingrese una tecla cuando desee volver al menu principal: ") # volver al menu principal
os.system('CLS') # limpiar la terminal
else:
print("No hay ningun codigo a reponer por el momento.") # mensaje de error
os.system('CLS') # limpiar la terminal
# function to add a new product
def add_new_product(matriz):
new_product = list() # crear una lista para almacenar los datos del nuevo producto
code = input("Ingresa el codigo del producto que desea agregar: ") # capturar el codigo del producto
name = input("Ingresa el nombre del producto que va a agregar: ") # capturar el nombre del producto
type_product = input("Ingresa la categoria del producto: ") # capturar el tipo de producto
stock = int(input("Ingresa el stock inicial del producto, puede ser 0: ")) # capturar el stock inicial del producto
reposition = int(input("Punto de reposicion del producto: ")) # capturar el punto de reposicion del producto
price = input("Ingresa el precio del producto: ") # capturar el precio del producto
new_product.append(code.upper()) # agregar el codigo al nuevo producto
new_product.append(name) # agregar el nombre al nuevo producto
new_product.append(type_product) # agregar el tipo de producto al nuevo producto
new_product.append(stock) # agregar el stock al nuevo producto
new_product.append(reposition) # agregar el punto de reposicion al nuevo producto
new_product.append(price) # agregar el precio al nuevo producto
new_product.append(get_current_time()) # agregar la fecha y hora actual al nuevo producto
matriz.append(new_product) # agregar el nuevo producto a la matriz
print("El producto " + code.upper() + " fue agregado") # mensaje de confirmacion
time.sleep(2) # esperar 2 segundos
os.system('CLS') # limpiar la terminal
df = pandas.DataFrame(matriz) # generar la matriz en formato pandas
df.to_sql('productos', conn, if_exists='replace', index=False) # almacenar la matriz de stock en la base de datos
ajuste = [code.upper(), "Se añadió un producto",
"Producto agregado", get_current_time()] # crear una lista para almacenar los datos del ajuste
registros.append(ajuste) # agregar el ajuste a la matriz de registros
df = pandas.DataFrame(registros) # generar la matriz en formato pandas
df.to_sql('registros', conn, if_exists='replace', index=False) # almacenar la matriz de registros en la base de datos
# function to delete a product
def delete_product(matriz):
long = len(matriz) # obtener la longitud de la matriz
eliminated = False # variable para saber si se elimino un producto
code_eliminate = input(
"Ingresa el codigo del producto que quieres eliminar: ") # capturar el codigo del producto a eliminar
for i in range(long): # recorrer la matriz
try:
pos = matriz[i][0].index(code_eliminate) # obtener la posicion del codigo capturado
name1 = matriz[i][1] # obtener el nombre del producto
print("El producto ", name1, " fue encontrado, eliminando...") # mensaje de código encontrado
matriz.pop(i) # eliminar el producto de la matriz
time.sleep(1) # esperar 1 segundo
print("El producto fue eliminado") # mensaje de confirmacion
time.sleep(1.5) # esperar 1.5 segundos
os.system('CLS') # limpiar la terminal
eliminated = True # cambiar la variable a True
except:
continue
if eliminated == False: # si no se eliminó ningun producto
print("El codigo no es correcto") # mensaje de error
df = pandas.DataFrame(matriz) # generar la matriz en formato pandas
df.to_sql('productos', conn, if_exists='replace', index=False) # almacenar la matriz de stock en la base de datos
ajuste = "Se borro el producto"
motivo = "Producto eliminado"
ajuste = [code_eliminate, ajuste, motivo, get_current_time()] # crear una lista para almacenar los datos del ajuste
registros.append(ajuste) # agregar el ajuste a la matriz de registros
df = pandas.DataFrame(registros) # generar la matriz de registros en formato pandas
df.to_sql('registros', conn, if_exists='replace', index=False) # almacenar la matriz de registros en la base de datos
# function to modify the stock of a product
def modificate_stock(matriz, code_modified):
time.sleep(0.5) # esperar 0.5 segundos
long = len(matriz) # obtener la longitud de la matriz
os.system("CLS") # limpiar la terminal
os.system("CLS") # limpiar la terminal
code_founded = False # variable para saber si se encontro el codigo
for i in range(long): # recorrer la matriz
try:
pos = matriz[i][0].index(code_modified) # obtener la posicion del codigo capturado
pos_change = i # obtener la posicion del producto a modificar
code_founded = True # cambiar la variable a True
print(f"Se encontro el producto {matriz[pos_change][1]}...") # mensaje de confirmacion de encontrado
time.sleep(2) # esperar 2 segundos
os.system("CLS") # limpiar la terminal
except:
continue
print(colored("- 1.", "blue", attrs=["bold"]), "Aumentar stock") # mensaje de opcion 1
print(colored("- 2.", "blue", attrs=["bold"]), "Disminuir stock") # mensaje de opcion 2
print(colored("- 3.", "blue", attrs=["bold"]), # mensaje de opcion 3
"Ajuste por perdida de stock")
egressingress = (input("Ingrese una opción: ")).upper() # capturar la opcion del usuario
os.system("CLS") # limpiar la terminal
if egressingress == "1" and code_founded == True or egressingress == "AUMENTAR" and code_founded == True: # si la opcion es 1 y el codigo fue encontrado
actual_stock = int(matriz[pos_change][3]) # obtener el stock actual del producto
time.sleep(1) # esperar 1 segundo
print(f"El stock actual de {code_modified} es: ", actual_stock) # mensaje de stock actual
increase = int(
input(f"Cuanto stock desea agregar al stock de {code_modified}: ")) # capturar el stock a aumentar
suma = actual_stock + increase # sumar el stock actual mas el stock a aumentar
suma = str(suma) # convertir el stock a string
matriz[pos_change][3] = suma # cambiar el stock del producto
matriz[pos_change][6] = get_current_time() # cambiar la fecha y hora de modificacion del producto
df = pandas.DataFrame(matriz) # generar la matriz en formato pandas
df.to_sql('productos', conn, if_exists='replace', index=False) # almacenar la matriz de stock en la base de datos
ajuste = "+" + str(increase)
motivo = "Ingreso de stock"
ajuste = [code_modified, ajuste, motivo, get_current_time()] # crear una lista para almacenar los datos del ajuste
registros.append(ajuste) # agregar el ajuste a la matriz de registros
df = pandas.DataFrame(registros) # generar la matriz de registros en formato pandas
df.to_sql('registros', conn, if_exists='replace', index=False) # almacenar la matriz de registros en la base de datos
time.sleep(2) # esperar 2 segundos
print(
f"El stock de {code_modified} ha sido modificado, ahora es: {matriz[pos_change][3]}") # mensaje de confirmacion de modificacion
time.sleep(2) # esperar 2 segundos
os.system("CLS") # limpiar la terminal
elif egressingress == "2" and code_founded == True or egressingress == "DISMINUIR" and code_founded == True: # si la opcion es 2 y el codigo fue encontrado
actual_stock = int(matriz[pos_change][3]) # obtener el stock actual del producto
print(
f"El stock actual de {code_modified} producto es: ", actual_stock) # mensaje de stock actual
time.sleep(1) # esperar 1 segundo
decrease = int(
input(f"Cuanto stock desea restar al stock de {code_modified}: ")) # capturar el stock a disminuir
resta = actual_stock - decrease # restar el stock actual menos el stock a disminuir
resta = str(resta) # convertir el stock a string
matriz[pos_change][3] = resta # cambiar el stock del producto
matriz[pos_change][6] = get_current_time() # cambiar la fecha de modificacion
print(
f"El stock de {code_modified} ha sido modificado, ahora es: {matriz[pos_change][3]}") # mensaje de confirmacion de modificacion
time.sleep(2) # esperar 2 segundos
df = pandas.DataFrame(matriz) # generar la matriz en formato pandas
df.to_sql('productos', conn, if_exists='replace', index=False) # almacenar la matriz de stock en la base de datos
ajuste = "-" + str(decrease)
motivo = "Egreso de stock"
ajuste = [code_modified, ajuste, motivo, get_current_time()] # crear una lista para almacenar los datos del ajuste
registros.append(ajuste) # agregar el ajuste a la matriz de registros
df = pandas.DataFrame(registros) # generar la matriz de registros en formato pandas
df.to_sql('registros', conn, if_exists='replace', index=False) # almacenar la matriz de registros en la base de datos
time.sleep(2) # esperar 2 segundos
os.system("CLS") # limpiar la terminal
elif egressingress == "3" and code_founded == True: # si la opcion es 3 y el codigo fue encontrado
actual_stock = int(matriz[pos_change][3]) # obtener el stock actual del producto
print(
f"El stock actual de {code_modified} producto es: ", actual_stock) # mensaje de stock actual
time.sleep(1) # esperar 1 segundo
ajustar = int(input(f"Cuanto stock se extravio de {code_modified}: ")) # capturar el stock a ajustar
motivo = input("Motivo del ajuste: ") # capturar el motivo del ajuste
os.system("CLS") # limpiar la terminal
print("Vamos a modificar el stock restando lo que se perdio, y lo que tiene que volver a enviar al cliente. ¿Es usted conciente?") # mensaje de confirmacion
print(colored("- 1.", "blue", attrs=["bold"]), "Si") # opcion si
print(colored("- 2.", "blue", attrs=["bold"]), "No") # opcion no
choose = (input("Ingrese una opción: ")).upper() # capturar la opcion
if choose == "1": # si la opcion es 1
mod = actual_stock - (ajustar+ajustar) # modificar el stock
mod = str(mod) # convertir el stock a string
ajuste = "-"+str(ajustar+ajustar) # crear string del ajuste
matriz[pos_change][3] = mod # cambiar el stock del producto
os.system("CLS") # limpiar la terminal
ajuste = [code_modified, ajuste, motivo, get_current_time()] # crear una lista para almacenar los datos del ajuste
registros.append(ajuste) # agregar el ajuste a la matriz de registros
print(
f"Ahora el stock de {code_modified} es: ", (matriz[pos_change][3])) # mensaje de confirmacion de modificacion
print(f"Ajuste de {code_modified} realizado con exito") # mensaje de confirmacion de ajuste
df = pandas.DataFrame(registros) # generar la matriz de registros en formato pandas
df.to_sql('registros', conn, if_exists='replace', index=False) # almacenar la matriz de registros en la base de datos
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
elif choose == "2": # si la opcion es 2
print("Cancelando...") # mensaje de cancelacion
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
elif code_founded == False: # si el codigo no fue encontrado
print(f"El codigo {code_modified} no se encontro") # mensaje de codigo no encontrado
print(colored("- 1.", "blue", attrs=["bold"]), "Volver a intentar") # opcion 1
print(colored("- 2.", "blue",
attrs=["bold"]), "Volver al menu principal") # opcion 2
choose = (input("Ingrese una opción: ")).upper() # capturar la opcion
if choose == "1": # si la opcion es 1
            modificate_stock(matriz, code_modified)  # retry with the same product code
elif choose == "2": # si la opcion es 2
print("Volviendo al menu principal...") # mensaje de volver al menu principal
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
else:
print("No ingreso una opcion correcta, volviendo al menu principal...") # mensaje de opcion incorrecta
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
else: # si no se ingreso una opcion correcta
print("Usted no ingreso una opcion correcta") # mensaje de opcion incorrecta
print(colored("- 1.", "blue", attrs=["bold"]), "Volver a intentar") # opcion 1
print(colored("- 2.", "blue",
attrs=["bold"]), "Volver al menu principal") # opcion 2
choose = (input("Ingrese una opción: ")).upper() # capturar la opcion
if choose == "1": # si la opcion es 1
            modificate_stock(matriz, code_modified)  # retry with the same product code
elif choose == "2": # si la opcion es 2
print("Volviendo al menu principal...") # mensaje de volver al menu principal
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
else: # si la opcion es incorrecta
print("No ingreso una opcion correcta, volviendo al menu principal...") # mensaje de opcion incorrecta
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
# function to edit a product
def update_product(matriz):
os.system("CLS") # limpiar la terminal
code = (input("Ingrese el codigo del producto que quiere modificar: ")).upper() # capturar el codigo del producto
founded = False # variable para saber si el codigo fue encontrado
long = len(matriz) # obtener la longitud de la matriz
try:
for i in range(long): # recorrer la matriz
if code == matriz[i][0]: # si el codigo ingresado es igual al codigo de la matriz
print(" ")
print(f"El producto {matriz[i][1]} fue encontrado") # mensaje de producto encontrado
pos = i # obtener la posicion del producto
founded = True # cambiar la variable a True
time.sleep(2) # esperar 2 segundos
os.system("CLS") # limpiar la terminal
except:
print("El codigo no es correcto") # mensaje de codigo incorrecto
update_product(matriz) # llamar a la funcion update product
if founded == True: # si el codigo fue encontrado
print(" ")
print(colored("- 1.", "blue", attrs=["bold"]), "Modificar nombre") # opcion 1
print(colored("- 2.", "blue", attrs=["bold"]), "Modificar precio") # opcion 2
print(colored("- 3.", "blue", attrs=["bold"]), "Modificar stock") # opcion 3
print(colored("- 4.", "blue", attrs=["bold"]), "Modificar codigo") # opcion 4
print(colored("- 5.", "blue", attrs=["bold"]), "Modificar categoria") # opcion 5
print(colored("- 6.", "blue",
attrs=["bold"]), "Modificar punto de reposicion") # opcion 6
print(" ")
choose = input("Ingrese una opcion: ") # capturar la opcion
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
if choose == "1": # si la opcion es 1
name = input("Ingrese el nuevo nombre: ") # capturar el nuevo nombre
matriz[pos][1] = name # cambiar el nombre del producto
matriz[pos][6] = get_current_time() # cambiar la fecha y hora de modificacion del producto
print(" ")
print("El nombre del producto fue modificado") # mensaje de nombre modificado
time.sleep(1.5) # esperar 1.5 segundos
print(" ")
elif choose == "2": # si la opcion es 2
price = input("Ingrese el nuevo precio: ") # capturar el nuevo precio
matriz[pos][5] = price # cambiar el precio del producto
matriz[pos][6] = get_current_time() # cambiar la fecha y hora de modificacion del producto
print(" ")
print("El precio del producto fue modificado") # mensaje de precio modificado
time.sleep(1.5) # esperar 1.5 segundos
print(" ")
elif choose == "3": # si la opcion es 3
modificate_stock(matriz, code) # llamar a la funcion modificar stock
elif choose == "4": # si la opcion es 4
code = input("Ingrese el nuevo codigo del producto: ") # capturar el nuevo codigo
matriz[pos][0] = code # cambiar el codigo del producto
matriz[pos][6] = get_current_time() # cambiar la fecha y hora de modificacion del producto
print(" ")
print("El codigo del producto fue modificado") # mensaje de codigo modificado
time.sleep(1.5) # esperar 1.5 segundos
print(" ")
elif choose == "5": # si la opcion es 5
category = input("Ingrese la nueva categoria: ") # capturar la nueva categoria
matriz[pos][2] = category # cambiar la categoria del producto
matriz[pos][6] = get_current_time() # cambiar la fecha y hora de modificacion del producto
print(" ")
print("La categoria del producto fue modificada") # mensaje de categoria modificada
time.sleep(1.5) # esperar 1.5 segundos
print(" ")
elif choose == "6": # si la opcion es 6
repos = input("Ingrese el nuevo punto de reposicion: ") # capturar el nuevo punto de reposicion
matriz[pos][4] = repos # cambiar el punto de reposicion del producto
matriz[pos][6] = get_current_time() # cambiar la fecha y hora de modificacion del producto
print(" ")
print("El punto de reposicion del producto fue modificado") # mensaje de punto de reposicion modificado
time.sleep(1.5) # esperar 1.5 segundos
print(" ")
os.system("CLS") # limpiar la terminal
else: # si el codigo no fue encontrado
print("El codigo no se encontro") # mensaje de codigo no encontrado
time.sleep(1.5) # esperar 1.5 segundos
os.system("CLS") # limpiar la terminal
update_product(matriz)
df = pandas.DataFrame(matriz) # convertir la matriz en un dataframe
df.to_sql('productos', conn, if_exists='replace', index=False) # guardar los datos en la base de datos
"""-----------------------------------------------------------------------------------------------------------------------"""
# Main menu section - program
"""-----------------------------------------------------------------------------------------------------------------------"""
print()
print(colored("Bienvenido a AutoStocker", "blue", attrs=["bold", "underline"])) # mensaje de bienvenida
date_update = datetime.now() # obtener la fecha actual
now = date_update.strftime("%d/%m/%Y") # convertir la fecha actual a string
print(colored("Hoy es " + now, "white")) # imprimir la fecha actual
print()
o = "INICIAR" # opcion iniciar
while o != "9": # mientras la opcion no sea cerrar
print(colored("--- Menú Principal ---", "blue", attrs=["bold"])) # imprimir el menu principal
print(colored("- 1.", "blue", attrs=["bold"]), "Imprimir Data") # opcion 1
print(colored("- 2.", "blue", attrs=["bold"]), "Agregar Producto") # opcion 2
print(colored("- 3.", "blue", attrs=["bold"]), "Eliminar Producto") # opcion 3
print(colored("- 4.", "blue", attrs=["bold"]), "Modificar producto") # opcion 4
print(colored("- 5.", "blue", attrs=["bold"]), "Filtrar por categoria") # opcion 5
print(colored("- 6.", "blue", attrs=["bold"]),
"Consultar stock del producto") # opcion 6
print(colored("- 7.", "blue", attrs=["bold"]), "Alertas de reposición") # opcion 7
print(colored("- 8.", "blue", attrs=["bold"]), "Imprimir registros") # opcion 8
print(colored("- 9.", "blue", attrs=["bold"]), "Cerrar") # opcion 9
o = (input("> Ingrese una opcion: ")).upper() # capturar la opcion
if o == "CERRAR" or o == "9": # si la opcion es cerrar o 7
print("Guardando datos en la base de datos...") # mensaje de guardando datos
df = pandas.DataFrame(matriz) # convertir la matriz en un dataframe
df.to_sql('productos', conn, if_exists='replace', index=False) # guardar los datos en la base de datos
time.sleep(1) # esperar 1 segundo
print("Cerrando AutoStocker...") # mensaje de cerrando AutoStocker
time.sleep(1) # esperar 1 segundo
elif o == "PRINT_DATA" or o == "1": # si la opcion es imprimir data o 1
print_data(matriz) # llamar a la funcion print data
elif o == "AGREGAR PRODUCTO" or o == "2": # si la opcion es agregar producto o 2
add_new_product(matriz) # llamar a la funcion agregar producto
elif o == "ELIMINAR PRODUCTO" or o == "3": # si la opcion es eliminar producto o 3
delete_product(matriz) # llamar a la funcion eliminar producto
elif o == "MODIFICAR PRODUCTO" or o == "4": # si la opcion es modificar producto o 4
update_product(matriz) # llamar a la funcion modificar producto
elif o == "FILTRAR CATEGORIA" or o == "5": # si la opcion es filtrar categoria o 5
product_type(matriz) # llamar a la funcion filtrar categoria
elif o == "CONSULTAR STOCK" or o == "6": # si la opcion es consultar stock o 6
product_stock(matriz) # llamar a la funcion consultar stock
elif o == "7": # si la opcion es alert o 7
alert(matriz) # llamar a la funcion alertas de reposicion
elif o == "8": # si la opcion es 8
print_registros(registros) # llamar a la funcion imprimir registros
else: # si la opcion no es ninguna de las anteriores
print("No has ingresado un comando valido") # mensaje de comando invalido
time.sleep(1) # esperar 1 segundo
os.system("CLS") # limpiar la terminal
"""-----------------------------------------------------------------------------------------------------------------------"""
conn.close()
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
class Database:
_conn_string = None
_connection = None
_engine = None
_session_factory = None
_session = None
def __init__(self, *, db, engine, **kwargs):
if not self._conn_string:
self._conn_string = self._form_connection_string(
db,
engine,
**kwargs,
)
if not self._engine:
self._engine = create_engine(self._conn_string)
if not self._connection:
self._connection = self._engine.connect()
if not self._session_factory:
self._session_factory = sessionmaker(bind=self._engine)
@staticmethod
def _form_connection_string(db, engine, user=None, password=None,
host=None, port=None):
conn_str = f'{engine}://'
if user and password:
conn_str += f'{user}:{password}'
if host:
conn_str += f'@{host}'
if port:
conn_str += f':{port}'
conn_str += f'/{db}'
return conn_str
def get_scoped_session(self):
return scoped_session(self._session_factory)
@property
def connection_string(self):
return self._conn_string
@property
def connection(self):
return self._connection
@property
def engine(self):
return self._engine
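# A minimal usage sketch of the class above, assuming an in-memory sqlite
# database purely for illustration:
if __name__ == '__main__':
    db = Database(db=':memory:', engine='sqlite')
    print(db.connection_string)  # -> sqlite:///:memory:
    session = db.get_scoped_session()
    session.remove()  # release the scoped session when finished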
|
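# Reads an integer between 1 and 9, re-prompting until the input is valid,
# and prints the Portuguese word for that digit (e.g. 3 -> "três").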
cont = ("zero","um", "dois", "três", "quatro",
"cinco", "seis", "sete", "oito", "nove")
while True:
x = int(input("Digite um número entre 1 e 9: "))
if 1 <= x <= 9:
break
print("Tente outra vez. ", end="")
print(f"O número digitado foi {cont[x]}")
|
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Core functionality for step one of DeepVariant: Making examples."""
import collections
import dataclasses
import os
import time
from typing import Dict, List, Optional, Sequence, Tuple
from absl import logging
import numpy as np
import tensorflow as tf
from deepvariant import allele_frequency
from deepvariant import dv_constants
from deepvariant import pileup_image
from deepvariant import resources
from deepvariant import tf_utils
from deepvariant import variant_caller as vc_base
from deepvariant import vcf_candidate_importer
from deepvariant import very_sensitive_caller
from deepvariant.labeler import customized_classes_labeler
from deepvariant.labeler import haplotype_labeler
from deepvariant.labeler import positional_labeler
from deepvariant.protos import deepvariant_pb2
from deepvariant.python import allelecounter
from deepvariant.realigner import realigner
from deepvariant.vendor import timer
from google.protobuf import text_format
from third_party.nucleus.io import fasta
from third_party.nucleus.io import sam
from third_party.nucleus.io import tfrecord
from third_party.nucleus.io import vcf
from third_party.nucleus.protos import reads_pb2
from third_party.nucleus.protos import variants_pb2
from third_party.nucleus.util import ranges
from third_party.nucleus.util import struct_utils
from third_party.nucleus.util import utils
from third_party.nucleus.util import variant_utils
# For --runtime_by_region, these columns will be written out in this order.
RUNTIME_BY_REGION_COLUMNS = ('region', 'get reads', 'find candidates',
'make pileup images', 'write outputs', 'num reads',
'num candidates', 'num examples')
# The name used for a sample if one is not specified or present in the reads.
_UNKNOWN_SAMPLE = 'UNKNOWN'
# ---------------------------------------------------------------------------
# Selecting variants of specific types (e.g., SNPs)
# ---------------------------------------------------------------------------
def _select_biallelic_snps(v):
return variant_utils.is_snp(v) and variant_utils.is_biallelic(v)
def _select_biallelic_indels(v):
return variant_utils.is_indel(v) and variant_utils.is_biallelic(v)
def _select_biallelic_insertions(v):
return variant_utils.has_insertion(v) and variant_utils.is_biallelic(v)
def _select_biallelic_deletions(v):
return variant_utils.has_deletion(v) and variant_utils.is_biallelic(v)
VARIANT_TYPE_SELECTORS = {
'snps': _select_biallelic_snps,
'indels': _select_biallelic_indels,
'insertions': _select_biallelic_insertions,
'deletions': _select_biallelic_deletions,
'multi-allelics': variant_utils.is_multiallelic,
'all': lambda v: True,
}
# ---------------------------------------------------------------------------
# Option handling
# ---------------------------------------------------------------------------
def assign_sample_name(sample_name_flag, reads_filenames):
"""Returns sample name derived from either sample_name flag or input BAM.
Function derives sample_name from the flag. If flag is not set then
sample_name is derived from input BAM.
Args:
sample_name_flag: string. sample_name flag value.
reads_filenames: string. A comma-separated list of alignment file names
(e.g. BAM); the first of these will be used. May be empty.
Returns:
string. Derived sample name.
"""
if sample_name_flag:
sample_name = sample_name_flag
elif reads_filenames:
with sam.SamReader(reads_filenames.split(',')[0]) as sam_reader:
sample_name = extract_sample_name_from_sam_reader(sam_reader)
else:
sample_name = _UNKNOWN_SAMPLE
return sample_name
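# Illustration of the precedence implemented above: an explicit sample_name
# flag value such as 'NA12878' is returned unchanged; with no flag and no
# reads filenames the function falls back to _UNKNOWN_SAMPLE ('UNKNOWN').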
def make_vc_options(sample_name, flags_obj):
return deepvariant_pb2.VariantCallerOptions(
min_count_snps=flags_obj.vsc_min_count_snps,
min_count_indels=flags_obj.vsc_min_count_indels,
min_fraction_snps=flags_obj.vsc_min_fraction_snps,
min_fraction_indels=flags_obj.vsc_min_fraction_indels,
min_fraction_multiplier=flags_obj.vsc_min_fraction_multiplier,
# Not specified by default: fraction_reference_sites_to_emit,
# Fixed random seed produced with 'od -vAn -N4 -tu4 < /dev/urandom'.
random_seed=1400605801,
sample_name=sample_name,
p_error=0.001,
max_gq=50,
gq_resolution=flags_obj.gvcf_gq_binsize,
ploidy=2,
skip_uncalled_genotypes=flags_obj.mode == 'training')
def parse_proto_enum_flag(proto_enum_pb2,
flag_value,
skip_unspecified_option=True):
"""Parses a command line flag string value into a protobuf Enum value.
Args:
proto_enum_pb2: an enum_type_wrapper.EnumTypeWrapper type containing a proto
enum definition. For example, this would be
deepvariant_pb2.MakeExamplesOptions.Mode to get the MakeExamplesOptions
Mode enum. See:
https://developers.google.com/protocol-buffers/docs/reference/python-generated#enum
for more information.
flag_value: str. The name of the proto enum option from the command line we
want to convert into the enum value.
skip_unspecified_option: bool. If True, any enum options that include the
string 'unspecified' (in any case) will be excluded from the list of
allowed options in the ValueError raised if flag_value isn't valid.
Returns:
The enum value for flag_value in proto_enum_pb2
Raises:
ValueError: if flag_value isn't a valid enum name in proto_enum_pb2.
"""
try:
return proto_enum_pb2.Value(flag_value)
except ValueError:
options = proto_enum_pb2.keys()
if skip_unspecified_option:
options = [o for o in options if 'unspecified' not in o.lower()]
raise ValueError('Unknown enum option "{}". Allowed options are {}'.format(
flag_value, ','.join(sorted(options))))
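# Illustration (assuming the MakeExamplesOptions.Mode enum mentioned in the
# docstring above):
#   parse_proto_enum_flag(deepvariant_pb2.MakeExamplesOptions.Mode, 'TRAINING')
# returns the numeric value of the TRAINING option, while an unknown name
# raises a ValueError listing the allowed (non-unspecified) options.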
def resolve_sam_aux_fields(flags_obj):
"""Decide value of parse_sam_aux_fields based on other flags."""
flags_requiring_sam_aux_fields = [
'sort_by_haplotypes', 'use_original_quality_scores'
]
flags_using_sam_aux_fields_optionally = ['add_hp_channel']
parse_sam_aux_fields = flags_obj.parse_sam_aux_fields
if parse_sam_aux_fields is None:
# User didn't set the 'parse_sam_aux_fields' flag, so default to False
# unless a flag is on that would use it.
parse_sam_aux_fields = False
for flag_name in (flags_requiring_sam_aux_fields +
flags_using_sam_aux_fields_optionally):
if flags_obj[flag_name].value:
logging.info(
'Because --%s=true, --parse_sam_aux_fields is set to '
'true to enable reading auxiliary fields from reads.', flag_name)
parse_sam_aux_fields = True
if not parse_sam_aux_fields:
for flag_name in flags_requiring_sam_aux_fields:
if flags_obj[flag_name].value:
raise ValueError(f'If --{flag_name} is '
'set then --parse_sam_aux_fields must be set too.')
for flag_name in flags_using_sam_aux_fields_optionally:
if flags_obj[flag_name].value:
logging.info(
'Note that --%s is set but --parse_sam_aux_fields is not '
'set. This is fine unless you are expecting to use aux fields from '
'the alignments file, such as haplotype tags from phasing. '
'If you do need to use aux fields, enable --parse_sam_aux_fields.',
flag_name)
return parse_sam_aux_fields
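# Example of the resolution above: if --sort_by_haplotypes=true and
# --parse_sam_aux_fields is left unset, aux-field parsing is enabled
# automatically; if --parse_sam_aux_fields=false is set explicitly alongside
# --sort_by_haplotypes=true, a ValueError is raised instead.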
def parse_regions_flag(regions_flag_value):
if isinstance(regions_flag_value, str):
regions_flag_value = regions_flag_value.split()
return regions_flag_value
def logging_with_options(options, message):
"""If options contain multiple shards, log with task/shard prefix."""
if options.num_shards > 1:
prefix = 'Task {}/{}: '.format(options.task_id, options.num_shards)
else:
prefix = ''
logging.info('%s%s', prefix, message)
# ---------------------------------------------------------------------------
# Simple utilities
# ---------------------------------------------------------------------------
def in_training_mode(options):
return options.mode == deepvariant_pb2.MakeExamplesOptions.TRAINING
def gvcf_output_enabled(options):
"""Returns True if we should be generating gVCF output."""
return bool(options.gvcf_filename)
def only_true(*elts):
"""Returns the sublist of elements that evaluate to True."""
return [elt for elt in elts if elt]
def extract_sample_name_from_sam_reader(sam_reader):
"""Returns the sample name as derived from the BAM file of reads.
Args:
sam_reader: Already opened sam_reader to use to extract the sample names
from. This sam_reader will not be closed after this function returns.
Returns:
The sample ID annotated in the read group.
"""
samples_list = [
rg.sample_id for rg in sam_reader.header.read_groups if rg.sample_id
]
samples = set(samples_list)
if not samples:
logging.warning(
'No non-empty sample name found in the input reads. '
'DeepVariant will use %s as the sample name. You can also '
'provide a sample name with the --sample_name argument.',
dv_constants.DEFAULT_SAMPLE_NAME)
return dv_constants.DEFAULT_SAMPLE_NAME
elif len(samples) > 1:
logging.warning(
'Multiple samples (%s) were found in the input reads. '
'Please confirm this is intended. For now, DeepVariant '
'will use the first sample name %s.', ', '.join(sorted(samples)),
samples_list[0])
return samples_list[0]
return next(iter(samples))
def trim_runtime(seconds: float) -> float:
"""Round seconds (float) to the nearest millisecond."""
return round(seconds, 3)
# ---------------------------------------------------------------------------
# Utilities for working with labeling metrics
#
# ---------------------------------------------------------------------------
def read_make_examples_run_info(path):
"""Reads a MakeExamplesRunInfo proto in text_format from path."""
with tf.io.gfile.GFile(path) as f:
return text_format.Parse(f.read(), deepvariant_pb2.MakeExamplesRunInfo())
def write_make_examples_run_info(run_info_proto, path):
"""Writes a MakeExamplesRunInfo proto in text_format to path."""
with tf.io.gfile.GFile(path, mode='w') as writer:
writer.write(
'# proto-file: learning/genomics/deepvariant/protos/deepvariant.proto\n'
'# proto-message: MakeExamplesRunInfo\n')
writer.write(text_format.MessageToString(run_info_proto, float_format=''))
# ---------------------------------------------------------------------------
# Region processing
# ---------------------------------------------------------------------------
def _ensure_consistent_contigs(ref_contigs,
sam_contigs,
vcf_contigs,
exclude_contig_names=None,
min_coverage_fraction=1.0):
"""Returns the common contigs after ensuring 'enough' overlap.
Args:
ref_contigs: list of reference_pb2.ContigInfo protos in the reference
genome.
sam_contigs: list of reference_pb2.ContigInfo protos in the SAM/BAM file.
vcf_contigs: list of reference_pb2.ContigInfo protos in the VCF if in
training mode, or None otherwise.
exclude_contig_names: list of strings of contig names to exclude from
overlap consideration.
min_coverage_fraction: The fraction of the reference contigs that must be
shared with all inputs.
Returns:
The list of contigs common between all input sources.
Raises:
ValueError: The contigs are not sufficiently similar across input sources.
"""
# Remove any excluded contigs from the ref_contigs, as we want to use the
# selected contigs for our overlap comparison.
if exclude_contig_names:
ref_contigs = [c for c in ref_contigs if c.name not in exclude_contig_names]
# Compute the common contigs among our inputs, and check that the contigs are
# sufficiently consistent among each other.
contigs = common_contigs(only_true(ref_contigs, sam_contigs))
if vcf_contigs:
# If VCF contigs exist, we just check the name (not the length).
vcf_contigs_names = set([x.name for x in vcf_contigs])
contigs = [x for x in contigs if x.name in vcf_contigs_names]
validate_reference_contig_coverage(ref_contigs, contigs,
min_coverage_fraction)
return contigs
def common_contigs(contigs_list):
"""Gets a list of contigs found in all contigs in contigs_list.
A common contig is considered one where the name and length in basepairs are
the same.
Args:
contigs_list: A sequence of lists of ContigInfo protos.
Returns:
A list of ContigInfo protos. Note that the individual protos found in this
returned list are shared with the ContigInfo protos found in contigs_list,
so should not be modified.
"""
def common2(contigs1, contigs2):
"""Computes the common contigs between contigs1 and contigs2."""
map2 = ranges.contigs_dict(contigs2)
def is_common(contig1):
contig2 = map2.get(contig1.name, None)
return contig2 and contig1.n_bases == contig2.n_bases
return [c for c in contigs1 if is_common(c)]
# Compute the common contigs by recursively getting common contigs of our
# cumulative set of contigs (common) and each contig in other_contigs.
common = contigs_list[0]
for other_contigs in contigs_list[1:]:
common = common2(common, other_contigs)
return common
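# Illustration: if one input has contigs chr1 (1000 bp) and chr2 (2000 bp) and
# another has chr1 (1000 bp) and chr2 (1900 bp), only chr1 is returned, since
# a common contig must match in both name and length (n_bases).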
def validate_reference_contig_coverage(ref_contigs, shared_contigs,
min_coverage_fraction):
"""Validates that shared_contigs spans a sufficient amount of ref_contigs.
Args:
ref_contigs: List of ContigInfo protos. All of the contigs from our
reference genome.
shared_contigs: The subset of ref_contigs that we found in common with
ref_contigs and all other genomics data sources.
min_coverage_fraction: The minimum fraction of basepairs of ref_contigs that
should be found among the shared_contigs.
Raises:
ValueError: If the fraction of covered bases is less than
min_coverage_fraction.
"""
def format_contig_matches():
pieces = []
common_map = ranges.contigs_dict(shared_contigs)
for ref_contig in ref_contigs:
status = 'matched' if ref_contig.name in common_map else 'IS MISSING'
pieces.append('\n"{}" is {} bp and {}'.format(ref_contig.name,
ref_contig.n_bases, status))
return ', '.join(pieces)
ref_bp = ranges.contigs_n_bases(ref_contigs)
common_bp = ranges.contigs_n_bases(shared_contigs)
coverage = common_bp / (1. * ref_bp)
if not shared_contigs or coverage < min_coverage_fraction:
raise ValueError('Reference contigs span {} bases but only {} bases '
'({:.2%}) were found in common among our input files. '
'Check that the sources were created on a common genome '
'reference build. Contig matches were: {}. Here is a '
'useful article about different human genome reference '
'builds:\n'
'https://gatkforums.broadinstitute.org/gatk/discussion/'
'11010/human-genome-reference-builds-grch38-hg38-b37-hg19'
'\nPlease make sure the --ref input matches the build '
'used for the input in --reads.'.format(
ref_bp, common_bp, coverage, format_contig_matches()))
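# Illustration: if the reference contigs total 100,000 bp and the shared
# contigs cover 90,000 bp, coverage is 0.9, which passes a
# min_coverage_fraction of 0.8 but raises ValueError at the default of 1.0.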
def build_calling_regions(contigs, regions_to_include, regions_to_exclude):
"""Builds a RangeSet containing the regions we should call variants in.
This function intersects the Ranges spanning all of the contigs with those
from regions_to_include, if not empty, and removes all of the regions in
regions_to_exclude.
Args:
contigs: Sequence of ContigInfo protos. Used to determine the initial ranges
to process (i.e., all bases of these contigs).
regions_to_include: RangeSet or iterable that can be converted to a
RangeSet.
regions_to_exclude: RangeSet or iterable that can be converted to a
RangeSet.
Returns:
A RangeSet.
"""
# Initially we are going to call everything in the reference.
regions = ranges.RangeSet.from_contigs(contigs)
# If we provided regions to include, intersect them with all of the regions,
# producing a common set of regions between the reference and the provided
# calling regions.
contig_dict = ranges.contigs_dict(contigs)
if regions_to_include:
regions = regions.intersection(
ranges.RangeSet.from_regions(regions_to_include, contig_dict))
# If we provided regions to exclude, remove them from the calling regions
# computed so far to further refine our set of regions to process.
if regions_to_exclude:
# exclude_regions mutates regions.
regions.exclude_regions(
ranges.RangeSet.from_regions(regions_to_exclude, contig_dict))
return regions
def regions_to_process(contigs,
partition_size,
calling_regions=None,
task_id=None,
num_shards=None):
"""Determines the regions to process and partitions them into pieces.
This function divides the genomes into regions we should process by
intersecting the Ranges spanning all of the contigs with those from
calling_regions, if provided. These intersected regions are then partitioned
into pieces no bigger than partition_size bp in length.
By construction we ensure that the regions are in genomic order, first w.r.t.
the contigs and then within each contig by start and end of each region.
This function can further subdivide these regions into a subset appropriate
for a single task (task_id) among N tasks (num_shards) to process. The
function ensures that:
set(all_regions) = union(regions(task_0), ..., regions(task_n))
when called with task_ids 0 ... N for num_shards = N.
Args:
contigs: Sequence of ContigInfo protos. Used to determine the initial ranges
to process (i.e., all bases of these contigs) and the order of returned
ranges.
partition_size: The maximum size to make any region when partitioning.
calling_regions: None or RangeSet. If provided, we will intersect the
regions to process so that only those that overlap a region in this set
are included.
task_id: int >= 0 or None. The task_id of this job, which will be used to
subdivide the total set of regions to process into just those that should
be processed by this job. Must be < num_shards.
num_shards: int >= 0 or None. The number of shards (i.e., the total number
of tasks) we are running in parallel. Together with task_id determines the
subset of regions we want to process.
Returns:
An iterable of nucleus.genomics.v1.Range objects.
Raises:
ValueError: if task_id and num_shards are bad or inconsistent.
"""
if (task_id is None) != (num_shards is None):
raise ValueError('Both task_id and num_shards must be present if either is',
task_id, num_shards)
if num_shards:
if num_shards < 0:
raise ValueError('num_shards={} must be >= 0'.format(num_shards))
if task_id < 0 or task_id >= num_shards:
raise ValueError('task_id={} should be >= 0 and < num_shards={}'.format(
task_id, num_shards))
regions = ranges.RangeSet.from_contigs(contigs)
if calling_regions:
regions = regions.intersection(calling_regions)
partitioned = regions.partition(partition_size)
if num_shards:
return (r for i, r in enumerate(partitioned) if i % num_shards == task_id)
else:
return partitioned
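# Sharding illustration: with num_shards=3, task 0 receives partitions
# 0, 3, 6, ..., task 1 receives 1, 4, 7, ..., and task 2 receives 2, 5, 8, ...,
# so the union over all tasks covers every partition exactly once.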
def fetch_vcf_positions(vcf_path, contigs, calling_regions):
"""Fetches variants present in calling_regions.
Args:
vcf_path: Path to VCF from which to fetch positions.
contigs: Sequence of ContigInfo protos. Used to determine the initial ranges
to process (i.e., all bases of these contigs) and the order of returned
ranges.
calling_regions: A list of acceptable calling regions.
Returns:
Variant positions present in calling_regions.
"""
# Fetch the set of regions being queried.
regions = ranges.RangeSet.from_contigs(contigs)
if calling_regions:
regions = regions.intersection(calling_regions)
variant_positions = []
with vcf.VcfReader(vcf_path) as vcf_reader:
for region in regions:
for variant in vcf_reader.query(region):
variant_positions.append(variant_utils.variant_position(variant))
return variant_positions
def filter_regions_by_vcf(regions, variant_positions):
"""Filter a list of regions to only those that contain variants.
Args:
regions: a list of Range objects representing regions to filter on.
variant_positions: a list of Range objects containing the positions of
variants.
Returns:
filtered_regions: a list of Range objects, each of which appeared in the
input regions and contains at least one of the input variants.
"""
def dict_by_chromosome(list_of_ranges):
d = collections.defaultdict(list)
for r in list_of_ranges:
d[r.reference_name].append(r)
for c in d:
d[c] = sorted(d[c], key=lambda x: (x.start, x.end))
return d
region_dict = dict_by_chromosome(regions)
variant_dict = dict_by_chromosome(variant_positions)
filtered_regions = []
for c in region_dict:
ri = 0
vi = 0
if c not in variant_dict:
# Skip chromosomes with no variants.
continue
while ri < len(region_dict[c]) and vi < len(variant_dict[c]):
region = region_dict[c][ri]
variant = variant_dict[c][vi]
if variant.start >= region.start and variant.start < region.end:
# When the variant falls within the region, then keep the region.
filtered_regions.append(region)
# Move both indices because we're already keeping this region, and we
# don't need to see any more variants inside this same region.
ri += 1
vi += 1
elif region.start < variant.start:
# Move past this region since the next variant comes later.
ri += 1
else:
# Found another variant in the previous region we already included.
vi += 1
return filtered_regions
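# Worked example of the sweep above: with regions chr1:[0,10) and chr1:[10,20)
# and variants starting at positions 3 and 12, both regions are kept; if the
# only variant started at position 25, neither region would be kept because
# the region pointer advances past both without finding a contained variant.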
# ---------------------------------------------------------------------------
# Working with samples
# ---------------------------------------------------------------------------
@dataclasses.dataclass
class Sample(object):
"""Organizes sample-level properties.
options: A SampleOptions proto containing instructions for how to treat the
sample, most of which will be set from flags.
sam_readers: SamReader objects with handles on the `reads_filenames` from the
options.
in_memory_sam_reader: InMemorySamReader for this sample, which stores the
alignments for this sample that have been read into memory from the
sam_readers.
reads: A list of reads queried from the sam readers.
allele_counter: An allele counter object for the sample.
variant_caller: A variant caller for the sample, should be instantiated using
the options.variant_caller_options.
"""
options: deepvariant_pb2.SampleOptions
sam_readers: Optional[Sequence[sam.SamReader]] = None
in_memory_sam_reader: Optional[sam.InMemorySamReader] = None
reads: Optional[List[reads_pb2.Read]] = None
allele_counter: Optional[allelecounter.AlleleCounter] = None
variant_caller: Optional[vc_base.VariantCaller] = None
def __repr__(self):
return '<Sample {}>'.format(str(self.__dict__))
# ---------------------------------------------------------------------------
# Region processor
# ---------------------------------------------------------------------------
def read_confident_regions(options):
if options.confident_regions_filename:
return ranges.RangeSet.from_bed(options.confident_regions_filename)
else:
return None
def filter_candidates(candidates, select_variant_types):
"""Yields the candidate variants whose type is one of select_variant_types.
This function iterates through candidates and yield each candidate in order
if it satisfies any of the type constraints implied by select_variant_types.
For example, if select_variant_types = ['snps'] this function will yield
candidates that are bi-allelic SNPs only. Multiple select types are treated
as OR'd together, so ['snps', 'indels'] yields candidates that are bi-allelic
SNPs or indels.
Args:
candidates: Iterable of Variant protos. The candidates we want to select
from.
select_variant_types: List of str. The names of the variant type selectors
we want to use to keep/remove variants. Each string must be part of
VARIANT_TYPE_SELECTORS or an error will be raised.
Raises:
ValueError: if any str in select_variant_types isn't present in
VARIANT_TYPE_SELECTORS.
Yields:
Candidates in order.
"""
if not all(s in VARIANT_TYPE_SELECTORS for s in select_variant_types):
raise ValueError('Unexpected select variant type', select_variant_types)
for candidate in candidates:
v = candidate.variant
for select_type in select_variant_types:
selector = VARIANT_TYPE_SELECTORS[select_type]
if selector(v):
yield candidate
break
class RegionProcessor(object):
"""Creates DeepVariant example protos for a single region on the genome.
This class helps us to run the very sensitive caller, pileup image creator,
and variant labeler operations on a single region in parallel across many
regions using the PoolExecutor API. In order to do this we need separate three
key operations:
(1) Collect all of the info needed to create our resources (e.g., ref reader)
at construction. We cannot actually initialize those resources in the
constructor, though, since we actually want different resources in each
worker process/thread. I.e., we need lazy resource initialization.
(2) Actually initialize these resources *after* the worker has been forked
in our process pool. This gives us a fresh resource to use in each
separate process.
(3) Process the region to find candidate variants and process those into our
tf.Example protos.
"""
def __init__(self, options):
"""Creates a new RegionProcess.
Args:
options: deepvariant.MakeExamplesOptions proto used to specify our
resources for calling (e.g., reference_filename).
"""
self.options = options
self.samples = [Sample(options=x) for x in self.options.sample_options]
self.initialized = False
self.ref_reader = None
self.realigner = None
self.pic = None
self.labeler = None
self.population_vcf_readers = None
def _make_allele_counter_for_region(self, region, candidate_positions):
return allelecounter.AlleleCounter(self.ref_reader.c_reader, region,
candidate_positions,
self.options.allele_counter_options)
def _encode_tensor(self, image_tensor):
return image_tensor.tostring(), image_tensor.shape, 'raw'
def _make_sam_readers(
self, reads_filenames: Sequence[str],
downsample_fraction: float) -> Optional[List[sam.SamReader]]:
"""Creates a list of SamReaders, one from each filename.
Args:
reads_filenames: A list of string read filenames (e.g. for BAM/CRAM
files). The list may contain empty strings or None, which will be
skipped.
downsample_fraction: Fraction by which to downsample. This applies to each
file in reads_filenames separately.
Returns:
A list of sam readers with handles to the files. This may be shorter than
the input reads_filenames if any of the filenames were empty.
"""
logging_with_options(
self.options,
'Starting from v0.9.0, --use_ref_for_cram is default to true. '
'If you are using CRAM input, note that we will decode CRAM '
'using the reference you passed in with --ref')
readers = []
for reads_filename in reads_filenames:
if reads_filename:
readers.append(
sam.SamReader(
reads_filename,
ref_path=self.options.reference_filename
if self.options.use_ref_for_cram else None,
read_requirements=self.options.read_requirements,
parse_aux_fields=self.options.parse_sam_aux_fields,
hts_block_size=self.options.hts_block_size,
downsample_fraction=downsample_fraction,
random_seed=self.options.random_seed,
use_original_base_quality_scores=self.options
.use_original_quality_scores))
return readers
def _initialize(self):
"""Initialize the resources needed for this work in the current env."""
if self.initialized:
raise ValueError('Cannot initialize this object twice')
self.ref_reader = fasta.IndexedFastaReader(self.options.reference_filename)
for sample in self.samples:
sample.sam_readers = self._make_sam_readers(
reads_filenames=sample.options.reads_filenames,
downsample_fraction=sample.options.downsample_fraction)
sample.in_memory_sam_reader = sam.InMemorySamReader([])
sample.variant_caller = self._make_variant_caller_from_options(
sample.options.variant_caller_options)
if self.options.use_allele_frequency:
population_vcf_readers = allele_frequency.make_population_vcf_readers(
self.options.population_vcf_filenames)
self.population_vcf_readers = population_vcf_readers
if (self.options.realigner_enabled or
self.options.pic_options.alt_aligned_pileup != 'none' or
self.options.allele_counter_options.track_ref_reads):
main_sample = self.samples[self.options.main_sample_index]
input_bam_header = sam.SamReader(
main_sample.options.reads_filenames[0]).header
self.realigner = realigner.Realigner(
self.options.realigner_options,
self.ref_reader,
shared_header=input_bam_header)
self.pic = pileup_image.PileupImageCreator(
ref_reader=self.ref_reader,
options=self.options.pic_options,
samples=self.samples)
if in_training_mode(self.options):
self.labeler = self._make_labeler_from_options()
self.initialized = True
def initialize(self):
if not self.initialized:
self._initialize()
def _make_labeler_from_options(self):
"""Creates the labeler from options."""
truth_vcf_reader = vcf.VcfReader(
self.options.truth_variants_filename,
excluded_format_fields=['GL', 'GQ', 'PL'])
confident_regions = read_confident_regions(self.options)
if (self.options.variant_caller ==
deepvariant_pb2.MakeExamplesOptions.VCF_CANDIDATE_IMPORTER):
logging.info('For --variant_caller=vcf_candidate_importer, we '
'default the labeler_algorithm to positional_labeler.')
return positional_labeler.PositionalVariantLabeler(
truth_vcf_reader=truth_vcf_reader,
confident_regions=confident_regions)
if (self.options.labeler_algorithm ==
deepvariant_pb2.MakeExamplesOptions.POSITIONAL_LABELER):
return positional_labeler.PositionalVariantLabeler(
truth_vcf_reader=truth_vcf_reader,
confident_regions=confident_regions)
elif (self.options.labeler_algorithm ==
deepvariant_pb2.MakeExamplesOptions.HAPLOTYPE_LABELER):
return haplotype_labeler.HaplotypeLabeler(
truth_vcf_reader=truth_vcf_reader,
ref_reader=self.ref_reader,
confident_regions=confident_regions)
elif (self.options.labeler_algorithm ==
deepvariant_pb2.MakeExamplesOptions.CUSTOMIZED_CLASSES_LABELER):
if (not self.options.customized_classes_labeler_classes_list or
not self.options.customized_classes_labeler_info_field_name):
raise ValueError('For -labeler_algorithm=customized_classes_labeler, '
'you need to set '
'-customized_classes_labeler_classes_list and '
'-customized_classes_labeler_info_field_name.')
return customized_classes_labeler.CustomizedClassesVariantLabeler(
truth_vcf_reader=truth_vcf_reader,
confident_regions=confident_regions,
classes_list=self.options.customized_classes_labeler_classes_list,
info_field_name=self.options
.customized_classes_labeler_info_field_name)
else:
raise ValueError('Unexpected labeler_algorithm',
self.options.labeler_algorithm)
def _make_variant_caller_from_options(self, variant_caller_options):
"""Creates the variant_caller from options."""
if (self.options.variant_caller ==
deepvariant_pb2.MakeExamplesOptions.VCF_CANDIDATE_IMPORTER):
if in_training_mode(self.options):
candidates_vcf = self.options.truth_variants_filename
else:
candidates_vcf = self.options.proposed_variants_filename
return vcf_candidate_importer.VcfCandidateImporter(
variant_caller_options, candidates_vcf)
elif (self.options.variant_caller ==
deepvariant_pb2.MakeExamplesOptions.VERY_SENSITIVE_CALLER):
return very_sensitive_caller.VerySensitiveCaller(variant_caller_options)
else:
raise ValueError('Unexpected variant_caller', self.options.variant_caller)
def process(self, region):
"""Finds candidates and creates corresponding examples in a region.
Args:
region: A nucleus.genomics.v1.Range proto. Specifies the region on the
genome we should process.
Returns:
(candidates_by_sample, examples_by_sample, gvcfs_by_sample, runtimes)
1. candidates_by_sample: A dict keyed by sample role, each a list of
candidates found, which are deepvariant.DeepVariantCall objects.
2. examples_by_sample: A dict keyed by sample, each a list of filled
in tf.Example protos. For example, these will include the candidate
variant, the pileup image, and, if in training mode, the truth variants
and labels needed for training.
3. gvcfs_by_sample: A dict keyed by sample, each a list of
nucleus.genomics.v1.Variant protos containing gVCF information for all
reference sites, if gvcf generation is enabled, otherwise this value is
[].
4. runtimes: A dict of runtimes in seconds keyed by stage.
"""
region_timer = timer.TimerStart()
runtimes = {}
if not self.initialized:
self.initialize()
before_get_reads = time.time()
runtimes['num reads'] = 0
for sample in self.samples:
if sample.in_memory_sam_reader is not None:
reads = self.region_reads(
region=region,
sam_readers=sample.sam_readers,
reads_filenames=sample.options.reads_filenames)
runtimes['num reads'] += len(reads)
sample.in_memory_sam_reader.replace_reads(reads)
runtimes['get reads'] = trim_runtime(time.time() - before_get_reads)
before_find_candidates = time.time()
candidates_by_sample, gvcfs_by_sample = self.candidates_in_region(region)
examples_by_sample = {}
for sample in self.samples:
role = sample.options.role
if role not in candidates_by_sample:
continue
candidates = candidates_by_sample[role]
examples_by_sample[role] = []
if self.options.select_variant_types:
candidates = list(
filter_candidates(candidates, self.options.select_variant_types))
runtimes['find candidates'] = trim_runtime(time.time() -
before_find_candidates)
before_make_pileup_images = time.time()
# Get allele frequencies for candidates.
if self.options.use_allele_frequency:
candidates = list(
allele_frequency.add_allele_frequencies_to_candidates(
candidates=candidates,
population_vcf_reader=self.population_vcf_readers[
region.reference_name],
ref_reader=self.ref_reader))
if in_training_mode(self.options):
for candidate, label in self.label_candidates(candidates, region):
for example in self.create_pileup_examples(
candidate, sample_order=sample.options.order):
self.add_label_to_example(example, label)
examples_by_sample[role].append(example)
else:
for candidate in candidates:
for example in self.create_pileup_examples(
candidate, sample_order=sample.options.order):
examples_by_sample[role].append(example)
# After any filtering and other changes above, set candidates for sample.
candidates_by_sample[role] = candidates
logging.vlog(2, 'Found %s candidates in %s [%d bp] [%0.2fs elapsed]',
len(examples_by_sample[role]), ranges.to_literal(region),
ranges.length(region), region_timer.Stop())
runtimes['make pileup images'] = trim_runtime(time.time() -
before_make_pileup_images)
runtimes['num examples'] = sum(
[len(x) for x in examples_by_sample.values()])
runtimes['num candidates'] = sum(
[len(x) for x in candidates_by_sample.values()])
return candidates_by_sample, examples_by_sample, gvcfs_by_sample, runtimes
def region_reads(self, region, sam_readers, reads_filenames):
"""Gets read alignments overlapping the region and optionally realigns them.
If self.options.realigner_enabled is set, uses realigned reads, otherwise
original reads are returned.
Args:
region: A nucleus.genomics.v1.Range object specifying the region we want
to realign reads.
sam_readers: An iterable of sam.SamReader to query from.
reads_filenames: Filenames matching sam_readers. This is only used for
throwing more informative error messages.
Returns:
[genomics.deepvariant.core.genomics.Read], reads overlapping the region.
"""
if sam_readers is None:
return []
reads = []
for sam_reader_index, sam_reader in enumerate(sam_readers):
try:
reads.extend(sam_reader.query(region))
except ValueError as err:
error_message = str(err)
if error_message.startswith('Data loss:'):
raise ValueError(error_message + '\nFailed to parse BAM/CRAM file. '
'This is often caused by:\n'
'(1) When using a CRAM file, and setting '
'--use_ref_for_cram to false (which means you want '
'to use the embedded ref instead of a ref file), '
'this error could be because of inability to find '
'the embedded ref file.\n'
'(2) Your BAM/CRAM file could be corrupted. Please '
'check its md5.\n'
'If you cannot find out the reason why this error '
'is occurring, please report to '
'https://github.com/google/deepvariant/issues')
elif error_message.startswith('Not found: Unknown reference_name '):
raise ValueError('{}\nThe region {} does not exist in {}.'.format(
error_message, ranges.to_literal(region),
reads_filenames[sam_reader_index]))
else:
# By default, raise the ValueError as is for now.
raise err
if self.options.max_reads_per_partition > 0:
random_for_region = np.random.RandomState(self.options.random_seed)
reads = utils.reservoir_sample(reads,
self.options.max_reads_per_partition,
random_for_region)
reads = list(reads)
if self.options.realigner_enabled:
max_read_length_to_realign = 500
if max_read_length_to_realign > 0:
long_reads = [
read for read in reads
if len(read.aligned_sequence) > max_read_length_to_realign
]
short_reads = [
read for read in reads
if len(read.aligned_sequence) <= max_read_length_to_realign
]
_, realigned_short_reads = self.realigner.realign_reads(
short_reads, region)
# Long reads will be listed before short reads when both are present.
# Examples with only short or only long reads will be unaffected.
return long_reads + realigned_short_reads
_, reads = self.realigner.realign_reads(reads, region)
return reads
def candidates_in_region(
self, region
) -> Tuple[Dict[str, deepvariant_pb2.DeepVariantCall], Dict[
str, variants_pb2.Variant]]:
"""Finds candidates in the region using the designated variant caller.
Args:
region: A nucleus.genomics.v1.Range object specifying the region we want
to get candidates for.
Returns:
A 2-tuple of (candidates, gvcfs).
The first value, candidates, is a dict keyed by sample role, where each
item is a list of deepvariant_pb2.DeepVariantCall protos, in
coordinate order.
The second value, gvcfs, is a dict keyed by sample role, where
each item is a list of nucleus.genomics.v1.Variant protos containing gVCF
information for all reference sites, if gvcf generation is enabled,
otherwise the gvcfs value is [].
"""
for sample in self.samples:
sample.reads = sample.in_memory_sam_reader.query(region)
main_sample = self.samples[self.options.main_sample_index]
if not main_sample.reads and not gvcf_output_enabled(self.options):
# If we are generating gVCF output we cannot safely abort early here as
# we need to return the gVCF records calculated by the caller below.
return {}, {}
allele_counters = {}
for sample in self.samples:
if sample.options.reads_filenames:
# Calculate potential candidate positions from allele counts.
candidate_positions = []
if self.options.allele_counter_options.track_ref_reads:
candidate_positions = self.realigner.get_candidate_positions(
sample.reads, region)
sample.reads = sample.in_memory_sam_reader.query(region)
# Final allele counts calculation.
sample.allele_counter = self._make_allele_counter_for_region(
region, candidate_positions)
for read in sample.reads:
sample.allele_counter.add(read, sample.options.name)
allele_counters[sample.options.name] = sample.allele_counter
candidates = {}
gvcfs = {}
for sample in self.samples:
role = sample.options.role
if in_training_mode(
self.options) and self.options.sample_role_to_train != role:
continue
if not sample.options.reads_filenames:
continue
candidates[role], gvcfs[role] = sample.variant_caller.calls_and_gvcfs(
allele_counters=allele_counters,
target_sample=sample.options.name,
include_gvcfs=gvcf_output_enabled(self.options),
include_med_dp=self.options.include_med_dp)
return candidates, gvcfs
def align_to_all_haplotypes(self, variant, reads):
"""For each alternate allele, realign reads to it and get "ref" sequences.
For alt-aligned pileups, this realigns the reads to each of the alternate
haplotypes. It also outputs the sequence for each alternate allele, which
is also needed to build the pileup image.
Args:
variant: a nucleus.genomics.v1.Variant containing the alt alleles to align
against.
reads: a list of reads (nucleus.genomics.v1.Read) to be realigned around
the variant.
Returns:
dict of alignments keyed by haplotype, dict of window sequences keyed by
haplotype.
"""
window_width = self.pic.width
window_half_width = self.pic.half_width
alt_alleles = list(variant.alternate_bases)
contig = variant.reference_name
ref_start = variant.start
ref_bases = variant.reference_bases
ref_end = ref_start + len(ref_bases)
# Sanity check that the reference_bases in the variant match the reference.
ref_query_at_variant = self.realigner.ref_reader.query(
ranges.make_range(contig, ref_start, ref_end))
if ref_bases != ref_query_at_variant:
raise ValueError('Error: reference_bases property in variant ({})'
'does not match the bases in the reference ({}) at that '
'position.'.format(ref_bases, ref_query_at_variant))
# Margin must be equal to or more than half the window width.
# Some extra prefix/suffix can be added to anchor alignments, but currently
# we don't add extra.
margin = window_half_width
valid_end = min(
self.realigner.ref_reader.contig(contig).n_bases, ref_end + margin)
alignment_region = ranges.make_range(contig, max(ref_start - margin, 0),
valid_end)
trimmed_reads = [realigner.trim_read(r, alignment_region) for r in reads]
# Filter reads to a minimum read length of 15 bp after trimming.
reads = [r for r in trimmed_reads if len(r.aligned_sequence) >= 15]
prefix = self.realigner.ref_reader.query(
ranges.make_range(contig, max(ref_start - margin, 0), ref_start))
suffix = self.realigner.ref_reader.query(
ranges.make_range(contig, ref_end, valid_end))
alignments_by_haplotype = {}
sequences_by_haplotype = {}
for hap in alt_alleles:
# Align to each of the alt_alleles:
alignments_by_haplotype[hap] = self.realigner.align_to_haplotype(
this_haplotype=hap,
haplotypes=[hap],
prefix=prefix,
suffix=suffix,
reads=reads,
contig=contig,
ref_start=ref_start - len(prefix))
# Sequence of the alt haplotype in the window:
end_of_prefix = prefix[-window_half_width:]
beginning_of_suffix = suffix[:max(window_half_width + 1 - len(hap), 0)]
sequences_by_haplotype[hap] = end_of_prefix + hap + beginning_of_suffix
# Long haplotypes can extend past the window, so enforce the width here.
sequences_by_haplotype[hap] = sequences_by_haplotype[hap][0:window_width]
return {
'alt_alignments': alignments_by_haplotype,
'alt_sequences': sequences_by_haplotype
}
def create_pileup_examples(self, dv_call, sample_order=None):
"""Creates a tf.Example for DeepVariantCall.
This function calls PileupImageCreator.create_pileup_images on dv_call to
get raw image tensors for each alt_allele option (see docs for details).
These tensors are encoded as pngs, and all of the key information is encoded
as a tf.Example via a call to tf_utils.make_example.
Args:
dv_call: A DeepVariantCall.
sample_order: A list of indices representing the order in which samples
should be represented in the pileup image. Example: [1,0,2] to swap the
first and second samples. This is None by default which puts the
samples in order.
Returns:
A list of tf.Example protos.
"""
reads_for_samples = [
self.pic.get_reads(
dv_call.variant, sam_reader=sample.in_memory_sam_reader)
for sample in self.samples
]
logging.vlog(
3, 'create_pileup_examples for variant: {}:{}_{}'.format(
dv_call.variant.reference_name, dv_call.variant.start,
dv_call.variant.reference_bases))
# Decide whether each candidate needs ALT-alignment.
alt_align_this_variant = False
if self.options.pic_options.alt_aligned_pileup != 'none':
if self.options.pic_options.types_to_alt_align == 'indels':
alt_align_this_variant = variant_utils.is_indel(dv_call.variant)
else: # types_to_alt_align can only be 'all' or 'indels'.
alt_align_this_variant = True
haplotype_alignments_for_samples = None
haplotype_sequences = None
if alt_align_this_variant:
# Align the reads against each alternate allele, saving the sequences of
# those alleles along with the alignments for pileup images.
alt_info_for_samples = [
self.align_to_all_haplotypes(dv_call.variant, reads)
for reads in reads_for_samples
]
# Each sample has different reads and thus different alt-alignments.
haplotype_alignments_for_samples = [
sample['alt_alignments'] for sample in alt_info_for_samples
]
# All samples share the same alt sequences, so select the first one.
haplotype_sequences = alt_info_for_samples[0]['alt_sequences']
pileup_images = self.pic.create_pileup_images(
dv_call=dv_call,
reads_for_samples=reads_for_samples,
sample_order=sample_order,
haplotype_alignments_for_samples=haplotype_alignments_for_samples,
haplotype_sequences=haplotype_sequences)
if pileup_images is None:
# We cannot build a PileupImage for dv_call, issue a warning.
logging.warning('Could not create PileupImage for candidate at %s:%s',
dv_call.variant.reference_name, dv_call.variant.start)
return []
examples = []
for alt_alleles, image_tensor in pileup_images:
encoded_tensor, shape, tensor_format = self._encode_tensor(image_tensor)
examples.append(
tf_utils.make_example(
dv_call.variant,
alt_alleles,
encoded_tensor,
shape=shape,
image_format=tensor_format,
sequencing_type=self.options.pic_options.sequencing_type))
return examples
def label_candidates(self, candidates, region):
"""Gets label information for each candidate.
Args:
candidates: list[DeepVariantCalls]: The list of candidate variant calls we
want to label.
region: A nucleus.genomics.v1.Range object specifying the region we want
to get candidates for.
Yields:
Tuples of (candidate, label_variants.Label objects) for each candidate in
candidates that could be assigned a label. Candidates that couldn't be
labeled will not be returned.
"""
# Set BAM filename (used for training stats).
for candidate in candidates:
struct_utils.set_string_field(candidate.variant.info, 'BAM_FNAME',
self.options.bam_fname)
# Get our list of labels for each candidate variant.
labels = self.labeler.label_variants(
[candidate.variant for candidate in candidates], region)
# Remove any candidates we couldn't label, yielding candidate, label pairs.
for candidate, label in zip(candidates, labels):
if label.is_confident:
yield candidate, label
def add_label_to_example(self, example, label):
"""Adds label information about the assigned label to our example.
Args:
example: A tf.Example proto. We will write truth_variant and label into
this proto.
label: A variant_labeler.Label object containing the labeling information
to add to our example.
Returns:
The example proto with label fields added.
Raises:
ValueError: if label isn't confident.
"""
if not label.is_confident:
raise ValueError('Cannot add a non-confident label to an example',
example, label)
alt_alleles_indices = tf_utils.example_alt_alleles_indices(example)
tf_utils.example_set_variant(example, label.variant)
# Set the label of the example to the # alts given our alt_alleles_indices.
tf_utils.example_set_label(example,
label.label_for_alt_alleles(alt_alleles_indices))
return example
def processing_regions_from_options(options):
"""Computes the calling regions from our options.
This function does all of the work needed to read our input files and region
specifications to determine the list of regions we should generate examples
over. It also computes the confident regions needed to label variants.
Args:
options: deepvariant.MakeExamplesOptions proto containing information about
our input data sources.
Raises:
ValueError: if the regions to call is empty.
Returns:
Two values. The first is a list of nucleus.genomics.v1.Range protos of the
regions we should process. The second is a RangeSet containing the confident
regions for labeling, or None if we are running in training mode.
"""
ref_contigs = fasta.IndexedFastaReader(
options.reference_filename).header.contigs
# Add in confident regions and vcf_contigs if in training mode.
vcf_contigs = None
if in_training_mode(options):
vcf_contigs = vcf.VcfReader(options.truth_variants_filename).header.contigs
if all([x.n_bases == 0 for x in vcf_contigs]):
logging.info(
'%s header does not contain contig lengths. Will skip contig '
'consistency checking for this file.',
options.truth_variants_filename)
vcf_contigs = None
main_sample = options.sample_options[options.main_sample_index]
all_sam_contigs = [
sam.SamReader(reads_file).header.contigs
for reads_file in main_sample.reads_filenames
]
sam_contigs = common_contigs(only_true(*all_sam_contigs))
contigs = _ensure_consistent_contigs(ref_contigs, sam_contigs, vcf_contigs,
options.exclude_contigs,
options.min_shared_contigs_basepairs)
logging_with_options(options,
'Common contigs are %s' % [c.name for c in contigs])
calling_regions = build_calling_regions(ref_contigs, options.calling_regions,
options.exclude_calling_regions)
if not calling_regions:
raise ValueError('The regions to call is empty. Check your --regions and '
'--exclude_regions flags to make sure they are not '
'resulting in set of empty region to process. This also '
'happens if you use "chr20" for a BAM where contig names '
'don\'t have "chr"s (or vice versa).')
regions = regions_to_process(
contigs=contigs,
partition_size=options.allele_counter_options.partition_size,
calling_regions=calling_regions,
task_id=options.task_id,
num_shards=options.num_shards)
region_list = list(regions)
# When using VcfCandidateImporter, it is safe to skip regions without
# candidates as long as gVCF output is not needed. There is a tradeoff
# though because it takes time to read the VCF, which is only worth it if
# there are enough regions.
if options.proposed_variants_filename and not gvcf_output_enabled(options):
logging_with_options(
options, 'Reading VCF to skip processing some regions without '
'variants in the --proposed_variants VCF.')
before = time.time()
variant_positions = fetch_vcf_positions(options.proposed_variants_filename,
contigs, calling_regions)
filtered_regions = filter_regions_by_vcf(region_list, variant_positions)
time_elapsed = time.time() - before
logging_with_options(
options, 'Filtering regions took {} seconds and reduced the number of '
'regions to process from {} to {} regions containing variants '
'from the supplied VCF of proposed variants.'.format(
trim_runtime(time_elapsed), len(region_list),
len(filtered_regions)))
return filtered_regions
return region_list
class OutputsWriter(object):
"""Manages all of the outputs of make_examples in a single place."""
def __init__(self, options, suffix=None):
self._writers = {
k: None for k in ['candidates', 'examples', 'gvcfs', 'runtime']
}
self.examples_filename = None
if options.candidates_filename:
self._add_writer(
'candidates',
tfrecord.Writer(
self._add_suffix(options.candidates_filename, suffix)))
if options.examples_filename:
self.examples_filename = self._add_suffix(options.examples_filename,
suffix)
self._add_writer('examples', tfrecord.Writer(self.examples_filename))
if options.gvcf_filename:
self._add_writer(
'gvcfs',
tfrecord.Writer(self._add_suffix(options.gvcf_filename, suffix)))
if options.runtime_by_region:
self._add_writer('runtime',
tf.io.gfile.GFile(options.runtime_by_region, mode='w'))
writer = self._writers['runtime']
writer.__enter__()
writer.write('\t'.join(RUNTIME_BY_REGION_COLUMNS) + '\n')
def _add_suffix(self, file_path, suffix):
"""Adds suffix to file name if a suffix is given."""
if not suffix:
return file_path
file_dir, file_base = os.path.split(file_path)
file_split = file_base.split('.')
file_split[0] = f'{file_split[0]}_{suffix}'
new_file_base = ('.').join(file_split)
new_file = os.path.join(file_dir, new_file_base)
return new_file
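  # For example, with suffix='child' the path '/out/examples.tfrecord.gz' becomes
  # '/out/examples_child.tfrecord.gz': the suffix is attached to the first
  # dot-separated component of the basename (paths here are illustrative only).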
def write_examples(self, *examples):
self._write('examples', *examples)
def write_gvcfs(self, *gvcfs):
self._write('gvcfs', *gvcfs)
def write_candidates(self, *candidates):
self._write('candidates', *candidates)
def write_runtime(self, stats_dict):
columns = [str(stats_dict.get(k, 'NA')) for k in RUNTIME_BY_REGION_COLUMNS]
writer = self._writers['runtime']
writer.write('\t'.join(columns) + '\n')
def _add_writer(self, name, writer):
if name not in self._writers:
raise ValueError(
'Expected writer {} to have a None binding in writers.'.format(name))
if self._writers[name] is not None:
raise ValueError('Expected writer {} to be bound to None in writers but '
'saw {} instead'.format(name, self._writers[name]))
self._writers[name] = writer
def __enter__(self):
"""API function to support with syntax."""
for writer in self._writers.values():
if writer is not None:
writer.__enter__()
return self
def __exit__(self, exception_type, exception_value, traceback):
for writer in self._writers.values():
if writer is not None:
writer.__exit__(exception_type, exception_value, traceback)
def _write(self, writer_name, *protos):
writer = self._writers[writer_name]
if writer:
for proto in protos:
writer.write(proto)
def close_all(self):
for writer in self._writers.values():
if writer is not None:
writer.close()
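# A minimal usage sketch for OutputsWriter (the options object and data below are
# illustrative, not defined here): it is designed to be used as a context manager,
# and writers whose output filenames are unset stay None, so the corresponding
# write calls become no-ops.
#
#   with OutputsWriter(options, suffix='parent') as writer:
#     writer.write_candidates(*candidates)
#     writer.write_examples(*examples)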
def get_example_counts(examples, num_classes):
"""Returns a breakdown of examples by categories (label and type)."""
labels = {i: 0 for i in range(0, num_classes)}
types = {
tf_utils.EncodedVariantType.SNP: 0,
tf_utils.EncodedVariantType.INDEL: 0,
tf_utils.EncodedVariantType.UNKNOWN: 0
}
for example in examples:
example_label = tf_utils.example_label(example)
example_type = tf_utils.encoded_variant_type(
tf_utils.example_variant(example))
labels[example_label] += 1
types[example_type] += 1
return labels, types
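# For a three-class model this returns, e.g.,
#   ({0: 40, 1: 12, 2: 3},
#    {EncodedVariantType.SNP: 50, EncodedVariantType.INDEL: 5,
#     EncodedVariantType.UNKNOWN: 0})
# i.e. per-label counts followed by per-variant-type counts (numbers made up).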
def make_examples_runner(options):
"""Runs examples creation stage of deepvariant."""
resource_monitor = resources.ResourceMonitor().start()
before_initializing_inputs = time.time()
logging_with_options(options, 'Preparing inputs')
regions = processing_regions_from_options(options)
# Create a processor to create candidates and examples for each region.
region_processor = RegionProcessor(options)
region_processor.initialize()
if options.candidates_filename:
logging_with_options(
options, 'Writing candidates to %s' % options.candidates_filename)
if options.gvcf_filename:
logging_with_options(options,
'Writing gvcf records to %s' % options.gvcf_filename)
n_regions, n_candidates, n_examples = 0, 0, 0
# Ideally this would use dv_constants.NUM_CLASSES, which requires generalizing
# deepvariant_pb2.MakeExamplesStats to use an array for the class counts.
n_class_0, n_class_1, n_class_2 = 0, 0, 0
n_snps, n_indels = 0, 0
last_reported = 0
writers_dict = {}
if in_training_mode(options) or len(options.sample_options) == 1:
writers_dict[options.sample_role_to_train] = OutputsWriter(
options, suffix=None)
else:
for sample in region_processor.samples:
if sample.sam_readers is not None:
writers_dict[sample.options.role] = OutputsWriter(
options, suffix=sample.options.role)
logging_with_options(
options, 'Writing examples to %s' %
', '.join([writer.examples_filename for writer in writers_dict.values()]))
logging_with_options(
options, 'Overhead for preparing inputs: %d seconds' %
(time.time() - before_initializing_inputs))
running_timer = timer.TimerStart()
for region in regions:
(candidates_by_sample, examples_by_sample, gvcfs_by_sample,
runtimes) = region_processor.process(region)
for sample in candidates_by_sample:
candidates = candidates_by_sample[sample]
examples = examples_by_sample[sample]
gvcfs = gvcfs_by_sample[sample]
writer = writers_dict[sample]
n_candidates += len(candidates)
n_examples += len(examples)
n_regions += 1
if in_training_mode(options) and options.run_info_filename:
labels, types = get_example_counts(
examples, num_classes=dv_constants.NUM_CLASSES)
n_class_0 += labels[0]
n_class_1 += labels[1]
n_class_2 += labels[2]
n_snps += types[tf_utils.EncodedVariantType.SNP]
n_indels += types[tf_utils.EncodedVariantType.INDEL]
before_write_outputs = time.time()
writer.write_candidates(*candidates)
# If we have any gvcf records, write them out. This also serves to
# protect us from trying to write to the gvcfs output of writer when gvcf
# generation is turned off. In that case, gvcfs will always be empty and
# we'll never execute the write.
if gvcfs:
writer.write_gvcfs(*gvcfs)
writer.write_examples(*examples)
if options.runtime_by_region:
runtimes['write outputs'] = runtimes.get('write outputs', 0) + (
trim_runtime(time.time() - before_write_outputs))
runtimes['region'] = ranges.to_literal(region)
# Output timing for every N candidates.
if (int(n_candidates / options.logging_every_n_candidates) > last_reported
or n_regions == 1):
last_reported = int(n_candidates / options.logging_every_n_candidates)
logging_with_options(
options, '%s candidates (%s examples) [%0.2fs elapsed]' %
(n_candidates, n_examples, running_timer.Stop()))
running_timer = timer.TimerStart()
if options.runtime_by_region:
# Runtimes are for all samples, so write this only once.
writers_dict[options.sample_role_to_train].write_runtime(
stats_dict=runtimes)
for writer in writers_dict.values():
writer.close_all()
# Construct and then write out our MakeExamplesRunInfo proto.
if options.run_info_filename:
make_examples_stats = deepvariant_pb2.MakeExamplesStats(
num_examples=n_examples,
num_snps=n_snps,
num_indels=n_indels,
num_class_0=n_class_0,
num_class_1=n_class_1,
num_class_2=n_class_2)
run_info = deepvariant_pb2.MakeExamplesRunInfo(
options=options,
resource_metrics=resource_monitor.metrics(),
stats=make_examples_stats)
if in_training_mode(options):
if region_processor.labeler.metrics is not None:
run_info.labeling_metrics.CopyFrom(region_processor.labeler.metrics)
else:
logging.warning(
'Labeling metrics requested but the selected labeling '
'algorithm %s does not collect metrics; skipping.',
options.labeler_algorithm)
logging_with_options(
options,
'Writing MakeExamplesRunInfo to %s' % options.run_info_filename)
write_make_examples_run_info(run_info, path=options.run_info_filename)
logging_with_options(options, 'Found %s candidate variants' % n_candidates)
logging_with_options(options, 'Created %s examples' % n_examples)
|
import numpy as np
import torch
from torch import nn
from torch.autograd import Function
from Code.broyden import broyden
class Broyden_RootFind(Function):
""" Generic layer module that uses bad broyden's method to find the solution """
@staticmethod
def fval(func, q_next, *args):
q_past, q, u_past, u, u_next = args
return func(torch.cat((q_past, q, q_next, u_past, u, u_next), 1))
@staticmethod
def broyden_find_root(func, q_next0, tol, maxiter, *args):
g_func = lambda q_next: Broyden_RootFind.fval(func, q_next, *args)
results = broyden(g_func, q_next0, tol, maxiter)
return results
@staticmethod
def forward(ctx, func, q_next0, *args):
bsz, d_f = args[0].size()
root_find = Broyden_RootFind.broyden_find_root
ctx.args_len = len(args)
q_past, q, u_past, u, u_next = args[:-2]
tol = args[-2]*np.sqrt(bsz*d_f)
maxiter = args[-1]
with torch.no_grad():
guess = q_next0.clone().detach()
args = [q_past, q, u_past, u, u_next]
results = root_find(func, guess, tol, maxiter, *args)
return results
@staticmethod
def backward(ctx, grad_q_next):
grad_args = [None for _ in range(ctx.args_len)]
return (None, grad_q_next, *grad_args)
class BroydenLayer(Function):
"""
Call this line to apply this implicit layer
self.NewtonLayer.apply(self.func_copy, ...)
"""
@staticmethod
def forward(ctx, func_copy, q_next, B, *args):
q_past, q, u_past, u, u_next = args[:-2]
        # ctx.tol = tol
        ctx.B = B
        # ctx.maxiter = maxiter
        ctx.save_for_backward(q_next, q_past, q, u_past, u, u_next)
        ctx.func = func_copy
        # backward() must return one gradient per forward input after ctx:
        # func_copy and q_next are handled explicitly below, so the remaining
        # slots cover B plus every entry of *args.
        ctx.args_len = len(args) + 1
return q_next
@staticmethod
def backward(ctx, grad):
torch.cuda.empty_cache()
# grad should have dimension (bsz x d_f)
grad = grad.clone()
q_next, q_past, q, u_past, u, u_next = ctx.saved_tensors
bsz, d_f = q_next.size()
func = ctx.func
q_next = q_next.clone().detach()
q_past = q_past.clone().detach()
q = q.clone().detach()
u_past = u_past.clone().detach()
u = u.clone().detach()
u_next = u_next.clone().detach()
args = [q_past, q, u_past, u, u_next]
# with torch.enable_grad():
# y = Broyden_RootFind.fval(func, q_next, *args)
# def g(x):
# y.backward(x, retain_graph=True) # Retain for future calls to g
# JTx = q_next.grad.clone().detach()
# q_next.grad.zero_()
# return JTx + grad
# maxiter = ctx.maxiter
# tol = ctx.tol*np.sqrt(bsz * d_f)
#Initial Guess
# dl_df_est = torch.zeros_like(grad)
# result_info = broyden(g, dl_df_est, tol, maxiter)
# dl_df_est = result_info['result']
# y.backward(torch.zeros_like(dl_df_est), retain_graph=False)
grad_args = [None for _ in range(ctx.args_len)]
dl_df_est = - grad.view(bsz, 1, d_f) @ ctx.B
return (None, dl_df_est.view(bsz, d_f), *grad_args)
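# A rough usage sketch (variable names and the broyden() return format are taken
# from the commented-out code above; the actual caller may differ):
#
#   results = Broyden_RootFind.apply(func, q_next0,
#                                    q_past, q, u_past, u, u_next, tol, maxiter)
#   q_next_solved, B = results['result'], results['B']   # assumed keys
#   q_next = BroydenLayer.apply(func_copy, q_next_solved, B,
#                               q_past, q, u_past, u, u_next, tol, maxiter)
#
# BroydenLayer then short-circuits the backward pass: instead of solving the
# adjoint system (the commented-out exact version above), it returns -grad @ B,
# reusing the final inverse-Jacobian estimate B from the forward solve.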
|
"""
Hackerrank Hourglass Solution
"""
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the hourglassSum function below.
def hourglassSum(arr):
largest = -float("inf");pos=-1
for i in range(1, len(arr) -1) :
for j in range(1, len(arr[i])-1) :
sum = arr[i][j] + arr[i-1][j] + arr[i-1][j-1] + arr[i-1][j+1] + arr[i+1][j]+arr[i+1][j-1]+arr[i+1][j+1]
if sum > largest :
largest = sum
pos = (i, j)
return(largest)
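# Each "hourglass" covers seven cells of the grid:
#   a b c
#     d
#   e f g
# With a 6x6 input, i and j each range over 1..4, so hourglassSum scans the 16
# possible hourglasses and returns the largest of their sums.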
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr = []
for _ in range(6):
arr.append(list(map(int, input().rstrip().split())))
result = hourglassSum(arr)
fptr.write(str(result) + '\n')
fptr.close()
|
import unittest
from parameterized import parameterized as p
from solns.naryTreePreorderTraversal.naryTreePreorderTraversal import *
class UnitTest_NaryTreePreorderTraversal(unittest.TestCase):
@p.expand([
[[1,3,5,6,2,4]]
])
def test_naive(self,expected):
n5 = Node(5)
n6 = Node(6)
n3 = Node(3,[n5,n6])
n2 = Node(2)
n4 = Node(4)
n1 = Node(1,[n3,n2,n4])
self.assertEqual(Solution.naive(n1),expected)
|
import uuid
from datetime import date, datetime
from sqlalchemy import BigInteger, Boolean, Column, Date, DateTime, Enum, ForeignKey, Integer, String, Text, func
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
from app.database.dbo.baseclass import Base
from app.domain.models.Arkivkopi import ArkivkopiStatus
from app.domain.models.Arkivuttrekk import ArkivuttrekkStatus, ArkivuttrekkType
from app.domain.models.Invitasjon import InvitasjonStatus
from app.domain.models.Metadatafil import MetadataType
from app.domain.models.Overforingspakke import OverforingspakkeStatus
from app.domain.models.Depotinstitusjoner import DepotinstitusjonerEnum
class Metadatafil(Base):
"""The metadata file which contains the METS file which is used as a basis for the
archive. If we move away from METS we should change the ENUM field to support other file types."""
id: int = Column(Integer(), autoincrement=True, nullable=False, primary_key=True, unique=True)
type: MetadataType = Column(Enum('xml/mets', name='metadata_type_type', create_type=True), nullable=False, unique=False)
innhold: str = Column(Text(), nullable=False)
filnavn: str = Column(String(), nullable=False)
opprettet: datetime = Column(DateTime(), server_default=func.now(), nullable=False)
# Backrefs. These create virtual columns on the other side of the relation.
arkivuttrekk = relationship('Arkivuttrekk', backref='metadatafil')
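    # For example, given a Metadatafil instance m, m.arkivuttrekk is the list of
    # Arkivuttrekk rows referencing it (at most one here, since metadatafil_id is
    # unique), and each Arkivuttrekk row gets a .metadatafil attribute via the backref.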
class Arkivuttrekk(Base):
"""This is the class that represents an archive that is being processed in mottak."""
id: int = Column(Integer(), autoincrement=True, nullable=False, primary_key=True, unique=True)
obj_id: uuid.UUID = Column(UUID(as_uuid=True), nullable=False, index=True, unique=True)
status: ArkivuttrekkStatus = Column(
Enum(
'Opprettet',
'Under behandling',
'Avvist',
'Sendt til bevaring',
'Lastet ned',
name='arkivuttrekk_status_type',
create_type=True,
),
nullable=False,
index=True,
)
type: ArkivuttrekkType = Column(
Enum('Noark3', 'Noark5', 'Fagsystem', 'SIARD', name='arkivvuttrekk_type_type', create_type=True), nullable=False
)
tittel: str = Column(String(), nullable=False)
sjekksum_sha256: str = Column(String(length=64), nullable=False)
avgiver_navn: str = Column(String(), nullable=False)
avgiver_epost: str = Column(String(), nullable=False)
koordinator_epost: str = Column(String(), nullable=False)
metadatafil_id: int = Column(Integer(), ForeignKey('metadatafil.id'), nullable=False, unique=True)
arkiv_startdato: date = Column(Date, nullable=False)
arkiv_sluttdato: date = Column(Date, nullable=False)
storrelse: int = Column(BigInteger(), nullable=False)
avtalenummer: str = Column(String(), nullable=False)
opprettet: datetime = Column(DateTime(), server_default=func.now(), nullable=False)
endret: datetime = Column(DateTime(), server_default=func.now(), onupdate=func.current_timestamp(), nullable=False)
# depotinstitusjon is text field in DB
depotinstitusjon: DepotinstitusjonerEnum = Column(Enum(DepotinstitusjonerEnum), nullable=False)
# Backrefs. These create virtual columns on the other side of the relation.
invitasjoner = relationship('Invitasjon', backref='arkivuttrekk')
class ArkivuttrekkLokasjon(Base):
id: int = Column(Integer(), autoincrement=True, nullable=False, primary_key=True, unique=True)
overforingspakke_id: int = Column(Integer(), ForeignKey('overforingspakke.id'), nullable=False)
bucket: str = Column(String(), nullable=False)
opprettet: datetime = Column(DateTime(), server_default=func.now(), nullable=False)
endret: datetime = Column(DateTime(), server_default=func.now(), onupdate=func.current_timestamp(), nullable=False)
class Invitasjon(Base):
"""An invitation. When we send an invitation to upload we create such an object and connect it to an archive.
Not sure if we should create more than one if we send out several invitations.
Perhaps it should contain a reference to the actual invitation being sent.
"""
id: int = Column(Integer(), autoincrement=True, nullable=False, primary_key=True, unique=True)
ekstern_id: uuid.UUID = Column(UUID(as_uuid=True), nullable=False, index=True, unique=True)
arkivuttrekk_id: int = Column(Integer(), ForeignKey('arkivuttrekk.id'), nullable=False, unique=False)
avgiver_epost: str = Column(String(), nullable=False)
status: InvitasjonStatus = Column(Enum('Sendt', 'Feilet', name='invitasjon_status_type', create_type=True), nullable=False)
opprettet: datetime = Column(DateTime(), server_default=func.now(), nullable=False)
# Backrefs. These create virtual columns on the other side of the relation.
overforingspakker = relationship('Overforingspakke', backref='invitasjon')
arkivkopier = relationship('Arkivkopi', backref='invitasjon')
class Overforingspakke(Base):
"""When we accept an upload we create a 'overforingspakke' object that points to the object which
contains the tar file."""
id: int = Column(Integer(), autoincrement=True, nullable=False, primary_key=True, unique=True)
invitasjon_id: int = Column(Integer(), ForeignKey('invitasjon.id'), nullable=False, unique=True)
tusd_id: str = Column(String(length=60), nullable=False, unique=True, index=True)
tusd_objekt_navn: str = Column(String(), nullable=False)
storrelse: int = Column(BigInteger(), nullable=False)
status: OverforingspakkeStatus = Column(
Enum('Startet', 'OK', 'Avbrutt', 'Feilet', name='overforingspakke_status_type', create_type=True), nullable=False
)
opprettet: datetime = Column(DateTime(), server_default=func.now(), nullable=False)
endret: datetime = Column(DateTime(), server_default=func.now(), onupdate=func.current_timestamp(), nullable=False)
# Backrefs. These create virtual columns on the other side of the relation.
workflow_overforingspakker = relationship('WorkflowOverforingspakke', backref='overforingspakke')
class WorkflowOverforingspakke(Base):
"""Information about a started (Argo) workflow for an Overføringspakke."""
id: int = Column(Integer(), autoincrement=True, nullable=False, primary_key=True, unique=True)
overforingspakke_id: int = Column(Integer(), ForeignKey('overforingspakke.id'), nullable=False)
workflow_navn: str = Column(String(), nullable=False)
workflow_uid: uuid.UUID = Column(UUID(as_uuid=True), nullable=False, unique=True)
opprettet: datetime = Column(DateTime(), server_default=func.now(), nullable=False)
class Arkivkopi(Base):
"""A request to copy an archive to on-prem storage."""
id: int = Column(Integer(), autoincrement=True, nullable=False, primary_key=True, unique=True)
invitasjon_id: int = Column(Integer(), ForeignKey('invitasjon.id'), nullable=False, unique=False)
status: ArkivkopiStatus = Column(
Enum('Bestilt', 'Startet', 'OK', 'Feilet', name='arkivkopi_status_type', create_type=True), nullable=False
)
is_object: bool = Column(Boolean(), nullable=False)
target_name: str = Column(String(), nullable=False)
storage_account: str = Column(String(), nullable=False)
container: str = Column(String(), nullable=False)
sas_token_start: datetime = Column(DateTime(timezone=True), nullable=False)
sas_token_slutt: datetime = Column(DateTime(timezone=True), nullable=False)
opprettet: datetime = Column(DateTime(), server_default=func.now(), nullable=False)
endret: datetime = Column(DateTime(), server_default=func.now(), onupdate=func.current_timestamp(), nullable=False)
|
import numpy as np
import sys
import os
import bpy
def ShowMessageBox(message = "", title = "Message Box", icon = 'INFO'):
def draw(self, context):
self.layout.label(text=message)
bpy.context.window_manager.popup_menu(draw, title = title, icon = icon)
def is_module_available(module_name):
if sys.version_info < (3, 0):
# python 2
import importlib
torch_loader = importlib.find_loader(module_name)
elif sys.version_info <= (3, 3):
# python 3.0 to 3.3
import pkgutil
torch_loader = pkgutil.find_loader(module_name)
elif sys.version_info >= (3, 4):
# python 3.4 and above
import importlib
torch_loader = importlib.util.find_spec(module_name)
return torch_loader is not None
def ensure_site_packages(packages):
""" `packages`: list of tuples (<import name>, <pip name>) """
if not packages:
return
import site
import importlib
import importlib.util
user_site_packages = site.getusersitepackages()
os.makedirs(user_site_packages, exist_ok = True)
sys.path.append(user_site_packages)
modules_to_install = [module[1] for module in packages if not importlib.util.find_spec(module[0])]
if modules_to_install:
import subprocess
if bpy.app.version < (2,91,0):
python_binary = bpy.app.binary_path_python
else:
python_binary = sys.executable
subprocess.run([python_binary, '-m', 'ensurepip'], check=True)
subprocess.run([python_binary, '-m', 'pip', 'install', *modules_to_install, "--user"], check=True)
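# Example call (package names are illustrative): make sure torch and trimesh are
# importable from Blender's bundled Python, installing any missing ones into the
# user site-packages:
#
#   ensure_site_packages([("torch", "torch"), ("trimesh", "trimesh")])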
def read_verts(mesh):
vn = len(mesh.vertices)
    v = np.zeros((vn*3), dtype=float)  # np.float was removed from NumPy; plain float is equivalent
mesh.vertices.foreach_get("co", v)
return np.reshape(v, (vn, 3))
def read_faces(mesh):
fn = len(mesh.loop_triangles)
    f = np.zeros((fn*3), dtype=int)  # np.int was removed from NumPy; plain int is equivalent
mesh.loop_triangles.foreach_get("vertices", f)
return np.reshape(f, (fn, 3))
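# Typical usage inside Blender (object access is illustrative); loop triangles
# must be computed before read_faces() can see them:
#
#   mesh = bpy.context.active_object.data
#   mesh.calc_loop_triangles()
#   verts = read_verts(mesh)   # (n_vertices, 3) array of coordinates
#   faces = read_faces(mesh)   # (n_triangles, 3) array of vertex indices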
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
class ApimgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_apimgmt')
def test_apimgmt(self, resource_group):
self.kwargs.update({
'name': 'test1'
})
# create_or_update -- create
self.cmd('aro create --resource-group "rg1" --name "clustername1" --location "location1" --open-shift-version "v3.11"', checks=[
])
self.cmd('aro create --resource-group "rg1" --name "clustername1"', checks=[
])
# create_or_update -- update
self.cmd('aro update --resource-group "rg1" --name "clustername1" --location "location1" --open-shift-version "v3.11"', checks=[
])
self.cmd('aro update --resource-group "rg1" --name "clustername1"', checks=[
])
# delete -- delete
self.cmd('aro delete --resource-group "rg1" --name "clustername1"', checks=[
])
self.cmd('aro delete --resource-group "rg1" --name "clustername1"', checks=[
])
# list_by_resource_group -- list
self.cmd('aro list --resource-group "rg1"', checks=[
])
self.cmd('aro list --resource-group "rg1"', checks=[
])
# list -- list
self.cmd('aro list --resource-group "rg1"', checks=[
])
self.cmd('aro list --resource-group "rg1"', checks=[
])
# get -- show
self.cmd('aro show --resource-group "rg1" --name "clustername1"', checks=[
])
self.cmd('aro show --resource-group "rg1" --name "clustername1"', checks=[
])
|
#!/usr/bin/env python3
# Copyright IAIK TU Graz.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from argparse import ArgumentParser, FileType
from helpers import *
from CircuitGraph import CircuitGraph
from time import perf_counter, process_time
from Z3Checker import Z3Checker
from IndepChecker import IndepChecker
from sys import setrecursionlimit
from multiprocessing import Pool
from logger import logger
setrecursionlimit(10000)
def check_file(file_name, file_ending, file_type):
if not file_name.endswith(file_ending):
print('ERR: specified ' + str(file_type) + ' ' + str(file_name) + ' does not have ' + str(file_ending) + ' ending')
exit()
return
def verify_circuit(circuit_file, labeling, order, mode='transient', log='tmp/report.txt'):
secrets = ', '.join(
[var for k in labeling for var in labeling[k] if 's_' in var])
labels = labeling
time_start_abs = perf_counter()
time_start_rel = process_time()
circuit = CircuitGraph(labels, json_file=circuit_file)
checker = Z3Checker(circuit.get_graph(), labels, order, mode)
logger.info('Checking secrets: {}...'.format(secrets))
check_res, gates = checker.check()
time_end_abs = perf_counter()
time_end_rel = process_time()
rel_time = time_end_rel - time_start_rel
m_rel, s_rel = divmod(rel_time, 60)
h_rel, m_rel = divmod(m_rel, 60)
logger.info('... secrets {} are checked in {}h{}m{}s'.format(
secrets, int(h_rel), int(m_rel), round(s_rel, 2)))
logger.info('Result ({}): {}, {}'.format(secrets, check_res, gates))
return (check_res, gates)
if __name__ == '__main__':
parser = ArgumentParser(prog='Rebecca',
description=' A tool for checking if a given netlist is side-channel analysis resistant',
epilog='Questions and suggestions can be sent to the email <email>')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.9.2')
parser.add_argument('-p', '--parse-verilog', nargs=2,
metavar=('<netlist>', '<top module>'),
help='parse verilog file and generate labeling template')
parser.add_argument('-o', '--optimized', action='store_true',
help='run verification in parallel')
parser.add_argument('-c', '--check', nargs=4, metavar=('<netlist>', '<order>', '<labeling>', '<mode>'),
help='check if a parsed netlist <netlist> is <order>-order secure with the <labeling> as initial labeling; mode = s (stable) | t (transient)')
parser.add_argument('-i', '--independence-check', nargs=3, metavar=('<netlist>', '<order>', '<labeling>'),
help='check if a parsed netlist <netlist> is <order>-order independent with the <labeling> as initial labeling')
args = vars(parser.parse_args())
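    # Typical invocations (script and file names are placeholders):
    #   python rebecca.py -p design.v top                 # parse a netlist, emit a labeling template
    #   python rebecca.py -c design.json 2 labels.txt t   # 2nd-order check in transient mode
    #   python rebecca.py -i design.json 2 labels.txt     # 2nd-order independence check
    # Adding -o splits the labeling and verifies the parts in a process pool.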
if args['parse_verilog']:
netlist = args['parse_verilog'][0]
check_file(netlist, '.v', 'netlist')
parse_verilog(netlist, args['parse_verilog'][1])
if args['independence_check']:
labeling = args['independence_check'][2]
check_file(labeling, '.txt', 'labeling')
shares = get_shares(labeling)
labels = generate_labeling(labeling)[0]
netlist = args['independence_check'][0]
check_file(netlist, '.json', 'parsed netlist')
circuit = CircuitGraph(labels, json_file=netlist)
if is_int(args['independence_check'][1]):
order = int(args['independence_check'][1])
else:
print('ERR: order should be int')
exit()
outputs = circuit.get_outputs()
checker = IndepChecker(circuit.get_graph(), labels, order, shares, outputs)
print(checker.check())
if args['check']:
labels = []
netlist = args['check'][0]
check_file(netlist, '.json', 'parsed netlist')
labeling = args['check'][2]
check_file(labeling, '.txt', 'labeling')
if args['optimized']:
labels = generate_optimized_labeling(labeling)
else:
labels = generate_labeling(labeling)
if is_int(args['check'][1]):
order = int(args['check'][1])
else:
print('ERR: order should be int')
exit()
if args['check'][3] == 't':
mode = 'transient'
elif args['check'][3] == 's':
mode = 'stable'
else:
print('ERR: mode should be either s or t')
exit()
logger.info('Verifying {} for {} order in {} mode'.format(
args['check'][0], order, mode))
for l in labels:
logger.info('Initial labeling:\n{}'.format(get_pretty_labeling(
l, labeling)))
pool_len = len(labels) if len(labels) <= 10 else 10
with Pool(pool_len) as p:
res = p.starmap(verify_circuit,
[(netlist, l, order, mode) for l in labels])
for r in res:
if not r[0]:
print(r)
exit()
print((True, []))
|
# Value Function Iteration with IID Income
# Greg Kaplan 2017
# Translated by Tom Sweeney Dec 2020
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
# PARAMETERS
## preferences
risk_aver = 2
beta = 0.95
## returns
r = 0.03
R = 1+r
## income
y = 1
## asset grids
na = 1000
amax = 20
borrow_lim = 0
agrid_par = 1 # 1 for linear, 0 for L-shaped
## computation
max_iter = 1000
tol_iter = 1.0e-6
Nsim = 100
Tsim = 500
# OPTIONS
Display = 1
DoSimulate = 1
MakePlots = 1
# SET UP GRIDS
## assets
agrid = np.linspace(0,1,na)
agrid = agrid**(1/agrid_par)
agrid = borrow_lim + (amax-borrow_lim)*agrid
# DRAW RANDOM NUMBERS
np.random.seed(2020)
arand = np.random.rand(Nsim)
# UTILITY FUNCTION
if risk_aver==1:
u = lambda c: np.log(c)
else:
u = lambda c: (c**(1-risk_aver)-1)/(1-risk_aver)
u1 = lambda c: c**(-risk_aver)
# INITIALIZE VALUE FUNCTION
Vguess = u(r*agrid+y)/(1-beta)
# ITERATE ON VALUE FUNCTION
V = Vguess.copy()
Vdiff = 1
Iter = 0
while Iter<=max_iter and Vdiff>tol_iter:
Iter = Iter + 1
Vlast = V.copy()
V = np.zeros(na)
sav = np.zeros(na)
savind = np.zeros(na, dtype=int)
con = np.zeros(na)
## loop over assets
for ia in range(0,na):
cash = R*agrid[ia] + y
Vchoice = u(np.maximum(cash-agrid,1.0e-10)) + beta*Vlast
V[ia] = np.max(Vchoice)
savind[ia] = np.argmax(Vchoice)
sav[ia] = agrid[savind[ia]]
con[ia] = cash - sav[ia]
Vdiff = np.max(abs(V-Vlast))
if Display >= 1:
print('Iteration no. ' + str(Iter), ' max val fn diff is ' + str(Vdiff))
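# Each pass of the loop above applies the Bellman operator on the asset grid,
#   V_new(a) = max over a' in agrid of  u(R*a + y - a') + beta * V_old(a'),
# and iteration stops once max_a |V_new(a) - V_old(a)| drops below tol_iter,
# i.e. once the value function has numerically converged to its fixed point.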
# SIMULATE
if DoSimulate==1:
yindsim = np.zeros((Nsim,Tsim), dtype=int)
aindsim = np.zeros((Nsim,Tsim), dtype=int)
## initial assets: uniform on [borrow_lim, amax]
ainitial = borrow_lim + arand*(amax-borrow_lim)
## allocate to nearest point on agrid
    aindsim[:,0] = interp1d(agrid, range(0, na), 'nearest')(ainitial)  # 0-based indices into agrid/savind
## loop over time periods
for it in range(0,Tsim):
if Display >= 1 and (it+1)%100 == 0:
print(' Simulating, time period ' + str(it+1))
## asset choice
if it < Tsim-1:
aindsim[:,it+1] = savind[aindsim[:,it]]
## assign actual asset and income values
asim = agrid[aindsim]
csim = R*asim[:,0:Tsim-1] + y - asim[:,1:Tsim]
# MAKE PLOTS
if MakePlots==1:
## consumption policy function
plt.plot(agrid,con,'b-',linewidth=1)
plt.grid()
plt.xlim((0,amax))
plt.title('Consumption Policy Function')
plt.show()
## savings policy function
plt.plot(agrid,sav-agrid,'b-',linewidth=1)
plt.plot(agrid,np.zeros(na),'k',linewidth=0.5)
plt.grid()
plt.xlim((0,amax))
plt.title('Savings Policy Function (a\'-a)')
plt.show()
## nice zoom
xlimits = (0,1)
xlimind = np.ones(na, dtype=bool)
if np.min(agrid) < xlimits[0]:
xlimind = np.logical_and(xlimind,(agrid>=np.max(agrid[agrid<xlimits[0]])))
elif np.min(agrid) > xlimits[1]:
xlimind = 0
if np.max(agrid) > xlimits[1]:
xlimind = np.logical_and(xlimind,(agrid<=np.min(agrid[agrid>xlimits[1]])))
elif np.max(agrid) < xlimits[0]:
xlimind = 0
## consumption policy function: zoomed in
plt.plot(agrid[xlimind],con[xlimind],'b-o',linewidth=2)
plt.grid()
plt.xlim(xlimits)
plt.title('Consumption: Zoomed')
plt.show()
## savings policy function: zoomed in
plt.plot(agrid[xlimind],sav[xlimind]-agrid[xlimind],'b-o',linewidth=2)
plt.plot(agrid,np.zeros((na,1)),'k',linewidth =0.5)
plt.grid()
plt.xlim(xlimits)
plt.title('Savings: Zoomed (a\'-a)')
plt.show()
## asset dynamics distribution
plt.plot(range(0,Tsim),asim.T)
plt.title('Asset Dynamics')
plt.grid()
plt.show()
## consumption dynamics distribution
plt.plot(range(0,Tsim-1),csim.T)
plt.title('Consumption Dynamics')
plt.grid()
plt.show()
|
from colorama import Fore
from rtxlib import info, error
from rtxlib.execution import experimentFunction
def start_sequential_strategy(wf):
""" executes all experiments from the definition file """
info("> ExecStrategy | Sequential", Fore.CYAN)
wf.totalExperiments = len(wf.execution_strategy["knobs"])
for kn in wf.execution_strategy["knobs"]:
experimentFunction(wf, {
"knobs":kn,
"ignore_first_n_results": wf.execution_strategy["ignore_first_n_results"],
"sample_size": wf.execution_strategy["sample_size"],
})
|
#
# PySNMP MIB module Unisphere-Data-ADDRESS-POOL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Unisphere-Data-ADDRESS-POOL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:23:06 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Integer32, ModuleIdentity, IpAddress, Counter32, Bits, NotificationType, Counter64, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Unsigned32, iso, MibIdentifier, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "ModuleIdentity", "IpAddress", "Counter32", "Bits", "NotificationType", "Counter64", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Unsigned32", "iso", "MibIdentifier", "TimeTicks")
RowStatus, DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention", "TruthValue")
usDataMibs, = mibBuilder.importSymbols("Unisphere-Data-MIBs", "usDataMibs")
usdRouterName, = mibBuilder.importSymbols("Unisphere-Data-ROUTER-MIB", "usdRouterName")
usdAddressPoolMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21))
usdAddressPoolMIB.setRevisions(('2002-05-06 18:38', '2001-05-02 11:57', '2001-04-27 15:00', '1999-06-01 00:00',))
if mibBuilder.loadTexts: usdAddressPoolMIB.setLastUpdated('200205061838Z')
if mibBuilder.loadTexts: usdAddressPoolMIB.setOrganization('Unisphere Networks, Inc.')
usdAddressPoolObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1))
usdAddressPool = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1))
usdAddressPoolTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1), )
if mibBuilder.loadTexts: usdAddressPoolTable.setStatus('current')
usdAddressPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1), ).setIndexNames((0, "Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolIndex"))
if mibBuilder.loadTexts: usdAddressPoolEntry.setStatus('current')
usdAddressPoolIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: usdAddressPoolIndex.setStatus('current')
usdAddressPoolRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdAddressPoolRowStatus.setStatus('current')
usdAddressPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdAddressPoolName.setStatus('current')
usdAddressPoolStart = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1, 4), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdAddressPoolStart.setStatus('deprecated')
usdAddressPoolEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1, 5), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdAddressPoolEnd.setStatus('deprecated')
usdAddressPoolSize = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdAddressPoolSize.setStatus('deprecated')
usdAddressPoolInUse = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdAddressPoolInUse.setStatus('deprecated')
usdAddressPoolHighUtilThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)).clone(85)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdAddressPoolHighUtilThreshold.setStatus('current')
usdAddressPoolAbatedUtilThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)).clone(75)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdAddressPoolAbatedUtilThreshold.setStatus('current')
usdAddressPoolUtilPct = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdAddressPoolUtilPct.setStatus('current')
usdAddressPoolTrapEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 1, 1, 11), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdAddressPoolTrapEnable.setStatus('current')
usdAddressPoolProfileTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 3), )
if mibBuilder.loadTexts: usdAddressPoolProfileTable.setStatus('current')
usdAddressPoolProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 3, 1), ).setIndexNames((0, "Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolIndex"), (0, "Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolProfileIndex"))
if mibBuilder.loadTexts: usdAddressPoolProfileEntry.setStatus('current')
usdAddressPoolProfileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: usdAddressPoolProfileIndex.setStatus('current')
usdAddressPoolProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 3, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdAddressPoolProfileRowStatus.setStatus('current')
usdAddressPoolProfileStart = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 3, 1, 3), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdAddressPoolProfileStart.setStatus('current')
usdAddressPoolProfileEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 3, 1, 4), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdAddressPoolProfileEnd.setStatus('current')
usdAddressPoolProfileSize = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdAddressPoolProfileSize.setStatus('current')
usdAddressPoolProfileInUse = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 3, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdAddressPoolProfileInUse.setStatus('current')
usdAddressPoolNextPoolIndex = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdAddressPoolNextPoolIndex.setStatus('current')
usdAddressPoolTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 3))
usdAddressPoolTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 3, 0))
usdAddressPoolHighAddrUtil = NotificationType((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 3, 0, 1)).setObjects(("Unisphere-Data-ROUTER-MIB", "usdRouterName"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolName"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolSize"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolInUse"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolUtilPct"))
if mibBuilder.loadTexts: usdAddressPoolHighAddrUtil.setStatus('current')
usdAddressPoolAbatedAddrUtil = NotificationType((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 3, 0, 2)).setObjects(("Unisphere-Data-ROUTER-MIB", "usdRouterName"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolName"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolSize"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolInUse"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolUtilPct"))
if mibBuilder.loadTexts: usdAddressPoolAbatedAddrUtil.setStatus('current')
usdAddressPoolNoAddresses = NotificationType((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 3, 0, 3)).setObjects(("Unisphere-Data-ROUTER-MIB", "usdRouterName"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolName"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolSize"))
if mibBuilder.loadTexts: usdAddressPoolNoAddresses.setStatus('current')
usdAddressPoolMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 4))
usdAddressPoolMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 4, 1))
usdAddressPoolMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 4, 2))
usdAddressPoolCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 4, 1, 1)).setObjects(("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdAddressPoolCompliance = usdAddressPoolCompliance.setStatus('obsolete')
usdAddressPoolCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 4, 1, 2)).setObjects(("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolGroup2"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolTrapGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdAddressPoolCompliance2 = usdAddressPoolCompliance2.setStatus('obsolete')
usdAddressPoolCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 4, 1, 3)).setObjects(("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolGroup3"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolTrapGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdAddressPoolCompliance3 = usdAddressPoolCompliance3.setStatus('current')
usdAddressPoolGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 4, 2, 1)).setObjects(("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolRowStatus"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolName"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolStart"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolEnd"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolSize"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolInUse"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolNextPoolIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdAddressPoolGroup = usdAddressPoolGroup.setStatus('obsolete')
usdAddressPoolGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 4, 2, 2)).setObjects(("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolRowStatus"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolName"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolStart"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolEnd"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolSize"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolInUse"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolNextPoolIndex"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolHighUtilThreshold"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolAbatedUtilThreshold"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolUtilPct"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolTrapEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdAddressPoolGroup2 = usdAddressPoolGroup2.setStatus('deprecated')
usdAddressPoolTrapGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 4, 2, 3)).setObjects(("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolHighAddrUtil"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolAbatedAddrUtil"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolNoAddresses"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdAddressPoolTrapGroup = usdAddressPoolTrapGroup.setStatus('current')
usdAddressPoolGroup3 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 21, 4, 2, 4)).setObjects(("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolRowStatus"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolName"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolNextPoolIndex"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolHighUtilThreshold"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolAbatedUtilThreshold"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolUtilPct"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolTrapEnable"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolProfileRowStatus"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolProfileStart"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolProfileEnd"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolProfileSize"), ("Unisphere-Data-ADDRESS-POOL-MIB", "usdAddressPoolProfileInUse"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdAddressPoolGroup3 = usdAddressPoolGroup3.setStatus('current')
mibBuilder.exportSymbols("Unisphere-Data-ADDRESS-POOL-MIB", PYSNMP_MODULE_ID=usdAddressPoolMIB, usdAddressPoolRowStatus=usdAddressPoolRowStatus, usdAddressPoolCompliance=usdAddressPoolCompliance, usdAddressPoolGroup3=usdAddressPoolGroup3, usdAddressPoolProfileStart=usdAddressPoolProfileStart, usdAddressPoolObjects=usdAddressPoolObjects, usdAddressPoolEnd=usdAddressPoolEnd, usdAddressPoolTrapEnable=usdAddressPoolTrapEnable, usdAddressPoolProfileTable=usdAddressPoolProfileTable, usdAddressPool=usdAddressPool, usdAddressPoolEntry=usdAddressPoolEntry, usdAddressPoolMIB=usdAddressPoolMIB, usdAddressPoolGroup=usdAddressPoolGroup, usdAddressPoolUtilPct=usdAddressPoolUtilPct, usdAddressPoolStart=usdAddressPoolStart, usdAddressPoolName=usdAddressPoolName, usdAddressPoolProfileEnd=usdAddressPoolProfileEnd, usdAddressPoolProfileRowStatus=usdAddressPoolProfileRowStatus, usdAddressPoolMIBGroups=usdAddressPoolMIBGroups, usdAddressPoolTable=usdAddressPoolTable, usdAddressPoolMIBCompliances=usdAddressPoolMIBCompliances, usdAddressPoolAbatedAddrUtil=usdAddressPoolAbatedAddrUtil, usdAddressPoolSize=usdAddressPoolSize, usdAddressPoolTrapGroup=usdAddressPoolTrapGroup, usdAddressPoolHighAddrUtil=usdAddressPoolHighAddrUtil, usdAddressPoolProfileSize=usdAddressPoolProfileSize, usdAddressPoolNextPoolIndex=usdAddressPoolNextPoolIndex, usdAddressPoolProfileInUse=usdAddressPoolProfileInUse, usdAddressPoolIndex=usdAddressPoolIndex, usdAddressPoolMIBConformance=usdAddressPoolMIBConformance, usdAddressPoolGroup2=usdAddressPoolGroup2, usdAddressPoolAbatedUtilThreshold=usdAddressPoolAbatedUtilThreshold, usdAddressPoolHighUtilThreshold=usdAddressPoolHighUtilThreshold, usdAddressPoolTrapPrefix=usdAddressPoolTrapPrefix, usdAddressPoolCompliance3=usdAddressPoolCompliance3, usdAddressPoolInUse=usdAddressPoolInUse, usdAddressPoolCompliance2=usdAddressPoolCompliance2, usdAddressPoolNoAddresses=usdAddressPoolNoAddresses, usdAddressPoolTraps=usdAddressPoolTraps, usdAddressPoolProfileEntry=usdAddressPoolProfileEntry, usdAddressPoolProfileIndex=usdAddressPoolProfileIndex)
|
try:
from setuptools import find_packages, setup
except ImportError:
from distutils.core import find_packages, setup
import sys
from castle.version import VERSION
install_requires = ['requests>=2.5']
test_require = ['responses']
if sys.version_info[:2] == (3, 4):
test_require = ['responses<0.10.16']
setup(
name="castle",
version=VERSION,
author="Castle Intelligence, Inc.",
author_email="info@castle.io",
license="MIT License",
description="Castle protects your users from account compromise",
long_description=open("README.rst").read(),
url="https://github.com/castle/castle-python",
packages=find_packages(),
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
install_requires=install_requires,
tests_require=test_require,
test_suite='castle.test.all'
)
|
class Issue(object):
"""
Abstract class for issues.
"""
code = ''
description = ''
def __init__(self, lineno, col, parameters=None):
self.parameters = {} if parameters is None else parameters
self.col = col
self.lineno = lineno
@property
def message(self):
"""
Return issue message.
"""
message = self.description.format(**self.parameters)
return '{code} {message}'.format(code=self.code, message=message)
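# A minimal sketch of a concrete issue (the code, description and positions are
# made up for illustration):
#
#   class UnusedImport(Issue):
#       code = 'X100'
#       description = "'{name}' imported but unused"
#
#   UnusedImport(lineno=3, col=0, parameters={'name': 'os'}).message
#   # -> "X100 'os' imported but unused"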
|
#!/usr/bin/env python3
# Usage:
# PYTHONPATH=src ./train --dataset <file|directory|glob>
import argparse
import json
import os
import numpy as np
import tensorflow as tf
import time
import tqdm
from tensorflow.core.protobuf import rewriter_config_pb2
import model, sample, encoder
from load_dataset import load_dataset, Sampler
from accumulate import AccumulatingOptimizer
import memory_saving_gradients
CHECKPOINT_DIR = 'checkpoint'
SAMPLE_DIR = 'samples'
def maketree(path):
try:
os.makedirs(path)
except:
pass
def randomize(context, hparams, p):
if p > 0:
mask = tf.random.uniform(shape=tf.shape(context)) < p
noise = tf.random.uniform(shape=tf.shape(context), minval=0, maxval=hparams.n_vocab, dtype=tf.int32)
return tf.where(mask, noise, context)
else:
return context
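# randomize() is a light data-augmentation / regularization step: with probability
# p each context token is replaced by a uniformly random vocabulary id. With the
# args.noise = 0.0 setting used below it is effectively a pass-through.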
def generate_samples(data_sampler, generate_from, args, sess, tf_sample, context, enc, counter):
print('Generating samples...')
context_tokens = data_sampler.sample(1)
if generate_from is not None:
context_tokens = enc.encode(generate_from)
all_text = []
index = 0
while index < args.sample_num:
out = sess.run(
tf_sample,
feed_dict={context: args.batch_size * [context_tokens]})
for i in range(min(args.sample_num - index, args.batch_size)):
text = enc.decode(out[i])
text = '======== SAMPLE {} ========\n{}\n'.format(
index + 1, text)
#28 is the SAMPLE.. header
text = text[28+len(generate_from):]
all_text.append(text)
index += 1
print(text)
maketree(os.path.join(SAMPLE_DIR, args.run_name))
with open(
os.path.join(SAMPLE_DIR, args.run_name,
'samples-{}').format(counter), 'w') as fp:
fp.write('\n'.join(all_text))
return all_text[0]
class Object(object):
pass
print("initializing train2")
args = Object()
args.model_name = "117M"
args.restore_from = "../talkingdonkeys/layers/gpt2-models/lyrics"
args.optimizer = "adam"
args.batch_size = 1
args.noise = 0.0
args.sample_length = 200
args.top_k = 40
args.top_p = 0.0
args.only_train_transformer_layers = False
args.learning_rate = 0.00002
args.accumulate_gradients = 1
args.memory_saving_gradients = False
args.dataset = "texts.npz"
args.combine = 50000
args.run_name = "run1"
args.sample_num = 1
enc = encoder.get_encoder(args.model_name)
hparams = model.default_hparams()
generate_from = "hello world"
with open(os.path.join('models', args.model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if args.model_name == '345M':
args.memory_saving_gradients = True
if args.optimizer == 'adam':
args.only_train_transformer_layers = True
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.graph_options.rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.OFF
sess = tf.Session(config=config)
def bootstrap(sess, args, hparams, enc, generate_from):
context = tf.placeholder(tf.int32, [args.batch_size, None])
context_in = randomize(context, hparams, args.noise)
output = model.model(hparams=hparams, X=context_in)
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=context[:, 1:], logits=output['logits'][:, :-1]))
tf_sample = sample.sample_sequence(
hparams=hparams,
length=args.sample_length,
context=context,
batch_size=args.batch_size,
temperature=1.0,
top_k=args.top_k,
top_p=args.top_p)
all_vars = [v for v in tf.trainable_variables() if 'model' in v.name]
train_vars = [v for v in all_vars if '/h' in v.name] if args.only_train_transformer_layers else all_vars
if args.optimizer == 'adam':
opt = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
elif args.optimizer == 'sgd':
opt = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
else:
        exit('Bad optimizer: ' + args.optimizer)
if args.accumulate_gradients > 1:
if args.memory_saving_gradients:
exit("Memory saving gradients are not implemented for gradient accumulation yet.")
opt = AccumulatingOptimizer(
opt=opt,
var_list=train_vars)
opt_reset = opt.reset()
opt_compute = opt.compute_gradients(loss)
opt_apply = opt.apply_gradients()
summary_loss = tf.summary.scalar('loss', opt_apply)
else:
if args.memory_saving_gradients:
opt_grads = memory_saving_gradients.gradients(loss, train_vars)
else:
opt_grads = tf.gradients(loss, train_vars)
opt_grads = list(zip(opt_grads, train_vars))
opt_apply = opt.apply_gradients(opt_grads)
summary_loss = tf.summary.scalar('loss', loss)
summary_lr = tf.summary.scalar('learning_rate', args.learning_rate)
summaries = tf.summary.merge([summary_lr, summary_loss])
saver = tf.train.Saver(
var_list=all_vars,
max_to_keep=5,
keep_checkpoint_every_n_hours=2)
sess.run(tf.global_variables_initializer())
ckpt = tf.train.latest_checkpoint(args.restore_from)
print('Loading checkpoint', ckpt)
saver.restore(sess, ckpt)
print('Loading dataset...')
chunks = load_dataset(enc, args.dataset, args.combine)
data_sampler = Sampler(chunks)
print('dataset has', data_sampler.total_size, 'tokens')
print('Training...')
counter = 1
counter_path = os.path.join(CHECKPOINT_DIR, args.run_name, 'counter')
if os.path.exists(counter_path):
# Load the step number if we're resuming a run
# Add 1 so we don't immediately try to save again
with open(counter_path, 'r') as fp:
counter = int(fp.read()) + 1
def generate_samples():
print('Generating samples...')
context_tokens = data_sampler.sample(1)
if generate_from is not None:
context_tokens = enc.encode(generate_from)
all_text = []
index = 0
while index < args.sample_num:
out = sess.run(
tf_sample,
feed_dict={context: args.batch_size * [context_tokens]})
for i in range(min(args.sample_num - index, args.batch_size)):
text = enc.decode(out[i])
text = '======== SAMPLE {} ========\n{}\n'.format(
index + 1, text)
all_text.append(text)
index += 1
print(text)
maketree(os.path.join(SAMPLE_DIR, args.run_name))
with open(
os.path.join(SAMPLE_DIR, args.run_name,
'samples-{}').format(counter), 'w') as fp:
fp.write('\n'.join(all_text))
def sample_batch():
return [data_sampler.sample(1024) for _ in range(args.batch_size)]
while True:
if counter > 1:
generate_samples()
return data_sampler, generate_from, args, sess, tf_sample, context, enc, counter
if args.accumulate_gradients > 1:
sess.run(opt_reset)
for _ in range(args.accumulate_gradients):
sess.run(
opt_compute, feed_dict={context: sample_batch()})
(v_loss, v_summary) = sess.run((opt_apply, summaries))
else:
(_, v_loss, v_summary) = sess.run(
(opt_apply, loss, summaries),
feed_dict={context: sample_batch()})
counter += 1
data_sampler, generate_from, args, sess, tf_sample, context, enc, counter = bootstrap(sess, args, hparams, enc, generate_from)
def generate_sample(generate_from=generate_from):
return generate_samples(data_sampler, generate_from, args, sess, tf_sample, context, enc, counter)
if __name__ == '__main__':
print()
print("sample1 from main:")
generate_sample()
print()
print("sample2 from main:")
generate_sample()
|
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `bigquery_row_generator` module."""
import json
import unittest
from typing import Any, Dict # pylint: disable=unused-import
from apache_beam.io.gcp.internal.clients import bigquery
import mock
from gcp_variant_transforms.beam_io import vcfio
from gcp_variant_transforms.libs import bigquery_sanitizer
from gcp_variant_transforms.libs import bigquery_schema_descriptor
from gcp_variant_transforms.libs import bigquery_row_generator
from gcp_variant_transforms.libs import processed_variant
from gcp_variant_transforms.libs import vcf_field_conflict_resolver
from gcp_variant_transforms.libs.bigquery_util import ColumnKeyConstants
from gcp_variant_transforms.libs.bigquery_util import TableFieldConstants
from gcp_variant_transforms.testing import vcf_header_util
from gcp_variant_transforms.testing.testdata_util import hash_name
def _get_processed_variant(variant, header_num_dict=None):
header_fields = vcf_header_util.make_header(header_num_dict or {})
return processed_variant.ProcessedVariantFactory(
header_fields).create_processed_variant(variant)
def _get_table_schema(move_hom_ref_calls=False):
  # type: (bool) -> bigquery.TableSchema
schema = bigquery.TableSchema()
schema.fields.append(bigquery.TableFieldSchema(
name='IB',
type=TableFieldConstants.TYPE_BOOLEAN,
mode=TableFieldConstants.MODE_NULLABLE,
description='INFO foo desc'))
schema.fields.append(bigquery.TableFieldSchema(
name='IBR',
type=TableFieldConstants.TYPE_BOOLEAN,
mode=TableFieldConstants.MODE_REPEATED,
description='INFO foo desc'))
schema.fields.append(bigquery.TableFieldSchema(
name='II',
type=TableFieldConstants.TYPE_INTEGER,
mode=TableFieldConstants.MODE_NULLABLE,
description='INFO foo desc'))
schema.fields.append(bigquery.TableFieldSchema(
name='II2',
type=TableFieldConstants.TYPE_INTEGER,
mode=TableFieldConstants.MODE_NULLABLE,
description='INFO foo desc'))
schema.fields.append(bigquery.TableFieldSchema(
name='IIR',
type=TableFieldConstants.TYPE_INTEGER,
mode=TableFieldConstants.MODE_REPEATED,
description='INFO foo desc'))
schema.fields.append(bigquery.TableFieldSchema(
name='IF',
type=TableFieldConstants.TYPE_FLOAT,
mode=TableFieldConstants.MODE_NULLABLE,
description='INFO foo desc'))
schema.fields.append(bigquery.TableFieldSchema(
name='IF2',
type=TableFieldConstants.TYPE_FLOAT,
mode=TableFieldConstants.MODE_NULLABLE,
description='INFO foo desc'))
schema.fields.append(bigquery.TableFieldSchema(
name='IFR',
type=TableFieldConstants.TYPE_FLOAT,
mode=TableFieldConstants.MODE_REPEATED,
description='INFO foo desc'))
schema.fields.append(bigquery.TableFieldSchema(
name='IFR2',
type=TableFieldConstants.TYPE_FLOAT,
mode=TableFieldConstants.MODE_REPEATED,
description='INFO foo desc'))
schema.fields.append(bigquery.TableFieldSchema(
name='field__IS',
type=TableFieldConstants.TYPE_STRING,
mode=TableFieldConstants.MODE_NULLABLE,
description='INFO foo desc'))
schema.fields.append(bigquery.TableFieldSchema(
name='IS',
type=TableFieldConstants.TYPE_STRING,
mode=TableFieldConstants.MODE_NULLABLE,
description='INFO foo desc'))
schema.fields.append(bigquery.TableFieldSchema(
name='ISR',
type=TableFieldConstants.TYPE_STRING,
mode=TableFieldConstants.MODE_REPEATED,
description='INFO foo desc'))
if move_hom_ref_calls:
hom_ref_call_record = bigquery.TableFieldSchema(
name=ColumnKeyConstants.HOM_REF_CALLS,
type=TableFieldConstants.TYPE_RECORD,
mode=TableFieldConstants.MODE_REPEATED,
description='One record for each call.')
hom_ref_call_record.fields.append(bigquery.TableFieldSchema(
name=ColumnKeyConstants.CALLS_SAMPLE_ID,
type=TableFieldConstants.TYPE_INTEGER,
mode=TableFieldConstants.MODE_NULLABLE,
description='Unique ID (type INT64) assigned to each sample. Table '
'with `__sample_info` suffix contains the mapping of '
'sample names (as read from VCF header) to these assigned '
'IDs.'))
hom_ref_call_record.fields.append(bigquery.TableFieldSchema(
name=ColumnKeyConstants.CALLS_NAME,
type=TableFieldConstants.TYPE_STRING,
mode=TableFieldConstants.MODE_NULLABLE,
description='Name of the call (sample names in the VCF Header '
'line).'))
schema.fields.append(hom_ref_call_record)
# Call record.
call_record = bigquery.TableFieldSchema(
name=ColumnKeyConstants.CALLS,
type=TableFieldConstants.TYPE_RECORD,
mode=TableFieldConstants.MODE_REPEATED,
description='One record for each call.')
call_record.fields.append(bigquery.TableFieldSchema(
name='FB',
type=TableFieldConstants.TYPE_BOOLEAN,
mode=TableFieldConstants.MODE_NULLABLE,
description='FORMAT foo desc'))
call_record.fields.append(bigquery.TableFieldSchema(
name='FI',
type=TableFieldConstants.TYPE_INTEGER,
mode=TableFieldConstants.MODE_NULLABLE,
description='FORMAT foo desc'))
call_record.fields.append(bigquery.TableFieldSchema(
name='GQ',
type=TableFieldConstants.TYPE_INTEGER,
mode=TableFieldConstants.MODE_NULLABLE,
description='FORMAT foo desc'))
call_record.fields.append(bigquery.TableFieldSchema(
name='FIR',
type=TableFieldConstants.TYPE_INTEGER,
mode=TableFieldConstants.MODE_REPEATED,
description='FORMAT foo desc'))
call_record.fields.append(bigquery.TableFieldSchema(
name='FSR',
type=TableFieldConstants.TYPE_STRING,
mode=TableFieldConstants.MODE_REPEATED,
description='FORMAT foo desc'))
schema.fields.append(call_record)
return schema
def _get_big_query_row():
# type: (...) -> Dict[unicode, Any]
"""Returns one sample BigQuery row for testing."""
row = {str(ColumnKeyConstants.REFERENCE_NAME): str('chr19'),
str(ColumnKeyConstants.START_POSITION): 11,
str(ColumnKeyConstants.END_POSITION): 12,
str(ColumnKeyConstants.REFERENCE_BASES): 'C',
str(ColumnKeyConstants.NAMES): [str('rs1'), str('rs2')],
str(ColumnKeyConstants.QUALITY): 2,
str(ColumnKeyConstants.FILTER): [str('PASS')],
str(ColumnKeyConstants.CALLS): [
{str(ColumnKeyConstants.CALLS_SAMPLE_ID): (
str(hash_name('Sample1'))),
str(ColumnKeyConstants.CALLS_GENOTYPE): [0, 1],
str(ColumnKeyConstants.CALLS_PHASESET): str('*'),
str('GQ'): 20, str('FIR'): [10, 20]},
{str(ColumnKeyConstants.CALLS_SAMPLE_ID): (
str(hash_name('Sample2'))),
str(ColumnKeyConstants.CALLS_GENOTYPE): [0, 0],
str(ColumnKeyConstants.CALLS_PHASESET): None,
str('GQ'): 10, str('FB'): True}
],
str(ColumnKeyConstants.ALTERNATE_BASES): [
{str(ColumnKeyConstants.ALTERNATE_BASES_ALT): str('A'),
str('IFR'): 1,
str('IFR2'): 0.2},
{str(ColumnKeyConstants.ALTERNATE_BASES_ALT): str('TT'),
str('IFR'): 0.2,
str('IFR2'): 0.3}
],
str('IS'): str('some data'),
str('ISR'): [str('data1'), str('data2')]}
return row
class VariantCallRowGeneratorTest(unittest.TestCase):
"""Test cases for class `VariantCallRowGenerator`."""
def setUp(self):
self._schema_descriptor = bigquery_schema_descriptor.SchemaDescriptor(
_get_table_schema())
self._conflict_resolver = (
vcf_field_conflict_resolver.FieldConflictResolver())
self._row_generator = bigquery_row_generator.VariantCallRowGenerator(
self._schema_descriptor, self._conflict_resolver)
def test_all_fields(self):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='C',
alternate_bases=['A', 'TT'], names=['rs1', 'rs2'], quality=2,
filters=['PASS'],
info={'IFR': [0.1, 0.2],
'IFR2': [0.2, 0.3],
'IS': 'some data',
'ISR': ['data1', 'data2']},
calls=[
vcfio.VariantCall(
sample_id=hash_name('Sample1'), name='Sample1', genotype=[0, 1],
phaseset='*', info={'GQ': 20, 'FIR': [10, 20]}),
vcfio.VariantCall(
sample_id=hash_name('Sample2'), name='Sample2', genotype=[0, 0],
info={'GQ': 10, 'FB': True}),
vcfio.VariantCall(sample_id=hash_name('Sample3'), name='Sample3',
genotype=[vcfio.MISSING_GENOTYPE_VALUE])])
header_num_dict = {'IFR': 'A', 'IFR2': 'A', 'IS': '1', 'ISR': '2'}
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'C',
ColumnKeyConstants.ALTERNATE_BASES: [
{ColumnKeyConstants.ALTERNATE_BASES_ALT: 'A',
'IFR': 0.1, 'IFR2': 0.2},
{ColumnKeyConstants.ALTERNATE_BASES_ALT: 'TT',
'IFR': 0.2, 'IFR2': 0.3}],
ColumnKeyConstants.NAMES: ['rs1', 'rs2'],
ColumnKeyConstants.QUALITY: 2,
ColumnKeyConstants.FILTER: ['PASS'],
ColumnKeyConstants.CALLS: [
{ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample1'),
ColumnKeyConstants.CALLS_NAME: 'Sample1',
ColumnKeyConstants.CALLS_GENOTYPE: [0, 1],
ColumnKeyConstants.CALLS_PHASESET: '*',
'GQ': 20, 'FIR': [10, 20]},
{ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample2'),
ColumnKeyConstants.CALLS_NAME: 'Sample2',
ColumnKeyConstants.CALLS_GENOTYPE: [0, 0],
ColumnKeyConstants.CALLS_PHASESET: None,
'GQ': 10, 'FB': True},
{ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample3'),
ColumnKeyConstants.CALLS_NAME: 'Sample3',
ColumnKeyConstants.CALLS_GENOTYPE: [vcfio.MISSING_GENOTYPE_VALUE],
ColumnKeyConstants.CALLS_PHASESET: None}],
'IS': 'some data',
'ISR': ['data1', 'data2']}
proc_variant = _get_processed_variant(variant, header_num_dict)
row_generator = bigquery_row_generator.VariantCallRowGenerator(
self._schema_descriptor, self._conflict_resolver,
include_call_name=True)
self.assertEqual(
[expected_row], list(row_generator.get_rows(proc_variant)))
def test_all_fields_with_hom_ref(self):
schema_descriptor = bigquery_schema_descriptor.SchemaDescriptor(
_get_table_schema(move_hom_ref_calls=True))
conflict_resolver = (
vcf_field_conflict_resolver.FieldConflictResolver())
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='C',
alternate_bases=['A', 'TT'], names=['rs1', 'rs2'], quality=2,
filters=['PASS'],
info={'IFR': [0.1, 0.2],
'IFR2': [0.2, 0.3],
'IS': 'some data',
'ISR': ['data1', 'data2']},
hom_ref_calls=[
('Sample2', hash_name('Sample2')),
('Sample3', hash_name('Sample3'))
],
calls=[
vcfio.VariantCall(
sample_id=hash_name('Sample1'), name='Sample1', genotype=[0, 1],
phaseset='*', info={'GQ': 20, 'FIR': [10, 20]})])
header_num_dict = {'IFR': 'A', 'IFR2': 'A', 'IS': '1', 'ISR': '2'}
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'C',
ColumnKeyConstants.ALTERNATE_BASES: [
{ColumnKeyConstants.ALTERNATE_BASES_ALT: 'A',
'IFR': 0.1, 'IFR2': 0.2},
{ColumnKeyConstants.ALTERNATE_BASES_ALT: 'TT',
'IFR': 0.2, 'IFR2': 0.3}],
ColumnKeyConstants.NAMES: ['rs1', 'rs2'],
ColumnKeyConstants.QUALITY: 2,
ColumnKeyConstants.FILTER: ['PASS'],
ColumnKeyConstants.HOM_REF_CALLS: [
{ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample2'),
ColumnKeyConstants.CALLS_NAME: 'Sample2'},
{ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample3'),
ColumnKeyConstants.CALLS_NAME: 'Sample3'}
],
ColumnKeyConstants.CALLS: [
{ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample1'),
ColumnKeyConstants.CALLS_NAME: 'Sample1',
ColumnKeyConstants.CALLS_GENOTYPE: [0, 1],
ColumnKeyConstants.CALLS_PHASESET: '*',
'GQ': 20, 'FIR': [10, 20]}],
'IS': 'some data',
'ISR': ['data1', 'data2']}
proc_variant = _get_processed_variant(variant, header_num_dict)
row_generator = bigquery_row_generator.VariantCallRowGenerator(
schema_descriptor, conflict_resolver, include_call_name=True,
move_hom_ref_calls=True)
self.assertEqual(
[expected_row], list(row_generator.get_rows(proc_variant)))
def test_no_alternate_bases(self):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='CT',
alternate_bases=[], filters=['q10'],
info={'IS': 'some data',
'ISR': ['data1', 'data2']})
header_num_dict = {'IS': '1', 'ISR': '2'}
proc_variant = _get_processed_variant(variant, header_num_dict)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'CT',
ColumnKeyConstants.ALTERNATE_BASES: [],
ColumnKeyConstants.FILTER: ['q10'],
ColumnKeyConstants.CALLS: [],
'IS': 'some data',
'ISR': ['data1', 'data2']}
self.assertEqual([expected_row],
list(self._row_generator.get_rows(proc_variant)))
def test_some_fields_set(self):
variant = vcfio.Variant(
reference_name='chr19', start=None, end=123, reference_bases=None,
alternate_bases=[], quality=20)
proc_variant = _get_processed_variant(variant)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: None,
ColumnKeyConstants.END_POSITION: 123,
ColumnKeyConstants.REFERENCE_BASES: None,
ColumnKeyConstants.ALTERNATE_BASES: [],
ColumnKeyConstants.QUALITY: 20,
ColumnKeyConstants.CALLS: []}
self.assertEqual([expected_row],
list(self._row_generator.get_rows(proc_variant)))
def test_no_field_set(self):
variant = vcfio.Variant()
proc_variant = _get_processed_variant(variant)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: None,
ColumnKeyConstants.START_POSITION: None,
ColumnKeyConstants.END_POSITION: None,
ColumnKeyConstants.REFERENCE_BASES: None,
ColumnKeyConstants.ALTERNATE_BASES: [],
ColumnKeyConstants.CALLS: []}
self.assertEqual([expected_row],
list(self._row_generator.get_rows(proc_variant)))
def test_null_repeated_fields(self):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='CT',
alternate_bases=[], filters=['q10'],
info={'IIR': [0, 1, None],
'IBR': [True, None, False],
'IFR': [0.1, 0.2, None, 0.4],
'ISR': [None, 'data1', 'data2']})
header_num_dict = {'IIR': '3', 'IBR': '3', 'IFR': '4', 'ISR': '3'}
proc_variant = _get_processed_variant(variant, header_num_dict)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'CT',
ColumnKeyConstants.ALTERNATE_BASES: [],
ColumnKeyConstants.FILTER: ['q10'],
ColumnKeyConstants.CALLS: [],
'IIR': [0,
1,
bigquery_sanitizer._DEFAULT_NULL_NUMERIC_VALUE_REPLACEMENT],
'IBR': [True, False, False],
'IFR': [0.1,
0.2,
bigquery_sanitizer._DEFAULT_NULL_NUMERIC_VALUE_REPLACEMENT,
0.4],
'ISR': ['.', 'data1', 'data2']}
self.assertEqual([expected_row],
list(self._row_generator.get_rows(proc_variant)))
def test_unicode_fields(self):
sample_unicode_str = u'\xc3\xb6'
sample_utf8_str = sample_unicode_str.encode('utf-8')
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='CT',
alternate_bases=[], filters=[sample_unicode_str, sample_utf8_str],
info={'IS': sample_utf8_str,
'ISR': [sample_unicode_str, sample_utf8_str]})
header_num_dict = {'IS': '1', 'ISR': '2'}
proc_variant = _get_processed_variant(variant, header_num_dict)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'CT',
ColumnKeyConstants.ALTERNATE_BASES: [],
ColumnKeyConstants.FILTER: [sample_unicode_str, sample_unicode_str],
ColumnKeyConstants.CALLS: [],
'IS': sample_unicode_str,
'ISR': [sample_unicode_str, sample_unicode_str]}
self.assertEqual([expected_row],
list(self._row_generator.get_rows(proc_variant)))
def test_nonstandard_float_values(self):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='CT',
alternate_bases=['A', 'C', 'T', 'TC'], filters=[],
calls=[vcfio.VariantCall(
sample_id=hash_name('Sample1'), genotype=[0, 1],
phaseset='*', info={'GQ': float('inf')})],
info={'IF': float('inf'),
'IFR': [float('-inf'), float('nan'), 1.2],
'IF2': float('nan'),
'IF3': [float('-inf'), float('nan'), float('inf'), 1.2]},
)
header_num_dict = {'IF': '1', 'IFR': '3', 'IF2': '1', 'IF3': 'A'}
proc_variant = _get_processed_variant(variant, header_num_dict)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'CT',
ColumnKeyConstants.ALTERNATE_BASES: [
{'IF3': -bigquery_sanitizer._INF_FLOAT_VALUE, 'alt': 'A'},
{'IF3': None, 'alt': 'C'},
{'IF3': bigquery_sanitizer._INF_FLOAT_VALUE, 'alt': 'T'},
{'IF3': 1.2, 'alt': 'TC'}
],
ColumnKeyConstants.CALLS: [
{
ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample1'),
ColumnKeyConstants.CALLS_GENOTYPE: [0, 1],
ColumnKeyConstants.CALLS_PHASESET: '*',
'GQ': bigquery_sanitizer._INF_FLOAT_VALUE
}
],
'IF': bigquery_sanitizer._INF_FLOAT_VALUE,
'IFR': [-bigquery_sanitizer._INF_FLOAT_VALUE,
bigquery_sanitizer._DEFAULT_NULL_NUMERIC_VALUE_REPLACEMENT,
1.2],
'IF2': None
}
self.assertEqual([expected_row],
list(self._row_generator.get_rows(proc_variant)))
def test_nonstandard_fields_names(self):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='CT',
alternate_bases=[],
info={'IS': 'data1',
'_IS': 'data2'})
header_num_dict = {'IS': '1', '_IS': '2'}
proc_variant = _get_processed_variant(variant, header_num_dict)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'CT',
ColumnKeyConstants.ALTERNATE_BASES: [],
ColumnKeyConstants.CALLS: [],
'IS': 'data1',
'field__IS': 'data2'}
self.assertEqual([expected_row],
list(self._row_generator.get_rows(proc_variant)))
def test_sharded_rows(self):
"""Tests splitting BigQuery rows that are larger than BigQuery limit."""
num_calls = 10 # Number of calls for a variant in this test.
num_first_row_calls = 6 # BigQuery row limit is adjusted accordingly.
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='C',
alternate_bases=['A', 'TT'], names=['rs1', 'rs2'], quality=2,
filters=['PASS'],
info={'IFR': [0.1, 0.2],
'IFR2': [0.2, 0.3],
'IS': 'some data'},
calls=[
vcfio.VariantCall(
sample_id=hash_name('Sample{}'.format(i)), genotype=[0, 1],
phaseset='*', info={'GQ': 20, 'FIR': [10, 20]})
for i in range(num_calls)])
header_num_dict = {'IFR': 'A', 'IFR2': 'A', 'IS': '1'}
proc_variant = _get_processed_variant(variant, header_num_dict)
expected_rows = [
{
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'C',
ColumnKeyConstants.ALTERNATE_BASES: [
{ColumnKeyConstants.ALTERNATE_BASES_ALT: 'A',
'IFR': 0.1, 'IFR2': 0.2},
{ColumnKeyConstants.ALTERNATE_BASES_ALT: 'TT',
'IFR': 0.2, 'IFR2': 0.3}],
ColumnKeyConstants.NAMES: ['rs1', 'rs2'],
ColumnKeyConstants.QUALITY: 2,
ColumnKeyConstants.FILTER: ['PASS'],
ColumnKeyConstants.CALLS: [
{ColumnKeyConstants.CALLS_SAMPLE_ID: (
hash_name('Sample{}'.format(i))),
ColumnKeyConstants.CALLS_GENOTYPE: [0, 1],
ColumnKeyConstants.CALLS_PHASESET: '*',
'GQ': 20, 'FIR': [10, 20]}
for i in range(num_first_row_calls)],
'IS': 'some data'
},
{
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'C',
ColumnKeyConstants.ALTERNATE_BASES: [
{ColumnKeyConstants.ALTERNATE_BASES_ALT: 'A',
'IFR': 0.1, 'IFR2': 0.2},
{ColumnKeyConstants.ALTERNATE_BASES_ALT: 'TT',
'IFR': 0.2, 'IFR2': 0.3}],
ColumnKeyConstants.NAMES: ['rs1', 'rs2'],
ColumnKeyConstants.QUALITY: 2,
ColumnKeyConstants.FILTER: ['PASS'],
ColumnKeyConstants.CALLS: [
{ColumnKeyConstants.CALLS_SAMPLE_ID: (
hash_name('Sample{}'.format(i))),
ColumnKeyConstants.CALLS_GENOTYPE: [0, 1],
ColumnKeyConstants.CALLS_PHASESET: '*',
'GQ': 20, 'FIR': [10, 20]}
for i in range(num_first_row_calls, num_calls)],
'IS': 'some data'
},
]
with mock.patch.object(bigquery_row_generator.VariantCallRowGenerator,
'_MAX_BIGQUERY_ROW_SIZE_BYTES',
num_first_row_calls * len(json.dumps(
expected_rows[0][ColumnKeyConstants.CALLS][0]))):
with mock.patch.object(bigquery_row_generator.VariantCallRowGenerator,
'_MIN_NUM_CALLS_FOR_ROW_SIZE_ESTIMATION',
num_calls):
self.assertEqual(expected_rows,
list(self._row_generator.get_rows(proc_variant)))
def test_omit_empty_sample_calls(self):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='C',
alternate_bases=[], names=['rs1', 'rs2'], quality=2,
filters=['PASS'],
info={},
calls=[
vcfio.VariantCall(
sample_id=hash_name('Sample1'), info={'GQ': None}),
vcfio.VariantCall(
sample_id=hash_name('Sample2'), genotype=[1, 0],
info={'GQ': 10}),
vcfio.VariantCall(
sample_id=hash_name('Sample3'),
genotype=[vcfio.MISSING_GENOTYPE_VALUE,
vcfio.MISSING_GENOTYPE_VALUE])])
proc_variant = _get_processed_variant(variant)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'C',
ColumnKeyConstants.ALTERNATE_BASES: [],
ColumnKeyConstants.NAMES: ['rs1', 'rs2'],
ColumnKeyConstants.QUALITY: 2,
ColumnKeyConstants.FILTER: ['PASS'],
ColumnKeyConstants.CALLS: [
{ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample2'),
ColumnKeyConstants.CALLS_GENOTYPE: [1, 0],
ColumnKeyConstants.CALLS_PHASESET: None,
'GQ': 10}]}
self.assertEqual(
[expected_row],
list(self._row_generator.get_rows(proc_variant,
omit_empty_sample_calls=True)))
def test_schema_conflict_in_info_field_type(self):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='CT',
alternate_bases=[], filters=[],
info={'IB': 1,
'II': 1.1,
'IFR': [1, 2],
'ISR': [1.0, 2.0]})
header_num_dict = {'IB': '1', 'II': '1', 'IFR': '2', 'ISR': '2'}
proc_variant = _get_processed_variant(variant, header_num_dict)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'CT',
ColumnKeyConstants.ALTERNATE_BASES: [],
ColumnKeyConstants.CALLS: [],
'IB': True,
'II': 1,
'IFR': [1.0, 2.0],
'ISR': ['1.0', '2.0']}
self.assertEqual([expected_row],
list(self._row_generator.get_rows(
proc_variant, allow_incompatible_records=True)))
with self.assertRaises(ValueError):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='CT',
alternate_bases=[], filters=[],
# String cannot be casted to integer.
info={'II': '1.1'})
header_num_dict = {'II': '1'}
# self._get_row_list_from_variant(
# variant, header_num_dict, allow_incompatible_records=True)
proc_variant = _get_processed_variant(variant, header_num_dict)
list(self._row_generator.get_rows(proc_variant,
allow_incompatible_records=True))
self.fail('String data for an integer schema must cause an exception')
def test_schema_conflict_in_info_field_number(self):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='CT',
alternate_bases=[], filters=[],
info={'IB': [1, 2],
'IBR': 1,
'II': [10, 20],
'IFR': 1.1,
'ISR': 'foo'},)
header_num_dict = {'IB': '2', 'IBR': '1', 'II': '2', 'IFR': '1', 'ISR': '1'}
proc_variant = _get_processed_variant(variant, header_num_dict)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'CT',
ColumnKeyConstants.ALTERNATE_BASES: [],
ColumnKeyConstants.CALLS: [],
'IB': True,
'IBR': [True],
'II': 10,
'IFR': [1.1],
'ISR': ['foo']
}
self.assertEqual(
[expected_row],
list(self._row_generator.get_rows(proc_variant,
allow_incompatible_records=True)))
def test_schema_conflict_in_format_field_type(self):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='CT',
alternate_bases=[], filters=[],
calls=[
vcfio.VariantCall(
sample_id=hash_name('Sample1'), genotype=[0, 1], phaseset='*',
info={'FB': '', 'FI': 1.0, 'FSR': [1, 2]}),
vcfio.VariantCall(
sample_id=hash_name('Sample2'), genotype=[1, 0],
info={'FB': 1, 'FI': True, 'FSR': [1.0, 2.0]})])
proc_variant = _get_processed_variant(variant)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'CT',
ColumnKeyConstants.ALTERNATE_BASES: [],
ColumnKeyConstants.CALLS: [
{ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample1'),
ColumnKeyConstants.CALLS_GENOTYPE: [0, 1],
ColumnKeyConstants.CALLS_PHASESET: '*',
'FB': False, 'FI': 1, 'FSR': ['1', '2']},
{ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample2'),
ColumnKeyConstants.CALLS_GENOTYPE: [1, 0],
ColumnKeyConstants.CALLS_PHASESET: None,
'FB': True, 'FI': 1, 'FSR': ['1.0', '2.0']}],
}
self.assertEqual(
[expected_row],
list(self._row_generator.get_rows(proc_variant,
allow_incompatible_records=True)))
with self.assertRaises(ValueError):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='CT',
alternate_bases=[], filters=[],
# String cannot be casted to integer.
calls=[
vcfio.VariantCall(
sample_id=hash_name('Sample1'), genotype=[0, 1], phaseset='*',
info={'FI': 'string_for_int_field'})])
proc_variant = _get_processed_variant(variant)
list(self._row_generator.get_rows(proc_variant,
allow_incompatible_records=True))
self.fail('String data for an integer schema must cause an exception')
def test_schema_conflict_in_format_field_number(self):
variant = vcfio.Variant(
reference_name='chr19', start=11, end=12, reference_bases='CT',
alternate_bases=[], filters=[],
calls=[
vcfio.VariantCall(
sample_id=hash_name('Sample1'), genotype=[0, 1], phaseset='*',
info={'FB': [1, 2], 'FI': [1, 2], 'FSR': 'str'}),
vcfio.VariantCall(
sample_id=hash_name('Sample2'), genotype=[1, 0],
info={'FB': [], 'FI': [], 'FSR': ''})])
proc_variant = _get_processed_variant(variant)
expected_row = {
ColumnKeyConstants.REFERENCE_NAME: 'chr19',
ColumnKeyConstants.START_POSITION: 11,
ColumnKeyConstants.END_POSITION: 12,
ColumnKeyConstants.REFERENCE_BASES: 'CT',
ColumnKeyConstants.ALTERNATE_BASES: [],
ColumnKeyConstants.CALLS: [
{ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample1'),
ColumnKeyConstants.CALLS_GENOTYPE: [0, 1],
ColumnKeyConstants.CALLS_PHASESET: '*',
'FB': True, 'FI': 1, 'FSR': ['str']},
{ColumnKeyConstants.CALLS_SAMPLE_ID: hash_name('Sample2'),
ColumnKeyConstants.CALLS_GENOTYPE: [1, 0],
ColumnKeyConstants.CALLS_PHASESET: None,
'FB': False, 'FI': None, 'FSR': ['']}],
}
self.assertEqual(
[expected_row],
list(self._row_generator.get_rows(proc_variant,
allow_incompatible_records=True)))
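# --- Illustrative sketch (not part of the original tests): the sharding
# behaviour exercised in test_sharded_rows above can be reproduced with a
# small standalone helper that splits a list of call records into consecutive
# chunks whose serialized size stays under a byte budget. The helper name and
# the use of json.dumps as the size estimate are assumptions for illustration
# only, not the library's actual implementation.
def _shard_calls_by_size(calls, max_bytes):
  """Splits `calls` into consecutive chunks whose JSON size is <= max_bytes."""
  shards = []
  current, current_size = [], 0
  for call in calls:
    call_size = len(json.dumps(call))
    if current and current_size + call_size > max_bytes:
      shards.append(current)
      current, current_size = [], 0
    current.append(call)
    current_size += call_size
  if current:
    shards.append(current)
  return shards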
|
"""A standard time-integrated analysis is performed, using one year of
IceCube data (IC86_1).
"""
import logging
import unittest
from flarestack.data.public import icecube_ps_3_year
from flarestack.core.unblinding import create_unblinder
from flarestack.analyses.tde.shared_TDE import tde_catalogue_name
from flarestack import MinimisationHandler, analyse
# Initialise Injectors/LLHs
llh_dict = {
"llh_name": "standard",
"llh_sig_time_pdf": {"time_pdf_name": "steady"},
"llh_bkg_time_pdf": {
"time_pdf_name": "steady",
},
"llh_energy_pdf": {"energy_pdf_name": "power_law"},
}
true_parameters = [3.6400763376308523, 0.0, 0.0, 4.0]
catalogue = tde_catalogue_name("jetted")
class TestTimeIntegrated(unittest.TestCase):
def setUp(self):
pass
def test_declination_sensitivity(self):
logging.info("Testing 'fit_weight' MinimisationHandler class")
mh_name = "fit_weights"
# Test three declinations
unblind_dict = {
"name": "tests/test_mh_fit_weights",
"mh_name": mh_name,
"dataset": icecube_ps_3_year.get_seasons("IC86-2011"),
"catalogue": catalogue,
"llh_dict": llh_dict,
}
ub = create_unblinder(unblind_dict)
key = [x for x in ub.res_dict.keys() if x != "TS"][0]
res = ub.res_dict[key]
logging.info("Best fit values {0}".format(list(res["x"])))
logging.info("Reference best fit {0}".format(true_parameters))
for i, x in enumerate(res["x"]):
self.assertAlmostEqual(x, true_parameters[i], delta=0.1)
inj_dict = {
"injection_sig_time_pdf": {"time_pdf_name": "steady"},
"injection_bkg_time_pdf": {
"time_pdf_name": "steady",
},
"injection_energy_pdf": {"energy_pdf_name": "power_law", "gamma": 2.0},
}
mh_dict = dict(unblind_dict)
mh_dict["inj_dict"] = inj_dict
mh_dict["n_trials"] = 1.0
mh_dict["n_steps"] = 3.0
mh_dict["scale"] = 5.0
mh = MinimisationHandler.create(mh_dict)
res = mh.simulate_and_run(5.0)
analyse(mh_dict, cluster=False)
mh.corner_likelihood_scan(
save=True,
res_dict=res,
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
# setup.py generated by flit for tools that don't yet use PEP 517
from setuptools import setup, find_packages
setup(
name="nbopen",
version="0.6",
description='Open a notebook from the command line in the best available server',
author='Thomas Kluyver',
author_email='thomas@kluyver.me.uk',
url='https://github.com/takluyver/nbopen',
packages=find_packages(),
install_requires=[],
extras_require={"develop": []},
entry_points={
"console_scripts": [
"nbopen = nbopen:main"
],
"gui_scripts": []
}
)
|
import unittest
from unittest import mock
from itertools import product
from remake.task_query_set import TaskQuerySet
class TestTaskQuerySet(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
tasks = []
statuses = {'a': 'completed', 'b': 'pending', 'c': 'remaining'}
for val1, val2 in product(['a', 'b', 'c'], [1, 2, 3]):
task = mock.MagicMock()
task.val1 = val1
task.val2 = val2
del task.val3
task.path_hash_key.return_value = ''.join([val1, str(val2)] * 20)
task.__str__.return_value = f'Task(val1={val1}, val2={val2})'
task.__class__.__name__ = 'Rule' + val1.upper()
task.status = statuses[val1]
task.task_md.rerun_reasons = [('func_changed', None),
('path_doesnt_exist', 'not/there.txt')]
task.diff.return_value = ['def fn(self)', '+ print("hi")', ' pass']
tasks.append(task)
task_ctrl = mock.MagicMock()
cls.tasks = TaskQuerySet(tasks, task_ctrl)
assert len(cls.tasks) == 9
def test_index_slice(self):
assert self.tasks[0]
self.assertEqual(len(self.tasks[:3]), 3)
self.assertEqual(len(self.tasks[:-1]), 8)
def test_filter(self):
self.assertEqual(len(self.tasks.filter(val1='a')), 3)
self.assertEqual(len(self.tasks.filter(val1='a', val2=3)), 1)
self.assertEqual(len(self.tasks.filter(val1='a', val2='3', cast_to_str=True)), 1)
self.assertEqual(len(self.tasks.exclude(val1='b')), 6)
def test_get(self):
task = self.tasks.get(val1='a', val2=2)
self.assertEqual(task.val1, 'a')
self.assertEqual(task.val2, 2)
self.assertRaises(Exception, self.tasks.get, val1='a')
self.assertRaises(Exception, self.tasks.get, val1='d')
def test_first_last(self):
task = self.tasks.first()
self.assertEqual(task.val1, 'a')
self.assertEqual(task.val2, 1)
task = self.tasks.last()
self.assertEqual(task.val1, 'c')
self.assertEqual(task.val2, 3)
no_tasks = self.tasks[:0]
self.assertRaises(Exception, no_tasks.first)
self.assertRaises(Exception, no_tasks.last)
def test_in_rule(self):
ruleAtasks = self.tasks.in_rule('RuleA')
self.assertEqual(len(ruleAtasks), 3)
rule = mock.MagicMock()
for task in ruleAtasks:
task.__class__ = rule
ruleAtasks2 = self.tasks.in_rule(rule)
self.assertEqual(len(ruleAtasks2), 3)
def test_run(self):
self.tasks.run()
def test_status(self):
self.tasks.status()
self.tasks.status(True)
self.tasks.status(True, True)
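# --- Illustrative sketch (not the real implementation): a minimal query-set
# covering only the filter/exclude/get semantics exercised above; the actual
# remake.task_query_set.TaskQuerySet also provides first/last/in_rule and may
# differ in details, so treat this purely as a sketch under that assumption.
class _MiniTaskQuerySet(list):
    def filter(self, cast_to_str=False, **kwargs):
        def matches(task):
            for key, val in kwargs.items():
                attr = getattr(task, key)
                if cast_to_str:
                    attr, val = str(attr), str(val)
                if attr != val:
                    return False
            return True
        return _MiniTaskQuerySet(t for t in self if matches(t))
    def exclude(self, **kwargs):
        excluded = {id(t) for t in self.filter(**kwargs)}
        return _MiniTaskQuerySet(t for t in self if id(t) not in excluded)
    def get(self, **kwargs):
        matching = self.filter(**kwargs)
        if len(matching) != 1:
            raise Exception('Expected exactly 1 matching task, found {}'.format(len(matching)))
        return matching[0]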
|
from experiment_impact_tracker.emissions.rough_emissions_estimator import RoughEmissionsEstimator
def test_rough_emissions_estimator():
assert "GTX 1080 Ti" in RoughEmissionsEstimator.get_available_gpus()
assert "Dual-core PowerPC MPC8641D" in RoughEmissionsEstimator.get_available_cpus()
emissions_estimator = RoughEmissionsEstimator(gpu="GTX 1080 Ti",
cpu="PowerPC 750CXe",
gpu_utilization_factor=1.0,
cpu_utilization_factor=0.0,
location="Portland, Oregon",
experiment_length_seconds=12*60*60)
assert emissions_estimator.kg_carbon == 0.381018
assert emissions_estimator.cpu_kWh == 0.0
assert emissions_estimator.gpu_kWh == 3
assert "GTX 1080 Ti" in emissions_estimator.carbon_impact_statement
assert "PowerPC 750CXe" in emissions_estimator.carbon_impact_statement
|
"""
made by Gladox114
https://github.com/Gladox114/Python/upload/master/Snake/
"""
import tkinter as tk
import copy
class Map:
def __init__(self, master=None, width=500, height=500, color="black", offset=10, Block=20,FrameColor="pink",FrameColor2="red"):
self.master = master
self.width = width
self.height = height
self.offset = offset
self.color = color
self.FrameColor = FrameColor
self.Block = Block
self.MapArea = width,height
self.FrameColor2 = FrameColor2
#print("init",self.MapArea,MapAreaX,MapAreaY)
self.food=[]
self.MapChunks=(0,0,int(self.MapArea[0]/self.Block-1),int(self.MapArea[1]/self.Block-1))
def BuildMap(self):
self.frame = tk.Frame(self.master,height=self.MapArea[1]+(self.offset*2)-1,width=self.MapArea[0]+(self.offset*2)-1)
self.frame.pack(fill="both",expand="true") # Create a Frame
self.canvas = tk.Canvas(self.frame,bg=self.FrameColor,height=self.MapArea[1]+(self.offset*2)-1,width=self.MapArea[0]+(self.offset*2)-1)
self.canvas.pack(fill="both",expand="true") # Create Canvas inside the Frame
self.f = tk.Canvas(self.canvas,bg=self.color,height=self.MapArea[1]+2,width=self.MapArea[0]+2,highlightthickness=0)
self.f.pack() # Create smaller Canvas in Canvas
self.f.place(x=self.offset,y=self.offset) # and offset it
self.f.create_rectangle(0,0,self.MapArea[1]+1,self.MapArea[0]+1,outline=self.FrameColor2,fill=self.color) # Create a Rectangle
self.c = tk.Canvas(self.canvas,bg=self.color,height=self.MapArea[1],width=self.MapArea[0],highlightthickness=0)
self.c.place(x=11,y=11)
self.frame2 = tk.Frame(self.master,bg="gray",height=15)
self.frame2.pack(fill="both",expand="true",side="bottom")
self.frame3 = tk.Frame(self.frame2,bg="gray",height=15)
self.frame3.place(relx=.5, rely=.5,anchor="center")
"""
self.v = tk.StringVar(value="Default Value")
self.Label = tk.Label(self.frame2,textvariable=self.v)
self.Label.pack(side="bottom")
self.v.set("Score:")
"""
def setScores(self,Players):
self.Label={}
self.v = {}
for i in range(len(Players)):
self.v[Players[i].Name] = tk.StringVar(value="Default Value")
self.Label[i] = tk.Label(self.frame3,bg="gray",textvariable=self.v[Players[i].Name])
self.Label[i].grid(row=1, column=i)
self.Label[i].grid_rowconfigure(0, weight=1)
self.Label[i].grid_rowconfigure(2, weight=1)
self.Label[i].grid_columnconfigure(0, weight=1)
self.Label[i].grid_columnconfigure(2, weight=1)
self.v[Players[i].Name].set(Players[i].Name+":"+str(Players[i].score))
def text(self,Player):
self.v[Player.Name].set(str(Player.Name+":"+str(Player.score)))
def drawCollision(self,pos,color1="red"):
x,y=pos
self.c.create_oval(self.pos(x,y),fill=color1,outline=color1)
def getCanvas(self):
return self.c,self.frame,self.f
    def pos(self,x,y): # Convert a block (grid) position into the pixel rectangle that covers it
X=x*self.Block # This
Y=y*self.Block # And this are the left top Corner
X2=X+self.Block # this
Y2=Y+self.Block # and this are the right Bottom Corner
return X,Y,X2,Y2
    def checkb(self,listd,a,b,c,d): # Compares body segment a with segment b; c and d are the offsets (directions) to the neighbouring segment
        if listd[a][0]+1*c==listd[b][0]: # c and d can be positive or negative offsets
#print("Stringasddddddddddddddddddd",listd[a][0]+1*c,listd[b][0])
            x = (listd[a][0]*self.Block)+(self.Block/2) # Getting the position from the middle of the body to the corner of the body
y = (listd[a][1]*self.Block)
x2 = x+self.Block/2
y2 = y+self.Block
elif listd[a][0]+1*d==listd[b][0]: # Thats the same but other direction like the all others
#print("Stringasddddddddddddddddddd",listd[a][0]+1*d,listd[b][0])
x = (listd[a][0]*self.Block)+(self.Block/2)
y = (listd[a][1]*self.Block)
x2 = x-self.Block/2
y2 = y+self.Block
        elif listd[a][1]+1*c==listd[b][1]: # Finding out whether the next body part is on the other side of the map
#print("Stringasddddddddddddddddddd",listd[a][1]+1*c,listd[b][1])
x = (listd[a][0]*self.Block)
y = (listd[a][1]*self.Block)+(self.Block/2)
x2 = x+self.Block
y2 = y+self.Block/2
        elif listd[a][1]+1*d==listd[b][1]: # e.g. if the head is at y=0 and the next part at y=24 on a 0-24 map, this branch is taken
#print("Stringasddddddddddddddddddd",listd[a][1]+1*d,listd[b][1])
x = (listd[a][0]*self.Block)
y = (listd[a][1]*self.Block)+(self.Block/2)
            x2 = x+self.Block
y2 = y-self.Block/2
else:
#print("Nailed")
#print("Stringasddddddddddddddddddd",listd[a][1],listd[b][1])
#print("Stringasddddddddddddddddddd",listd[a][1],listd[b][1])
return False,2
return True,(x,y,x2,y2)
def drawSnake(self,snake):
#if len(snake.body)!=snake.bodyLen: #or len(snake.body)==snake.bodyLen+1:
#difference=abs(snake.bodyLen-len(snake.body))
#snake.lastBlockChain-=difference
#snake.bodyLen=len(snake.body)
if len(snake.body)==snake.bodyLen+1:
snake.bodyLen+=1
#snake.lastBlockChain-=1
self.c.itemconfig("snake"+str(id(snake)+snake.lastBlockChain),fill="black",outline="black")
else:
self.c.itemconfig("snake"+str(id(snake)+snake.lastBlockChain),fill="black",outline="black")
#print("Removing snake"+str(id(snake)+snake.lastBlockChain))
#self.c.itemconfig("snake"+str(id(snake)+snake.lastBlockChain),fill="black",outline="black")
snake.lastBlockChain+=1
print("lastblockchain",snake.lastBlockChain)
print("bodyLen1:",snake.bodyLen)
print("Body1:",len(snake.body))
#self.snapColor = "white"
if len(snake.body) > 1:
i = len(snake.body)-1
a,coord = self.checkb(snake.body,i,i-1,1,-1) # Check for body behind
#print(coord)
if a==True: # If the above is going through a wall then
self.c.create_rectangle(coord,fill=snake.snapColor,outline=snake.snapColor,tags="snake"+str(id(snake)+i+snake.ChainLen))
#print("Adding: snake"+str(i+snake.ChainLen))
else: # put rectangles through
number = abs(snake.MapChunks[0]-snake.MapChunks[2]) # Getting the distance to the opposite bubble/wall
coord = self.checkb(snake.body,i,i-1,-number,number)[1]
if coord==False or coord==2:
number = abs(snake.MapChunks[1]-snake.MapChunks[3])
coord = self.checkb(snake.body,i,i-1,-number,number)[1]
#print(coord,number,snake.body,snake.MapChunks)
self.c.create_rectangle(coord,fill=snake.snapColor,outline=snake.snapColor,tags="snake"+str(id(snake)+i+snake.ChainLen))
#print("Adding: snake"+str(i+snake.ChainLen))
a,coord = self.checkb(snake.body,i-1,i,1,-1) # Check at boddy before the last one for body in front
if a==True: # If the above is going through a wall then
self.c.create_rectangle(coord,fill=snake.snapColor,outline=snake.snapColor,tags="snake"+str(id(snake)+i+snake.ChainLen))
#print("Adding: snake"+str(i+snake.ChainLen))
else: # put rectangles through
number = abs(snake.MapChunks[0]-snake.MapChunks[2])
coord=self.checkb(snake.body,i-1,i,-number,number)[1]
if coord==False or coord==2:
number = abs(snake.MapChunks[1]-snake.MapChunks[3])
coord=self.checkb(snake.body,i-1,i,-number,number)[1]
self.c.create_rectangle(coord,fill=snake.snapColor,outline=snake.snapColor,tags="snake"+str(id(snake)+i+snake.ChainLen))
#print("Adding: snake"+str(i+snake.ChainLen))
self.c.create_oval(self.pos(snake.body[i][0],snake.body[i][1]),fill=snake.bodyColor,outline=snake.bodyColor,tags="snake"+str(id(snake)+i+snake.ChainLen))
self.c.create_oval(self.pos(snake.body[i-1][0],snake.body[i-1][1]),fill=snake.bodyColor,outline=snake.bodyColor,tags="snake"+str(id(snake)+i+snake.ChainLen))
#print("Adding: snake"+str(i+snake.ChainLen))
            snake.ChainLen+=1 # ChainLen keeps growing for as long as the game runs; a very long session could lag, in which case this counter would need to be reset after some time or length
print("CHainlen"+str(id(snake)+i+snake.ChainLen),str(snake.ChainLen+i))
def removeSnake(self,snake):
for i in range(0,len(snake.body)+snake.ChainLen):
self.c.itemconfig("snake"+str(id(snake)+snake.lastBlockChain),fill="black",outline="black")
#snake.lastBlockChain+=1
def placeFood(self,*food):
if food: self.food = food
if self.food != []:
x,y,x2,y2 = self.pos(self.food[0],self.food[1])
self.c.create_oval(x+self.Block*0.25,y+self.Block*0.25,x2-self.Block*0.25,y2-self.Block*0.25,fill="red",outline="red",tags="food")
else:
print("Error: No Food")
            self.food = randomFood(None, []) # randomFood ignores its Player argument; an empty list excludes no occupied cells
# ------------------------------
class Snake():
def __init__(self, Map, Name, body, Joystick, borderbool, direction, snapColor="white", bodyColor="lightblue",growLen=4):
super().__init__()
self.Map = Map
self.Name = Name
self.body = body
self.Joystick = Joystick
self.Pop = True
self.borderbool = borderbool
self.lastdirection = direction
self.JoyDirection=0
self.score = 0
self.snapColor=snapColor
self.bodyColor=bodyColor
self.lastBlockChain = 0
self.ChainLen = 0
self.counter = 0
self.growLen = growLen
self.startBody = copy.deepcopy(body)
self.bodyLen = len(self.body)
#print(self.startBody)
self.startDir = copy.copy(direction)
self.lose=False
#print(self.MapArea,self.Block)
#print(self.Map.MapArea[0:2],int(self.Map.MapArea[0]/self.Map.Block-1))
#print(int(self.Map.MapArea[1]/self.Map.Block-1))
self.MapChunks = self.Map.MapChunks
#print(self.MapChunks[0:4])
if self.borderbool==False: # Setting the Border Function
self.border=self.borderOff
elif self.borderbool==True:
            self.border=self.borderOn
#else:
# print("You need to select an option at the variable border")
#self.border = border()
def borderOff(self): # Border Option1 # If the Head is out of Map then change the location of the Head to the other site of the Map
if self.body[-1][0] > self.MapChunks[2]: #x
#x,y=self.body[-1]
self.body[-1][0] = self.MapChunks[0]
elif self.body[-1][0] < self.MapChunks[0]:
x,y=self.body[-1]
self.body[-1][0] = self.MapChunks[2]
if self.body[-1][1] > self.MapChunks[3]: #y
#x,y=self.body[-1]
self.body[-1][1] = self.MapChunks[1]
elif self.body[-1][1] < self.MapChunks[1]:
#x,y=self.body[-1]
self.body[-1][1] = self.MapChunks[3]
def borderOn(self): # Border Option2
if self.body[-1][0] > self.MapChunks[2]: #x
self.lose=True
elif self.body[-1][0] < self.MapChunks[0]:
self.lose=True
if self.body[-1][1] > self.MapChunks[3]: #y
self.lose=True
elif self.body[-1][1] < self.MapChunks[1]:
self.lose=True
def popSnake(self,x): # Using this method so if you get food you can decide if you want to pop(remove) the last body part
if self.Pop==True:
#print(x)
self.body.pop(x)
else:
self.counter+=1
if self.counter > self.growLen-1:
self.Pop = True
self.counter=0
def move(self,direction):
#print("lastDir:",self.lastdirection)
#print("direction:",direction)
if direction == "right" and self.lastdirection != "left": # if it's going to the Left it's not allowed to go Right (Opposite Direction)
x,y=self.body[-1] # x & y from Head
self.body.append([x+1,y]) # Add a body Part in the moving direction
self.popSnake(0) # Remove the last body Part
self.border() # Executing the border
elif direction == "left" and self.lastdirection != "right":
x,y=self.body[-1]
self.body.append([x-1,y])
self.popSnake(0)
self.border()
elif direction == "up" and self.lastdirection != "down":
x,y=self.body[-1]
self.body.append([x,y-1])
self.popSnake(0)
self.border()
elif direction == "down" and self.lastdirection != "up":
x,y=self.body[-1]
self.body.append([x,y+1])
self.popSnake(0)
self.border()
else: # If it wasn't moving then repeat the old Movement
direction = self.lastdirection
self.move(direction)
self.lastdirection = direction
# -----------------------------------------
def getdir(x,*y):
if type(x) == tuple:
x,y = x
if y == 1:
y="down"
elif y == 2:
y="up"
if x == 1:
x="left"
elif x == 2:
x="right"
if type(x) == str and type(y) == str:
out=0
elif type(x) == str:
out=x
elif type(y) == str:
out=y
else: out=0
#print("dir:",out)
return out
import random
def randomFood(Player,someList): # not tested
localMap = []
for x in range(0,Area.MapChunks[2]+1):
for y in range(0,Area.MapChunks[3]+1):
localMap+=[[x,y]]
for i in someList:
try: localMap.remove(i)
except ValueError:
Print("error")
return localMap[random.randint(0,len(localMap)-1)]
def checkFood(Map,Players=None,Player=None): # not tested
if Map.food!=[]:
if Player:
if Player.body[-1]==Map.food:
Player.score+=1
Map.text(Player)
someList=[]
for i in Players:
for z in i.body:
x,y=z
someList+=[[x,y]]
Map.food = randomFood(Player,someList)
Player.Pop=False
print(Map.food)
Area.placeFood()
return True
else: return False
else:
someList=[]
for i in Players:
for z in i.body:
x,y=z
someList+=[[x,y]]
Map.food = randomFood(Player,someList)
Area.placeFood()
def collision(body,head): # If Head in a Body part (Other Player or Itself) then return True
for i in body:
if i==head: return True
return False
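# --- Illustrative helper (not used by the game above): the wrap-around rule
# implemented in Snake.borderOff can be written as one small pure function.
# map_chunks is (min_x, min_y, max_x, max_y) in block coordinates; a head that
# leaves the grid on one edge reappears on the opposite edge.
def wrap_position(x, y, map_chunks):
    min_x, min_y, max_x, max_y = map_chunks
    if x > max_x: x = min_x
    elif x < min_x: x = max_x
    if y > max_y: y = min_y
    elif y < min_y: y = max_y
    return x, y
# Example: on a 0..49 grid, wrap_position(50, 10, (0, 0, 49, 49)) == (0, 10)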
###########################################################
# ----------------------- Settings -----------------------#
root = tk.Tk()
# Master, X,Y,Background color,Offset (Thickness of the Frame), Block thickness, Frame Color, Thin Frame Color
Area=Map(root,500,500,"black",10,10,"pink","white") # Creating Object from Class Map
Area.BuildMap() # Building the Map once
# Map, Player Name, Body, which Joystick, enable/disable Border, start direction, snapcolor, bodycolor, growLength (How long the snake gets after eating food)(Don't go over 2 or 5... It's not perfect... It's maybe buggy)
Player1 = Snake(Area,"Jeff",[[6,6],[5,6],[4,6],[3,6]],1,False,"left","white","lightblue",3)
Player2 = Snake(Area,"Felix",[[2,2],[3,2],[4,2],[5,2]],2,False,"left","yellow","purple",3)
# --------------------------------------------------------#
###########################################################
Playerslist=[Player1,Player2]
Players=[Player1,Player2]
import Joystick as Joy
def Input():
global stop
for Player in Players:
#print(Player.Joystick)
i=getdir(Joy.location(Player.Joystick))
if i!=0:
Player.JoyDirection=i # This is the Direction. If it's set then it has or gets a string named "left","right","up","down" and you could change that to a keyboard Input
if stop==False: root.after(1,Input)
def losePlayer(Player,Players,remove=False):
#print(type(Player.body))
if remove:
for i in range(len(Player.body)):
c=Area.getCanvas()[0]
c.itemconfig("snake"+str(id(Player)+Player.lastBlockChain),fill="black",outline="black")
Player.lastBlockChain+=1
Players.remove(Player)
#del Player
def start(remove=False): # Main
global stop
global food
for Player in Players: # Executing it for each Player
Player.move(Player.JoyDirection)
Area.drawSnake(Player)
checkFood(Area,Players,Player)
for i in Players: # For each Player again
if i!=Player: # Not the Player itself
if collision(i.body,Player.body[-1]): # Check if Player Collides with other Player
if remove==False: Area.drawCollision(Player.body[-1])
Player.lose=True
if i.body[-1]==Player.body[-1]:
if remove==False: Area.drawCollision(Player.body[-1])
print(i.Name,Player.Name)
i.lose=True
Player.lose=True
else:
if len(Player.body) > 4: # Check if Player Collides with itself
if collision(i.body[0:-2],Player.body[-1]):
if remove==False: Area.drawCollision(Player.body[-1])
Player.lose=True
for i in Players:
if i.lose:
for z in range(len(Players)):
if Players[z].lose: print(Players[z].Name,Players[z].lose)
i.score-=1
Area.text(i)
print(i.Name,i.score)
for i in Players:
if i.lose: losePlayer(i,Players,remove)
if len(Players)<2:
stop=True
#stop=True
#print(Player.body)
if stop==False: root.after(100,start)
else:
root.after(1000,restartScreen)
def restartScreen():
global stop
global Players
global Playerslist
Players=copy.copy(Playerslist)
for i in Players:
#print(i.startBody)
i.body = copy.deepcopy(i.startBody)
i.lastdirection = i.startDir
i.JoyDirection = i.startDir
i.lose=False
i.bodyLen=len(i.body)
#c=Area.getCanvas()[0]
#frame=Area.getCanvas()[1]
Area.frame.pack_forget()
Area.frame2.pack_forget()
Area.frame3.pack_forget()
#for i in range(len(Area.Label)):
# Area.Label[i].pack_forget()
Area.BuildMap()
Area.setScores(Players)
stop=False
Area.food=[]
checkFood(Area,Players)
start()
Input()
Area.setScores(Players)
Area.food=[5,5]
Area.placeFood()
stop=False
Input()
start()
root.wm_title("Nice Snake Game not")
root.mainloop()
|
from django.contrib import admin
from .models import WeChatUser,PhoneUser,UserCreateBookList,BookInList,StarBookList
admin.site.register(WeChatUser)
admin.site.register(PhoneUser)
admin.site.register(UserCreateBookList)
admin.site.register(BookInList)
admin.site.register(StarBookList)
# Register your models here.
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 .
# Guijin Ding, dingguijin@gmail.com
#
#
from .basehandler import BaseHandler
from ppmessage.core.redis import redis_hash_to_dict
from ppmessage.core.utils.datetimestring import datetime_to_timestamp
from ppmessage.core.utils.datetimestring import string_to_datetime
from ppmessage.core.constant import API_LEVEL
from ppmessage.db.models import OrgGroupUserData
from ppmessage.db.models import DeviceUser
from ppmessage.db.models import DeviceInfo
from ppmessage.db.models import OrgGroup
from ppmessage.api.error import API_ERR
import json
import time
import logging
import datetime
from operator import itemgetter
def _online(_redis, _device_uuid):
_key = DeviceInfo.__tablename__ + ".uuid." + _device_uuid
if not _redis.exists(_key):
return False
if _redis.hget(_key, "device_is_online") == "True":
return True
return False
def _group(_redis, _user_uuid):
_key = OrgGroupUserData.__tablename__ + ".user_uuid." + _user_uuid
_group_uuids = _redis.smembers(_key)
if _group_uuids == None:
return None
_gs = []
for _group_uuid in _group_uuids:
_key = OrgGroup.__tablename__ + ".uuid." + _group_uuid
_group_name = _redis.hget(_key, "group_name")
_gs.append({"uuid": _group_uuid, "group_name": _group_name})
return _gs
def _single(_redis, _user):
_is_mobile_online = False
_is_browser_online = False
_device_uuid = _user.get("mobile_device_uuid")
if _device_uuid != None:
_is_mobile_online = _online(_redis, _device_uuid)
_device_uuid = _user.get("browser_device_uuid")
if _device_uuid != None:
_is_browser_online = _online(_redis, _device_uuid)
_d = {}
_d["is_browser_online"] = _is_browser_online
_d["is_mobile_online"] = _is_mobile_online
_d["group"] = _group(_redis, _user.get("uuid"))
_fields = [
"uuid",
"user_icon",
"user_email",
"user_fullname",
"user_signature",
"updatetime",
]
for _i in _fields:
_d[_i] = _user.get(_i)
if isinstance(_d["updatetime"], str):
_updatetime = string_to_datetime(_d["updatetime"])
_d["updatetime"] = datetime_to_timestamp(_updatetime)
return _d
def single_user(_redis, _user_dict):
return _single(_redis, _user_dict)
class PPGetOrgGroupUserListHandler(BaseHandler):
def _get_users(self, _users):
_redis = self.application.redis
_pi = _redis.pipeline()
_pre = DeviceUser.__tablename__ + ".uuid."
for _user_uuid in _users:
_key = _pre + _user_uuid
_pi.hgetall(_key)
_unsort = _pi.execute()
return _unsort
def _get(self, _app_uuid, _group_uuid):
_redis = self.application.redis
_key = OrgGroup.__tablename__ + ".app_uuid." + _app_uuid
_is = _redis.sismember(_key, _group_uuid)
if _is != True:
self.setErrorCode(API_ERR.NO_ORG_GROUP)
return
_r = self.getReturnData()
_r["list"] = []
_key = OrgGroupUserData.__tablename__ + ".group_uuid." + _group_uuid
_users = _redis.smembers(_key)
if _users == None or len(_users) == 0:
return
_users = self._get_users(_users)
for _user in _users:
_updatetime = string_to_datetime(_user["updatetime"], "extra")
_user["updatetime"] = datetime_to_timestamp(_updatetime)
_sorted = sorted(_users, key=itemgetter("updatetime"), reverse=True)
_shrinked_users = []
for _user in _sorted:
_shrinked_users.append(single_user(_redis, _user))
_r["list"] = _shrinked_users
return
def initialize(self):
self.addPermission(app_uuid=True)
self.addPermission(api_level=API_LEVEL.PPCOM)
self.addPermission(api_level=API_LEVEL.PPKEFU)
self.addPermission(api_level=API_LEVEL.PPCONSOLE)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_KEFU)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_CONSOLE)
return
def _Task(self):
super(PPGetOrgGroupUserListHandler, self)._Task()
_body = json.loads(self.request.body)
if "app_uuid" not in _body or "group_uuid" not in _body:
self.setErrorCode(API_ERR.NO_PARA)
return
self._get(_body.get("app_uuid"), _body.get("group_uuid"))
return
|
class A:
pass
class B(A):
pass
class S(str):
pass
class Animal:
pass
class Person(Animal):
def __init__(self, name: str, age: int):
self.name = name
self.age = age
class PersonAndStr:
def __init__(self, person_a: Person, st: str):
self.person_a = person_a
self.st = st
class DefaultValueClass:
def __init__(self, person: Person, name: str = "Annie", age: int = 55):
self.person = person
self.name = name
self.age = age
|
from django.contrib import admin
from django.urls import path
from base import views
urlpatterns = [
path('', views.algorithmsView, name='pagina_inicial'),
path(r'dataset/', views.datasetView, name='dataset'),
path(r'training/', views.trainingView, name='training'),
#Create
path(r'create_dataset/', views.createDataset, name='create_dataset'),
path(r'create_network/', views.createNetwork, name='create_network'),
path(r'create_dataset_txt/', views.createDatasetTxt, name='create_dataset_txt'),
#Delete
path(r'delete_dataset/', views.deleteDataset, name='delete_dataset'),
#Algorithms
path(r'kececioglu_algorithm/', views.kececiogluAlgorithm, name='kececioglu_algorithm'),
path(r'reinforcement_algorithm/', views.reinforcementAlgorithm, name='reinforcement_algorithm'),
]
|
import numpy as np
from numpy import ndarray
import scipy.linalg as la
#import solution
from utils.gaussparams import MultiVarGaussian
from config import DEBUG
from typing import Sequence
def get_NIS(z_pred_gauss: MultiVarGaussian, z: ndarray):
"""Calculate the normalized innovation squared (NIS), this can be seen as
the normalized measurement prediction error squared.
See (4.66 in the Sensor Fusion book.)
Tip: use the mahalanobis_distance method of z_pred_gauss, (3.2) in the book
Args:
        z_pred_gauss (MultiVarGaussian): predicted measurement gaussian
z (ndarray): measurement
Returns:
NIS (float): normalized innovation squared
"""
z_pred, S = z_pred_gauss
v = z - z_pred
NIS = v.T @ np.linalg.inv(S)@ v
return NIS
def get_NEES(x_gauss: MultiVarGaussian, x_gt: ndarray):
"""Calculate the normalized estimation error squared (NEES)
See (4.65 in the Sensor Fusion book).
Tip: use the mahalanobis_distance method of x_gauss, (3.2) in the book
Args:
x_gauss (MultiVarGaussian): state estimate gaussian
x_gt (ndarray): true state
Returns:
NEES (float): normalized estimation error squared
"""
x_hat, P_hat = x_gauss
err = x_hat - x_gt
NEES = err.T @ np.linalg.inv(P_hat) @ err
return NEES
def get_ANIS(z_pred_gauss_data: Sequence[MultiVarGaussian],
z_data: Sequence[ndarray]):
"""Calculate the average normalized innovation squared (ANIS)
Tip: use get_NIS
Args:
z_pred_gauss_data (Sequence[MultiVarGaussian]): Sequence (List) of
predicted measurement gaussians
z_data (Sequence[ndarray]): Sequence (List) of true measurements
Returns:
ANIS (float): average normalized innovation squared
"""
NIS_arr = np.array([])
for i in range(len(z_data)):
NIS = get_NIS(z_pred_gauss_data[i], z_data[i])
        NIS_arr = np.append(NIS_arr, NIS)  # np.append returns a new array
ANIS = np.average(NIS_arr)
return ANIS
def get_ANEES(x_upd_gauss_data: Sequence[MultiVarGaussian],
x_gt_data: Sequence[ndarray]):
"""Calculate the average normalized estimation error squared (ANEES)
Tip: use get_NEES
Args:
x_upd_gauss_data (Sequence[MultiVarGaussian]): Sequence (List) of
state estimate gaussians
x_gt_data (Sequence[ndarray]): Sequence (List) of true states
Returns:
ANEES (float): average normalized estimation error squared
"""
NEES_arr = np.array([])
for i in range(len(x_gt_data)):
NEES = get_NEES(x_upd_gauss_data[i], x_gt_data[i])
        NEES_arr = np.append(NEES_arr, NEES)  # np.append returns a new array
ANEES = np.average(NEES_arr)
return ANEES
# def get_RMSE(x_upd_gauss_data: Sequence[MultiVarGaussian],
#              x_gt_data: Sequence[ndarray]):
# #TODO
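# --- Illustrative usage sketch (assumptions: scipy.stats is available and the
# standard result that, for a consistent filter, NIS is chi-square distributed
# with dim(z) degrees of freedom): ANIS over N samples can be compared against
# the confidence bounds returned by this helper.
def nis_confidence_bounds(num_samples, dof, alpha=0.95):
    """Return (lower, upper) bounds for the average NIS over num_samples."""
    import scipy.stats
    lower = scipy.stats.chi2.ppf((1 - alpha) / 2, dof * num_samples)
    upper = scipy.stats.chi2.ppf((1 + alpha) / 2, dof * num_samples)
    return lower / num_samples, upper / num_samples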
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .utils import MultiTaskStopOnPlateau
import json
import logging
from io import open
from easydict import EasyDict as edict
import torch
import os
from imix.models.builder import VQA_MODELS
from transformers.modeling_bert import BertConfig
from ..base_model import BaseModel
from .task_utils import compute_score_with_logits
from .vilbert import VILBertForVLTasks
logger = logging.getLogger(__name__)
@VQA_MODELS.register_module()
class VILBERT(BaseModel):
def __init__(self, **kwargs):
super().__init__()
self.config = config = kwargs['params']
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.root_path = os.path.dirname(__file__)
# task_lr = []
task_ids = []
for i, task_id in enumerate(config.tasks.split('-')):
task = 'TASK' + task_id
cfg = config.TASKS[task]
name = cfg.name
task_ids.append(task)
# task_lr.append(cfg.lr)
# base_lr = min(task_lr)
# loss_scale = {}
# for i, task_id in enumerate(config.tasks.split('-')):
# task = 'TASK' + task_id
# loss_scale[task] = task_lr[i] / base_lr
# task_ave_iter = {}
self.task_stop_controller = {}
for task_id in task_ids:
# task_ave_iter[task_id] = int(config.TASKS[task]['num_epoch'] * num_iter *
# config.train_iter_multiplier /
# config.TASKS[task]['num_epoch']) # config.total_epochs)
self.task_stop_controller[task_id] = MultiTaskStopOnPlateau(
mode='max',
patience=1,
continue_threshold=0.005,
cooldown=1,
threshold=0.001,
)
# task_ave_iter_list = sorted(task_ave_iter.values())
# median_num_iter = task_ave_iter_list[-1]
# num_train_optimization_steps = (
# median_num_iter * \
# config.total_epochs // config.gradient_accumulation_steps
# )
num_labels = max([config.TASKS[k]['num_labels'] for k in task_ids])
bertconfig = BertConfig.from_dict(config)
if bertconfig.visual_target == 0:
bertconfig.v_config.target_size = 1601
else:
bertconfig.v_config.target_size = 2048
if 'roberta' in config.bert_model:
bertconfig.model = 'roberta'
self.model = VILBertForVLTasks.from_pretrained(
config.from_pretrained,
config=bertconfig,
num_labels=num_labels,
)
if config.freeze != -1:
bert_weight_name = json.load(
open(self.root_path + '/config/' + config.bert_model + '_weight_name.json', 'r'))
bert_weight_name_filtered = []
for name in bert_weight_name:
if 'embeddings' in name:
bert_weight_name_filtered.append(name)
elif 'encoder' in name:
layer_num = name.split('.')[2]
if int(layer_num) <= config.freeze:
bert_weight_name_filtered.append(name)
for key, value in dict(self.model.named_parameters()).items():
if key[12:] in bert_weight_name_filtered:
value.requires_grad = False
logger.info('filtered weight')
logger.info(bert_weight_name_filtered)
self.lr_reduce_list = [5, 7]
self.global_step = 0
self.task_iter_train = {name: None for name in task_ids}
self.task_count = {name: 0 for name in task_ids}
self.task_ids = task_ids
def run_one_time(self, task_id, data):
params = self.get_image_and_text_features(task_id, data)
(
vil_prediction,
vil_prediction_gqa,
vil_logit,
vil_binary_prediction,
vil_tri_prediction,
vision_prediction,
vision_logit,
linguisic_prediction,
linguisic_logit,
_,
) = self.model(
params.question,
params.features,
params.spatials,
params.segment_ids,
params.input_mask,
params.image_mask,
params.co_attention_mask,
params.task_tokens,
)
target = params.target
batch_size = params.batch_size
multiple_choice_ids = params.multiple_choice_ids
num_options = params.num_options
cfg_type = self.config.TASKS[task_id]['type']
if cfg_type == 'VL-classifier':
batch_score = compute_score_with_logits(vil_prediction, target).sum()
pred = vil_prediction
elif cfg_type == 'VL-classifier-GQA':
batch_score = compute_score_with_logits(vil_prediction_gqa, target).sum()
pred = vil_prediction_gqa
elif cfg_type == 'VL-logit':
vil_logit = vil_logit.view(batch_size, num_options)
_, preds = torch.max(vil_logit, 1)
batch_score = float((preds == target).sum())
pred = vil_logit
elif cfg_type == 'V-logit':
_, select_idx = torch.max(vision_logit, dim=1)
select_target = target.squeeze(2).gather(1, select_idx.view(-1, 1))
batch_score = float(torch.sum(select_target > 0.5))
pred = vision_logit
elif cfg_type == 'V-logit-mc':
vision_logit = vision_logit[:, 101:]
vision_logit = vision_logit.squeeze(2).gather(1, multiple_choice_ids)
vision_logit = vision_logit.unsqueeze(2)
_, preds = torch.max(vision_logit, dim=1)
_, target_tmp = torch.max(target, dim=1)
batch_score = float((preds == target_tmp).sum())
pred = vision_logit
elif cfg_type == 'VL-binary-classifier':
batch_score = compute_score_with_logits(vil_binary_prediction, target).sum()
pred = vil_binary_prediction
elif cfg_type == 'VL-tri-classifier':
batch_score = compute_score_with_logits(vil_tri_prediction, target).sum()
pred = vil_tri_prediction
return edict({
'scores': pred,
'target': target,
'batch_score': batch_score,
'batch_size': batch_size,
})
def forward_train(self, data, **kwargs):
iterId = kwargs['cur_iter']
epochId = kwargs['cur_epoch']
step = kwargs['inner_iter']
# torch.autograd.set_detect_anomaly(True)
first_task = True
model_output = {}
for task_id in self.task_ids:
is_forward = False
if (not self.task_stop_controller[task_id].in_stop) or (iterId % self.config.train_iter_gap == 0):
is_forward = True
if is_forward:
output_dict = self.run_one_time(task_id, data)
output_dict.batch_score /= output_dict.batch_size
model_output[task_id] = {
'scores': output_dict.scores,
'target': output_dict.target,
'batch_score': output_dict.batch_score,
}
if (step + 1) % self.config.gradient_accumulation_steps == 0:
# if config.fp16:
# lr_this_step = config[learning_rate] * warmup_linear(
# global_step / num_train_optimization_steps,
# config[warmup_proportio]n,
# )
# for param_group in optimizer.param_groups:
# param_group["lr"] = lr_this_step
# if first_task and (
# global_step < warmpu_steps
# or config.lr_scheduler == "warmup_linear"
# ):
# warmup_scheduler.step()
if first_task:
self.global_step += 1
first_task = False
# if "cosine" in config.lr_scheduler and global_step > warmpu_steps:
# lr_scheduler.step()
# if config.lr_scheduler == "automatic":
# lr_scheduler.step(sum(val_scores.values()))
# logger.info("best average score is %3f" % lr_scheduler.best)
# elif config.lr_scheduler == "mannul":
# lr_scheduler.step()
if epochId in self.lr_reduce_list:
for task_id in self.task_ids:
# reset the task_stop_controller once the lr drop
self.task_stop_controller[task_id]._reset()
# now only one task
return model_output[task_id]
def forward_test(self, data, **kwargs):
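        """Evaluate a single task (the first configured one) and return its
        batch score and batch size."""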
# test now does not support **kwargs
if isinstance(self.task_ids, list):
task_id = self.task_ids[0]
else:
task_id = self.task_ids
# torch.autograd.set_detect_anomaly(True)
model_output = {}
output_dict = self.run_one_time(task_id, data)
model_output[task_id] = {
'batch_score': output_dict.batch_score,
'batch_size': output_dict.batch_size,
}
# # update the multi-task scheduler.
# self.task_stop_controller[task_id].step(
# tbLogger.getValScore(task_id))
# score = tbLogger.showLossVal(task_id, task_stop_controller)
# now only one task
return model_output[task_id]
def get_image_and_text_features(self, task_id, data):
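        """Move the batch to the GPU and reshape the question/image tensors
        according to the task's process type (dialog, expand, retrieval or
        nlvr), returning everything the model forward pass needs."""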
batch = tuple(t.cuda(device=self.device, non_blocking=True) for t in data)
if task_id == 'TASK4' or task_id == 'TASK17':
            (features, spatials, image_mask, question, target, input_mask, segment_ids, multiple_choice_ids,
             co_attention_mask, question_id) = batch
else:
            (features, spatials, image_mask, question, target, input_mask, segment_ids, co_attention_mask,
             question_id) = batch
num_options = None
batch_size = features.size(0)
cfg_process = self.config.TASKS[task_id]['process']
if cfg_process in ['dialog']:
max_num_bbox = features.size(1)
nround = question.size(1)
num_options = question.size(2)
rbatch_size = batch_size * nround
question = question.view(rbatch_size, question.size(2), question.size(3))
target = target.view(-1)
input_mask = input_mask.view(rbatch_size, input_mask.size(2), input_mask.size(3))
segment_ids = segment_ids.view(rbatch_size, segment_ids.size(2), segment_ids.size(3))
co_attention_mask = co_attention_mask.view(
rbatch_size,
co_attention_mask.size(2),
co_attention_mask.size(3),
co_attention_mask.size(4),
)
features = (
features.unsqueeze(1).unsqueeze(1).expand(batch_size, nround, num_options, max_num_bbox,
2048).contiguous().view(-1, max_num_bbox, 2048))
spatials = (
spatials.unsqueeze(1).unsqueeze(1).expand(batch_size, nround, num_options, max_num_bbox,
5).contiguous().view(-1, max_num_bbox, 5))
image_mask = (
image_mask.unsqueeze(1).expand(batch_size, nround, num_options,
max_num_bbox).contiguous().view(-1, max_num_bbox))
question = question.view(-1, question.size(2))
input_mask = input_mask.view(-1, input_mask.size(2))
segment_ids = segment_ids.view(-1, segment_ids.size(2))
co_attention_mask = co_attention_mask.view(-1, co_attention_mask.size(2), co_attention_mask.size(3))
batch_size = rbatch_size
elif cfg_process in ['expand']:
max_num_bbox = features.size(1)
num_options = question.size(1)
features = (
features.unsqueeze(1).expand(batch_size, num_options, max_num_bbox,
2048).contiguous().view(-1, max_num_bbox, 2048))
spatials = (
spatials.unsqueeze(1).expand(batch_size, num_options, max_num_bbox,
5).contiguous().view(-1, max_num_bbox, 5))
image_mask = (
image_mask.unsqueeze(1).expand(batch_size, num_options,
max_num_bbox).contiguous().view(-1, max_num_bbox))
question = question.view(-1, question.size(2))
input_mask = input_mask.view(-1, input_mask.size(2))
segment_ids = segment_ids.view(-1, segment_ids.size(2))
co_attention_mask = co_attention_mask.view(-1, co_attention_mask.size(2), co_attention_mask.size(3))
elif cfg_process in ['retrieval']:
max_num_bbox = features.size(1)
num_options = question.size(1)
features = features.view(-1, features.size(2), features.size(3))
spatials = spatials.view(-1, spatials.size(2), spatials.size(3))
image_mask = image_mask.view(-1, image_mask.size(2))
question = question.view(-1, question.size(2))
input_mask = input_mask.view(-1, input_mask.size(2))
segment_ids = segment_ids.view(-1, segment_ids.size(2))
co_attention_mask = co_attention_mask.view(-1, co_attention_mask.size(2), co_attention_mask.size(3))
elif cfg_process in ['nlvr']:
batch_size = features.size(0)
max_num_bbox = features.size(1)
num_options = question.size(1)
features = features.view(batch_size * 2, int(features.size(1) / 2), features.size(2))
spatials = spatials.view(batch_size * 2, int(spatials.size(1) / 2), spatials.size(2))
image_mask = image_mask.view(batch_size * 2, int(image_mask.size(1) / 2))
question = question.repeat(1, 2)
question = question.view(batch_size * 2, int(question.size(1) / 2))
input_mask = input_mask.repeat(1, 2)
input_mask = input_mask.view(batch_size * 2, int(input_mask.size(1) / 2))
segment_ids = segment_ids.repeat(1, 2)
segment_ids = segment_ids.view(batch_size * 2, int(segment_ids.size(1) / 2))
co_attention_mask = co_attention_mask.view(
batch_size * 2,
int(co_attention_mask.size(1) / 2),
co_attention_mask.size(2),
)
task_tokens = question.new().resize_(question.size(0), 1).fill_(int(task_id[4:]))
return edict({
'question': question,
'features': features,
'spatials': spatials,
'segment_ids': segment_ids,
'input_mask': input_mask,
'image_mask': image_mask,
'co_attention_mask': co_attention_mask,
'task_tokens': task_tokens,
'target': target,
'batch_size': batch_size,
'multiple_choice_ids': multiple_choice_ids if task_id == 'TASK4' or task_id == 'TASK17' else None,
'num_options': num_options,
})
|
import redis
from threading import Thread, RLock
import json
import time
import queue
import traceback
import logging
import random
from . import defaults
from . import common
from .utils import (
hook_console,
__org_stdout__,
_stdout,
_stderr,
check_connect_sender,
Valve,
TaskEnv,
)
from .pipeline import (
send_to_pipeline,
send_to_pipeline_data,
from_pipeline_execute,
)
from .order import (
cmdline_command,
script_command,
)
from .error import NotInDefaultType
class Worker(common.Initer):
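    """Task worker: subscribes to the Redis publish channels for orders, queues
    incoming tasks, and executes them on pools of worker threads."""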
def __init__(self,
rds = redis.StrictRedis(),
workerid = None
):
def wait_connect_pub_worker(self):
rname = '{}:{}'.format(defaults.VREDIS_PUBLISH_WORKER, self.workerid)
cursub = self.rds.pubsub_numsub(rname)[0][1]
self._pub = self.rds.pubsub()
self._pub.subscribe(rname)
while self.rds.pubsub_numsub(rname)[0][1] == cursub:
time.sleep(.15)
            self._pubn = int(self.rds.pubsub_numsub(rname)[0][1])  # due to a quirk in redis itself this is not always 1, so the actual count has to be passed along
self.rds = rds
self.rds.ping()
self.pull_task = queue.Queue()
self.cmdline_task = queue.Queue()
self.workerid = self.rds.hincrby(defaults.VREDIS_WORKER, defaults.VREDIS_WORKER_ID)\
if workerid is None else workerid
self.tasklist = set()
hook_console()
        wait_connect_pub_worker(self)  # before starting any task, wait for our own broadcast subscription to open, so the sender side can detect any kind of worker disconnect
        self._thread_num = 0  # tracks how many threads are currently in use; a warning is printed when too many pull_task threads run at the same time
self._settings = getattr(self, '_settings', {})
@classmethod
def from_settings(cls, **kw):
cls._settings = kw
rds = cls.redis_from_settings(**kw)
d = dict(
workerid = None
)
        # configuration-style parameters
for i in kw:
if i in d:
d[i] = kw[i]
if hasattr(defaults,i):
setattr(defaults,i,kw[i])
return cls(rds=rds,**d)
    # split a function into (func, args, kwargs, start, err, stop) for later dispatch
@staticmethod
def disassemble_func(func,start=None,err=None,stop=None):
def _disassemble(*a,**kw):
return func, a, kw, start, err, stop
return _disassemble
def connect_work_queue(self,_queue,taskid,workerid,order):
def _task_func(task_func):
def pack_task(*a,**kw):
                # inject the "start", "error" and "stop" callbacks into the task and put it on the thread execution queue
_start = self.disassemble_func(send_to_pipeline)(self,taskid,workerid,order,'start',plus=self._pubn)
_error = self.disassemble_func(send_to_pipeline)(self,taskid,workerid,order,'error')
_stop = self.disassemble_func(send_to_pipeline)(self,taskid,workerid,order,'stop')
_task = self.disassemble_func(task_func,start=_start,err=_error,stop=_stop)(*a,**kw)
_queue.put(_task)
return pack_task
return _task_func
def process_order(self):
def _start():
print('open worker id:',self.workerid)
self.pub = self.rds.pubsub()
self.pub.subscribe(defaults.VREDIS_PUBLISH_WORKER)
for i in self.pub.listen():
                # skip subscription-confirmation messages
if i['type'] == 'subscribe': continue
order = json.loads(i['data'])
workerid = self.workerid
taskid = order['taskid']
order = order['order']
pull_looper = self.connect_work_queue(self.pull_task, taskid,workerid,order)
                cmdl_looper = self.connect_work_queue(self.cmdline_task,taskid,workerid,order)  # not used yet
if order['command'] == 'cmdline': cmdl_looper(cmdline_command)(self,taskid,workerid,order)
elif order['command'] == 'script': pull_looper(script_command) (self,taskid,workerid,order)
idx = 0
        # brute-force workaround for network interruptions
while True:
try:
try:
self.rds.ping()
except:
self.rds = super(Worker, self).redis_from_settings(**self._settings)
_start()
except:
idx += 1
__org_stdout__.write('unconnect, retry time:{}\n'.format(idx))
time.sleep(1)
continue
def _thread(self,_queue):
while True:
func,args,kwargs,start,err,stop = _queue.get()
with common.Initer.lock: self._thread_num += 1
def task(func,args,kwargs,start,err,stop):
                # So the stack walk can locate this frame and recover the taskid that separates
                # the per-task log environments, this locals() scope must contain this marker name
                # while no other scope does; see the hook functions in utils for details (it is magic).
__very_unique_function_name__ = None
taskid = start[1][1]
workerid = start[1][2]
order = start[1][3]
rds = self.rds
valve = Valve(taskid)
rdm = self.rds.hincrby(defaults.VREDIS_WORKER, taskid)
                # Valve filtering: use the task's own settings when present, otherwise fall back
                # to the default parameters in defaults; configure it just like the usual defaults.
try:
valve.update(order['settings'])
if start is not None:
start_callback,a,kw,_,_,_ = start
start_callback(*a,**kw)
func(*args,**kwargs)
except:
if err is not None:
err_callback,a,kw,_,_,_ = err
err_callback(*a,**kw,msg=traceback.format_exc())
finally:
self.rds.hdel(defaults.VREDIS_WORKER, taskid)
if stop is not None:
stop_callback,a,kw,_,_,_ = stop
if self._thread_num < valve.VREDIS_WORKER_THREAD_TASK_NUM:
stop_callback(*a,**kw,plus=(valve,TaskEnv))
else:
print('Warning! More than {} tasks are currently being performed, workerid:{}.' \
.format(self._thread_num-1,workerid))
Thread(target=stop_callback,args=a,kwargs={**kw,'plus':(valve,TaskEnv)}).start()
_stdout._clear_cache(taskid)
_stderr._clear_cache(taskid)
task(func,args,kwargs,start,err,stop)
with common.Initer.lock: self._thread_num -= 1
def _thread_run(self):
self.clear_tail = {}
while True:
for etask in list(TaskEnv.__taskenv__):
                # To let a worker's buffered tasks be safely re-dispatched to other workers after a crash, the handling below replaces the old approach
with common.Initer.lock:
if etask not in self.tasklist:
continue
if etask not in self.clear_tail:
self.clear_tail[etask] = 0
ret, rdata = from_pipeline_execute(self, etask)
with common.Initer.lock:
if etask not in self.tasklist:
if etask in self.clear_tail:
if self.clear_tail[etask] == 0:
self.clear_tail.pop(etask)
else:
self.clear_tail[etask] -= 1
if ret: self.rds.rpush('{}:{}'.format(defaults.VREDIS_TASK, etask), ret)
continue
else:
self.clear_tail[etask] += 1
if rdata:
taskid = rdata['taskid']
func_name = rdata['function']
args = rdata['args']
kwargs = rdata['kwargs']
plus = rdata['plus']
TaskEnv.incr(self.rds, taskid, self.workerid)
try:
func_str = '{}(*{},**{})'.format(func_name,args,kwargs)
taskenv = TaskEnv.get_env_locals(taskid)
                    # Magic marker variable, together with the get_task_locals call the magic needs;
                    # it looks unused but is actually used (it hooks the standard output stream).
__very_unique_function_name__ = None
taskid,workerid,order,rds,valve,rdm = TaskEnv.get_task_locals(taskid)
table = plus.get('table',valve.VREDIS_DATA_DEFAULT_TABLE)
if valve.VREDIS_HOOKCRASH is None:
                        # The way hookcrash is passed has been changed; it is a bit nicer now
                        # and no longer wastes transfer bandwidth.
with common.Initer.lock:
hookcrash = self.rds.hget(defaults.VREDIS_SENDER, '{}@hookcrash'.format(taskid))
valve.VREDIS_HOOKCRASH = json.loads(hookcrash)
if valve.VREDIS_KEEPALIVE:
if check_connect_sender(rds, taskid, order['sender_pubn']):
data = eval(func_str, None, taskenv)
send_to_pipeline_data(self,taskid,data,ret,table,valve)
else:
_cname = '{}:{}:{}'.format(defaults.VREDIS_TASK_CACHE, taskid, workerid)
self.rds.lrem(_cname, 1, ret)
else:
inter = self.rds.hget(defaults.VREDIS_WORKER, '{}@inter'.format(taskid)) or 0
if int(inter):
data = eval(func_str, None, taskenv)
send_to_pipeline_data(self,taskid,data,ret,table,valve)
else:
_cname = '{}:{}:{}'.format(defaults.VREDIS_TASK_CACHE, taskid, workerid)
self.rds.lrem(_cname, 1, ret)
except:
try:
                        # This task uses the task-configured namespace, so exceptions are handled bluntly here.
                        # Network interruptions can raise odd corner cases; a blanket except is enough for now.
send_to_pipeline(self,taskid,workerid,order,'error',traceback.format_exc())
except:
                        # code kept around for development/debugging:
# with common.Initer.lock:
# __org_stdout__.write(traceback.format_exc())
pass
try:
                        # if the task fails it is retried; the default maximum number of retries is 3
retry = plus.get('retry',0)
rdata['plus'].update({'retry':retry+1})
_rname = '{}:{}'.format(defaults.VREDIS_TASK, taskid)
_cname = '{}:{}:{}'.format(defaults.VREDIS_TASK_CACHE, taskid, self.workerid)
with self.rds.pipeline() as pipe:
pipe.multi()
if retry < defaults.VREDIS_TASK_MAXRETRY:
rdata.update({'traceback': traceback.format_exc()})
pipe.rpush(_rname, json.dumps(rdata))
else:
                                # record it in the error statistics and push it onto the persistent error queue for inspection
_ename = '{}:{}'.format(defaults.VREDIS_TASK_ERROR, taskid)
_sname_c = '{}:{}:{}'.format(defaults.VREDIS_TASK_STATE, taskid, self.workerid)
self.rds.hincrby(_sname_c,'fail',1)
self.rds.lpush(_ename, ret)
pipe.lrem(_cname, -1, ret)
pipe.execute()
except:
                        # keep exceptions from breaking the thread
pass
finally:
TaskEnv.decr(self.rds, taskid, self.workerid)
with common.Initer.lock:
self.clear_tail[etask] -= 1
time.sleep(defaults.VREDIS_WORKER_IDLE_TIME)
    # thread pool that pulls broadcast task signals down and sets up their execution environment
def process_pull_task(self):
for i in range(defaults.VREDIS_WORKER_THREAD_TASK_NUM):
Thread(target=self._thread,args=(self.pull_task,)).start()
    # thread pool that executes tasks directly
def process_run_task(self):
for i in range(defaults.VREDIS_WORKER_THREAD_RUN_NUM):
Thread(target=self._thread_run).start()
    # this acts as the pipeline for command-line transfer and execution
def process_run_cmdline(self):
for i in range(defaults.VREDIS_WORKER_THREAD_TASK_NUM):
Thread(target=self._thread,args=(self.cmdline_task,)).start()
_o_print = print
def _lk_print(*a,**kw):
with common.Initer.lock:
_o_print(*a,**kw)
__builtins__['print'] = _lk_print
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python (halite)
# language: python
# name: halite
# ---
# # FastAPI Usage
# 1. Build the container:
# ```sh
# docker build -t halite:latest -f Dockerfile .
# ```
#
#
# 2. Run the container:
# ```sh
# docker run --publish 8080:80 halite
# ```
from kaggle_environments import make
import requests
env = make("halite")
def act(url, observation, configuration):
"""
Sends a post request to one of the two agents.
"""
data = {"observation": observation, "configuration": configuration}
return requests.post(url=url, json=data).json()
URL = "http://localhost:8080/act"
OBS = env.state[0].observation
CONFIG = env.configuration
res = act(URL, OBS, CONFIG)
res
|
""" Reprosim library - routines for a modelling the lung
The Reprosim library is an advanced modelling library for models of the reproductive system.
"""
classifiers = """\
Development Status :: 4 - Beta
Intended Audience :: Developers
Intended Audience :: Education
Intended Audience :: Science/Research
License :: OSI Approved :: Apache Software License
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.5
Operating System :: Microsoft :: Windows
Operating System :: Unix
Operating System :: MacOS :: MacOS X
Topic :: Scientific/Engineering :: Medical Science Apps.
Topic :: Software Development :: Libraries :: Python Modules
"""
from setuptools import setup
doclines = __doc__#.split("\n")
setup(
name='reprosim',
version='@Reprosim_VERSION@',
author='Reproduction and Development Group, Auckland Bioengineering Institute.',
author_email='alys.clark@auckland.ac.nz',
packages=['reprosim'],
# package_data={'reprosim': []},
platforms=['any'],
url='http://www.abi.auckland.ac.nz/en/about/our-research/development-and-reproductive-health.html',
license='http://www.apache.org/licenses/LICENSE-2.0',
description='Reprosim library of routines for modelling the reproductive system.',
    classifiers = [c for c in classifiers.split("\n") if c],
long_description=doclines,
)
|
class LocalCache(object):
pass
|
from pwn import *
from Crypto.Util.number import *
from Crypto.Cipher import AES
import hashlib
debug = True
r = remote("crypto.chall.pwnoh.io", 13374, level = 'debug' if debug else None)
r.recvuntil('p = ')
p = int(r.recvline())
assert isPrime(p)
g = 5
r.recvuntil('A = ')
A = int(r.recvline())
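# We control our own private exponent, so just pick a fixed one (57): B = g**57 mod p,
# and the shared secret becomes A**57 mod p, which we can recompute locally.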
B = pow(g, 57, p)
ss = pow(A, 57, p)
r.sendlineafter('Give me your public key B: ', str(B))
r.recvuntil('ciphertext = ')
ct = r.recvline(keepends=False).decode()
key = hashlib.sha1(long_to_bytes(ss)).digest()[:16]
cipher = AES.new(key, AES.MODE_ECB)
print(cipher.decrypt(bytes.fromhex(ct)))
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import os, sys
import numpy as np
from cntk.cntk_py import DeviceKind_GPU
from cntk.device import try_set_default_device, gpu
from cntk.ops.tests.ops_test_utils import cntk_device
import pytest
abs_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(abs_path, "..", "..", "..", "..", "Examples", "Image", "Classification", "ConvNet", "Python" ))
from ConvNet_CIFAR10 import convnet_cifar10
TOLERANCE_ABSOLUTE = 2E-1
def test_convnet_cifar_error(device_id):
if cntk_device(device_id).type() != DeviceKind_GPU:
pytest.skip('test only runs on GPU')
try_set_default_device(cntk_device(device_id))
externalData = 'CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY' in os.environ
if externalData:
extPath = os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY']
dataset_path = os.path.join(extPath, "Image", "CIFAR", "v0")
else:
dataset_path = os.path.join(abs_path, "..", "..", "..", "..", "Examples", "Image", "DataSets", "CIFAR-10")
error = convnet_cifar10(data_path=dataset_path, epoch_size=2000, minibatch_size=32, max_epochs=10)
expected_error = 0.7
assert np.allclose(error, expected_error, atol=TOLERANCE_ABSOLUTE)
|
# http://stackoverflow.com/
# questions/12507274/how-to-get-bounds-of-a-google-static-map
import math
MERCATOR_RANGE = 256
def bound(value, opt_min, opt_max):
if opt_min is not None:
value = max(value, opt_min)
if opt_max is not None:
value = min(value, opt_max)
return value
def degrees_to_radians(deg):
return deg * (math.pi / 180)
def radians_to_degrees(rad):
return rad / (math.pi / 180)
class Point:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __repr__(self):
return "Point(%d,%d)" % (self.x, self.y)
def __str__(self):
return "(x=%d,y=%d)" % (self.x, self.y)
class LatLng:
def __init__(self, lt, ln):
self.lat = lt
self.lng = ln
def __repr__(self):
return "LatLng(%g,%g)" % (self.lat, self.lng)
def __str__(self):
return "(lat=%g,lng=%g)" % (self.lat, self.lng)
class MercatorProjection:
def __init__(self):
self.pixelOrigin_ = Point(int(MERCATOR_RANGE / 2.0), int(MERCATOR_RANGE / 2.0))
self.pixelsPerLonDegree_ = MERCATOR_RANGE / 360.0
self.pixelsPerLonRadian_ = MERCATOR_RANGE / (2.0 * math.pi)
def from_latlng_to_point(self, latlng, opt_point=None):
point = opt_point if opt_point is not None else Point(0, 0)
origin = self.pixelOrigin_
point.x = origin.x + latlng.lng * self.pixelsPerLonDegree_
# NOTE(appleton): Truncating to 0.9999 effectively limits latitude to
        # 89.189. This is about a third of a tile past the edge of the world tile.
siny = bound(math.sin(degrees_to_radians(latlng.lat)), -0.9999, 0.9999)
point.y = origin.y + 0.5 * math.log((1 + siny) / (1.0 - siny)) * -self.pixelsPerLonRadian_
return point
def from_point_to_latlng(self, point):
origin = self.pixelOrigin_
lng = (point.x - origin.x) / self.pixelsPerLonDegree_
lat_radians = (point.y - origin.y) / -self.pixelsPerLonRadian_
lat = radians_to_degrees(2.0 * math.atan(math.exp(lat_radians)) -
math.pi / 2.0)
return LatLng(lat, lng)
def get_point(point, center, zoom, mapwidth, mapheight):
scale = 2.0 ** zoom
proj = MercatorProjection()
center_p = proj.from_latlng_to_point(center)
center_p.x = center_p.x * scale
center_p.y = center_p.y * scale
subject_p = proj.from_latlng_to_point(point)
subject_p.x = subject_p.x * scale
subject_p.y = subject_p.y * scale
return Point((subject_p.x - center_p.x) + mapwidth / 2.0,
(subject_p.y - center_p.y) + mapheight / 2.0)
def get_corners(center, zoom, mapwidth, mapheight):
scale = 2.0 ** zoom
proj = MercatorProjection()
center_px = proj.from_latlng_to_point(center)
sw_point = Point(center_px.x - (mapwidth / 2.0) / scale, center_px.y +
(mapheight / 2.0) / scale)
sw_lat_lon = proj.from_point_to_latlng(sw_point)
ne_point = Point(center_px.x + (mapwidth / 2.0) / scale, center_px.y -
(mapheight / 2.0) / scale)
ne_lat_lon = proj.from_point_to_latlng(ne_point)
return {
'N': ne_lat_lon.lat,
'E': ne_lat_lon.lng,
'S': sw_lat_lon.lat,
'W': sw_lat_lon.lng,
}
# https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
def get_tile_xy(latlng, zoom):
lat_rad = math.radians(latlng.lat)
n = 2.0 ** zoom
xtile = (latlng.lng + 180.0) / 360.0 * n
ytile = ((1.0 - math.log(math.tan(lat_rad) +
(1 / math.cos(lat_rad))) / math.pi) / 2.0 * n)
return {
'X': xtile,
'Y': ytile
}
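if __name__ == '__main__':
    # Minimal usage sketch with made-up example values (not from the original
    # source): bounds and slippy-map tile indices for a 640x640 map of London.
    center = LatLng(51.5074, -0.1278)
    print(get_corners(center, zoom=12, mapwidth=640, mapheight=640))
    print(get_tile_xy(center, zoom=12))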
|
count = 0
ls = [1,2,3,0,4,0]
for i in ls:
if i == 0:
count += 1
print(count)
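# Note: the loop above is equivalent to the built-in ls.count(0).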
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2015
"""asynchronous rules and rule approval
Revision ID: 1d96f484df21
Revises: 1fc15ab60d43
Create Date: 2015-07-08 16:59:23.710208
"""
from alembic import op, context
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1d96f484df21'
down_revision = '3d9813fab443'
def upgrade():
    if context.get_context().dialect.name not in ('sqlite',):
op.add_column('rules', sa.Column('ignore_account_limit', sa.Boolean(name='RULES_IGNORE_ACCOUNT_LIMIT_CHK'), default=False))
    if context.get_context().dialect.name not in ('mysql',):
op.drop_constraint('RULES_STATE_CHK', 'rules')
op.create_check_constraint('RULES_STATE_CHK', 'rules', 'state IN (\'S\', \'R\', \'U\', \'O\', \'A\', \'I\')')
def downgrade():
    if context.get_context().dialect.name not in ('sqlite',):
op.drop_column('rules', 'ignore_account_limit')
op.drop_constraint('RULES_STATE_CHK', 'rules')
op.create_check_constraint('RULES_STATE_CHK', 'rules', 'state IN (\'S\', \'R\', \'U\', \'O\')')
|
import unittest
from test import testlmlexer
from test import testlmparser
suite = unittest.TestLoader().loadTestsFromTestCase(testlmlexer.testLmLexer)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(testlmparser.TestLmParser)
unittest.TextTestRunner(verbosity=2).run(suite)
|
from typing import List


class Solution:
def possibleBipartition(self, n: int, dislikes: List[List[int]]) -> bool:
self.is_bipartite = True
visited = [False] * (n + 1)
color = [False] * (n + 1)
# build a graph
graph = [[] for _ in range(n + 1)]
for edge in dislikes:
# undirected graph
graph[edge[0]].append(edge[1])
graph[edge[1]].append(edge[0])
def dfs(node):
nonlocal graph, color, visited
visited[node] = True
for next_node in graph[node]:
if not visited[next_node]:
color[next_node] = not color[node]
dfs(next_node)
elif color[next_node] == color[node]:
# is not bipartite
self.is_bipartite = False
return
for i in range(1, n + 1):
if not visited[i]:
dfs(i)
if not self.is_bipartite:
return False
return True
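# Hypothetical usage sketch (driver code, not part of the original solution):
# Solution().possibleBipartition(4, [[1, 2], [1, 3], [2, 4]]) -> True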
|
#! /usr/bin/env python
"""
File: interpolate_exp_cos.py
Copyright (c) 2016 Taylor Patti
License: MIT
This module calculates an interpolant approximation to the given function at the given point.
"""
import numpy as np
def exp_cos_func(q):
"""Approximates interpolant approximation to the function."""
mesh = np.linspace(-1, 1, q+1)
f = np.exp(- mesh**2)*np.cos(2 * np.pi * mesh)
x_point = -0.45
step = 2 / float(q+1)
k_val = int(1-x_point / step)
approximation = f[k_val] + ((f[k_val+1] - f[k_val])/(mesh[k_val+1] - mesh[k_val])) * (x_point - mesh[k_val])
    exact = np.exp(- x_point**2)*np.cos(2 * np.pi * x_point)
return approximation, exact - approximation
def test_approx():
"""Ensures that the approximation is accurate at q = 8"""
apt = np.fabs(exp_cos_func(8)[1]) < 0.1
msg = 'Approximation not accurate.'
assert apt, msg
|
import pandas as pd
import itertools
class Node(object):
# frequent pattern tree node
def __init__(self, value, count, parent):
# initialize the node
self.value = value
self.count = count
self.parent = parent
self.link = None
self.children = []
def get_child_node(self, value):
# return the child node that contains the specific value
for node in self.children:
if node.value == value:
return node
return None
def add_child_node(self, value):
# add a child node with a specific value and return it
child = Node(value, 1, self)
self.children.append(child)
return child
class FrequentPatternTree(object):
# FPtree object
def __init__(self, transactions, support_threshold, root_value, root_count):
# initialize and build the FPtree
self.frequent_items = self.get_frequent_items(transactions, support_threshold)
self.headers = self.initialize_header_table(self.frequent_items)
self.root = self.build_frequent_pattern_tree(
transactions, root_value,
root_count, self.frequent_items, self.headers)
@staticmethod
def get_frequent_items(transactions, support_threshold):
        # return a dictionary of the items whose support is at or above support_threshold
items = {}
# For each item in each transaction,
for transaction in transactions:
for item in transaction:
if item in items:
# if the item is already in the dictionary increase it's key value by 1
items[item] += 1
else:
# if the item is not in the dictionary add it and set its key value to 1
items[item] = 1
for key in list(items.keys()):
# delete items who's support is below the support_threshold
if items[key] < support_threshold:
del items[key]
return items
@staticmethod
def initialize_header_table(frequent_items):
# initialize the header table for linking keys to their nodes.
headers = {}
for key in frequent_items.keys():
headers[key] = None
return headers
def build_frequent_pattern_tree(self, transactions, root_value, root_count, frequent_items, headers):
# build the frequent pattern tree and return its root node
root_node = Node(root_value, root_count, None)
for transaction in transactions:
# sift through transactions to extract frequent items
sorted_items = [x for x in transaction if x in frequent_items]
# sort the list of frequent items by most frequent first
sorted_items.sort(key=lambda x: frequent_items[x], reverse=True)
# if the transaction contained frequent items, add them to the tree
if len(sorted_items) > 0:
self.insert_into_tree(sorted_items, root_node, headers)
return root_node
def insert_into_tree(self, items, node, headers):
# grow the frequent pattern tree recursively.
first_item = items[0]
        # check to see if the item is a child of its parent
child = node.get_child_node(first_item)
# if so, increase its count by 1
if child is not None:
child.count += 1
else:
# otherwise, add a new child to the parent node.
child = node.add_child_node(first_item)
        # if the item doesn't have a node reference in the header table, add it.
if headers[first_item] is None:
headers[first_item] = child
# otherwise, if the node has a reference in the header table already,
# link the new child to the last known occurrence of its header key
else:
current = headers[first_item]
while current.link is not None:
current = current.link
current.link = child
# recurse until there are no remaining items in the list
remaining_items = items[1:]
if len(remaining_items) > 0:
self.insert_into_tree(remaining_items, child, headers)
def tree_has_single_path(self, node):
# Check to see if the tree is a single path, if so return true, else false.
number_of_children = len(node.children)
        # if there is more than one child, return False
if number_of_children > 1:
return False
        # if there are no children, return True
elif number_of_children == 0:
return True
# if there is one child, recursively check to see if it has a single path
else:
return True and self.tree_has_single_path(node.children[0])
def search_for_patterns(self, support_threshold):
# search through the FPtree for frequent patterns.
# if the tree has a single path, generate a list of frequent patterns.
if self.tree_has_single_path(self.root):
return self.create_pattern_list()
else:
# if the tree is conditional (root value != null) search subtrees for patterns
# and generate new frequent patterns by appending the key_value of it's root
return self.append_root(self.search_sub_trees(support_threshold))
def append_root(self, patterns):
# Append value_of_root to patterns in the dictionary if we are in a conditional FPtree.
value_of_root = self.root.value
# if the value of the root is not null, we are in a conditional tree
if value_of_root is not None:
# create new patterns by appending the value of the root to patterns in the dictionary
new_patterns = {}
for key in patterns.keys():
new_patterns[tuple(sorted(list(key) + [value_of_root]))] = patterns[key]
return new_patterns
return patterns
def create_pattern_list(self):
# create a list of patterns with their support counts.
patterns = {}
# get all frequent items from the tree
items = self.frequent_items.keys()
# if we are in a conditional tree, add the root value as a pattern.
if self.root.value is None:
value_of_root = []
else:
value_of_root = [self.root.value]
patterns[tuple(value_of_root)] = self.root.count
# find patterns by looking at combinations of frequent items
# of size 1 -> length of frequent_items_list + 1
for i in range(1, len(items) + 1):
for combination in itertools.combinations(items, i):
pattern = tuple(sorted(list(combination) + value_of_root))
# let the support count of the new pattern be the support of the least frequent item
patterns[pattern] = min([self.frequent_items[x] for x in combination])
return patterns
def search_sub_trees(self, support_threshold):
# create subtrees and search them for frequent patterns.
patterns = {}
# generate a search_order list by sorting the items
# by least number of occurrences first
search_order = sorted(self.frequent_items.keys(),
key=lambda x: self.frequent_items[x])
# insert items into tree
for item in search_order:
occurrences = []
all_occurrence_paths_to_root = []
# get item's node from the header table
node = self.headers[item]
# trace node links to get a list of all occurrences of an item.
while node is not None:
occurrences.append(node)
node = node.link
# for each occurrence of the item, trace it's path back to the root
for occurrence in occurrences:
frequency = occurrence.count
path_to_root = []
parent = occurrence.parent
while parent.parent is not None:
path_to_root.append(parent.value)
parent = parent.parent
for i in range(frequency):
all_occurrence_paths_to_root.append(path_to_root)
# with the list of all occurrence paths to root, create a subtree and search it for patterns
subtree = FrequentPatternTree(all_occurrence_paths_to_root, support_threshold,
item, self.frequent_items[item])
subtree_patterns = subtree.search_for_patterns(support_threshold)
            # add the subtree patterns to the patterns dictionary.
for pattern in subtree_patterns.keys():
                # if the pattern already exists, increase its count by its frequency
if pattern in patterns:
patterns[pattern] += subtree_patterns[pattern]
                # otherwise add the pattern with its frequency
else:
patterns[pattern] = subtree_patterns[pattern]
return patterns
def mine_frequent_patterns(transactions, support_threshold):
# Given a list of transactions and a support threshold,
# build an FPtree and search it for frequent patterns
FPtree = FrequentPatternTree(transactions, support_threshold, None, None)
# Search the FPtree to get a list of frequent patterns
return FPtree.search_for_patterns(support_threshold)
def get_association_rules(patterns, confidence_threshold):
# Given a set of frequent itemsets, print out strong association rules
rules = {}
for itemset in patterns.keys():
# Get the support of AUB
union_support = patterns[itemset]
for i in range(1, len(itemset)):
# find each combination of the antecedent
for antecedent in itertools.combinations(itemset, i):
# get the antecedent
antecedent = tuple(sorted(antecedent))
# get the consequent
consequent = tuple(sorted(set(itemset) - set(antecedent)))
if len(set(consequent)) == 0:
consequent = antecedent
# if the antecedent is a known pattern
if antecedent in patterns:
# Get support of A
antecedent_support = patterns[antecedent]
# Calculate the confidence via support AUB/support A
confidence = float(union_support) / antecedent_support
# if the support AUB/support of A is >= the confidence threshold, add the rule
if antecedent != consequent:
if confidence >= confidence_threshold:
print(antecedent,'->', consequent,',', confidence)
return rules
def main():
# Read data from csv file into a dataframe
data = pd.read_csv("transactions.csv")
# Group items (index) by Member_number and Date
grouped_df = data.groupby(['Member_number', 'Date'])
# Create a list of transactions
transactions = []
for group, group_column in grouped_df:
transactions.append(group_column['itemDescription'].values.tolist())
# Threshold vars
support_threshold = 20
confidence_threshold = .02
# Generate frequent patterns for the transactions with support_threshold of n
patterns = mine_frequent_patterns(transactions, support_threshold)
# Output Rules
print('Support Threshold: ', support_threshold)
print('Confidence Threshold: ', confidence_threshold*100, '%')
print('------------------------------------------')
print('(Antecedent) -> (Consequent), Confidence')
print('------------------------------------------')
get_association_rules(patterns, confidence_threshold)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# Copyright Joseph Holland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from graffiti_monkey import cli as gm_cli
import os
import boto3
import logging
# Remove existing log handler setup by Lambda
log = logging.getLogger()
for handler in log.handlers:
log.removeHandler(handler)
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO)
def envvar_to_list(envvar):
return os.environ[envvar].split(',')
def send_notification(sns_arn, region, subject, message):
client = boto3.client('sns')
response = client.publish(
TopicArn=sns_arn,
Subject=subject,
Message=message
)
log.info('SNS Response: {}'.format(response))
def handler(event, context):
log.info('Loading function')
try:
sns_arn = os.environ['SNS_ARN']
region = os.environ['REGION']
gm = gm_cli.GraffitiMonkeyCli()
gm.region = region
gm.config = {"_instance_tags_to_propagate": envvar_to_list('INSTANCE_TAGS_TO_PROPAGATE'),
"_volume_tags_to_propagate": envvar_to_list('VOLUME_TAGS_TO_PROPAGATE'),
"_volume_tags_to_be_set": envvar_to_list('VOLUME_TAGS_TO_BE_SET'),
"_snapshot_tags_to_be_set": envvar_to_list('SNAPSHOT_TAGS_TO_BE_SET'),
"_instance_filter": envvar_to_list('INSTANCE_FILTER'),
}
gm.initialize_monkey()
gm.start_tags_propagation()
send_notification(sns_arn, region, 'Graffiti Monkey completed successfully',
'Graffiti Monkey completed successfully in ' + region + '.')
return 'Graffiti Monkey completed successfully!'
    except KeyError as e:
error_message = 'Error: Environment variable not set: ' + str(e)
log.error(error_message)
send_notification(sns_arn, region, 'Error running Graffiti Monkey',
'Error running Lambda Graffiti Monkey in ' + region + '. Error Message: ' + error_message)
    except Exception as e:
error_message = 'Error: Graffiti Monkey encountered the following error: ' + str(e)
log.error(error_message)
send_notification(sns_arn, region, 'Error running Graffiti Monkey',
'Error running Lambda Graffiti Monkey in ' + region + '. Error Message: ' + error_message)
|
import urllib.request,json
from .models import News, news_article
# Getting api key
apiKey = None
# Getting the news base url
base_url = None
def configure_request(app):
global apiKey,base_url
apiKey = app.config['NEWS_API_KEY']
base_url = app.config['NEWS_API_BASE_URL']
def get_news(endpoint):
'''
Function that gets the json response to our url request
'''
get_news_url = base_url.format(endpoint, apiKey)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data)
if get_news_response['sources']:
news_results_list = get_news_response['sources']
news_results = process_results(news_results_list)
return news_results
def get_source_news(endpoint):
'''
Function that gets the json response to our url request
'''
get_source_url = base_url.format(endpoint, apiKey)
with urllib.request.urlopen(get_source_url) as url:
get_source_articles = url.read()
get_articles_response = json.loads(get_source_articles)
if get_articles_response['articles']:
articles_results = get_articles_response['articles']
# articles_results_list = get_articles_response['articles']
# articles_results = process_results(articles_results_list)
return articles_results
def process_articles_results(articles_results_list):
'''
    Function that processes the article results and transforms them into a list of objects
    Args:
        articles_results_list: A list of dictionaries that contain article details
    Returns:
        articles_results: A list of news_article objects
'''
articles_results = []
for article_item in articles_results_list:
        source = article_item.get('source')
        author = article_item.get('author')
        title = article_item.get('title')
        url = article_item.get('url')
        description = article_item.get('description')
        urlToImage = article_item.get('urlToImage')
        publishedAt = article_item.get('publishedAt')
        content = article_item.get('content')
articles_object = news_article(source,description,url, title, urlToImage, publishedAt, content, author)
articles_results.append(articles_object)
return articles_results
def process_results(news_list):
'''
    Function that processes the news sources result and transforms them into a list of objects
    Args:
        news_list: A list of dictionaries that contain news source details
    Returns:
        news_results: A list of News objects
'''
news_results = []
for news_item in news_list:
id = news_item.get('id')
name = news_item.get('name')
description = news_item.get('description')
url = news_item.get('url')
category = news_item.get('category')
country = news_item.get('country')
language = news_item.get('language')
news_object = News(id,name,description,url,category,country, language)
news_results.append(news_object)
return news_results
|
"""
Author: Colin Rioux
Group SPoC pseudo code and source code together
*Should be run after fix_data
"""
import glob
import pandas as pd
import string
import random
import argparse
import os
from pathlib import Path
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--data_path', default='./data/in/')
arg_parser.add_argument('--out_path', default='./data/uniq/')
args = arg_parser.parse_args()
files = glob.glob(os.path.join(args.data_path, "*.tsv"))
for file in files:
# fname = file.split('/')[-1].split(',')[0]
fname = Path(file).stem
""" Skip already grouped files """
if "grouped-" in fname:
continue
fname = str("grouped-") + fname
df = pd.read_csv(file, sep="\t")
df = df.fillna('')
data = {}
"""
Group programs together
"""
for index, row in df.iterrows():
worker_id, prob_id = int(row['workerid']), row['probid']
if prob_id not in data:
data[prob_id] = {}
if worker_id not in data[prob_id]:
data[prob_id][worker_id] = { "ps": [], "sc": [], "workerid": worker_id, "probid": prob_id, "subid": row['subid'] }
data[prob_id][worker_id]["ps"].append(row['text'])
# data[prob_id][worker_id]["sc"].append("".join(["\\t"*int(row['indent'])]) + row['code'])
data[prob_id][worker_id]["sc"].append(row['code'])
"""
Compress codes and texts
"""
for prob_id, problem in data.items():
for worker_id, worker in problem.items():
worker["ps"] = [x for x in worker["ps"] if x]
# worker["ps"] = "\\n".join(worker["ps"])
worker["ps"] = "; ".join(worker["ps"])
# worker["ps"] = '"' + worker["ps"].strip("'").strip('"') + '"'
worker["ps"] = worker["ps"].strip("'").strip('"')
worker["sc"] = [x for x in worker["sc"] if x]
# worker["sc"] = "\\n".join(worker["sc"])
worker["sc"] = " ".join(worker["sc"])
# worker["sc"] = '"' + worker["sc"].strip("'").strip('"') + '"'
worker["sc"] = worker["sc"].strip("'").strip('"')
data_l = []
for prob_id, problem in data.items():
for worker_id, worker in problem.items():
data_l.append(worker)
final_df = pd.DataFrame(data_l)
final_df.to_csv(os.path.join(args.out_path, fname + '.csv'), index=False)
|
import click
import numpy as np
from tqdm import tqdm
from ..io import (
append_column_to_hdf5,
read_telescope_data_chunked,
get_column_names_in_file,
remove_column_from_file,
load_model,
)
from ..apply import predict_disp
from ..configuration import AICTConfig
from ..logging import setup_logging
@click.command()
@click.argument('configuration_path', type=click.Path(exists=True, dir_okay=False))
@click.argument('data_path', type=click.Path(exists=True, dir_okay=False))
@click.argument('disp_model_path', type=click.Path(exists=False, dir_okay=False))
@click.argument('sign_model_path', type=click.Path(exists=False, dir_okay=False))
@click.option('-n', '--n-jobs', type=int, help='Number of cores to use')
@click.option('-y', '--yes', help='Do not prompt for overwrites', is_flag=True)
@click.option('-v', '--verbose', help='Verbose log output', is_flag=True)
@click.option(
'-N', '--chunksize', type=int,
help='If given, only process the given number of events at once',
)
def main(configuration_path, data_path, disp_model_path, sign_model_path, chunksize, n_jobs, yes, verbose):
'''
Apply given model to data. Three columns are added to the file, source_x_prediction, source_y_prediction
and disp_prediction
CONFIGURATION_PATH: Path to the config yaml file
DATA_PATH: path to the FACT data in a h5py hdf5 file, e.g. erna_gather_fits output
DISP_MODEL_PATH: Path to the pickled disp model.
SIGN_MODEL_PATH: Path to the pickled sign model.
'''
log = setup_logging(verbose=verbose)
config = AICTConfig.from_yaml(configuration_path)
model_config = config.disp
columns_to_delete = [
'source_x_prediction',
'source_y_prediction',
'theta',
'theta_deg',
'theta_rec_pos',
'disp_prediction',
]
for i in range(1, 6):
columns_to_delete.extend([
'theta_off_' + str(i),
'theta_deg_off_' + str(i),
'theta_off_rec_pos_' + str(i),
])
n_del_cols = 0
for column in columns_to_delete:
if column in get_column_names_in_file(data_path, config.telescope_events_key):
if not yes:
click.confirm(
'Dataset "{}" exists in file, overwrite?'.format(column),
abort=True,
)
yes = True
remove_column_from_file(data_path, config.telescope_events_key, column)
log.warn("Deleted {} from the feature set.".format(column))
n_del_cols += 1
if n_del_cols > 0:
log.warn("Source dependent features need to be calculated from the predicted source possition. "
+ "Use e.g. `fact_calculate_theta` from https://github.com/fact-project/pyfact.")
log.info('Loading model')
disp_model = load_model(disp_model_path)
sign_model = load_model(sign_model_path)
log.info('Done')
if n_jobs:
disp_model.n_jobs = n_jobs
sign_model.n_jobs = n_jobs
df_generator = read_telescope_data_chunked(
data_path, config, chunksize, model_config.columns_to_read_apply,
feature_generation_config=model_config.feature_generation
)
log.info('Predicting on data...')
for df_data, start, stop in tqdm(df_generator):
disp = predict_disp(
df_data[model_config.features], disp_model, sign_model,
log_target=model_config.log_target,
)
source_x = df_data[model_config.cog_x_column] + disp * np.cos(df_data[model_config.delta_column])
source_y = df_data[model_config.cog_y_column] + disp * np.sin(df_data[model_config.delta_column])
key = config.telescope_events_key
append_column_to_hdf5(data_path, source_x, key, 'source_x_prediction')
append_column_to_hdf5(data_path, source_y, key, 'source_y_prediction')
append_column_to_hdf5(data_path, disp, key, 'disp_prediction')
if __name__ == '__main__':
# pylint: disable=no-value-for-parameter
main()
|
# Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from typing import Tuple, Optional
from openfold.model.primitives import Linear, LayerNorm
from openfold.utils.tensor_utils import add, one_hot
class InputEmbedder(nn.Module):
"""
Embeds a subset of the input features.
Implements Algorithms 3 (InputEmbedder) and 4 (relpos).
"""
def __init__(
self,
tf_dim: int,
msa_dim: int,
c_z: int,
c_m: int,
relpos_k: int,
**kwargs,
):
"""
Args:
tf_dim:
Final dimension of the target features
msa_dim:
Final dimension of the MSA features
c_z:
Pair embedding dimension
c_m:
MSA embedding dimension
relpos_k:
Window size used in relative positional encoding
"""
super(InputEmbedder, self).__init__()
self.tf_dim = tf_dim
self.msa_dim = msa_dim
self.c_z = c_z
self.c_m = c_m
self.linear_tf_z_i = Linear(tf_dim, c_z)
self.linear_tf_z_j = Linear(tf_dim, c_z)
self.linear_tf_m = Linear(tf_dim, c_m)
self.linear_msa_m = Linear(msa_dim, c_m)
# RPE stuff
self.relpos_k = relpos_k
self.no_bins = 2 * relpos_k + 1
self.linear_relpos = Linear(self.no_bins, c_z)
def relpos(self, ri: torch.Tensor):
"""
Computes relative positional encodings
Implements Algorithm 4.
Args:
ri:
"residue_index" features of shape [*, N]
"""
d = ri[..., None] - ri[..., None, :]
boundaries = torch.arange(
start=-self.relpos_k, end=self.relpos_k + 1, device=d.device
)
oh = one_hot(d, boundaries).type(ri.dtype)
return self.linear_relpos(oh)
def forward(
self,
tf: torch.Tensor,
ri: torch.Tensor,
msa: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
tf:
"target_feat" features of shape [*, N_res, tf_dim]
ri:
"residue_index" features of shape [*, N_res]
msa:
"msa_feat" features of shape [*, N_clust, N_res, msa_dim]
Returns:
msa_emb:
[*, N_clust, N_res, C_m] MSA embedding
pair_emb:
[*, N_res, N_res, C_z] pair embedding
"""
# [*, N_res, c_z]
tf_emb_i = self.linear_tf_z_i(tf)
tf_emb_j = self.linear_tf_z_j(tf)
# [*, N_res, N_res, c_z]
pair_emb = tf_emb_i[..., None, :] + tf_emb_j[..., None, :, :]
pair_emb = pair_emb + self.relpos(ri.type(pair_emb.dtype))
# [*, N_clust, N_res, c_m]
n_clust = msa.shape[-3]
tf_m = (
self.linear_tf_m(tf)
.unsqueeze(-3)
.expand(((-1,) * len(tf.shape[:-2]) + (n_clust, -1, -1)))
)
msa_emb = self.linear_msa_m(msa) + tf_m
return msa_emb, pair_emb
class RecyclingEmbedder(nn.Module):
"""
Embeds the output of an iteration of the model for recycling.
Implements Algorithm 32.
"""
def __init__(
self,
c_m: int,
c_z: int,
min_bin: float,
max_bin: float,
no_bins: int,
inf: float = 1e8,
**kwargs,
):
"""
Args:
c_m:
MSA channel dimension
c_z:
Pair embedding channel dimension
min_bin:
Smallest distogram bin (Angstroms)
max_bin:
Largest distogram bin (Angstroms)
no_bins:
Number of distogram bins
"""
super(RecyclingEmbedder, self).__init__()
self.c_m = c_m
self.c_z = c_z
self.min_bin = min_bin
self.max_bin = max_bin
self.no_bins = no_bins
self.inf = inf
self.linear = Linear(self.no_bins, self.c_z)
self.layer_norm_m = LayerNorm(self.c_m)
self.layer_norm_z = LayerNorm(self.c_z)
def forward(
self,
m: torch.Tensor,
z: torch.Tensor,
x: torch.Tensor,
_inplace: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
m:
First row of the MSA embedding. [*, N_res, C_m]
z:
[*, N_res, N_res, C_z] pair embedding
x:
[*, N_res, 3] predicted C_beta coordinates
Returns:
m:
[*, N_res, C_m] MSA embedding update
z:
[*, N_res, N_res, C_z] pair embedding update
"""
# [*, N, C_m]
m_update = self.layer_norm_m(m)
if(_inplace):
m.copy_(m_update)
m_update = m
# [*, N, N, C_z]
z_update = self.layer_norm_z(z)
if(_inplace):
z.copy_(z_update)
z_update = z
# This squared method might become problematic in FP16 mode.
bins = torch.linspace(
self.min_bin,
self.max_bin,
self.no_bins,
dtype=x.dtype,
device=x.device,
requires_grad=False,
)
squared_bins = bins ** 2
upper = torch.cat(
[squared_bins[1:], squared_bins.new_tensor([self.inf])], dim=-1
)
d = torch.sum(
(x[..., None, :] - x[..., None, :, :]) ** 2, dim=-1, keepdims=True
)
# [*, N, N, no_bins]
d = ((d > squared_bins) * (d < upper)).type(x.dtype)
# [*, N, N, C_z]
d = self.linear(d)
z_update = add(z_update, d, _inplace)
return m_update, z_update
class TemplateAngleEmbedder(nn.Module):
"""
Embeds the "template_angle_feat" feature.
Implements Algorithm 2, line 7.
"""
def __init__(
self,
c_in: int,
c_out: int,
**kwargs,
):
"""
Args:
c_in:
Final dimension of "template_angle_feat"
c_out:
Output channel dimension
"""
super(TemplateAngleEmbedder, self).__init__()
self.c_out = c_out
self.c_in = c_in
self.linear_1 = Linear(self.c_in, self.c_out, init="relu")
self.relu = nn.ReLU()
self.linear_2 = Linear(self.c_out, self.c_out, init="relu")
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x: [*, N_templ, N_res, c_in] "template_angle_feat" features
Returns:
x: [*, N_templ, N_res, C_out] embedding
"""
x = self.linear_1(x)
x = self.relu(x)
x = self.linear_2(x)
return x
class TemplatePairEmbedder(nn.Module):
"""
Embeds "template_pair_feat" features.
Implements Algorithm 2, line 9.
"""
def __init__(
self,
c_in: int,
c_out: int,
**kwargs,
):
"""
Args:
c_in:
c_out:
Output channel dimension
"""
super(TemplatePairEmbedder, self).__init__()
self.c_in = c_in
self.c_out = c_out
# Despite there being no relu nearby, the source uses that initializer
self.linear = Linear(self.c_in, self.c_out, init="relu")
def forward(
self,
x: torch.Tensor,
) -> torch.Tensor:
"""
Args:
x:
[*, C_in] input tensor
Returns:
[*, C_out] output tensor
"""
x = self.linear(x)
return x
class ExtraMSAEmbedder(nn.Module):
"""
Embeds unclustered MSA sequences.
Implements Algorithm 2, line 15
"""
def __init__(
self,
c_in: int,
c_out: int,
**kwargs,
):
"""
Args:
c_in:
Input channel dimension
c_out:
Output channel dimension
"""
super(ExtraMSAEmbedder, self).__init__()
self.c_in = c_in
self.c_out = c_out
self.linear = Linear(self.c_in, self.c_out)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x:
[*, N_extra_seq, N_res, C_in] "extra_msa_feat" features
Returns:
[*, N_extra_seq, N_res, C_out] embedding
"""
x = self.linear(x)
return x
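if __name__ == "__main__":
    # Minimal shape-check sketch. It assumes the openfold package providing the
    # imports above is installed; the dimensions below are illustrative, not the
    # published AlphaFold defaults.
    embedder = InputEmbedder(tf_dim=22, msa_dim=49, c_z=16, c_m=32, relpos_k=8)
    tf = torch.rand(1, 10, 22)
    ri = torch.arange(10, dtype=torch.float32).unsqueeze(0)
    msa = torch.rand(1, 4, 10, 49)
    msa_emb, pair_emb = embedder(tf, ri, msa)
    print(msa_emb.shape, pair_emb.shape)  # expect (1, 4, 10, 32) and (1, 10, 10, 16)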
|
import curses.ascii
class EditField:
"""Single line edit field.
Returns content on Enter, None on cancel (Escape).
Does not touch attributes, does not care about win size.
Does not touch area behind current cursor pos + width.
    Clears the touched area when done.
"""
def __init__(self, win, width=0):
"""Assign the field to `win`, allowing at max `width` chars.
        Zero width allows using all space up to the end of the window.
"""
# Compute width
_, w = win.getmaxyx()
by, bx = win.getbegyx()
y, x = win.getyx()
self.width = width or w - x
# Create new window for edit box
        self.win = curses.newwin(1, self.width, by + y, bx + x)
self.win.keypad(1)
# Edited text is split to left and right part
self.left = '' # Text on left side of the cursor
self.right = '' # Text on right side of the cursor
def edit(self):
"""Run edit loop, return the content."""
while True:
wc = self.win.get_wch()
res = self.process_input(wc)
if res == 'enter':
return self.left + self.right
if res == 'escape':
return None
self.draw()
def draw(self):
"""Draw current content, move cursor to current position."""
left, right = self.left, self.right
# If whole content cannot fit, clip it
while len(left) + len(right) > self.width - 1:
if len(left) > self.width // 2:
left = left[1:]
else:
right = right[:-1]
if left != self.left:
left = '<' + left[1:]
if right != self.right:
right = right[:-1] + '>'
# Draw the (possibly clipped) content
self.win.move(0, 0)
self.win.addnstr(left, self.width)
_, curs_x = self.win.getyx()
self.win.addnstr(right, self.width - curs_x)
_, end_x = self.win.getyx()
self.win.hline(' ', self.width - end_x)
self.win.move(0, curs_x)
self.win.refresh()
def process_input(self, wc):
"""Process character obtained from get_wch().
Returns None if not resolved, or string with resolution type:
* 'enter' - input finished, text is valid
* 'escape' - input canceled, text is invalid
"""
if wc == chr(curses.ascii.ESC):
return 'escape'
elif wc == curses.KEY_LEFT:
self.right = self.left[-1:] + self.right
self.left = self.left[:-1]
elif wc == curses.KEY_RIGHT:
self.left = self.left + self.right[:1]
self.right = self.right[1:]
elif wc == curses.KEY_HOME:
self.right = self.left + self.right
self.left = ''
elif wc == curses.KEY_END:
self.left = self.left + self.right
self.right = ''
elif wc == curses.KEY_BACKSPACE or wc == chr(curses.ascii.BS):
self.left = self.left[:-1]
elif wc == curses.KEY_DC or wc == chr(curses.ascii.DEL):
self.right = self.right[1:]
elif wc == chr(curses.ascii.NL):
return 'enter'
elif isinstance(wc, str):
self.left += wc
if __name__ == '__main__':
# Demo
def curses_main(stdscr):
w = 20
stdscr.move(9, 10)
stdscr.hline('-', w)
stdscr.move(11, 10)
stdscr.hline('-', w)
stdscr.move(10, 10)
stdscr.refresh()
field = EditField(stdscr, w)
return field.edit()
result = curses.wrapper(curses_main)
print(result)
|
"""
This file contains the routes (with a basic example on validation) for
the application.
Either extend this OR create a new routes file and include that one.
"""
import json
import sys
from app import app
from app.utils import Validation, Configuration
from app.payloads import ApplicationPayload, AddUserPayload
from app.route_inspector import RouteRenderer, RouteInspector
from app.routes_utils import RoutesBasics
from app.tasks import ApplicationTask, AddUserTask, ListUserTask
from flask import request, abort
from flask.wrappers import Response
from markupsafe import escape
# Additional routes files for /
import app.routes_intro as second_routes
GLOB_CONFIG = Configuration(None)
GLOB_VALIDATION = Validation(GLOB_CONFIG)
@app.route("/", methods = ["GET"])
def default():
"""
Retrieve the information of all routes that are supported
by this server implementation.
"""
ri = RouteInspector()
routes = ri.get_routes([sys.modules[__name__], second_routes])
html_content = RouteRenderer.render_routes(request.host, routes)
return Response(html_content, status=200)
@app.route(
"/subscriptions/<subscription>/providers/Microsoft.AI",
methods = ["GET", "POST"]
)
def application(subscription):
"""
Requires:
?api_version=1.0
bearer token from configuration.json
POST:
Body matches ApplicationPayload class
Content-Type: application/json
"""
global GLOB_VALIDATION
# Can throw 401,400 or 422, otherwise it passed basic validation on
    # auth token, version, payload format (not necessarily payload content)
execute_context = RoutesBasics.request_validation(
GLOB_VALIDATION,
request,
ApplicationPayload,
False
)
execute_context.configuration = GLOB_CONFIG
app_task = ApplicationTask(
execute_context,
subscription=escape(subscription)
)
if request.method.upper() == "POST":
return Response(json.dumps(app_task.execute_post(), indent=4), status=201)
elif request.method.upper() == "GET":
return Response(json.dumps(app_task.execute_get(), indent=4), status=200)
@app.route(
"/users/add",
methods = ["POST"]
)
def add_user():
"""
Requires:
?api_version=1.0
admin bearer token from configuration.json
POST:
Body matches AddUserPayload class
Content-Type: application/json
"""
global GLOB_VALIDATION
# Can throw 401,400 or 422, otherwise it passed basic validation on
    # auth token, version, payload format (not necessarily payload content)
execute_context = RoutesBasics.request_validation(
GLOB_VALIDATION,
request,
AddUserPayload,
True
)
execute_context.configuration = GLOB_CONFIG
app_task = AddUserTask(
execute_context
)
result = app_task.execute_post()
return Response(result.description, status=result.status_code)
@app.route(
"/users/list",
methods = ["GET"]
)
def list_users():
"""
Requires:
?api_version=1.0
any valid bearer token from configuration.json
"""
global GLOB_VALIDATION
# Can throw 401,400 or 422, otherwise it passed basic validation on
    # auth token, version, payload format (not necessarily payload content)
execute_context = RoutesBasics.request_validation(
GLOB_VALIDATION,
request,
None
)
execute_context.configuration = GLOB_CONFIG
app_task = ListUserTask(
execute_context
)
result = app_task.execute_get()
return Response(result.description, status=result.status_code)
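# A minimal sketch (not part of the original application) of how an extra
# route can follow the same pattern as the handlers above: run
# RoutesBasics.request_validation first, then build a Response. The "/ping"
# path and its JSON body are illustrative assumptions only.
@app.route("/ping", methods = ["GET"])
def ping():
    """
    Requires:
        ?api_version=1.0
        any valid bearer token from configuration.json
    """
    global GLOB_VALIDATION
    # Can throw 401,400 or 422, otherwise it passed basic validation on
    # auth token, version, payload format (not necessarily payload content)
    execute_context = RoutesBasics.request_validation(
        GLOB_VALIDATION,
        request,
        None
    )
    execute_context.configuration = GLOB_CONFIG
    return Response(json.dumps({"status": "ok"}, indent=4), status=200)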
|
import random
random.seed(42)
num_nodes = 10000
max_id = 2**32
max_id = num_nodes * 100
num_channels = 4
node_by_id = dict()
nodeids = []
print "use pypy"
def get_closest_node_id(target_id):
for node_id in nodeids:
if node_id > target_id:
return node_id
return nodeids[0]
class Node(object):
def __init__(self, id, num_channels, deposit):
self.id = id
assert num_channels % 2 == 0
self.num_channels = num_channels
self.num_initiated_channels = num_channels / 2
self.deposit = deposit
self.channels = dict() # nodeid => capacity
def __repr__(self):
return '<Node:%d>' % self.id
@property
def targets(self):
"""
connect to closest node larger than self!
"""
distances = [max_id / 2**i for i in range(self.num_initiated_channels)]
return [(self.id + d) % max_id for d in distances]
def initiate_channels(self):
assert self.id in self.targets
for target_id in self.targets:
node_id = get_closest_node_id(target_id)
self.connect(node_id)
def connect(self, other_id):
assert other_id
assert other_id in node_by_id
node = node_by_id[other_id]
# self.channels[node.id] = self.deposit / self.num_channels
self.channels[node.id] = self.deposit
# node.channels[self.id] = node.deposit / node.num_channels
def transfer(self, transfer):
"""
try to transfer along a channel with a node that has a lower id than target.
closest node first
"""
# print 'in transfer', self, transfer.receiver
if self in transfer.tried:
return False
transfer.tried.append(self)
transfer.path.append(self)
# sort connections by distance to target
target_id = transfer.receiver
if target_id == self.id:
return True
def _distance(cid):
d = target_id - cid
if d < 0:
d += max_id
return d
res = False
channels = sorted(self.channels.keys(), lambda a, b: cmp(_distance(a), _distance(b)))
# print target_id, channels
for cid in channels:
if cid > target_id:
if len(transfer.path) > 1: # not first
# print 'breaking'
break
capacity = self.channels[cid]
# print cid, capacity, transfer.amount
if capacity < transfer.amount:
continue
node = node_by_id[cid]
try:
res = node.transfer(transfer)
except RuntimeError:
continue
if res:
break
if not res:
transfer.path.pop()
return False
return True
class Transfer(object):
def __init__(self, sender, receiver, amount):
self.sender = sender
self.receiver = receiver
self.amount = amount
self.tried = []
self.path = []
self.success = False
# print self
def __repr__(self):
return '<Transfer v=%d t=%s>' % (self.amount, self.receiver)
deposit_distribution = [100 * 2**i for i in range(5)]
print deposit_distribution
print 'setting up nodes'
for i in range(num_nodes):
node_id = random.randrange(max_id)
deposit = random.choice(deposit_distribution)
node = Node(node_id, num_channels, deposit)
node_by_id[node.id] = node
nodeids = sorted(node_by_id.keys())
print 'setting up channels'
for node in node_by_id.values():
node.initiate_channels()
num_edges = sum([len(n.channels) for n in node_by_id.values()]) / 2
print 'num_nodes', len(nodeids)
print 'num_edges', num_edges
# dump some nodes and their channels
# for nodeid in nodeids[:4]:
# node = node_by_id[nodeid]
# print node, sorted(node.channels.keys()), node.targets
def rand_transfer(amount):
sender = random.choice(nodeids)
receiver = sender
while receiver == sender:
receiver = random.choice(nodeids)
t = Transfer(sender, receiver, amount)
res = node_by_id[sender].transfer(t)
t.success = res
return t
for value in deposit_distribution:
transfers = []
value /= 2
for i in range(100):
t = rand_transfer(value)
transfers.append(t)
avg_path_len = sum([len(t.path) for t in transfers]) / float(len(transfers))
avg_tried_len = sum([len(t.tried) for t in transfers]) / float(len(transfers))
median_tried_len = sorted([len(t.tried) for t in transfers])[len(transfers) / 2]
max_tried_len = max([len(t.tried) for t in transfers])
num_successful = sum(1 for t in transfers if t.success)
print 'value', value, deposit_distribution
print 'avg_path_len', avg_path_len
print 'avg_tried_len', avg_tried_len, median_tried_len, max_tried_len
print 'num_successful', num_successful, num_successful / float(len(transfers))
|
# from django.db import models # silence pyflakes
# Create your models here.
|
import json
from copy import copy
import dateutil.parser
from mythx_models.response import Analysis, AnalysisStatus
from . import common as testdata
def assert_analysis(analysis):
assert analysis.uuid == testdata.UUID_1
assert analysis.api_version == testdata.API_VERSION_1
assert analysis.maru_version == testdata.MARU_VERSION_1
assert analysis.mythril_version == testdata.MYTHRIL_VERSION_1
assert analysis.harvey_version == testdata.HARVEY_VERSION_1
assert analysis.queue_time == testdata.QUEUE_TIME_1
assert analysis.run_time == 0 # default value
assert analysis.status == AnalysisStatus(testdata.STATUS_1)
assert analysis.submitted_at == dateutil.parser.parse(testdata.SUBMITTED_AT_1)
assert analysis.submitted_by == testdata.SUBMITTED_BY_1
def test_analysis_from_valid_json():
analysis = Analysis.from_json(json.dumps(testdata.ANALYSIS_DICT))
assert_analysis(analysis)
def test_analysis_to_json():
assert json.loads(testdata.ANALYSIS_OBJECT.to_json()) == testdata.ANALYSIS_DICT
def test_analysis_to_dict():
assert testdata.ANALYSIS_OBJECT.to_dict() == testdata.ANALYSIS_DICT
def test_analysis_propagate_error_field():
analysis = copy(testdata.ANALYSIS_OBJECT)
# add optional error field
analysis.error = testdata.ERROR
analysis_dict = analysis.to_dict()
analysis_dict["error"] == testdata.ERROR
def test_analysis_from_valid_dict():
analysis = Analysis.from_dict(testdata.ANALYSIS_DICT)
assert_analysis(analysis)
def test_repr():
analysis_repr = repr(testdata.ANALYSIS_OBJECT)
assert testdata.ANALYSIS_OBJECT.uuid in analysis_repr
    assert testdata.ANALYSIS_OBJECT.status in analysis_repr
|
def validate_thing_amount(user_input):
try:
user_input = int(user_input)
    except (TypeError, ValueError):
return False
if user_input in range(1, 5):
return user_input
def validate_month_amount(user_input):
try:
user_input = int(user_input)
    except (TypeError, ValueError):
return False
if user_input in range(1, 7):
return user_input
def validate_weeks_amount(user_input):
try:
user_input = int(user_input)
    except (TypeError, ValueError):
return False
if user_input in range(1, 5):
return user_input
def validate_size_cell(user_input):
try:
user_input = int(user_input)
    except (TypeError, ValueError):
return False
if user_input in range(1, 11):
return user_input
def validate_cell_period(user_input):
try:
user_input = int(user_input)
    except (TypeError, ValueError):
return False
if user_input in range(1, 13):
return user_input
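# The five validators above differ only in their accepted range. A single
# parameterized helper (a sketch, not part of the original module) can cover
# all of them; for truthiness checks it behaves the same way.
def _validate_int_in_range(user_input, upper):
    """Return user_input as an int if 1 <= value < upper, otherwise False."""
    try:
        value = int(user_input)
    except (TypeError, ValueError):
        return False
    return value if value in range(1, upper) else False
# For example, validate_size_cell(x) corresponds to
# _validate_int_in_range(x, 11), and validate_cell_period(x) to
# _validate_int_in_range(x, 13).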
|
# %%
cd ..
# %%
from tptp_features.tptp_v7_0_0_0Lexer import tptp_v7_0_0_0Lexer
from tptp_features.tptp_v7_0_0_0Parser import tptp_v7_0_0_0Parser
from tptp_features.tptp_v7_0_0_0Listener import tptp_v7_0_0_0Listener
from antlr4 import FileStream, CommonTokenStream, ParseTreeWalker
from pprint import pprint
#%%
from tptp_features.Tptp import Tptp
tptp = Tptp("../tptp-parser/")
# %%
import random
random.seed()
problems = list(tptp.get_problems({'SPC': 'FOF_.*'}))
random.shuffle(problems)
#%%
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# %%
class Listener(tptp_v7_0_0_0Listener):
def __init__(self):
super().__init__()
self.includes = []
self.first = True
def enterEveryRule(self, ctx):
if not hasattr(ctx, 'negated_env'):
if ctx.parentCtx is None:
ctx.negated_env = False
else:
ctx.negated_env = ctx.parentCtx.negated_env
def enterFof_binary_nonassoc(self, ctx):
if ctx.binary_connective().Impl():
logging.debug(f"Flipping context {ctx.negated_env} {ctx.fof_unitary_formula(0).getText()}")
ctx.fof_unitary_formula(0).negated_env = not ctx.negated_env
def enterInclude(self, ctx):
fname = ctx.file_name().Single_quoted().getText().strip("'")
if ctx.formula_selection():
formulas = tuple((c.getText() for c in ctx.formula_selection().name_list().name()))
else:
formulas = None
self.includes.append((fname, formulas))
# %%
from tptp_features.strategic_features import QuantifierFeaturesListener
# %%
def parse_one(problem):
lexer = tptp_v7_0_0_0Lexer(FileStream(problem.file))
stream = CommonTokenStream(lexer)
parser = tptp_v7_0_0_0Parser(stream)
tree = parser.tptp_file()
listener = QuantifierFeaturesListener()
walker = ParseTreeWalker()
walker.walk(listener, tree)
return listener
# %%
l = parse_one(tptp.problems['CMX001'])
# %%
l = parse_one(tptp.axioms['SET005+0'])
# %%
l = parse_one(tptp.axioms['SET006+0'])
# %%
l = parse_one(tptp.problems['SYN000+1'])
# %%
class TimeoutException(Exception):
pass
def parse_problem_timeout(p, timeout):
import signal
def signal_handler(signum, frame):
        raise TimeoutException('TIMEOUT')
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(timeout)
return parse_problem(p)
def parse_problem(p):
listener = parse_one(p)
return listener.includes
# %%
l = parse_problem_timeout(problems[3], 3)
# %%
for c, p in enumerate(problems):
if c % 100 == 0:
print()
try:
formulasel, includes = parse_problem_timeout(p, 1)
if formulasel:
print()
print(p.name, includes, flush=True)
else:
print('.', end='')
except Exception as e:
print('*', end='')
# %%
from concurrent import futures
with futures.ProcessPoolExecutor(max_workers=4) as executor:
future_map = {executor.submit(parse_problem, p): p for p in problems}
for future in futures.as_completed(future_map, timeout=30):
p = future_map[future]
try:
formula_selection, includes = future.result()
print(p, includes, formula_selection)
except Exception as e:
print(p, e)
# %%
p = tptp.problems['KRS180+1']
print(p.name)
lexer = tptp_v7_0_0_0Lexer(FileStream(p.file))
listener = parse(lexer)
print(listener.includes)
print(type(listener.includes[0][0]))
print(dir(listener.includes[0][0]))
# %%
c = listener.includes[0][0]
c.getChild(1)
print(c.symbol)
# %%
from pathlib import Path
a = tptp.find_by_name('Axioms/KRS001+1.ax')
print(a)
# %%
|
#======================================================#
# + Projet: How to use API Rest with Python and Flask #
# + Author: Pedro Impulcetto, Adapted: Thiago Piovesan #
# + API documentation using Swagger:
# YouTube video: https://youtu.be/levz4eumJ98
#======================================================#
# Libraries importation:
from flask_restx import fields
from src.server.instance import server
#======================================================#
book = server.api.model('Book', {
'id': fields.Integer(readOnly=True, description='The unique identifier of a book'),
'title': fields.String(required=True, description='The title of a book', min_length=3, max_length=100),
'author': fields.String(required=True, description='The author of a book', min_length=3, max_length=100),
})
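#======================================================#
# A usage sketch (not part of the original project): attaching the 'book'
# model above to a flask_restx Resource. The '/books' path and the in-memory
# 'books' list below are illustrative assumptions only.
from flask import request
from flask_restx import Resource, marshal
books = [] # stand-in storage for the sketch
@server.api.route('/books')
class BookList(Resource):
    def get(self):
        """Return every stored book, serialized through the 'book' model."""
        return marshal(books, book)
    def post(self):
        """Create a book from the posted JSON body."""
        data = request.get_json(force=True)
        data['id'] = len(books) + 1
        books.append(data)
        return marshal(data, book), 201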
|
import typing
def main() -> typing.NoReturn:
n, x = map(int, input().split())
# dp
a = list(map(int, input().split()))
dp = [0] * n
dp[0] = x
mn = x
for i in range(n - 1):
p = a[i + 1] // a[i]
dp[i + 1], dp[i] = divmod(dp[i], p)
assert p > dp[i]
if p - dp[i] + 1 <= dp[i]:
dp[i + 1] += 1
dp[i] = p - dp[i]
mn = min(mn, sum(dp))
print(mn)
main()
|
from .aBuffer import ABuffer, AMappable
from ...constants import _GL_TYPE_NP_TYPE_MAP, _GL_TYPE_SIZE_MAP
from OpenGL import GL
import numpy as np
class StaticBuffer(ABuffer):
_BufferUsageFlags = 0
def __init__(self, data: list, dataType: GL.GLenum):
super().__init__(len(data) * _GL_TYPE_SIZE_MAP[dataType], dataType)
GL.glNamedBufferStorage(self._id, self._size, np.asarray(data, dtype=_GL_TYPE_NP_TYPE_MAP[dataType]), self._BufferUsageFlags)
class Buffer(ABuffer):
_BufferUsageFlags = GL.GL_DYNAMIC_STORAGE_BIT
def __init__(self, size: int, dataStorageView: memoryview):
super().__init__(size, None)
self._dataView = dataStorageView
GL.glNamedBufferStorage(self._id, self._size, None, self._BufferUsageFlags)
def TransferData(self, size: int) -> None:
"""
Transfer given amount of data from bound data storage
to buffer's memory.
"""
GL.glNamedBufferSubData(self._id, 0, size, self._dataView.obj)
def __del__(self):
self._dataView.release()
class MappedBuffer(AMappable):
def __init__(self, size: int):
super().__init__(size, None)
GL.glNamedBufferStorage(self._id, self._size, None, self._BufferUsageFlags)
self._MapPersistent()
|
"""END MODEL
To better evaluate the quality of our generated training data,
we evaluate an end model trained on this data.
Here, the end model is implemented as a logistic regression bag of words model.
However, you can replace this with any model of your choosing, as long as it has
functions "fit" and "predict" with the specifications outlined below.
"""
import tensorflow as tf
from numpy.random import seed as np_seed
from random import seed as py_seed
from snorkel.utils import set_seed as snork_seed
from snorkel.utils import preds_to_probs
from sklearn.feature_extraction.text import CountVectorizer
def get_keras_logreg(input_dim, output_dim=2):
"""Create a simple logistic regression model (using keras)
"""
model = tf.keras.Sequential()
if output_dim == 1:
loss = "binary_crossentropy"
activation = tf.nn.sigmoid
else:
loss = "categorical_crossentropy"
activation = tf.nn.softmax
dense = tf.keras.layers.Dense(
units=output_dim,
input_dim=input_dim,
activation=activation,
kernel_regularizer=tf.keras.regularizers.l2(0.001),
)
model.add(dense)
opt = tf.keras.optimizers.Adam(lr=0.01)
model.compile(optimizer=opt, loss=loss, metrics=["accuracy"])
return model
def get_keras_early_stopping(patience=10):
"""Create early stopping condition
"""
    return tf.keras.callbacks.EarlyStopping(
        monitor="val_accuracy", patience=patience, verbose=1, restore_best_weights=True
    )
class KerasLogReg:
"""This logistic regression model is trained on the labels that Ruler generates, and then evaluated against a test set.
This provides a more complete picture of the quality of the generated training data.
Attributes:
cardinality (int): Number of output classes
keras_model (a Keras logistic regression model): Description
vectorizer (CountVectorizer): Object with fit and transform functions, which transforms texts into vectors
"""
def __init__(self, cardinality=2):
"""Summary
Args:
cardinality (int, optional): Number of output classes
"""
# Set all random seeds
snork_seed(123)
tf.random.set_seed(123)
np_seed(123)
py_seed(123)
self.cardinality = cardinality
self.keras_model = None
def fit(self, X_train, Y_train, X_valid, Y_valid):
"""Train the model using the given training and validation data.
Args:
X_train (list(str)): Training text examples, length n
Y_train (matrix): Training labels, size n*m, where m is the cardinality
X_valid (list(str)): Validation test examples, length p
Y_valid (matrix): Validation labels, size p*m
"""
if self.keras_model is None:
self.vectorizer = CountVectorizer(ngram_range=(1, 2))
self.vectorizer.fit(X_train)
X_train = self.vectorizer.transform(X_train)
X_valid = self.vectorizer.transform(X_valid)
if self.keras_model is None:
self.keras_model = get_keras_logreg(input_dim=X_train.shape[1], output_dim=self.cardinality)
self.keras_model.fit(
x=X_train,
y=Y_train,
validation_data=(X_valid, Y_valid),
callbacks=[get_keras_early_stopping()],
epochs=20,
verbose=0,
)
def predict(self, X):
"""Predict probabilities that each sample in X belongs to each class.
Args:
X (list(str)): Texts to predict class, length n
Returns:
matrix: size n*m, where m is the cardinality of the model
"""
X_v = self.vectorizer.transform(X)
return self.keras_model.predict(x=X_v)
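# A minimal usage sketch (not part of the original module): train the wrapper
# on a tiny toy corpus and inspect the predicted class probabilities. The toy
# texts and labels are made up; preds_to_probs turns hard labels into the
# one-hot probability matrix that fit() expects.
if __name__ == "__main__":
    import numpy as np
    texts = ["good film", "great movie", "bad film", "terrible movie"] * 10
    labels = np.array([1, 1, 0, 0] * 10)
    probs = preds_to_probs(labels, 2)
    end_model = KerasLogReg(cardinality=2)
    end_model.fit(texts, probs, texts, probs)  # toy data reused as validation
    print(end_model.predict(["good movie", "bad movie"]))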
|
"""Collection module."""
from .catalog import Catalog
from .item import Item, ItemCollection  # Item is used by Collection.get_items; assumed to live alongside ItemCollection
from .utils import Utils
class Extent(dict):
"""The Extent object."""
def __init__(self, data):
"""Initialize instance with dictionary data.
:param data: Dict with Extent metadata.
"""
super(Extent, self).__init__(data or {})
@property
def spatial(self):
""":return: the spatial extent."""
return SpatialExtent(self['spatial'])
@property
def temporal(self):
""":return: the temporal extent."""
return TemporalExtent(self['temporal'])
class SpatialExtent(dict):
"""The Spatial Extent object."""
def __init__(self, data):
"""Initialize instance with dictionary data.
:param data: Dict with Spatial Extent metadata.
"""
super(SpatialExtent, self).__init__(data or {})
@property
def bbox(self):
""":return: the bbox of the Spatial Extent."""
return self['bbox']
class TemporalExtent(dict):
"""The Temporal Extent object."""
def __init__(self, data):
"""Initialize instance with dictionary data.
:param data: Dict with Temporal Extent metadata.
"""
super(TemporalExtent, self).__init__(data or {})
@property
def interval(self):
""":return: the interval of the Temporal Extent."""
return self['interval']
class Provider(dict):
"""The Provider Object."""
def __init__(self, data):
"""Initialize instance with dictionary data.
:param data: Dict with Provider metadata.
"""
super(Provider, self).__init__(data or {})
@property
def name(self):
""":return: the Provider name."""
return self['name']
@property
def description(self):
""":return: the Provider description."""
return self['description']
@property
def roles(self):
""":return: the Provider roles."""
        return self['roles']
@property
def url(self):
""":return: the Provider url."""
return self['url']
class Collection(Catalog):
"""The STAC Collection."""
def __init__(self, data):
"""Initialize instance with dictionary data.
:param data: Dict with collection metadata.
"""
super(Collection, self).__init__(data or {})
@property
def keywords(self):
""":return: the Collection list of keywords."""
return self['keywords'] if 'keywords' in self else None
@property
def version(self):
""":return: the Collection version."""
return self['version'] if 'version' in self else None
@property
def license(self):
""":return: the Collection license."""
return self['license']
@property
def providers(self):
""":return: the Collection list of providers."""
return [Provider(provider) for provider in self['providers']]
@property
def extent(self):
""":return: the Collection extent."""
return Extent(self['extent'])
@property
def properties(self):
""":return: the Collection properties."""
return self['properties'] if 'properties' in self else None
def get_items(self, item_id=None, filter=None):
""":return: A GeoJSON FeatureCollection of STAC Items from the collection."""
for link in self['links']:
if link['rel'] == 'items':
if item_id is not None:
data = Utils.get(f'{link["href"]}/{item_id}')
return Item(data)
data = Utils._get(link['href'], params=filter)
return ItemCollection(data)
return ItemCollection({})
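# A small illustration (not part of the original module) of how the wrappers
# above expose plain dict metadata. The dictionary below is a made-up example,
# not a real STAC collection.
if __name__ == "__main__":
    example = Collection({
        'id': 'example-collection',
        'license': 'CC-BY-4.0',
        'providers': [{'name': 'Example Org', 'description': 'demo provider',
                       'roles': ['producer'], 'url': 'https://example.org'}],
        'extent': {
            'spatial': {'bbox': [[-180, -90, 180, 90]]},
            'temporal': {'interval': [['2020-01-01T00:00:00Z', None]]},
        },
        'links': [],
    })
    print(example.license)
    print(example.extent.spatial.bbox)
    print([provider.name for provider in example.providers])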
|
class Solution:
def hasGroupsSizeX(self, deck):
helper = {}
for card in deck:
            if card in helper:
helper[card] += 1
else:
helper[card] = 1
i = 2
while i <= helper[card]:
status = True
for j in helper:
if helper[j] % i != 0:
status = False
break
if status is True:
return True
i += 1
return False
slu = Solution()
print(slu.hasGroupsSizeX([1, 2, 3, 4, 4, 3, 2, 1, 1]))
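# For reference, the same check is commonly written with the gcd of the card
# counts (an alternative formulation, not the author's original approach):
# a valid group size X >= 2 exists exactly when the gcd of all counts is >= 2.
from collections import Counter
from functools import reduce
from math import gcd
def has_groups_size_x_gcd(deck):
    counts = Counter(deck).values()
    return reduce(gcd, counts) >= 2
print(has_groups_size_x_gcd([1, 2, 3, 4, 4, 3, 2, 1, 1]))  # False, matching the result above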
|
import numpy as np
import torch
from torch import nn
class regressor_fcn_bn_32_b2h(nn.Module):
def __init__(self):
super(regressor_fcn_bn_32_b2h, self).__init__()
def build_net(self, feature_in_dim, feature_out_dim, require_image=False, default_size=256):
self.require_image = require_image
self.default_size = default_size
self.use_resnet = True
embed_size = default_size
if self.require_image:
embed_size += default_size
if self.use_resnet:
self.image_resnet_postprocess = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(1000*2, default_size), # 1000 is the size of ResNet50's embeddings (2 hands)
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(default_size, momentum=0.01),
)
self.image_reduce = nn.Sequential(
nn.MaxPool1d(kernel_size=2, stride=2),
)
self.encoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(feature_in_dim,256,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(256),
nn.MaxPool1d(kernel_size=2, stride=2),
)
self.conv5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv6 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv7 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
# self.conv8 = nn.Sequential(
# nn.Dropout(0.5),
# nn.Conv1d(embed_size,embed_size,3,padding=1),
# nn.LeakyReLU(0.2, True),
# nn.BatchNorm1d(embed_size),
# )
# self.conv9 = nn.Sequential(
# nn.Dropout(0.5),
# nn.Conv1d(embed_size,embed_size,3,padding=1),
# nn.LeakyReLU(0.2, True),
# nn.BatchNorm1d(embed_size),
# )
# self.conv10 = nn.Sequential(
# nn.Dropout(0.5),
# nn.Conv1d(embed_size,embed_size,3,padding=1),
# nn.LeakyReLU(0.2, True),
# nn.BatchNorm1d(embed_size),
# )
# self.skip1 = nn.Sequential(
# nn.Dropout(0.5),
# nn.Conv1d(embed_size,embed_size,3,padding=1),
# nn.LeakyReLU(0.2, True),
# nn.BatchNorm1d(embed_size),
# )
# self.skip2 = nn.Sequential(
# nn.Dropout(0.5),
# nn.Conv1d(embed_size,embed_size,3,padding=1),
# nn.LeakyReLU(0.2, True),
# nn.BatchNorm1d(embed_size),
# )
self.skip4 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.skip5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.decoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
nn.Dropout(0.5),
nn.ConvTranspose1d(embed_size, feature_out_dim, 7, stride=2, padding=3, output_padding=1),
nn.ReLU(True),
nn.BatchNorm1d(feature_out_dim),
nn.Dropout(0.5),
nn.Conv1d(feature_out_dim, feature_out_dim, 7, padding=3),
)
## create image embedding
def process_image(self, image_):
B, T, _ = image_.shape
image_ = image_.view(-1, 1000*2)
feat = self.image_resnet_postprocess(image_)
feat = feat.view(B, T, self.default_size)
feat = feat.permute(0, 2, 1).contiguous()
feat = self.image_reduce(feat)
return feat
## utility upsampling function
def upsample(self, tensor, shape):
return tensor.repeat_interleave(2, dim=2)[:,:,:shape[2]]
## forward pass through generator
def forward(self, input_, audio_=None, percent_rand_=0.7, feats_=None):
B, T = input_.shape[0], input_.shape[2]
fourth_block = self.encoder(input_)
if self.require_image:
feat = self.process_image(feats_)
fourth_block = torch.cat((fourth_block, feat), dim=1)
fifth_block = self.conv5(fourth_block)
sixth_block = self.conv6(fifth_block)
seventh_block = self.conv7(sixth_block)
# eighth_block = self.conv8(seventh_block)
# ninth_block = self.conv9(eighth_block)
# tenth_block = self.conv10(ninth_block)
# ninth_block = tenth_block + ninth_block
# ninth_block = self.skip1(ninth_block)
# eighth_block = ninth_block + eighth_block
# eighth_block = self.skip2(eighth_block)
sixth_block = self.upsample(seventh_block, sixth_block.shape) + sixth_block
sixth_block = self.skip4(sixth_block)
fifth_block = sixth_block + fifth_block
fifth_block = self.skip5(fifth_block)
output = self.decoder(fifth_block)
return output
class regressor_fcn_bn_32(nn.Module):
def __init__(self):
super(regressor_fcn_bn_32, self).__init__()
def build_net(self, feature_in_dim, feature_out_dim, require_text=None, default_size=256):
self.require_text = require_text
self.default_size = default_size
embed_size_encoder = default_size
embed_size = default_size
if self.require_text:
embed_size += default_size
self.text_embeds_postprocess = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(512, default_size), # 512 is the size of CLIP's text embeddings
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(default_size, momentum=0.01),
)
self.text_reduce = nn.Sequential(
nn.MaxPool1d(kernel_size=2, stride=2),
)
self.encoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(feature_in_dim,embed_size_encoder,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size_encoder),
nn.MaxPool1d(kernel_size=2, stride=2),
)
self.conv5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv6 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.conv7 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
# self.conv8 = nn.Sequential(
# nn.Dropout(0.5),
# nn.Conv1d(embed_size,embed_size,3,padding=1),
# nn.LeakyReLU(0.2, True),
# nn.BatchNorm1d(embed_size),
# )
# self.conv9 = nn.Sequential(
# nn.Dropout(0.5),
# nn.Conv1d(embed_size,embed_size,3,padding=1),
# nn.LeakyReLU(0.2, True),
# nn.BatchNorm1d(embed_size),
# )
# self.conv10 = nn.Sequential(
# nn.Dropout(0.5),
# nn.Conv1d(embed_size,embed_size,3,padding=1),
# nn.LeakyReLU(0.2, True),
# nn.BatchNorm1d(embed_size),
# )
# self.skip1 = nn.Sequential(
# nn.Dropout(0.5),
# nn.Conv1d(embed_size,embed_size,3,padding=1),
# nn.LeakyReLU(0.2, True),
# nn.BatchNorm1d(embed_size),
# )
# self.skip2 = nn.Sequential(
# nn.Dropout(0.5),
# nn.Conv1d(embed_size,embed_size,3,padding=1),
# nn.LeakyReLU(0.2, True),
# nn.BatchNorm1d(embed_size),
# )
self.skip4 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.skip5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
)
self.decoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(embed_size,embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(embed_size),
nn.Dropout(0.5),
nn.ConvTranspose1d(embed_size, feature_out_dim, 7, stride=2, padding=3, output_padding=1),
nn.ReLU(True),
nn.BatchNorm1d(feature_out_dim),
nn.Dropout(0.5),
nn.Conv1d(feature_out_dim, feature_out_dim, 7, padding=3),
)
## create text embedding
def process_text(self, text_, T): # "v1"
text_ = text_.unsqueeze(1).repeat(1, T, 1)
B, _, E = text_.shape
text_ = text_.view(-1, E)
feat = self.text_embeds_postprocess(text_)
feat = feat.view(B, T, self.default_size)
feat = feat.permute(0, 2, 1).contiguous()
feat = self.text_reduce(feat)
return feat
## utility upsampling function
def upsample(self, tensor, shape):
return tensor.repeat_interleave(2, dim=2)[:,:,:shape[2]]
## forward pass through generator
def forward(self, input_, audio_=None, percent_rand_=0.7, feats_=None):
B, T = input_.shape[0], input_.shape[2]
# print(f"input_.shape: {input_.shape}")
fourth_block = self.encoder(input_)
if self.require_text: # "v1"
# print(text_.shape)
feat = self.process_text(feats_, T)
fourth_block = torch.cat((fourth_block, feat), dim=1)
fifth_block = self.conv5(fourth_block)
sixth_block = self.conv6(fifth_block)
seventh_block = self.conv7(sixth_block)
# eighth_block = self.conv8(seventh_block)
# ninth_block = self.conv9(eighth_block)
# tenth_block = self.conv10(ninth_block)
# ninth_block = tenth_block + ninth_block
# ninth_block = self.skip1(ninth_block)
# eighth_block = ninth_block + eighth_block
# eighth_block = self.skip2(eighth_block)
sixth_block = self.upsample(seventh_block, sixth_block.shape) + sixth_block
sixth_block = self.skip4(sixth_block)
fifth_block = sixth_block + fifth_block
fifth_block = self.skip5(fifth_block)
output = self.decoder(fifth_block)
return output
class regressor_fcn_bn_32_v2(nn.Module):
def __init__(self):
super(regressor_fcn_bn_32_v2, self).__init__()
def build_net(self, feature_in_dim, feature_out_dim, require_text=None, default_size=256):
self.require_text = require_text
self.default_size = default_size
self.embed_size = default_size
if self.require_text:
self.embed_size += default_size
self.text_embeds_postprocess = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(512, self.embed_size), # 512 is the size of CLIP's text embeddings
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size, momentum=0.01),
)
self.encoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(feature_in_dim,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
nn.MaxPool1d(kernel_size=2, stride=2),
)
self.conv5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.conv6 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.conv7 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.skip4 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.skip5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.decoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
nn.Dropout(0.5),
nn.ConvTranspose1d(self.embed_size, feature_out_dim, 7, stride=2, padding=3, output_padding=1),
nn.ReLU(True),
nn.BatchNorm1d(feature_out_dim),
nn.Dropout(0.5),
nn.Conv1d(feature_out_dim, feature_out_dim, 7, padding=3),
)
## create text embedding
def process_text(self, feats_):
feats_ = feats_.unsqueeze(1)
B, TT, E = feats_.shape
feats_ = feats_.view(-1, E)
feat = self.text_embeds_postprocess(feats_)
feat = feat.view(B, TT, self.embed_size) # TT should == 1
feat = feat.permute(0, 2, 1).contiguous()
return feat
## utility upsampling function
def upsample(self, tensor, shape):
return tensor.repeat_interleave(2, dim=2)[:,:,:shape[2]]
## forward pass through generator
def forward(self, input_, audio_=None, percent_rand_=0.7, feats_=None):
B, T = input_.shape[0], input_.shape[2]
fourth_block = self.encoder(input_)
fifth_block = self.conv5(fourth_block)
sixth_block = self.conv6(fifth_block)
seventh_block = self.conv7(sixth_block)
if self.require_text:
feat = self.process_text(feats_)
seventh_block = torch.cat((seventh_block, feat), dim=2)
sixth_block = self.upsample(seventh_block, sixth_block.shape) + sixth_block
sixth_block = self.skip4(sixth_block)
fifth_block = sixth_block + fifth_block
fifth_block = self.skip5(fifth_block)
output = self.decoder(fifth_block)
return output
class regressor_fcn_bn_32_v4(nn.Module):
def __init__(self):
super(regressor_fcn_bn_32_v4, self).__init__()
def build_net(self, feature_in_dim, feature_out_dim, require_text=None, default_size=256):
self.require_text = require_text
self.default_size = default_size
self.embed_size = default_size
if self.require_text:
self.embed_size += default_size
self.text_embeds_postprocess = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(512, self.embed_size//2), # 512 is the size of CLIP's text embeddings
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size//2, momentum=0.01),
)
self.encoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(feature_in_dim,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
nn.MaxPool1d(kernel_size=2, stride=2),
)
self.conv5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.conv6 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.conv7 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size//(1+self.require_text),5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size//(1+self.require_text)),
)
self.skip4 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.skip5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.decoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
nn.Dropout(0.5),
nn.ConvTranspose1d(self.embed_size, feature_out_dim, 7, stride=2, padding=3, output_padding=1),
nn.ReLU(True),
nn.BatchNorm1d(feature_out_dim),
nn.Dropout(0.5),
nn.Conv1d(feature_out_dim, feature_out_dim, 7, padding=3),
)
## create text embedding
def process_text(self, feats_, T):
feats_ = feats_.unsqueeze(1).repeat(1, T, 1)
B, _, E = feats_.shape
feats_ = feats_.view(-1, E)
feat = self.text_embeds_postprocess(feats_)
feat = feat.view(B, T, -1)
feat = feat.permute(0, 2, 1).contiguous()
return feat
## utility upsampling function
def upsample(self, tensor, shape):
return tensor.repeat_interleave(2, dim=2)[:,:,:shape[2]]
## forward pass through generator
def forward(self, input_, audio_=None, percent_rand_=0.7, feats_=None):
B, T = input_.shape[0], input_.shape[2]
fourth_block = self.encoder(input_)
fifth_block = self.conv5(fourth_block)
sixth_block = self.conv6(fifth_block)
seventh_block = self.conv7(sixth_block)
if self.require_text:
T = seventh_block.shape[2]
feat = self.process_text(feats_, T)
seventh_block = torch.cat((seventh_block, feat), dim=1)
sixth_block = self.upsample(seventh_block, sixth_block.shape) + sixth_block
sixth_block = self.skip4(sixth_block)
fifth_block = sixth_block + fifth_block
fifth_block = self.skip5(fifth_block)
output = self.decoder(fifth_block)
return output
class regressor_fcn_bn_32_v4_deeper(nn.Module):
def __init__(self):
super(regressor_fcn_bn_32_v4_deeper, self).__init__()
def build_net(self, feature_in_dim, feature_out_dim, require_text=None, default_size=256):
self.require_text = require_text
self.default_size = default_size
self.embed_size = default_size
if self.require_text:
self.embed_size += default_size
self.text_embeds_postprocess = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(512, self.embed_size//2), # 512 is the size of CLIP's text embeddings
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size//2, momentum=0.01),
)
self.encoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(feature_in_dim,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
nn.MaxPool1d(kernel_size=2, stride=2),
)
self.conv5 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.conv6 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.conv7 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.conv8 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.conv9 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size//(1+self.require_text),3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size//(1+self.require_text)),
)
self.conv10 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size//(1+self.require_text),self.embed_size//(1+self.require_text),3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size//(1+self.require_text)),
)
self.skip1 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.skip2 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.skip3 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.skip4 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.decoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
nn.Dropout(0.5),
nn.ConvTranspose1d(self.embed_size, feature_out_dim, 7, stride=2, padding=3, output_padding=1),
nn.ReLU(True),
nn.BatchNorm1d(feature_out_dim),
nn.Dropout(0.5),
nn.Conv1d(feature_out_dim, feature_out_dim, 7, padding=3),
)
## create text embedding
def process_text(self, text_, T):
text_ = text_.unsqueeze(1).repeat(1, T, 1)
B, _, E = text_.shape
text_ = text_.view(-1, E)
feat = self.text_embeds_postprocess(text_)
feat = feat.view(B, T, -1)
feat = feat.permute(0, 2, 1).contiguous()
return feat
## utility upsampling function
def upsample(self, tensor, shape):
return tensor.repeat_interleave(2, dim=2)[:,:,:shape[2]]
## forward pass through generator
def forward(self, input_, audio_=None, percent_rand_=0.7, feats_=None):
fourth_block = self.encoder(input_)
fifth_block = self.conv5(fourth_block)
sixth_block = self.conv6(fifth_block)
seventh_block = self.conv7(sixth_block)
eighth_block = self.conv8(seventh_block)
ninth_block = self.conv9(eighth_block)
tenth_block = self.conv10(ninth_block)
ninth_block = tenth_block + ninth_block
if self.require_text:
T = ninth_block.shape[2]
feat = self.process_text(feats_, T)
ninth_block = torch.cat((ninth_block, feat), dim=1)
ninth_block = self.skip1(ninth_block)
eighth_block = ninth_block + eighth_block
eighth_block = self.skip2(eighth_block)
sixth_block = self.upsample(seventh_block, sixth_block.shape) + sixth_block
sixth_block = self.skip3(sixth_block)
fifth_block = sixth_block + fifth_block
fifth_block = self.skip4(fifth_block)
output = self.decoder(fifth_block)
return output
class decoder_embed2pose(nn.Module):
def __init__(self):
super(decoder_embed2pose, self).__init__()
def build_net(self, feature_in_dim, feature_out_dim, feature_out_len,require_text=None, default_size=256):
        self.require_text = require_text
        self.default_size = default_size
        self.embed_size = default_size  # missing in the original stub; the conv/decoder blocks below read it
        self.use_embeds = True
self.conv1 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.conv2 = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
)
self.decoder = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(self.embed_size,self.embed_size,3,padding=1),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(self.embed_size),
nn.Dropout(0.5),
nn.ConvTranspose1d(self.embed_size, feature_out_dim, 7, stride=2, padding=3, output_padding=1),
nn.ReLU(True),
nn.BatchNorm1d(feature_out_dim),
nn.Dropout(0.5),
nn.Conv1d(feature_out_dim, feature_out_dim, 7, padding=3),
)
## utility upsampling function
def upsample(self, tensor, shape):
return tensor.repeat_interleave(2, dim=2)[:,:,:shape[2]]
## forward pass through generator
def forward(self, input_, audio_=None, percent_rand_=0.7, feats_=None):
B, T = input_.shape[0], input_.shape[2]
# print(f"input_.shape: {input_.shape}")
output = None
return output
class regressor_fcn_bn_discriminator(nn.Module):
def __init__(self):
super(regressor_fcn_bn_discriminator, self).__init__()
def build_net(self, feature_in_dim):
self.convs = nn.Sequential(
nn.Dropout(0.5),
nn.Conv1d(feature_in_dim,64,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(64),
## 64
nn.Dropout(0.5),
nn.Conv1d(64,64,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(64),
## 32
nn.Dropout(0.5),
nn.Conv1d(64,32,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(32),
## 16
nn.Dropout(0.5),
nn.Conv1d(32,32,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(32),
## 8
nn.Dropout(0.5),
nn.Conv1d(32,16,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(16),
## 4
nn.Dropout(0.5),
nn.Conv1d(16,16,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(16),
## 2
nn.Dropout(0.5),
nn.Conv1d(16,8,5,stride=2,padding=2),
nn.LeakyReLU(0.2, True),
nn.BatchNorm1d(8),
## 1
nn.Dropout(0.5),
nn.Conv1d(8,1,3,padding=1),
)
def forward(self, input_):
outputs = self.convs(input_)
return outputs
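# A small smoke test (not part of the original training code) showing the
# tensor layout these generators expect: (batch, feature_dim, time), with the
# time axis divisible by 4 so the stride-2 encoder/decoder pair restores the
# original length. All dimensions below are arbitrary placeholders.
if __name__ == "__main__":
    B, T, feat_in, feat_out = 2, 64, 36, 48
    generator = regressor_fcn_bn_32()
    generator.build_net(feat_in, feat_out, require_text=False)
    discriminator = regressor_fcn_bn_discriminator()
    discriminator.build_net(feat_out)
    x = torch.randn(B, feat_in, T)
    fake = generator(x)
    print(fake.shape)                 # expected: (B, feat_out, T)
    print(discriminator(fake).shape)  # temporal axis collapsed by the strided convs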
|
from qiskit import *
from qiskit.chemistry.drivers import UnitsType, HFMethodType, PySCFDriver
from qiskit.chemistry.core import Hamiltonian, TransformationType, QubitMappingType
from qiskit.chemistry.components.initial_states import HartreeFock
from qiskit.aqua import QuantumInstance,aqua_globals
from qiskit.aqua.operators import Z2Symmetries
from qiskit.aqua.algorithms.classical import ExactEigensolver
import numpy as np
import functools
from qiskit.providers.aer.noise import NoiseModel
from qiskit.ignis.mitigation.measurement import (complete_meas_cal,tensored_meas_cal,CompleteMeasFitter,TensoredMeasFitter)
import sys
from subroutines import *
driver = PySCFDriver(atom='''H 0.0000 0.0000 0.0000; H 0.0000 0.0000 0.742''',
unit=UnitsType.ANGSTROM,charge=0,spin=0,basis='sto-6g',hf_method=HFMethodType.RHF)
molecule = driver.run()
#Now the Hamiltonian
core = Hamiltonian(transformation=TransformationType.FULL,qubit_mapping=QubitMappingType.PARITY,two_qubit_reduction=True,freeze_core=False,orbital_reduction=[0,1,2,5,6])
H_op,A_op = core.run(molecule)
dE = core._energy_shift + core._ph_energy_shift + core._nuclear_repulsion_energy
A_op = A_op[:3]
dA = [0]*len(A_op)
## Initial state + variational ansatze to construct the circuit
init_state = HartreeFock(num_qubits=H_op.num_qubits,num_orbitals=core._molecule_info['num_orbitals'],
qubit_mapping=core._qubit_mapping,two_qubit_reduction=core._two_qubit_reduction,
num_particles=core._molecule_info['num_particles'])
num_qubits = H_op.num_qubits
num_parameters = 6*(num_qubits-1)
def generate_circuit(parameters):
circuit = init_state.construct_circuit()
num_qubits = H_op.num_qubits
circuit.barrier()
p0 = 0
for qubit1 in range(0,num_qubits-1,2):
qubit2 = qubit1+1
circuit.s(qubit1)
circuit.s(qubit2)
circuit.h(qubit2)
circuit.cx(qubit2,qubit1)
circuit.u3(parameters[p0+0],parameters[p0+1],parameters[p0+2],qubit1)
circuit.u3(parameters[p0+3],parameters[p0+4],parameters[p0+5],qubit2); p0 += 6
circuit.cx(qubit2,qubit1)
circuit.h(qubit2)
circuit.sdg(qubit1)
circuit.sdg(qubit2)
circuit.barrier()
for qubit1 in range(1,num_qubits-1,2):
qubit2 = qubit1+1
circuit.s(qubit1)
circuit.s(qubit2)
circuit.h(qubit2)
circuit.cx(qubit2,qubit1)
circuit.u3(parameters[p0+0],parameters[p0+1],parameters[p0+2],qubit1)
circuit.u3(parameters[p0+3],parameters[p0+4],parameters[p0+5],qubit2); p0 += 6
circuit.cx(qubit2,qubit1)
circuit.h(qubit2)
circuit.sdg(qubit1)
circuit.sdg(qubit2)
circuit.barrier()
return circuit
print(generate_circuit(np.zeros(num_parameters)).draw())
###Now choose the right instance
#runtype = ['qasm',None,False,8000]
#runtype = ['noise_model','ibmq_rome',False,8000]
#runtype = ['noise_model','ibmq_rome',True,8000]
runtype = ['hardware','ibmq_rome',True,8000]
if(runtype[0]=='qasm'): # ==== running VQE on QASM simulator, without noise
print("Running on qasm simulator ")
backend = Aer.get_backend('qasm_simulator')
instance = QuantumInstance(backend=backend,shots=runtype[3])
if(runtype[0]=='noise_model'): # ==== running VQE on QASM simulator, with noise model from an actual machine
print("Running on qasm simulator with noise of a real hardware")
print("Opening IBMQ account...")
provider = IBMQ.load_account()
backend = provider.get_backend(runtype[1])
noise_model = NoiseModel.from_backend(backend)
coupling_map = backend.configuration().coupling_map
basis_gates = noise_model.basis_gates
simulator = Aer.get_backend('qasm_simulator')
    if(runtype[2]): # ==== using readout error mitigation
print("with error mitigation")
instance = QuantumInstance(backend=simulator,noise_model=noise_model,coupling_map=coupling_map,basis_gates=basis_gates,
measurement_error_mitigation_cls=CompleteMeasFitter,shots=runtype[3])
    else: # ==== without readout error mitigation
print("without error mitigation")
instance = QuantumInstance(backend=simulator,noise_model=noise_model,coupling_map=coupling_map,basis_gates=basis_gates,shots=runtype[3])
if(runtype[0]=='hardware'): # ==== running VQE on HARDWARE, I would do it with readout error mitigation
print("Running on hardware")
print("Running "+r)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q-research',group='Stefan-Barison', project='main')
simulator = provider.get_backend(runtype[1])
instance = QuantumInstance(backend=simulator,measurement_error_mitigation_cls=CompleteMeasFitter,shots=runtype[3],skip_qobj_validation=False)
algo = manual_VQE(H_op,dE,A_op,dA,generate_circuit,instance,num_parameters)
algo.run(t_mesh=np.arange(-2,2.1,0.5))
|
import unittest
from cnc.pulses import *
from cnc.config import *
from cnc.coordinates import *
from cnc import hal_virtual
class TestPulses(unittest.TestCase):
def setUp(self):
self.v = min(MAX_VELOCITY_MM_PER_MIN_X,
MAX_VELOCITY_MM_PER_MIN_Y,
MAX_VELOCITY_MM_PER_MIN_Z,
MAX_VELOCITY_MM_PER_MIN_E)
def tearDown(self):
pass
def test_zero(self):
# PulseGenerator should never receive empty movement.
self.assertRaises(ZeroDivisionError,
PulseGeneratorLinear,
Coordinates(0, 0, 0, 0), self.v)
self.assertRaises(ZeroDivisionError, PulseGeneratorCircular,
Coordinates(0, 0, 0, 0), Coordinates(0, 0, 9, 9),
PLANE_XY, CW, self.v)
self.assertRaises(ZeroDivisionError, PulseGeneratorCircular,
Coordinates(0, 0, 0, 0), Coordinates(9, 0, 0, 9),
PLANE_YZ, CW, self.v)
self.assertRaises(ZeroDivisionError, PulseGeneratorCircular,
Coordinates(0, 0, 0, 0), Coordinates(0, 9, 0, 9),
PLANE_ZX, CW, self.v)
def test_step_linear(self):
# Check if PulseGenerator returns correctly single step movement.
g = PulseGeneratorLinear(Coordinates(1.0 / STEPPER_PULSES_PER_MM_X,
0, 0, 0),
self.v)
i = 0
for direction, px, py, pz, pe in g:
if direction:
continue
i += 1
self.assertEqual(px, 0)
self.assertEqual(py, None)
self.assertEqual(pz, None)
self.assertEqual(pe, None)
self.assertEqual(i, 1)
g = PulseGeneratorLinear(Coordinates(
1.0 / STEPPER_PULSES_PER_MM_X,
1.0 / STEPPER_PULSES_PER_MM_Y,
1.0 / STEPPER_PULSES_PER_MM_Z,
1.0 / STEPPER_PULSES_PER_MM_E),
self.v)
i = 0
for direction, px, py, pz, pe in g:
if direction:
continue
i += 1
self.assertEqual(px, 0)
self.assertEqual(py, 0)
self.assertEqual(pz, 0)
self.assertEqual(pe, 0)
self.assertEqual(i, 1)
def __check_circular(self, delta, radius, plane, direction=CW):
g = PulseGeneratorCircular(delta, radius, plane, direction, self.v)
x, y, z, e = 0, 0, 0, 0
dx, dy, dz, de = None, None, None, None
dir_changed = 0
dir_requested = False
t = -1
for direction_i, px, py, pz, pe in g:
if direction_i:
dx, dy, dz, de = px, py, pz, pe
if STEPPER_INVERTED_X:
dx = -dx
if STEPPER_INVERTED_Y:
dy = -dy
if STEPPER_INVERTED_Z:
dz = -dz
if STEPPER_INVERTED_E:
de = -de
dir_requested = True
continue
if dir_requested: # ignore last change
dir_requested = False
dir_changed += 1
if px is not None:
x += dx
if py is not None:
y += dy
if pz is not None:
z += dz
if pe is not None:
e += de
v = list(i for i in (px, py, pz, pe) if i is not None)
self.assertEqual(min(v), max(v))
self.assertLessEqual(t, min(v))
t = max(v)
return dir_changed, Coordinates(x / STEPPER_PULSES_PER_MM_X,
y / STEPPER_PULSES_PER_MM_Y,
z / STEPPER_PULSES_PER_MM_Z,
e / STEPPER_PULSES_PER_MM_E)
def test_single_radius_circles(self):
# Check if PulseGenerator returns correctly single radius movement in
# both direction.
zero_delta = Coordinates(0, 0, 0, 0)
radius = Coordinates(1.0 / STEPPER_PULSES_PER_MM_X, 0, 0, 0)
_, pos = self.__check_circular(zero_delta, radius, PLANE_XY, CW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
radius = Coordinates(-1.0 / STEPPER_PULSES_PER_MM_X, 0, 0, 0)
_, pos = self.__check_circular(zero_delta, radius,
PLANE_XY, CW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
radius = Coordinates(0, 1.0 / STEPPER_PULSES_PER_MM_Y, 0, 0)
_, pos = self.__check_circular(zero_delta, radius, PLANE_YZ, CW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
radius = Coordinates(0, -1.0 / STEPPER_PULSES_PER_MM_Y, 0, 0)
_, pos = self.__check_circular(zero_delta, radius, PLANE_YZ, CW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
radius = Coordinates(0, 0, 1.0 / STEPPER_PULSES_PER_MM_Z, 0)
_, pos = self.__check_circular(zero_delta, radius, PLANE_ZX, CW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
radius = Coordinates(0, 0, -1.0 / STEPPER_PULSES_PER_MM_Z, 0)
_, pos = self.__check_circular(zero_delta, radius, PLANE_ZX, CW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
radius = Coordinates(1.0 / STEPPER_PULSES_PER_MM_X, 0, 0, 0)
_, pos = self.__check_circular(zero_delta, radius, PLANE_XY, CCW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
radius = Coordinates(-1.0 / STEPPER_PULSES_PER_MM_X, 0, 0, 0)
_, pos = self.__check_circular(zero_delta, radius, PLANE_XY, CCW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
radius = Coordinates(0, 1.0 / STEPPER_PULSES_PER_MM_Y, 0, 0)
_, pos = self.__check_circular(zero_delta, radius, PLANE_YZ, CCW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
radius = Coordinates(0, -1.0 / STEPPER_PULSES_PER_MM_Y, 0, 0)
_, pos = self.__check_circular(zero_delta, radius, PLANE_YZ, CCW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
radius = Coordinates(0, 0, 1.0 / STEPPER_PULSES_PER_MM_Z, 0)
_, pos = self.__check_circular(zero_delta, radius, PLANE_ZX, CCW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
radius = Coordinates(0, 0, -1.0 / STEPPER_PULSES_PER_MM_Z, 0)
_, pos = self.__check_circular(zero_delta, radius, PLANE_ZX, CCW)
self.assertEqual(pos, Coordinates(0, 0, 0, 0))
def test_with_hal_virtual(self):
# Using hal_virtual module for this test, it already contains plenty
# of asserts for wrong number of pulses, pulse timing issues etc
hal_virtual.move(PulseGeneratorLinear(Coordinates(1, 0, 0, 0),
self.v))
hal_virtual.move(PulseGeneratorLinear(Coordinates(25.4, 0, 0, 0),
self.v))
hal_virtual.move(PulseGeneratorLinear(Coordinates(25.4, 0, 0, 0),
self.v))
hal_virtual.move(PulseGeneratorLinear(Coordinates(25.4, 0, 0, 0),
self.v))
hal_virtual.move(PulseGeneratorLinear(Coordinates(TABLE_SIZE_X_MM,
TABLE_SIZE_Y_MM,
TABLE_SIZE_Z_MM,
100.0), self.v))
hal_virtual.move(PulseGeneratorCircular(Coordinates(0, 20, 0, 0),
Coordinates(-10, 10, 0, 0),
PLANE_XY, CW, self.v))
hal_virtual.move(PulseGeneratorCircular(Coordinates(-4, -4, 0, 0),
Coordinates(-2, -2, 0, 0),
PLANE_XY, CW, self.v))
delta = Coordinates(- 2.0 / STEPPER_PULSES_PER_MM_X,
- 2.0 / STEPPER_PULSES_PER_MM_Y, 0, 0)
radius = Coordinates(- 1.0 / STEPPER_PULSES_PER_MM_X,
- 1.0 / STEPPER_PULSES_PER_MM_Y, 0, 0)
hal_virtual.move(PulseGeneratorCircular(delta, radius, PLANE_XY, CW,
self.v))
def test_twice_faster_linear(self):
# Checks if one axis moves exactly twice faster, pulses are correct.
m = Coordinates(2, 4, 0, 0)
g = PulseGeneratorLinear(m, self.v)
i = 0
j = 0
k = 0
for direction, px, py, pz, pe in g:
if direction:
continue
if py is not None:
k += 1
j += 1
else:
self.assertNotEqual(px, None)
if px is not None:
if i != 0:
self.assertEqual(j, 2 * STEPPER_PULSES_PER_MM_Y
/ STEPPER_PULSES_PER_MM_X)
j = 0
self.assertEqual(pz, None)
self.assertEqual(pe, None)
i += 1
self.assertEqual(k / STEPPER_PULSES_PER_MM_Y, m.y)
def test_pulses_count_and_timings(self):
# Check if number of pulses is equal to specified distance.
m = Coordinates(TABLE_SIZE_X_MM, TABLE_SIZE_Y_MM, TABLE_SIZE_Z_MM,
100.0)
g = PulseGeneratorLinear(m, self.v)
ix = 0
iy = 0
iz = 0
ie = 0
t = -1
for direction, px, py, pz, pe in g:
if direction:
continue
if px is not None:
ix += 1
if py is not None:
iy += 1
if pz is not None:
iz += 1
if pe is not None:
ie += 1
v = list(x for x in (px, py, pz, pe) if x is not None)
self.assertEqual(min(v), max(v))
self.assertLess(t, min(v))
t = max(v)
self.assertEqual(m.x * STEPPER_PULSES_PER_MM_X, ix)
self.assertEqual(m.y * STEPPER_PULSES_PER_MM_Y, iy)
self.assertEqual(m.z * STEPPER_PULSES_PER_MM_Z, iz)
self.assertEqual(m.e * STEPPER_PULSES_PER_MM_E, ie)
self.assertLessEqual(t, g.total_time_s())
_, pos = self.__check_circular(Coordinates(0, 8, 0, 7),
Coordinates(1, 0, 1, 0),
PLANE_ZX, CCW)
self.assertEqual(pos, Coordinates(0, 8, 0, 7))
_, pos = self.__check_circular(Coordinates(5, 0, 0, 6),
Coordinates(0, 1, -1, 0),
PLANE_YZ, CW)
self.assertEqual(pos, Coordinates(5, 0, 0, 6))
_, pos = self.__check_circular(Coordinates(-2, -2, 3, 2),
Coordinates(-1, -1, 0, 0),
PLANE_XY, CCW)
self.assertEqual(pos, Coordinates(-2, -2, 3, 2))
def test_acceleration_velocity(self):
# Check if acceleration present in pulses sequence and if velocity
# is correct, since PulseGenerator is responsible for this, check only
# one child class.
m = Coordinates(TABLE_SIZE_X_MM, 0, 0, 0)
velocity = 1000
g = PulseGeneratorLinear(m, velocity)
i = 0
lx = 0
lt, at, bt = None, None, None
for direction, px, py, pz, pe in g:
if direction:
continue
if i == 2:
at = px - lx
if i == TABLE_SIZE_X_MM * STEPPER_PULSES_PER_MM_X / 2:
lt = px - lx
bt = px - lx
lx = px
i += 1
self.assertEqual(round(60.0 / lt / STEPPER_PULSES_PER_MM_X), velocity)
self.assertGreater(at, lt)
self.assertGreater(bt, lt)
def test_directions(self):
# Check if directions are set up correctly.
m = Coordinates(1, -2, 3, -4)
g = PulseGeneratorLinear(m, self.v)
dir_found = False
for direction, px, py, pz, pe in g:
if direction:
if STEPPER_INVERTED_X:
px = -px
if STEPPER_INVERTED_Y:
py = -py
if STEPPER_INVERTED_Z:
pz = -pz
if STEPPER_INVERTED_E:
pe = -pe
# should be once
self.assertFalse(dir_found)
dir_found = True
# check dirs
self.assertTrue(px > 0 and py < 0 and pz > 0 and pe < 0)
m = Coordinates(-1, 2, -3, 4)
g = PulseGeneratorLinear(m, self.v)
dir_found = False
for direction, px, py, pz, pe in g:
if direction:
if STEPPER_INVERTED_X:
px = -px
if STEPPER_INVERTED_Y:
py = -py
if STEPPER_INVERTED_Z:
pz = -pz
if STEPPER_INVERTED_E:
pe = -pe
# should be once
self.assertFalse(dir_found)
dir_found = True
# check dirs
self.assertTrue(px < 0 and py > 0 and pz < 0 and pe > 0)
# check for circle, full circle
dir_changed, _ = self.__check_circular(Coordinates(0, 0, 0, 0),
Coordinates(1.0, 1.0, 0, 0),
PLANE_ZX, CCW)
self.assertEqual(dir_changed, 4)
if __name__ == '__main__':
unittest.main()
|
"""Simple DHT scraper."""
import logging
import os
import threading
from logging.handlers import TimedRotatingFileHandler
from queue import Queue
from random import randint
from secrets import token_hex
from signal import SIGINT, SIGTERM, signal
from typing import List
from dht_node import DHTNode
from dht_node.data_structures import Counter
from dht_node.utils import log_stats
from diskcache import Cache
from src import handlers, utils
if __name__ == "__main__":
cache = Cache("cache", eviction_policy="none", size_limit=5 * 10 ** 10)
counters = {"all": Counter(), "saved": Counter()}
found_torrents: Queue = Queue(maxsize=10 ** 6)
started_nodes: List[DHTNode] = []
# Generate folders, if necessary
for folder in ["logs", "results"]:
if not os.path.exists(folder):
os.makedirs(folder)
# Generate random node details, if necessary
if not os.path.exists("nodes.csv") or os.path.getsize("nodes.csv") == 0:
with open("nodes.csv", "w", encoding="utf8") as source_file:
source_file.write(f"{token_hex(20)},{randint(1025, 65535)}\n")
# Configure logging
    log_f = os.path.join("logs", "log.txt")
logging.basicConfig(
format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
handlers=[TimedRotatingFileHandler(log_f, utc=True, when="midnight")],
level=logging.INFO,
)
# Handle close signal gracefully
stop = threading.Event()
signal(SIGINT, lambda *args: utils.signal_handler(started_nodes, stop))
signal(SIGTERM, lambda *args: utils.signal_handler(started_nodes, stop))
# Add existing info hashes to the cache
utils.update_cache(cache, stop)
# Start result queue handler
threading.Thread(
target=handlers.process_found_torrents,
args=(cache, counters, found_torrents, stop),
).start()
# Load list of nodes from the source file
with open("nodes.csv", "r", encoding="utf8") as source_file:
# Initialize and start them
if not stop.is_set():
for row in source_file:
if not row.strip():
continue
node_id = row.split(",")[0].strip()
node_port = int(row.split(",")[1].strip())
new_node = DHTNode(node_id, node_port)
new_node.add_message_handler(
lambda m, n: utils.on_dht_message(found_torrents, m, n),
)
new_node.start()
started_nodes.append(new_node)
while not stop.is_set():
# Log the progress
logging.info("%s threads", threading.active_count())
log_stats(*started_nodes)
logging.info(
"Processed info hashes: %s all, %s saved",
counters["all"].value,
counters["saved"].value,
)
logging.info("Queue length: %s info hashes\n", found_torrents.qsize())
# Reset counters
for counter in counters.values():
counter.reset()
# Wait until the next check
stop.wait(60)
logging.info("Exiting!\n")
|
# tutorial taken from https://www.youtube.com/watch?v=a8xNuu-2X_4
import sys
import pyqtgraph as pg
import numpy as np
from PyQt5 import QtWidgets, QtCore
app = QtWidgets.QApplication(sys.argv)  # in PyQt5, QApplication lives in QtWidgets (QtGui only provides QGuiApplication)
x = np.random.normal(loc=0.0, scale=2, size=100)
widget = pg.PlotWidget(title="Some plotting")
widget.setWindowTitle("Random Plottoring")
widget.plotItem.plot(x)
widget.show()
sys.exit(app.exec_())
|
# The game world where the game actually takes place
from character import Player
from attack_kind import FireAttackKind, IceAttackKind
from monsters import (FireMonster, IceMonster, StoneMonster,
KungfuMonster)
fm = FireMonster()
im = IceMonster()
sm = StoneMonster()
kfm = KungfuMonster()
monsters = []
monsters.extend((fm, im, sm, kfm))
# Dependency Injection : DI
player = Player('john', 120, 20, FireAttackKind(), IceAttackKind())
print(player)
for mon in monsters:
player.attack(mon, 'Fire')
for mon in monsters:
print(mon)
print()
print("Monster Attack!")
for mon in monsters:
print(mon.get_attack_kind())
mon.attack(player, mon.get_attack_kind())
print()
print(player)
|
"Bidirectional IPC with anonymous pipes"
'''
Pipes normally let data flow in only one direction—one side is input, one is output.
What if you need your programs to talk back and forth, though? For example, one
program might send another a request for information and then wait for that information
to be sent back. A single pipe can’t generally handle such bidirectional conversations,
but two pipes can. One pipe can be used to pass requests to a program and
another can be used to ship replies back to the requestor.
By spawning command-line programs with streams attached by pipes, systems
can add new interfaces to legacy programs.
'''
"""
spawn a child process/program, connect my stdin/stdout to child process's
stdout/stdin--my reads and writes map to output and input streams of the
spawned program; much like tying together streams with subprocess module;
"""
import os, sys
def spawn(prog, *args): # pass progname, cmdline args
stdinFd = sys.stdin.fileno() # get descriptors for streams
stdoutFd = sys.stdout.fileno() # normally stdin=0, stdout=1
parentStdin, childStdout = os.pipe() # make two IPC pipe channels
    childStdin, parentStdout = os.pipe()  # pipe returns (inputfd, outputfd)
pid = os.fork() # make a copy of this process
if pid:
os.close(childStdout) # in parent process after fork:
os.close(childStdin) # close child ends in parent
os.dup2(parentStdin, stdinFd) # my sys.stdin copy = pipe1[0]
os.dup2(parentStdout, stdoutFd) # my sys.stdout copy = pipe2[1]
else:
os.close(parentStdin) # in child process after fork:
os.close(parentStdout) # close parent ends in child
os.dup2(childStdin, stdinFd) # my sys.stdin copy = pipe2[0]
os.dup2(childStdout, stdoutFd) # my sys.stdout copy = pipe1[1]
args = (prog,) + args
os.execvp(prog, args) # new program in this process
assert False, 'execvp failed!' # os.exec call never returns here
if __name__ == '__main__':
mypid = os.getpid()
spawn('python', 'pipes-testchild.py', 'spam') # fork child program
print('Hello 1 from parent', mypid) # to child's stdin
sys.stdout.flush() # subvert stdio buffering
reply = input() # from child's stdout
sys.stderr.write('Parent got: "%s"\n' % reply) # stderr not tied to pipe!
print('Hello 2 from parent', mypid)
sys.stdout.flush()
reply = sys.stdin.readline()
sys.stderr.write('Parent got: "%s"\n' % reply[:-1])
"""
The spawn function in this module does not work on standard Windows Python (remember
that fork isn’t yet available there today).
# Unix concepts
os.fork
Copies the calling process as usual and returns the child’s process ID in the parent
process only.
os.execvp
Overlays a new program in the calling process; it’s just like the os.execlp used
earlier but takes a tuple or list of command-line argument strings (collected with
the *args form in the function header).
os.pipe
Returns a tuple of file descriptors representing the input and output ends of a pipe,
as in earlier examples.
os.close(fd)
Closes the descriptor-based file fd.
os.dup2(fd1,fd2)
Copies all system information associated with the file named by the file descriptor
fd1 to the file named by fd2.
In terms of connecting standard streams, os.dup2 is the real nitty-gritty here. For example,
the call os.dup2(parentStdin,stdinFd) essentially assigns the parent process’s
stdin file to the input end of one of the two pipes created; all stdin reads will henceforth
come from the pipe.
"""
|
def lastk(head, k):
    """Return the k-th node from the end of a singly linked list."""
    slow, fast = head, head
    # Advance fast k nodes ahead of slow.
    while k > 0:
        fast = fast.next
        k -= 1
    # Move both pointers until fast falls off the end;
    # slow is then exactly k nodes from the end.
    while fast is not None:
        slow = slow.next
        fast = fast.next
    return slow
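
# A self-contained usage sketch (the ListNode class below is an assumption, not
# part of the original snippet): build 1 -> 2 -> 3 -> 4 -> 5 and ask for the
# 2nd node from the end, which should be the node holding 4.
class ListNode:
    def __init__(self, val, next=None):
        self.val = val
        self.next = next

if __name__ == "__main__":
    head = None
    for val in (5, 4, 3, 2, 1):
        head = ListNode(val, head)   # prepend, yielding 1 -> 2 -> 3 -> 4 -> 5
    print(lastk(head, 2).val)        # expected output: 4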
|
from core.Model import *
from core.Utils import Utils
class AppVersion(Base, Model):
__tablename__ = 'app_version'
id = Column(Integer, primary_key = True, autoincrement=True)
version = Column(Float, nullable=False)
created = Column(DateTime, default = Utils.time())
formatters = {
"created": Utils.date_formatter
}
@staticmethod
def get_actual_version():
return AppVersion.max("version")
@staticmethod
def get_actual_version_class():
return AppVersion.get(AppVersion.max("id"))
|
import yaml
from lib.dashboard import Dashboard
from lib.widgets.wmata_widget import WMATAWidget
from lib.widgets.weather_widget import WeatherWidget
from lib.widgets.calendar_widget import CalendarWidget
from lib.widgets.rss_widget import RSSWidget
from lib.widgets.message_widget import MessageWidget
class DashboardConfig:
def __init__(self, path, args):
self.path = path
self.load_config()
if len(args) > 1:
self.config["output"] = args[1]
def load_config(self):
with open(self.path, 'r') as file:
self.config = yaml.load(file.read(), Loader=yaml.SafeLoader)
def get(self, key):
if key not in self.config:
raise Exception("Key \"%s\" not in config!" % (key))
return self.config[key]
def generate_dashboard(self):
text_global_config = self.get("text")
font_size = text_global_config["font_size"]
font_path = text_global_config["font_path"]
dashboard_config = self.get("dashboard")
return Dashboard(
dashboard_config["width"],
dashboard_config["height"],
dashboard_config["rows"],
dashboard_config["cols"],
dashboard_config["gutter"],
dashboard_config["show_status"],
font_size,
font_path
)
def generate_widgets(self):
widget_global_config = self.get("widget")
text_global_config = self.get("text")
font_size = text_global_config["font_size"]
padding = widget_global_config["padding"]
font_path = text_global_config["font_path"]
widgets = self.get("widgets")
widget_instances = []
for widget_config in widgets:
widget = self.init_widget(widget_config["type"], font_size, font_path, padding, widget_config)
widget_instances.append(widget)
return widget_instances
def init_widget(self, widget_type, font_size, font_path, padding, config):
if widget_type == "calendar":
return CalendarWidget(font_size, font_path, padding, config)
elif widget_type == "wmata":
return WMATAWidget(font_size, font_path, padding, config)
elif widget_type == "weather":
return WeatherWidget(font_size, font_path, padding, config)
elif widget_type == "rss":
return RSSWidget(font_size, font_path, padding, config)
elif widget_type == "message":
return MessageWidget(font_size, font_path, padding, config)
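
# Hypothetical usage sketch (file name, CLI args, and the add_widget call are
# assumptions about the surrounding lib.* API, not shown in this file):
#
#   import sys
#   config = DashboardConfig("config.yaml", sys.argv)
#   dashboard = config.generate_dashboard()
#   for widget in config.generate_widgets():
#       dashboard.add_widget(widget)   # assumed Dashboard API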
|
import subprocess
from manifestManager import ManifestManager
class DiskManager(object):
def __init__(self, configure):
self.imagesLocation = configure.VM_IMAGES_LOCATION
self.vmLocation = configure.INSTANTIATED_VM_LOCATION
self.manifestManager = ManifestManager(configure)
def retriveDisk(self, nfID, localNFID):
'''
Retrieve the Disk for a NF
'''
manifest = self.manifestManager.getManifest(nfID)
subprocess.call(["qemu-img", "create", "-b", self.imagesLocation+manifest['disk'], "-f", "qcow2", self.vmLocation+localNFID+".qcow2"])
return self.vmLocation+localNFID+".qcow2"
def removeLocalDisk(self, localNFID):
'''
Remove a Disk from the local drive
'''
subprocess.call(["rm", "-f", self.vmLocation+localNFID+".qcow2"])
|
from datetime import date
from django.shortcuts import render
from django.db.models import Count
from django.http import HttpResponseRedirect
# Create your views here.
from .models import Book, Author, BookInstance, Genre, Loan
def toggle_available_only(request):
if "available_only" in request.session:
request.session["available_only"] = not request.session["available_only"]
else:
request.session["available_only"] = True
return HttpResponseRedirect(request.GET.get('next', "/books"))
def index(request):
"""View function for home page of site."""
# Generate counts of some of the main objects
num_books = Book.objects.all().count()
num_instances = BookInstance.objects.all().count()
# Available copies of books
num_instances_available = num_instances - Loan.objects.filter(return_date__isnull=True).count()
num_authors = Author.objects.count() # The 'all()' is implied by default.
# Render the HTML template index.html with the data in the context variable.
return render(
request,
'index.html',
context={'num_books': num_books, 'num_instances': num_instances,
'num_instances_available': num_instances_available, 'num_authors': num_authors,
},
)
from django.views import generic
class BookListView(generic.ListView):
"""Generic class-based view for a list of books."""
model = Book
paginate_by = 12
class BookDetailView(generic.DetailView):
"""Generic class-based detail view for a book."""
model = Book
class AuthorListView(generic.ListView):
"""Generic class-based list view for a list of authors."""
model = Author
paginate_by = 20
queryset = Author.objects.all().annotate(book_count=Count("book"))
class AuthorDetailView(generic.DetailView):
"""Generic class-based detail view for an author."""
model = Author
class GenreListView(generic.ListView):
model = Genre
paginate_by = 20
queryset = Genre.objects.all().annotate(book_count=Count("book"))
class GenreDetailView(generic.DetailView):
model = Genre
from django.contrib.auth.mixins import LoginRequiredMixin
class LoanedBooksByUserListView(LoginRequiredMixin, generic.ListView):
"""Generic class-based view listing books on loan to current user."""
model = Loan
template_name = 'catalog/bookinstance_list_borrowed_user.html'
paginate_by = 10
def get_queryset(self):
return Loan.objects.filter(borrower=self.request.user).order_by('due_date')
# Added as part of challenge!
from django.contrib.auth.mixins import PermissionRequiredMixin
class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):
"""Generic class-based view listing all books on loan. Only visible to users with can_mark_returned permission."""
model = BookInstance
permission_required = 'catalog.can_mark_returned'
template_name = 'catalog/bookinstance_list_borrowed_all.html'
paginate_by = 10
def get_queryset(self):
return Loan.objects.order_by('due_date')
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
import datetime
from django.contrib.auth.decorators import login_required, permission_required
# from .forms import RenewBookForm
from catalog.forms import RenewBookForm
@login_required
@permission_required('catalog.can_mark_returned', raise_exception=True)
def renew_book_librarian(request, pk):
"""View function for renewing a specific BookInstance by librarian."""
book_instance = get_object_or_404(BookInstance, pk=pk)
# If this is a POST request then process the Form data
if request.method == 'POST':
# Create a form instance and populate it with data from the request (binding):
form = RenewBookForm(request.POST)
# Check if the form is valid:
if form.is_valid():
# process the data in form.cleaned_data as required (here we just write it to the model due_back field)
book_instance.due_back = form.cleaned_data['renewal_date']
book_instance.save()
# redirect to a new URL:
return HttpResponseRedirect(reverse('all-borrowed'))
# If this is a GET (or any other method) create the default form
else:
proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks=1)
form = RenewBookForm(initial={'renewal_date': proposed_renewal_date})
context = {
'form': form,
'book_instance': book_instance,
}
return render(request, 'catalog/book_renew_librarian.html', context)
from django.contrib import messages
@login_required
def reserve_book(request, pk):
"""View function for reserving a book."""
if request.user.loan_set.filter(reserved_date__isnull=False, return_date__isnull=True).count() >= request.user.max_books:
messages.error(request, 'Already reached the maximum number of {} Reserved books.'.format(request.user.max_books))
else:
        book_instance = get_object_or_404(BookInstance, pk=pk)
if book_instance.status != "Available":
messages.error(request, 'Book not available')
else:
loan = Loan(book_instance=book_instance, borrower=request.user, reserved_date=date.today())
loan.save()
return HttpResponseRedirect(reverse('my-borrowed'))
@login_required
def cancel_reservation(request, pk):
loan = get_object_or_404(Loan, pk=pk)
if loan.borrower == request.user and loan.is_reservation:
loan.return_date = date.today()
loan.save()
return HttpResponseRedirect(reverse('my-borrowed'))
|
def test_import_databand():
print("Starting Import")
import dbnd
str(dbnd)
def test_import_airflow_settings():
print("Starting Import")
import airflow.settings
str(airflow.settings)
|
from __future__ import annotations
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any
from .modifiers import Passive
from .stats import Stats
class Item:
def __init__(self, name, cost, stat, passives=None) -> None:
self.name = name
self.cost = cost
self.stat = stat
        self.passives = passives if passives is not None else []
def __str__(self) -> str:
return f"{self.name} | {self.cost}g\n{self.stat}"
def __repr__(self) -> str:
return str(self)
class Inventory:
# deliberately limit constructor to 6 items.
def __init__(self, item1: Item=None, item2: Item = None, item3: Item = None,
item4: Item = None, item5: Item = None, item6: Item = None) -> None:
self.items = list(filter(None, [item1, item2, item3, item4, item5, item6]))
self.current_stats = sum(map(lambda x: x.stat, self.items), Stats())
    def get_all_unique_passives(self) -> List[Passive]:
        out = set()
        for item in self.items:
            # set.update handles items with zero or several passives; set.add(*...) would not
            out.update(item.passives)
        return list(out)
    # forward the attributes from current_stats so inventory can be used as a stats object
    # TODO: this feels hacky
    def __getattribute__(self, name: str) -> Any:
        try:
            return super().__getattribute__(name)
        except AttributeError:
            return self.current_stats.__dict__[name]
from .modifiers import SteelTipped
from .modifiers import IcathianBite
from .modifiers import SpawnBuff
from .modifiers import BringItDown
from .modifiers import GiantSlayer
'''AD Base Items'''
Long_Sword = Item("Long Sword", 350, Stats(ad=10))
Pickaxe = Item("Pickaxe", 875, Stats(ad=25))
BFSword = Item("B. F. Sword", 1300, Stats(ad=40))
'''AP Base Items'''
AmplifyingTome = Item("Amplifying Tome", 350, Stats(ap=20))
BlastingWand = Item("Blasting Wand", 850, Stats(ap=40))
NeedlesslyLargeRod = Item("Needlessly Large Rod", 1300, Stats(ap=60))
'''ASPD Base Items'''
Dagger = Item("Dagger", 300, Stats(aspd=25))
RecurveBow = Item("Recurve Bow", 1000, Stats(aspd=25), passives=[SteelTipped()])
'''Crit Chance'''
CloakOfAgility = Item("Cloak of Agility", 600, Stats(cs=15))
'''Hp'''
RubyCrystal = Item("Ruby Crystal", 400, Stats(hp=150))
GiantsBelt = Item("Giant's Belt", 900, Stats(hp=350))
'''Armour '''
ClothArmor = Item("Cloth Armor", 300, Stats(ar=15))
ChainVest = Item("Chain Vest", 800, Stats(ar=40))
'''Magic Resist'''
NullMagicMantle = Item("Null-Magic Mantle", 450, Stats(mr=25))
NegatronCloak = Item("Negatron Cloak", 900, Stats(mr=50))
'''marksmen items'''
KrakenSlayer = Item("Kraken Slayer", 3400, Stats(ad=65, aspd=25, cs=20), passives=[BringItDown()])
LordDominiksRegards = Item("Lord Dominik's Regards", 3000, Stats(ad=30,cs=20,arp=35), passives=[GiantSlayer()])
''' Sheen Items '''
from .buff import SheenSpellBlade, TriforceSpellBlade, LichBaneSpellBlade
Sheen = Item("Sheen", 700, Stats(), passives=[SpawnBuff(toSpawn=SheenSpellBlade())])
Triforce = Item("Trinity Force", 3333, Stats(ad=30, ah=20, aspd=30, hp=200), passives=[SpawnBuff(toSpawn=TriforceSpellBlade())])
# TODO: MS
LichBane = Item("Lich Bane", 3000, Stats(ap=70), passives=[SpawnBuff(toSpawn=LichBaneSpellBlade())])
NashorsTooth = Item("Nashor's Tooth", 3000, Stats(ap=100, aspd=50), passives=[IcathianBite()])
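
# A small usage sketch (illustrative, relying only on names defined above):
#
#   inv = Inventory(Long_Sword, Dagger, CloakOfAgility)
#   inv.current_stats                 # summed Stats of the three items
#   inv.ad                            # falls back to current_stats via __getattribute__
#   inv.get_all_unique_passives()     # [] here, since none of these items has passives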
|
from .AbbreviationLegalFormNormalizer import AbbreviationLegalFormNormalizer
from .AndNormalizer import AndNormalizer
from .CharacterNormalizer import CharacterNormalizer
from .CommonAbbreviationNormalizer import CommonAbbreviationNormalizer
from .KeepOtherWordsNormalizer import KeepOtherWordsNormalizer
from .MisplacedCharacterNormalizer import MisplacedCharacterNormalizer
from .OtherWordsAbbreviationNormalizer import OtherWordsAbbreviationNormalizer
from .SplitNormalizer import SplitNormalizer
from .StripNormalizer import StripNormalizer
from .TokenCategoryNormalizer import TokenCategoryNormalizer
from .UnicodeNormalizer import UnicodeNormalizer
|
import asyncio
from datetime import datetime
from nats.aio.client import Client as NATS
async def run(loop):
nc = NATS()
await nc.connect(loop=loop)
async def message_handler(msg):
with open('received.jpg', 'wb') as f:
f.write(msg.data)
await nc.subscribe('webcam', cb=message_handler, is_async=True)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(run(loop))
loop.run_forever()
loop.close()
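
# A matching publisher sketch (hypothetical, not part of the original file): read a
# JPEG from disk and publish its bytes on the same 'webcam' subject this script
# subscribes to.
#
#   async def publish(loop):
#       nc = NATS()
#       await nc.connect(loop=loop)
#       with open('frame.jpg', 'rb') as f:
#           await nc.publish('webcam', f.read())
#       await nc.flush()
#       await nc.close()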
|
import pytest
from core.location.mobility import WayPoint
POSITION = (0.0, 0.0, 0.0)
class TestMobility:
@pytest.mark.parametrize(
"wp1, wp2, expected",
[
(WayPoint(10.0, 1, POSITION, 1.0), WayPoint(1.0, 2, POSITION, 1.0), False),
(WayPoint(1.0, 1, POSITION, 1.0), WayPoint(10.0, 2, POSITION, 1.0), True),
(WayPoint(1.0, 1, POSITION, 1.0), WayPoint(1.0, 2, POSITION, 1.0), True),
(WayPoint(1.0, 2, POSITION, 1.0), WayPoint(1.0, 1, POSITION, 1.0), False),
],
)
def test_waypoint_lessthan(self, wp1, wp2, expected):
assert (wp1 < wp2) == expected
|