content stringlengths 5 1.05M |
|---|
def function(x, y, food="spam"):
    """Print *food*; demonstrates how default argument values work."""
    print(food)


# x=1, y=2; no argument is passed for ``food``, so its default "spam"
# is used and printed.
function(1, 2)

# An explicit third argument overrides the default: "egg" is printed.
function(3, 4, "egg")

# In short: when an argument is passed in, the default value is ignored;
# when it is omitted, the default value ("spam") is used instead.
|
import json
from datetime import datetime
from flask import request, Response, Blueprint, jsonify
from flask_login import current_user
from c3bottles import app, db
from c3bottles.model.drop_point import DropPoint
from c3bottles.model.report import Report
from c3bottles.model.visit import Visit
bp = Blueprint("api", __name__)
@bp.route("/api", methods=("POST", "GET"))
def process():
    """Dispatch an API call based on the ``action`` request parameter."""
    action = request.values.get("action")
    handlers = {"report": report, "visit": visit, "dp_json": dp_json}
    if action in handlers:
        return handlers[action]()
    # Unknown or missing action: reply with a JSON error and HTTP 400.
    return Response(
        json.dumps(
            "Invalid or missing API action.",
            indent=4 if app.debug else None
        ),
        mimetype="application/json",
        status=400
    )
@bp.route("/api/all_dp.json", methods=("POST", "GET"))
def all_dp():
    # Convenience endpoint: same payload as the "dp_json" API action.
    return dp_json()
@bp.route("/api/map_source.json")
def map_source():
    """Return the configured map tile source settings as JSON."""
    cfg = app.config.get('MAP_SOURCE', {})
    # (key, fallback) pairs mirroring the expected MAP_SOURCE entries.
    defaults = (
        ("attribution", ''),
        ("tileserver", ''),
        ("tileserver_subdomains", []),
        ("bounds", None),
        ("initial_view", None),
        ("level_config", None),
        ("min_zoom", 0),
        ("max_zoom", 0),
        ("simple_crs", False),
        ("hack_257px", False),
        ("tms", False),
        ("no_wrap", False),
    )
    return jsonify({key: cfg.get(key, fallback) for key, fallback in defaults})
def report():
    """Handle a drop point state report submitted through the API."""
    if not current_user.can_report:
        # Anonymous users or users without reporting rights get a 401.
        return Response(
            json.dumps(
                [{"msg": "Not logged in or insufficient privileges."}],
                indent=4 if app.debug else None
            ),
            mimetype="application/json",
            status=401
        )
    number = request.values.get("number")
    try:
        Report(
            dp=DropPoint.query.get(number),
            state=request.values.get("state")
        )
    except ValueError as e:
        # Model validation failed; forward its messages with HTTP 400.
        return Response(
            json.dumps(e.args, indent=4 if app.debug else None),
            mimetype="application/json",
            status=400
        )
    db.session.commit()
    return Response(
        DropPoint.get_dp_json(number),
        mimetype="application/json"
    )
def visit():
    """Handle a drop point visit (maintenance action) submitted via the API."""
    if not current_user.can_visit:
        # Anonymous users or users without visiting rights get a 401.
        return Response(
            json.dumps(
                [{"msg": "Not logged in or insufficient privileges."}],
                indent=4 if app.debug else None
            ),
            mimetype="application/json",
            status=401
        )
    number = request.values.get("number")
    try:
        Visit(
            dp=DropPoint.query.get(number),
            action=request.values.get("maintenance")
        )
    except ValueError as e:
        # Model validation failed; forward its messages with HTTP 400.
        return Response(
            json.dumps(e.args, indent=4 if app.debug else None),
            mimetype="application/json",
            status=400
        )
    db.session.commit()
    return Response(
        DropPoint.get_dp_json(number),
        mimetype="application/json"
    )
def dp_json():
    """Return drop points as JSON, optionally filtered by timestamp ``ts``."""
    ts = request.values.get("ts")
    if not ts:
        # No timestamp given: return the full drop point list.
        return Response(DropPoint.get_dps_json(), mimetype="application/json")
    try:
        dps = DropPoint.get_dps_json(
            time=datetime.fromtimestamp(float(ts))
        )
    except ValueError as e:
        # ``ts`` was not a valid UNIX timestamp.
        return Response(
            json.dumps(e.args, indent=4 if app.debug else None),
            mimetype="application/json",
            status=400
        )
    return Response(dps, mimetype="application/json")
|
import argparse
import numpy as np
from hdf5zarr import HDF5Zarr
import h5py
import time

# Benchmark: time (1) opening an HDF5 file through the read-only S3 (ros3)
# driver, (2) looking up a dataset, and (3) reading a selection from it.
parser = argparse.ArgumentParser()
# NOTE(security): ``eval`` builds a numpy index expression from the raw CLI
# string; only invoke this script with trusted arguments.
parser.add_argument('--selection', type=lambda s: eval('np.index_exp[' + s + ']'), default=':')
parser.add_argument('--dsetname', type=str, default='')
parser.add_argument('--url', type=str, default='')
args = parser.parse_args()

indexing = args.selection
dsetname = args.dsetname
url = args.url

start_time_file = time.time()
hf = h5py.File(url, 'r', driver='ros3')
end_time_file = time.time()

start_time_dset = time.time()
hgroup = hf[dsetname]
end_time_dset = time.time()

start_time = time.time()
arr = hgroup[indexing]
end_time = time.time()

# Fix: close the file handle so the ros3 connection is released
# (the selection has already been materialized into ``arr``).
hf.close()

print(f'time h5py_ros3 {end_time-start_time}')
print(f'time h5py_ros3 instantiate dataset {end_time_dset-start_time_dset}')
print(f'time h5py_ros3 instantiate file {end_time_file-start_time_file}')
|
from benchmarks.moo_bb_func import get_bb_func
from entmoot_moo.optimizer import Optimizer

# Generate feasible initial points for the multi-objective benchmark
# problems and store them per problem / per seed in a JSON file.
# iterate through different problem names to
for bb_name in ['Battery', 'Windfarm']:
    bb_func = get_bb_func(bb_name)

    # we consider 25 different rnd states in this example
    for itr in range(25):
        seed = 101 + itr

        # consider the windfarm example
        if bb_func.name == 'Windfarm':
            opt_core = bb_func.get_opt_core()
            opt = Optimizer(
                bb_func.bounds,
                model="ENTING",
                model_unc="BDD",
                random_state=seed,
                opt_core=opt_core
            )

            # generate initial points
            x_init = []
            for i_init in range(16):
                # gets a model core with all problem constraints
                temp_opt_core = bb_func.get_opt_core()

                # pertubation in placement of first turbine to generate
                # varying solutions
                if i_init == 0:
                    import random
                    random.seed(seed)
                    pert = random.uniform(0, 0.5)
                    temp_opt_core.addConstr(
                        temp_opt_core._cont_var_dict[0] == 0.5 + pert
                    )
                    temp_opt_core.addConstr(
                        temp_opt_core._cont_var_dict[16] == 0.5 + pert
                    )

                # activate different turbines at every iteration
                for var in range(0, i_init + 1):
                    temp_opt_core.addConstr(
                        temp_opt_core._cont_var_dict[var + 32] == 1
                    )
                for var in range(i_init + 1, 16):
                    temp_opt_core.addConstr(
                        temp_opt_core._cont_var_dict[var + 32] == 0
                    )

                # solve the problem with maximized distance between points
                x_init.append(
                    opt.ask_feas_samples(
                        1,
                        opt_core=temp_opt_core,
                        init_samples=x_init
                    )
                )

        elif bb_func.name == 'Battery':
            opt_core = bb_func.get_opt_core()
            opt = Optimizer(
                bb_func.bounds,
                model="ENTING",
                model_unc="BDD",
                random_state=seed,
                opt_core=opt_core
            )

            # generate initial points
            x_init = []
            import random
            random.seed(seed)
            for i_init in range(10):
                temp_opt_core = bb_func.get_opt_core()

                # pick param set and randomized c-rate; parameter sets 0..4
                # each appear twice across the 10 initial points
                if i_init == 0 or i_init == 5:
                    param_set = 0
                    set_ub_c_rate = 3.2
                elif i_init == 1 or i_init == 6:
                    param_set = 1
                    set_ub_c_rate = 2.2
                elif i_init == 2 or i_init == 7:
                    param_set = 2
                    set_ub_c_rate = 8.2
                elif i_init == 3 or i_init == 8:
                    param_set = 3
                    set_ub_c_rate = 5.2
                elif i_init == 4 or i_init == 9:
                    param_set = 4
                    set_ub_c_rate = 8.2

                lb_c_rate = bb_func.bounds[1][0]
                ub_c_rate = bb_func.bounds[1][1]
                c_rate = random.uniform(
                    lb_c_rate, int(set_ub_c_rate))
                range_c_rate = ub_c_rate - lb_c_rate

                # fix the chosen parameter set and the normalized c-rate
                temp_opt_core.addConstr(
                    temp_opt_core._cat_var_dict[0][param_set] == 1
                )
                temp_opt_core.addConstr(
                    temp_opt_core._cont_var_dict[1] == \
                    (c_rate - lb_c_rate) / range_c_rate
                )
                print(f"* * * param_set: {param_set}")
                print(f"* * * c_rate: {c_rate}")

                x_init.append(
                    opt.ask_feas_samples(
                        1,
                        opt_core=temp_opt_core,
                        init_samples=x_init
                    )
                )

        # flatten the sampled points to plain float lists for JSON storage
        trafo_x_init = []
        for x in x_init:
            x_temp = [float(xi) for xi in x]
            trafo_x_init.append(x_temp)

        import os
        # create the results folder and an empty JSON store on first run
        # NOTE(review): the exclusive-create open(..., "x") is assumed to be
        # inside this branch; outside it, the second iteration would raise
        # FileExistsError — confirm against the original indentation.
        if not os.path.exists('moo_results'):
            os.makedirs('moo_results')
            f = open("moo_results/bb_init.json", "x")
            f.write("{}")
            f.close()

        import json
        with open("moo_results/bb_init.json") as json_file:
            init_dict = json.load(json_file)

        if bb_func.name not in init_dict.keys():
            init_dict[bb_func.name] = {}
        init_dict[bb_func.name][seed] = trafo_x_init

        with open("moo_results/bb_init.json", 'w') as json_file:
            json.dump(init_dict, json_file, indent=4)

        print(f"... save rnd seed: {seed}")
|
"""
Script to generate random user data in RWTHOnline .csv format
Author: L. Lamm (lamm@ifam.rwth-aachen.de)
"""
from miraculix.participant import Participant
from miraculix.utils import write_csv

# Generate a list of random participants.
participant_list = Participant.generate_random_participant_list(50)

# Header row expected by the RWTHOnline .csv format.
header = ['STUDY_PROGRAMME',
          'CODE_OF_STUDY_PROGRAMME',
          'Studienplan_Version',
          'SPO_KKONTEXT',
          'REGISTRATION_NUMBER',
          'FAMILY_NAME_OF_STUDENT',
          'FIRST_NAME_OF_STUDENT',
          'GESCHLECHT',
          'DATE_OF_ASSESSMENT',
          'GUEL_U_AKTUELLE_ANTRITTE_SPO',
          'GRADE',
          'REMARK',
          'Number_Of_The_Course',
          'SEMESTER_OF_The_COURSE',
          'COURSE_TITLE',
          'Examiner',
          'Start_Time',
          'TERMIN_ORT',
          'DB_Primary_Key_Of_Exam',
          'DB_Primary_Key_Of_Candidate',
          'COURSE_GROUP_NAME',
          'FILE_REMARK',
          'EMAIL_ADDRESS',
          'ECTS_GRADE',
          'Information']

# One dummy exam-registration row per participant; constant columns hold
# placeholder ("Muster") values.
file_data = [header]
for participant in participant_list:
    file_data.append(['Masterstudium - Musterstudiengang',
                      '9999 99 999',
                      '3000',
                      '[3000] Musteringenieurwesen',
                      str(participant.Matriculation),
                      str(participant.Lastname),
                      str(participant.Firstname),
                      'D',
                      '01.01.3000',
                      str(participant.NumberOfTrials),
                      '',
                      '',
                      '99PV99999',
                      '00W',
                      'Musterwissenschaften',
                      'Max Mustermann Dr.-Ing. Univ.-Prof.',
                      '12:00',
                      'BS I (2131|101), <br>BS II (2131|102)',
                      '999999',
                      '999999',
                      '',
                      '',
                      str(participant.Lastname) + '(at)rwth-aachen.de',
                      ''])

# Export data to .csv file.
write_csv('./data/example_inputdata.csv', file_data)
|
# -*- coding: utf-8 -*-

# Tongue-twister fixtures used as fake text bodies.
MOCK_TEXT_1 = "The seething sea ceaseth and thus the seething sea sufficeth us."

MOCK_TEXT_2 = (
    "She sells sea shells on the sea shore. "
    "The shells she sells are sea shells, I'm sure. "
    "For if she sells sea shells on the sea shore "
    "Then I'm sure she sells sea shore shells."
)

MOCK_TEXT_3 = (
    "I can't believe that she was a sea shell seller! "
    "—— I really can't! !!!!! "
)

# A fake reddit submission in dict form.
MOCK_SUBMISSION = {
    'permalink': 'https://www.reddit.com/r/fake/comments/000000/submission_title/',
    'score': 100,
    'author': 'fakeuser1',
    'num_comments': 500,
    'downs': 0,
    'title': 'Submission title',
    'created_utc': 1415713246.0,
    'subreddit_id': 't5_000000',
    'ups': 100,
    'selftext': '',
    'fullname': 't3_aaaaaa',
    'archived': True,
    'id': 'aaaaaa',
}

# A positively-voted comment on the fake submission.
MOCK_COMMENT1 = {
    'body': 'This is an amazing comment. This app has excellent features!',
    'submission_id': 'aaaaaa',
    'name': 't1_bbbbbb',
    'author': 'fakeuser1',
    'downs': 0,
    'created_utc': 1427062942.0,
    'ups': 2,
    'parent_id': 't3_aaaaaa',
    'score': 2,
    'id': 'bbbbbb',
}

# A negatively-voted comment on the fake submission.
MOCK_COMMENT2 = {
    'body': 'I hate this subreddit! It is full of disgusting people.',
    'submission_id': 'aaaaaa',
    'name': 't1_cccccc',
    'author': 'fakeuser2',
    'downs': 1,
    'created_utc': 1427063543.0,
    'ups': -3,
    'parent_id': 't3_aaaaaa',
    'score': -2,
    'id': 'cccccc',
}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2020 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Test helpers.
"""
import platform
from pathlib import Path
import pytest
from PIL import ImageChops, ImageFont
from unittest.mock import mock_open
# Skip messages used when hardware-specific modules are unavailable.
rpi_gpio_missing = 'RPi.GPIO is not supported on this platform: {}'.format(platform.system())
spidev_missing = 'spidev is not supported on this platform: {}'.format(platform.system())
def get_reference_file(fname):
    """
    Get absolute path for ``fname`` inside the ``reference`` directory
    next to this module.

    :param fname: Filename.
    :type fname: str or pathlib.Path
    :rtype: str
    """
    here = Path(__file__).resolve().parent
    return str(here / 'reference' / fname)
def get_reference_image(fname):
    """
    Get absolute path of a reference image stored under ``reference/images``.

    :param fname: Filename.
    :type fname: str or pathlib.Path
    """
    return get_reference_file(Path('images') / fname)
def get_reference_font(fname, fsize=12):
    """
    Load a truetype reference font at the given point size.

    :param fname: Filename of the font.
    :type fname: str or pathlib.Path
    :param fsize: Point size; defaults to 12.
    """
    font_path = get_reference_file(Path('font') / fname)
    return ImageFont.truetype(font_path, fsize)
def get_reference_pillow_font(fname):
    """
    Load a :py:class:`PIL.ImageFont` bitmap font from the reference fonts.

    :param fname: The name of the file that contains the PIL.ImageFont
    :type fname: str
    :rtype: :py:class:`PIL.ImageFont`
    """
    font_path = get_reference_file(Path('font') / fname)
    return ImageFont.load(font_path)
def get_spidev():
    """Return the ``spidev`` module, or skip the test where it is missing."""
    try:
        import spidev
    except ImportError:
        pytest.skip(spidev_missing)
    else:
        return spidev
def assert_identical_image(reference, target, img_path):
    """
    Assert that two PIL images have no differing pixels.

    :param img_path: Location of image (used in the failure message).
    :type img_path: str
    """
    diff_bbox = ImageChops.difference(reference, target).getbbox()
    assert diff_bbox is None, f'{img_path} is not identical to generated image'
def i2c_error(path_name, err_no):
    """
    Build a fake ``open`` that always raises a pre-built ``OSError``.

    :param path_name: Filename recorded on the raised error.
    :param err_no: ``errno`` recorded on the raised error.
    """
    err = OSError()
    err.errno = err_no
    err.filename = path_name

    def fake_open(a, b):
        raise err
    return fake_open
def fib(n):
    """Yield the first *n* Fibonacci numbers, starting at 0."""
    prev, curr = 0, 1
    for _ in range(n):
        yield prev
        prev, curr = curr, prev + curr
# Attribution: https://gist.github.com/adammartinez271828/137ae25d0b817da2509c1a96ba37fc56
def multi_mock_open(*file_contents):
    """Create a mock "open" that will mock open multiple files in sequence

    Args:
        *file_contents ([str]): a list of file contents to be returned by open

    Returns:
        (MagicMock) a mock opener that will return the contents of the first
        file when opened the first time, the second file when opened the
        second time, etc.
    """
    mocks = [mock_open(read_data=text) for text in file_contents]
    opener = mocks[-1]
    # Each successive call to the opener yields the next file handle.
    opener.side_effect = [m.return_value for m in mocks]
    return opener
def skip_unsupported_platform(err):
    """Skip the current test, reporting *err*'s type and message."""
    pytest.skip('{} ({})'.format(type(err).__name__, str(err)))
def _positional_args_list(mock):
return [call[0] for call in mock.call_args_list]
def assert_only_cleans_whats_setup(gpio):
    """
    Assert that ``gpio.cleanup`` was only called for pins previously
    configured via ``gpio.setup``, and never without an explicit pin.

    :param gpio: Mocked GPIO module whose ``setup``/``cleanup`` record calls.
    """
    setups = [call[0] for call in gpio.setup.call_args_list]
    cleanups = [call[0] for call in gpio.cleanup.call_args_list]
    for cleanup in cleanups:
        assert len(cleanup) > 0, 'calling gpio.cleanup without specifying pins cleans all pins'
    pins_set_up = {args[0] for args in setups}
    # Fix: derive the cleaned pins from the *cleanup* calls; the original
    # built this set from ``setups`` and compared it with itself, so the
    # final assertion could never fail.
    pins_clean = {args[0] for args in cleanups}
    assert pins_clean == pins_set_up, 'set pins {} but cleaned pins {}'.format(pins_set_up, pins_clean)
|
import collections
class Solution:
    def groupAnagrams(self, strs):
        """
        Group words that are anagrams of each other.

        Words sharing the same sorted-letter key end up in the same bucket;
        bucket order follows first appearance.

        :type strs: List[str]
        :rtype: List[List[str]]
        """
        groups = {}
        for word in strs:
            key = "".join(sorted(word))
            groups.setdefault(key, []).append(word)
        return list(groups.values())
if __name__ == "__main__":
    # Demo: group a word list into anagram buckets.
    words = ["eat", "tea", "tan", "ate", "nat", "bat"]
    print(Solution().groupAnagrams(words))
|
from DataBases.DataBaseClass import Database
from datetime import datetime
class UserInfoDatabase(Database):
    """Stores information on users including currency, user ids, and items"""

    def __init__(self, tablename='UserInformation'):
        # Backing SQLite file; the table is created on first use.
        super().__init__(filename='UserInfo.db')
        self.tablename = tablename
        # Columns: user id (primary key), currency balance, item key,
        # total games played, wins, daily streak and last daily claim time.
        self.trycreatetable('user integer PRIMARY KEY, currency integer, itemkey integer, totalgamesplayed integer, wins integer, dailystreak integer, lastdaily TEXT')
        # Maps an item key to the list of items stored under that key.
        self.useritems = {}

    def cratestartinfo(self, id):
        # Default row for a brand-new user: 500 currency, a fresh random item
        # key, zeroed counters and a 'last daily' time one day in the past
        # (so a daily claim is immediately available).
        return [id, 500, self.randomString(), 0, 0, 0, self.getdatetime("""DATETIME('now', 'localtime', '-1 day')""")]

    def updateuser(self, userid, **userchanges):
        # Apply column=value changes to the row of ``userid``.
        changestatment = self.createquerysql(userchanges, connector=', ')
        # NOTE(review): values are interpolated directly into the SQL string;
        # confirm callers never pass untrusted input (injection risk).
        self.data_navigatior.execute(f"""UPDATE {self.tablename} Set {changestatment} Where user={userid}""")
        self.data.commit()

    def checkadduser(self, userids):
        # Return the stored row for each id, creating missing users first.
        checkedandadded = []
        for id in userids:
            if not(founduser := self.checkforentery(user=id)[0]):
                # Unknown user: register an empty item list and default row.
                itemkey = self.randomString()
                self.useritems[itemkey] = []
                new = self.cratestartinfo(id)
                self.addentery(*new)
                checkedandadded.append(new)
                continue
            checkedandadded.append(founduser)
        return checkedandadded

    def getitems(self, key):
        # Items stored under ``key``; raises KeyError for unknown keys.
        return self.useritems[key]
|
# encoding=UTF-8
from __future__ import unicode_literals
from django.apps import AppConfig
class FoodConfig(AppConfig):
    # Django application configuration for the "food" app.
    name = 'food'
    # Human-readable app name shown in the admin ("food" in Chinese).
    verbose_name = '食物'
|
import json, os, sys
from pprint import pprint as print
from datetime import datetime
from datetime import date, timedelta
from collections import Counter
from collections import OrderedDict
import openpyxl
from openpyxl.worksheet.dimensions import ColumnDimension, DimensionHolder
from openpyxl.utils import get_column_letter
from openpyxl.styles import Color, PatternFill, Font, Border
from openpyxl.styles import Font
import pandas as pd
import lh3.api as lh3
try:
from home.models import UnansweredChat
from home.models import ReportMonthly
except:
pass
# LibraryH3lp API client and chat endpoint used throughout this module.
client = lh3.Client()
chats = client.chats()

# Queues answered in French.
FRENCH_QUEUES = ['algoma-fr', 'clavardez', 'laurentian-fr', 'ottawa-fr',
                 'saintpaul-fr', 'western-fr', 'york-glendon-fr']
# Queues reached via SMS/texting.
SMS_QUEUES = ['carleton-txt', 'clavardez-txt', 'guelph-humber-txt',
              'mcmaster-txt', 'ottawa-fr-txt', 'ottawa-txt',
              'scholars-portal-txt', 'western-txt', 'york-txt']
# Practice/training queues, excluded from the statistics.
PRACTICE_QUEUES = ['practice-webinars', 'practice-webinars-fr', 'practice-webinars-txt']

# Module-level accumulators filled while the report is built.
LIST_OF_HOURS = dict()
UNANSWERED_CHATS = list()
UNANSWERED_CHATS_HTML = ['<h1 align="center">UNANSWERED CHATS</h1><hr/><br/>']
def french_queues(chats):
    """Return only the chats that arrived on a French-language queue."""
    return [chat for chat in chats if chat.get('queue') in FRENCH_QUEUES]
def sms_queues(chats):
    """Return only the chats that arrived on an SMS/texting queue."""
    return [chat for chat in chats if chat.get('queue') in SMS_QUEUES]
def remove_practice_queues(chats_this_day):
    """Drop chats whose queue name contains "practice"."""
    return [chat for chat in chats_this_day
            if "practice" not in chat.get("queue")]
def select_specific_queues(chats_this_day, specific_queues):
    """Keep chats whose queue name contains the ``specific_queues`` substring."""
    return [chat for chat in chats_this_day
            if specific_queues in chat.get("queue")]
def get_chat_for_this_day(this_day):
    """Fetch all chats for the given date from the LibraryH3lp API."""
    return chats.list_day(this_day.year, this_day.month, this_day.day)
def get_daily_stats(chats_this_day, chat_not_none, today):
    """Build a one-row summary of chat volumes for ``today``.

    ``chats_this_day`` holds every chat of the day; ``chat_not_none`` only
    the answered ones. Returns a single-element list of stats dicts.
    """
    unanswered = [chat for chat in chats_this_day
                  if chat.get("accepted") is None]
    answered_count = len(chats_this_day) - len(unanswered)
    return [{
        'Date': today.strftime("%A, %b %d, %Y"),
        'Day': today.strftime("%A"),
        'Month': today.strftime("%B"),
        'Year': today.year,
        'Total chats': len(chats_this_day),
        'Total Answered Chats': answered_count,
        'Total UnAnswered Chats': len(unanswered),
        'Total French Answered': len(french_queues(chat_not_none)),
        'Total SMS Answered': len(sms_queues(chat_not_none))
    }]
def get_chat_per_hour(chat_not_none):
    """Count answered chats per hour of day, returned sorted by hour.

    Side effect: annotates each chat dict with an ``"hour"`` key.
    """
    hours = []
    for chat in chat_not_none:
        started = datetime.strptime(chat.get('started'), "%Y-%m-%d %H:%M:%S")
        chat["hour"] = started.hour
        hours.append(started.hour)
    counts = Counter(hours)
    return {hour: counts[hour] for hour in sorted(counts)}
def list_of_un_answered_chats(all_chats, this_day, queues):
    """Collect the day's unanswered chats into the module accumulators.

    Appends each unanswered chat (augmented with a ``transcript_url``) to
    ``UNANSWERED_CHATS`` and an HTML rendering plus transcript to
    ``UNANSWERED_CHATS_HTML``.

    :param all_chats: chats of the day as returned by the API.
    :param this_day: the day being processed (currently unused in the body).
    :param queues: queue descriptors used to look up queue ids.
    :return: the list of unanswered (never accepted) chats.
    """
    chats_this_day = remove_practice_queues(all_chats)
    chat_is_none = [chat for chat in chats_this_day if chat.get("accepted") == None]
    for chat in chat_is_none:
        # breakpoint()
        try:
            queue = [q for q in queues if q['name'] == chat.get('queue')]
            # Dashboard link to this chat's transcript.
            url = "https://ca.libraryh3lp.com/dashboard/queues/" + str(queue[0].get('id')) + "/calls/" + str(chat.get('guest')) + "/" + str(chat.get('id'))
            chat.update({'transcript_url': url})
            UNANSWERED_CHATS.append(chat)
            UNANSWERED_CHATS_HTML.append("<p>" + "<a target='_blank' href='" + url + "'>" + chat.get('started') + "--> " + chat.get('profile') + " --> " + chat.get('protocol') + "</a>" + "'</p>")
            # Fetch the full transcript (may legitimately be empty).
            transcript = client.one('chats', chat.get('id')).get()['transcript'] or '<h3>No transcript found</h3>'
            UNANSWERED_CHATS_HTML.append(transcript + "<hr/>")
        except:
            # Best effort: chats with missing queue/profile data are skipped.
            pass
    return chat_is_none
def main(all_chats, this_day):
    """Summarize one day of chats and record its hourly counts.

    Returns the day's stats dict, extended in place with the hourly counts;
    also merges those counts into the module-level ``LIST_OF_HOURS``.
    """
    chats_this_day = remove_practice_queues(all_chats)
    # Fix: compare against None with ``is not`` instead of ``!=``.
    chat_not_none = [chat for chat in chats_this_day
                     if chat.get("accepted") is not None]
    data = get_daily_stats(chats_this_day, chat_not_none, this_day)
    data = data[-1]
    sort_dic_hourly = get_chat_per_hour(chat_not_none)
    print(data)
    # Fix: dict.update() returns None; the previous assignment of its result
    # to an unused ``report`` variable was misleading dead code.
    data.update(sort_dic_hourly)
    LIST_OF_HOURS.update(sort_dic_hourly)
    return data
    #update_excel_file(data, sort_dic_hourly)
def unanswered_chats():
    """Build a DataFrame of the unanswered chats collected so far.

    Reads the module-level ``UNANSWERED_CHATS`` accumulator, drops columns
    that are not needed in the report and normalizes the time columns.
    """
    #print(UNANSWERED_CHATS)
    df = pd.DataFrame(UNANSWERED_CHATS)
    try:
        # Columns not needed for the report.
        del df['duration']
        del df['reftracker_id']
        del df['reftracker_url']
        del df['desktracker_id']
        del df['desktracker_url']
        del df['wait']
        del df['referrer']
        del df['ip']
        del df['accepted']
    except:
        print("error on deleting columns")
    df['started'] = pd.to_datetime(df['started'])
    df['ended'] = pd.to_datetime(df['ended'])
    df["started_time"] = df['started'].apply(lambda x: x.time())
    df["ended_time"] = None  #df['ended'].apply(lambda x:x.time())
    del df['ended']
    # Keep only a short prefix of the guest identifier.
    df["guest"] = df['guest'].apply(lambda x: x[0:7])
    df['shift'] = df['started'].dt.hour
    cols = ['id', 'guest', 'protocol', 'started', "started_time", 'shift',
            'queue', 'operator', "ended_time", 'profile', 'transcript_url']
    df = df[cols]
    # NOTE(review): sort_values is not in-place and its result is discarded;
    # confirm whether the frame was meant to be sorted before returning.
    df.sort_values(by=['id'])
    return df
def save_un_into_file(df):
    """Write the unanswered-chat report to Excel and HTML files.

    :param df: DataFrame of unanswered chats (from ``unanswered_chats``).
    """
    df.to_excel("UNANSWERED_CHATS.xlsx", index=False)
    try:
        os.remove("unanswered_chats.html")
    except OSError:
        # File did not exist yet; nothing to remove.
        pass
    # Fix: open the HTML file once instead of re-opening it per fragment.
    with open("unanswered_chats.html", "a", encoding="utf-8") as f:
        for val in UNANSWERED_CHATS_HTML:
            f.write(val)
def find_data_for_report(today=None, specific_queues=None):
    """Collect daily reports from 2021-10-12 up to ``today``.

    :param today: end of the reporting window; defaults to *now*. (Fix: the
        old ``today=datetime.now()`` default was evaluated once at import
        time, freezing the window for long-running processes.)
    :param specific_queues: optional substring used to filter queue names.
    :return: list of daily stats dicts produced by ``main``.
    """
    if today is None:
        today = datetime.now()
    queues = client.all('queues').get_list()
    # Fix: removed dead locals (month/year/day) that were assigned literal
    # values and never used.
    d0 = date(2021, 10, 12)
    d1 = date(today.year, today.month, today.day)
    delta = d1 - d0
    report = list()
    for _ in range(1, delta.days):
        all_chats = chats.list_day(d0.year, d0.month, d0.day)
        if specific_queues:
            all_chats = select_specific_queues(all_chats, specific_queues)
        report.append(main(all_chats, date(d0.year, d0.month, d0.day)))
        list_of_un_answered_chats(all_chats, date(d0.year, d0.month, d0.day), queues)
        d0 += timedelta(days=1)
    return report
def save_unanswered_chat_into_db():
    """Create or update this month's ``UnansweredChat`` database record."""
    today = datetime.now()
    # Fix: ``started_year`` was missing the double underscore, so Django
    # would reject it as a field lookup; also take a single row with
    # ``.first()`` — a QuerySet has no ``save()`` method.
    found_object = UnansweredChat.objects.filter(
        started__month=today.month, started__year=today.year).first()
    if found_object:
        found_object.content = UNANSWERED_CHATS_HTML
        found_object.save()
    else:
        UnansweredChat.objects.create(content=UNANSWERED_CHATS_HTML)
def save_daily_report_into_db(df):
    """Create or update this month's ``ReportMonthly`` database record."""
    today = datetime.now()
    # Fix: ``started_year`` was missing the double underscore, so Django
    # would reject it as a field lookup; also take a single row with
    # ``.first()`` — a QuerySet has no ``save()`` method.
    found_object = ReportMonthly.objects.filter(
        started__month=today.month, started__year=today.year).first()
    if found_object:
        found_object.content = df.astype(str)
        found_object.save()
    else:
        ReportMonthly.objects.create(content=df.astype(str))
def real_report():
    """Build the daily Excel report for the ``york`` queues.

    Fetches the data, orders and renames the columns, writes ``daily.xlsx``
    and finally tries to persist the results to the database.
    """
    report = find_data_for_report(today=datetime.now(), specific_queues="york")
    df = pd.DataFrame(report)
    sorted_hours = sorted(LIST_OF_HOURS.keys())
    # Fixed text columns, followed by the integer hour columns.
    cols = ['Date', 'Day', 'Month', 'Year',
            'Total chats',
            'Total Answered Chats',
            'Total UnAnswered Chats',
            'Total French Answered',
            'Total SMS Answered',
            ]
    cols = cols + (sorted_hours)
    # print(cols)
    df = df[cols]
    df.fillna(0, inplace=True)
    # Rename integer hour columns to "H:00:00" strings for the spreadsheet.
    for hour in sorted_hours:
        if isinstance(hour, int):
            df = df.rename(columns={hour: str(hour) + ":00:00"})
    filename = "daily.xlsx"
    df.to_excel(filename, index=False)
    try:
        #save unanswered Chats into DB with timestamps
        save_unanswered_chat_into_db()
        #Save Report DF into DB with timestamps
        save_daily_report_into_db(df)
    except:
        # Best effort: DB persistence is optional outside the Django app.
        pass
def columns_best_fit(ws: openpyxl.worksheet.worksheet.Worksheet) -> None:
    """
    Mark every column of the worksheet as best-fit width.
    """
    for col_number in range(ws.max_column):
        letter = openpyxl.utils.get_column_letter(col_number + 1)
        ws.column_dimensions[letter].bestFit = True
def resize_column():
    """Highlight key header cells and widen columns of ``daily.xlsx``."""
    wb = openpyxl.load_workbook("daily.xlsx")
    ws = wb["Sheet1"]
    # Cell background colour, see
    # https://openpyxl.readthedocs.io/en/stable/styles.html
    # Fix: removed the dark-orange and light-blue fills that were built but
    # never applied to any cell.
    light_yellow_fill = PatternFill(
        start_color='00FFFF99',
        end_color='00FFFF99',
        fill_type='solid')
    ws['F1'].fill = light_yellow_fill
    ws['H1'].fill = light_yellow_fill
    ws['I1'].fill = light_yellow_fill
    ws.column_dimensions["E"].width = 12
    for letter in ("F", "G", "H", "I"):
        ws.column_dimensions[letter].width = 24
    wb.save("daily.xlsx")
if __name__ == '__main__':
    # Build the report, then post-process the generated workbook.
    real_report()
    resize_column()

"""
Saved to Django Database
# https://stackoverflow.com/questions/37688054/saving-a-pandas-dataframe-to-a-django-model
json_list = json.loads(json.dumps(list(df.T.to_dict().values())))
for dic in json_list:
    HistoricalPrices.objects.get_or_create(**dic)
"""
|
"""Document fields."""
import abc
import re
from datetime import datetime, timedelta
from decimal import Decimal

import bson.errors
from bson import ObjectId, Decimal128

from aioodm.errors import ValidationError, StopValidation
from aioodm.utils import _Empty, import_class
__all__ = ['AnyField', 'StrField', 'EmailField', 'IntField',
'FloatField', 'DecimalField', 'DateTimeField',
'EmbDocField', 'ListField', 'RefField', 'SynonymField',
'ObjectIdField']
class Field(abc.ABC):
    """Base class for all fields.

    Attributes:
        name (str): Name of the field.
        mongo_name (str): Name of the field in mongodb.
        required (bool): Is field required.
        allow_none (bool): Can field be assigned with ``None``.
        default: Default value for field.
        choices (dict, set): Dict or set of choices for a field. If it is a
            ``dict`` keys are used as choices.
    """

    def __init__(self, *, required=True, default=_Empty, mongo_name=None,
                 name=None, allow_none=False, choices=None, field_type=None,
                 before_set=None, after_get=None):
        """Create field.

        Args:
            required (bool): Is field required. Defaults to ``True``.
            default: Default value for a field. When document has no value for
                field in ``__init__`` it try to use default value (if it is
                not ``_Empty``). Defaults to ``_Empty``.

                .. note::
                    Default value is ignored if field is not required.

                .. note::
                    Default can be a value or a callable with no arguments.
            mongo_name (str): Name of the field in MongoDB.
                Defaults to ``None``.

                .. note::
                    If ``mongo_name`` is None it is set to ``name`` of the
                    field.
            name (str): Name of the field. Should not be used explicitly as
                it is set by metaclass. Defaults to ``None``.
            allow_none (bool): Can field be assign with ``None``. Defaults
                to ``False``.
            choices (dict, set): Possible values for field. If it is a
                ``dict``, keys should be possible values. To preserve values
                order use ``collections.OrderedDict``. Defaults to ``None``.
            before_set (func(value) => value): A function to apply before a
                set operation.
            after_get (func(value) => value): A function to apply after a
                get operation.

            .. note::
                If ``choices`` are given then other constraints are ignored.
        """
        self.field_type = field_type
        # NOTE: mongo_name must be assigned before name: the ``name`` setter
        # falls back to it while mongo_name is still None.
        self.mongo_name = mongo_name
        self.name = name
        self.required = required
        self.allow_none = allow_none
        self._default = default
        self.before_set = before_set
        self.after_get = after_get
        # Dict choices are kept as-is (their keys act as the choices); any
        # other iterable is normalized to a set.
        if choices is None or isinstance(choices, dict):
            self.choices = choices
        else:
            self.choices = set(choices)
        self.validators = [self._validate_none,
                           self._validate_type]
        if self.choices is not None:
            self.validators.append(self._validate_choices)

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value
        # Default the mongodb name to the python-side field name.
        if self.mongo_name is None:
            self.mongo_name = value

    @property
    def default(self):
        # Supports both plain values and zero-argument callables.
        try:
            return self._default()
        except TypeError:  # is not callable
            return self._default

    def __get__(self, instance, instance_type):
        # Accessed on the class itself: return the descriptor object.
        if instance is None:
            return self
        try:
            value = instance._data[self.name]
        except KeyError:
            # TODO: should we try to return default here?
            value = None
        return self.after_get(value) if self.after_get else value

    def __set__(self, instance, value):
        # Optionally transform via before_set, then coerce with from_data.
        instance._data[self.name] = self.from_data(
            self.before_set(value) if self.before_set else value)

    def to_mongo(self, value):
        """Convert value to mongo format."""
        return value

    def from_mongo(self, value):
        """Convert value from mongo format to python field format."""
        return value

    def from_data(self, value):
        """Convert value from user provided data to field type.

        Args:
            value: Value provided by user.

        Returns:
            Converted value or value as is if error occured. If value is
            ``None`` return ``None``.
        """
        try:
            return None if value is None else self.field_type(value)
        except (ValueError, TypeError):
            return value

    @property
    def s(self):
        """Return mongodb name of the field.

        This property can be used wherever mongodb field's name is required.

        Example:

        .. code-block:: python

            User.q(db).find({User.name.s: 'Francesco', User.is_admin.s: True},
                            {User.posts.s: 1, User._id.s: 0})

        .. note::
            Field's ``name`` and ``mongo_name`` could be different so
            ``User.is_admin.s`` could be for example ``'isadm'``.
        """
        return self.mongo_name

    def _validate_none(self, value):
        # None is acceptable only when allow_none is set; StopValidation
        # short-circuits the remaining validators with success.
        if value is None:
            if self.allow_none:
                raise StopValidation()
            raise ValidationError('none value is not allowed')

    def _validate_type(self, value):
        if not isinstance(value, self.field_type):
            raise ValidationError('invalid value type')

    def _validate_choices(self, value):
        # A matching choice accepts the value outright (StopValidation).
        if value in self.choices:
            raise StopValidation()
        raise ValidationError("value does not match any variant")

    def validate(self, value):
        # Run all registered validators; StopValidation aborts with success,
        # ValidationError propagates to the caller.
        try:
            for func in self.validators:
                func(value)
        except StopValidation:
            return
class AnyField(Field):
    """Any type field.

    Can store any type of value. Store a value as is.
    It's up to developer if a value can be stored in mongodb.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Replace the default validator chain: no type check for "any";
        # only the None check and, when given, the choices check apply.
        validators = [self._validate_none]
        if self.choices is not None:
            validators.append(self._validate_choices)
        self.validators = validators

    def from_data(self, value):
        """Return ``value`` unchanged (no conversion for an Any field)."""
        return value
class StrField(Field):
    """String field."""

    def __init__(self, *, regex=None, allow_blank=False,
                 min_length=None, max_length=None, **kwargs):
        """Create string field.

        Args:
            regex (str): Regular expression for field's values.
                Defaults to ``None``.
            allow_blank (bool): Can field be assigned with blank string.
                Defaults to ``False``.
            min_length (int): Minimum length of field's values.
                Defaults to ``None``.
            max_length (int): Maximum length of field's values.
                Defaults to ``None``.
            **kwargs: Other arguments from ``Field``.
        """
        super().__init__(field_type=str, **kwargs)
        self.regex = re.compile(regex) if isinstance(regex, str) else regex
        self.allow_blank = allow_blank
        self.min_length = min_length
        self.max_length = max_length
        if self.regex is not None:
            self.validators.append(self._validate_regex)
        # The blank check applies regardless of the other constraints.
        self.validators.append(self._validate_blank)
        # Fix: compare against None explicitly (like max_length) so an
        # explicit ``min_length=0`` is registered; the truthiness test
        # silently dropped it.
        if self.min_length is not None:
            self.validators.append(self._validate_min_length)
        if self.max_length is not None:
            self.validators.append(self._validate_max_length)

    def _validate_max_length(self, value):
        if len(value) > self.max_length:
            raise ValidationError('length is greater than {constraint}',
                                  constraint=self.max_length)

    def _validate_min_length(self, value):
        if len(value) < self.min_length:
            raise ValidationError('length is less than {constraint}',
                                  constraint=self.min_length)

    def _validate_blank(self, value):
        # A blank string passes immediately when allowed (StopValidation).
        if value == '':
            if self.allow_blank:
                raise StopValidation()
            raise ValidationError('blank value is not allowed')

    def _validate_regex(self, value):
        if not self.regex.match(value):
            raise ValidationError(
                'value does not match pattern {constraint}',
                constraint=self.regex.pattern)
class BoolField(Field):
    """Boolean field."""

    def __init__(self, **kwargs):
        # Values are validated/coerced against ``bool``.
        super().__init__(field_type=bool, **kwargs)
class NumberField(Field, metaclass=abc.ABCMeta):
    """Base class for number fields."""

    def __init__(self, *, gte=None, lte=None, gt=None, lt=None, **kwargs):
        """Create number field.

        Args:
            gte: Greater than or equal limit. Defaults to ``None``.
            lte: Less than or equal limit. Defaults to ``None``.
            gt: Greater than limit. Defaults to ``None``.
            lt: Less than limit. Defaults to ``None``.
            **kwargs: Other arguments from ``Field``.
        """
        super().__init__(**kwargs)
        self.gte = gte
        self.lte = lte
        self.gt = gt
        self.lt = lt
        # Register a validator for every limit that was actually supplied;
        # registration order (gte, lte, gt, lt) matches the parameters.
        for limit, validator in ((gte, self._validate_gte),
                                 (lte, self._validate_lte),
                                 (gt, self._validate_gt),
                                 (lt, self._validate_lt)):
            if limit is not None:
                self.validators.append(validator)

    def _validate_gte(self, value):
        if value < self.gte:
            raise ValidationError('value is less than {constraint}',
                                  constraint=self.gte)

    def _validate_lte(self, value):
        if value > self.lte:
            raise ValidationError('value is greater than {constraint}',
                                  constraint=self.lte)

    def _validate_gt(self, value):
        if value <= self.gt:
            raise ValidationError('value should be greater than {constraint}',
                                  constraint=self.gt)

    def _validate_lt(self, value):
        if value >= self.lt:
            raise ValidationError('value should be less than {constraint}',
                                  constraint=self.lt)
class IntField(NumberField):
    """Integer field."""

    def __init__(self, **kwargs):
        """Create int field."""
        # Inherits the gte/lte/gt/lt limit handling from NumberField.
        super().__init__(field_type=int, **kwargs)
class FloatField(NumberField):
    """Field holding a ``float`` value."""

    def __init__(self, **kwargs):
        """Create float field; forwards keyword arguments to ``NumberField``."""
        super().__init__(field_type=float, **kwargs)
class DateTimeField(Field):
    """Date and time field based on ``datetime.datetime``.

    Values are rounded to millisecond precision because MongoDB stores
    datetimes with millisecond accuracy only.
    """

    def __init__(self, **kwargs):
        """Create datetime field; forwards keyword arguments to ``Field``."""
        super().__init__(field_type=datetime, **kwargs)

    def from_data(self, value):
        """Round *value* to the nearest millisecond.

        Args:
            value (datetime): Value to normalize; falsy values (``None``)
                are returned unchanged.

        Returns:
            datetime rounded to millisecond precision.

        The previous implementation used
        ``value.replace(microsecond=round(value.microsecond, -3))``, which
        raises ``ValueError`` whenever the microseconds round up to
        1000000 (i.e. for any value with microsecond >= 999500), because
        ``replace`` only accepts 0..999999.  Adding the signed delta as a
        timedelta lets the carry propagate into the seconds correctly.
        """
        from datetime import timedelta  # local: module top may import datetime only
        if not value:
            return value
        # Signed distance to the nearest millisecond; timedelta arithmetic
        # handles the carry across second/minute boundaries.
        return value + timedelta(
            microseconds=round(value.microsecond, -3) - value.microsecond)
class ObjectIdField(Field):
    """ObjectId field."""
    def __init__(self, **kwargs):
        # ObjectId is bson's document id type; stored as-is in MongoDB.
        super().__init__(field_type=ObjectId, **kwargs)
    def from_data(self, value):
        """Convert value to ObjectId.

        Args:
            value (ObjectId, str): ObjectId value or 24-character hex string.

        Returns:
            None or ObjectId value. If value is not ObjectId and can't
            be converted return as is.
        """
        if value is None or isinstance(value, ObjectId):
            return value
        try:
            return ObjectId(value)
        except (bson.errors.InvalidId, TypeError):
            # Leave unconvertible input untouched so a later validation
            # step reports it instead of raising here.
            return value
class CompoundFieldNameBuilder:
    """Proxy that joins nested field names into a dotted compound name."""

    __slots__ = ['_obj', '_prefix']

    def __init__(self, obj, prefix):
        self._obj = obj
        self._prefix = prefix

    def __getattr__(self, name):
        # Only fields that expose a document_class can be descended into.
        if not getattr(self._obj, 'document_class', None):
            raise AttributeError(
                "'{0}' has no attribute {1}".format(
                    self._obj.__class__.__name__, name))
        nested = getattr(self._obj, name)
        return CompoundFieldNameBuilder(nested, self._prefix)

    @property
    def s(self):
        """Full dotted mongo name of the wrapped field."""
        return '.'.join((self._prefix, self._obj.s))
class CompoundField(Field):
    """Base class for complex fields.

    This class should be base for embedded document fields or list fields
    which could contain embedded documents as their elements.

    This class makes it possible to build a complex fields name using
    attribute syntax and `s` property, i.e.:

    .. code-block:: python

        assert Comment.author.name.s == 'author.name'
        assert Article.tags._id.s == 'tags._id'
        assert Hotel.rooms.category.s == 'rooms.category'
        assert Hotel.rooms.category.name.s == 'rooms.category.name'

    so you can use them to build queries:

    .. code-block:: python

        Hotel.q(db).find({Hotel.rooms.category.name.s: 'Lux'})
    """
    def __init__(self, document_class, base_document_class, **kwargs):
        """Store a (possibly lazy) reference to the related document class.

        Args:
            document_class: Subclass of *base_document_class*, an absolute
                "dotted.path" string to one (resolved lazily on first use
                of the ``document_class`` property), or ``None``.
            base_document_class: Class the resolved class must subclass.
            **kwargs: Other arguments from ``Field``.

        Raises:
            TypeError: If document_class is not a str, None or a subclass
                of base_document_class.
        """
        if (isinstance(document_class, str) or
                document_class is None
                or issubclass(document_class, base_document_class)):
            self._document_class = document_class
        else:
            raise TypeError(
                ("document_class should be a "
                 "subclass of '{0}' or str, not a '{1}'").format(
                    base_document_class, document_class))
        self._base_document_class = base_document_class
        super().__init__(**kwargs)
    @property
    def document_class(self):
        # Lazily resolve a dotted-path string on first access and cache
        # the resulting class; the subclass check runs once, here.
        if isinstance(self._document_class, str):
            self._document_class = import_class(self._document_class)
            if not issubclass(self._document_class, self._base_document_class):
                raise TypeError(
                    ("document_class should be a "
                     "subclass of '{0}', not a '{1}'").format(
                        self._base_document_class, self._document_class))
        return self._document_class
    def __getattr__(self, name):
        # Attribute access chains dotted mongo names (see class docstring).
        if self.document_class is None:
            raise AttributeError(
                "'{0}' has no attribute '{1}'".format(
                    self.__class__.__name__, name))
        return CompoundFieldNameBuilder(
            getattr(self.document_class, name), self.mongo_name)
class EmbDocField(CompoundField):
    """Embedded Document Field."""
    def __init__(self, document_class, **kwargs):
        """Create Embedded Document field.

        Args:
            document_class: A subclass of the
                ``aioodm.EmbeddedDocument`` class or string with
                absolute path to such class.
            **kwargs: Other arguments from ``Field``.
        """
        # Imported by dotted path to avoid a circular import with aioodm.
        EmbeddedDocument = import_class('aioodm.EmbeddedDocument')
        super().__init__(document_class, EmbeddedDocument, **kwargs)
        # After the field's own checks, validate the embedded document
        # recursively.
        self.validators.append(lambda value: value.validate())
    def validate(self, value):
        # field_type is resolved here (not in __init__) because the
        # document class may be a lazy string reference.
        self.field_type = self.document_class
        super().validate(value)
    def to_mongo(self, value):
        # Delegate serialization to the embedded document itself.
        if value is None:
            return None
        return value.to_mongo()
    def from_mongo(self, value):
        if value is None:
            return None
        return self.document_class.from_mongo(value)
    def from_data(self, value):
        # Leave unconvertible input untouched so validation can report it.
        if value is None or isinstance(value, self.document_class):
            return value
        try:
            return self.document_class.from_data(value)
        except (TypeError, ValueError):
            return value
class ListField(CompoundField):
    """List field."""
    def __init__(self, item_field, *,
                 min_length=None, max_length=None, **kwargs):
        """Create List field.

        Args:
            item_field (Field): Instance of the field to reflect list
                items' type.
            min_length (int): Minimum length of the list. Defaults to ``None``.
            max_length (int): Maximum length of the list. Defaults to ``None``.
            **kwargs: Other arguments from ``Field``.

        Raises:
            TypeError: If item_field is not instance of the ``Field`` subclass.
        """
        if not isinstance(item_field, Field):
            raise TypeError(
                ('item_field should be an instance of the `Field` '
                 'subclass, not of the `{0}`').format(type(item_field)))
        EmbeddedDocument = import_class('aioodm.EmbeddedDocument')
        # Only lists of embedded documents participate in compound-name
        # building; any other item type carries no document class.
        document_class, base_document_class = (
            (item_field._document_class, EmbeddedDocument)
            if isinstance(item_field, EmbDocField)
            else (None, None))
        super().__init__(document_class, base_document_class,
                         field_type=list, **kwargs)
        self.item_field = item_field
        self.min_length = min_length
        self.max_length = max_length
        if min_length is not None:
            self.validators.append(self._validate_min_length)
        if max_length is not None:
            self.validators.append(self._validate_max_length)
        # Per-item validation always runs, after the length checks.
        self.validators.append(self._validate_items)
    def _validate_min_length(self, value):
        if len(value) < self.min_length:
            raise ValidationError('list length is less than {constraint}',
                                  constraint=self.min_length)
    def _validate_max_length(self, value):
        if len(value) > self.max_length:
            raise ValidationError('list length is greater than {constraint}',
                                  constraint=self.max_length)
    def _validate_items(self, value):
        # Collect per-index errors so one bad item doesn't hide the rest.
        errors = {}
        for index, item in enumerate(value):
            try:
                self.item_field.validate(item)
            except ValidationError as e:
                errors[index] = e
        if errors:
            raise ValidationError(errors)
    def to_mongo(self, value):
        if value is None:
            return None
        return [self.item_field.to_mongo(item) for item in value]
    def from_mongo(self, value):
        if value is None:
            return None
        return [self.item_field.from_mongo(item) for item in value]
    def from_data(self, value):
        # if value not a list just return as is as well as None
        if value is None or not isinstance(value, list):
            return value
        return [self.item_field.from_data(item) for item in value]
class RefField(CompoundField):
    """Reference field."""
    def __init__(self, document_class, **kwargs):
        """Create Reference field.

        Args:
            document_class: A subclass of the ``aioodm.Document`` class
                or string with absolute path to such class.
            **kwargs: Other arguments from ``Field``.
        """
        Document = import_class('aioodm.Document')
        super().__init__(document_class, Document, **kwargs)
        # Replace any inherited validators: a reference is validated only
        # as its target document's _id (plus the usual None handling).
        self.validators = [self._validate_none, self._validate_ref]
    def _validate_ref(self, value):
        # ref value could be reference instance
        _id = value._id if isinstance(value, self.document_class) else value
        self.document_class._id.validate(_id)
    def to_mongo(self, value):
        # Only the referenced document's _id is stored.
        if isinstance(value, self.document_class):
            return self.document_class._id.to_mongo(value._id)
        return self.document_class._id.to_mongo(value)
    def from_mongo(self, value):
        return self.document_class._id.from_mongo(value)
    def from_data(self, value):
        if isinstance(value, self.document_class):
            return value
        return self.document_class._id.from_data(value)
class EmailField(StrField):
    """Email field."""
    # NOTE(review): deliberately permissive pattern; it accepts some
    # technically invalid addresses (e.g. consecutive/trailing dots in
    # the domain part) — confirm that is acceptable.
    EMAIL_REGEX = re.compile(
        r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$')
    def __init__(self, *, regex=EMAIL_REGEX, **kwargs):
        """Create Email field.

        Args:
            regex (str, re.regex): Pattern for email address.
            **kwargs: Other arguments from ``Field`` and ``StrField``.
        """
        super().__init__(regex=regex, **kwargs)
    def _validate_regex(self, value):
        # Re-raise the generic pattern error with an email-specific message.
        try:
            super()._validate_regex(value)
        except ValidationError:
            raise ValidationError('value is not a valid email address')
class DecimalField(NumberField):
    """Decimal number field.

    This field can be used only with MongoDB 3.4+.
    """
    def __init__(self, **kwargs):
        """Create Decimal field."""
        super().__init__(field_type=Decimal, **kwargs)
    def to_mongo(self, value):
        # Stored as bson Decimal128 (the MongoDB 3.4+ decimal type).
        if value is None:
            return None
        return Decimal128(value)
    def from_mongo(self, value):
        if value is None:
            return None
        return value.to_decimal()
class SynonymField(object):
    """Create synonym name for real field."""
    def __init__(self, original_field):
        """Create synonym for real document's field.

        Args:
            original_field: Field instance or string name of field.

        Example:

        .. code-block:: python

            class Doc(Document):
                _id = StrField()
                name = SynonymField(_id)

            class OtherDoc(Document):
                # _id field will be added automaticly.
                obj_id = SynonymField('_id')
        """
        self._original_field = original_field
    def __get__(self, instance, instance_type):
        # Descriptors receive instance=None only on class-level access, so
        # compare with None explicitly. The previous truth test
        # (``if not instance``) misrouted instances whose __bool__/__len__
        # evaluates falsy, returning the class-level field instead of the
        # instance attribute.
        if instance is None:
            return instance_type.meta.fields[self.original_field_name]
        return getattr(instance, self.original_field_name)
    def __set__(self, instance, value):
        setattr(instance, self.original_field_name, value)
    @property
    def original_field_name(self):
        """Name of the field this synonym forwards to."""
        try:
            return self._original_field.name
        except AttributeError:  # original field is a string name of the field
            return self._original_field
|
# Players management module
#
import hashlib
import stores
from matchupsv2 import now
# Static secret prepended to every hash input.
# NOTE(review): one shared salt gives no per-user protection against
# precomputed tables — consider per-user salts.
salt = 'superhero'
def userhash(name):
    """Return the SHA-256 hex digest identifying *name*.

    The input is encoded to UTF-8 before hashing: on Python 3,
    ``hashlib`` digests only accept bytes-like objects, so feeding the
    raw str raised TypeError. Digests are unchanged for ASCII names.
    """
    digest = hashlib.sha256()
    digest.update((salt + name).encode('utf-8'))
    return digest.hexdigest()
def pswhash(name, psw):
    """Return the SHA-256 hex digest of salt + name + password.

    Encodes to UTF-8 before hashing: on Python 3 ``hashlib`` accepts
    only bytes-like input. Digests are unchanged for ASCII input.
    """
    digest = hashlib.sha256()
    digest.update((salt + name + psw).encode('utf-8'))
    return digest.hexdigest()
def pswcheck(player, psw):
    """Return True when *psw* matches the stored hash for *player*."""
    players = stores.get().restore('players', 1)
    player_key = userhash(player)
    if player_key not in players:
        return False
    return pswhash(player, psw) == players[player_key]['psw']
def restore_db():
    """Load the players mapping; an empty store yields an empty dict."""
    players = stores.get().restore('players', 1)
    return {} if players == '' else players
def store_db(players):
    # Persist the whole players mapping; returns the store's status value.
    return stores.get().store('players', 1, players)
# add players
def add(name, psw, email='', admin=False):
players = restore_db()
hname = userhash(name)
if hname in players:
return False
hpsw = pswhash(name, psw)
players[hname] = {'name': name, 'psw': hpsw, 'email': email, 'admin': admin}
# Store in DB
return store_db(players)
# remove players
# remove players
def remove(player):
    """Delete *player* from the store; returns False when unknown."""
    players = restore_db()
    player_key = userhash(player)
    if player_key not in players:
        return False
    players.pop(player_key)
    # Persist the updated mapping
    return store_db(players)
def change_email(player, email):
    """Update a player's e-mail address; returns False when unknown."""
    players = restore_db()
    player_key = userhash(player)
    if player_key not in players:
        return False
    players[player_key]['email'] = email
    # Persist the updated mapping
    return store_db(players)
def change_psw(player, old, new, admin=False):
    """Set a new password; non-admins must supply the correct old one."""
    players = restore_db()
    player_key = userhash(player)
    if player_key not in players:
        return False
    if not admin and not pswcheck(player, old):
        return False
    players[player_key]['psw'] = pswhash(player, new)
    # Persist the updated mapping
    return store_db(players)
def update_last_login(player):
    """Stamp the player's last_login field with the current time.

    NOTE(review): the 'Z' suffix implies UTC — confirm matchupsv2.now()
    actually returns UTC.
    """
    players = restore_db()
    player_key = userhash(player)
    if player_key not in players:
        return False
    players[player_key]['last_login'] = now().strftime("%Y-%m-%dT%H:%M:%SZ")
    # Persist the updated mapping
    return store_db(players)
def get_all_admin():
    """Return every player record for admin views.

    Each entry is a copy of the stored record minus the 'psw' hash, plus
    an 'id' key holding the store key (the name hash).
    """
    players = restore_db()
    result = []
    # Iterate items directly instead of materializing a list and indexing
    # tuples; also drops the ambiguous single-letter name 'l' (PEP 8 E741).
    for player_id, record in players.items():
        entry = record.copy()
        del entry['psw']
        entry['id'] = player_id
        result.append(entry)
    return result
def get_all():
    """Return every player record without its password hash."""
    players = restore_db()
    sanitized = []
    for record in players.values():
        entry = record.copy()
        del entry['psw']
        sanitized.append(entry)
    return sanitized
def get(player):
    """Return *player*'s record without the password hash, or None."""
    players = restore_db()
    player_key = userhash(player)
    if player_key not in players:
        return None
    record = players[player_key].copy()
    del record['psw']
    return record
def is_valid_player(hplayer):
    """Return True when the given name hash exists in the store."""
    return hplayer in restore_db()
def login(player, psw):
    """Authenticate *player*; return the name hash on success, else None.

    NOTE(review): 'guest' gets its hash back immediately, bypassing both
    the existence check and the password check — confirm this is intended.
    """
    players = restore_db()
    hname = userhash(player)
    if player == 'guest':
        return hname
    if hname not in players:
        return None
    if not pswcheck(player, psw):
        return None
    # Record the successful login before handing back the session hash.
    update_last_login(player)
    return hname
def root_access(psw):
    """Check *psw* against the stored root password hash.

    Uses hmac.compare_digest for a constant-time comparison since this
    guards administrative access (plain == leaks timing information).
    """
    import hmac  # local import keeps the module's import block untouched
    root_psw = 'e32eb9019022b9f62627900fb92c2eb8ef315010710fa16ba565d0d8b90da18e'
    return hmac.compare_digest(pswhash('root', psw), root_psw)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord, ICRS, Galactic, FK4, FK5, Longitude
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
# These tests are cribbed directly from the Examples section of
# http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html
def test_scalar_skycoord(tmpdir):
    """Round-trip a scalar SkyCoord (default ICRS frame) through asdf."""
    coord = SkyCoord(10, 20, unit="deg")  # defaults to ICRS frame
    assert_roundtrip_tree({'coord': coord}, tmpdir)
def test_vector_skycoord(tmpdir):
    """Round-trip a three-element SkyCoord array through asdf."""
    coord = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg")
    assert_roundtrip_tree({'coord': coord}, tmpdir)
def test_skycoord_fk4(tmpdir):
    """Round-trip FK4-frame coordinates parsed from sexagesimal strings."""
    strings = ["1:12:43.2 +1:12:43", "1 12 43.2 +1 12 43"]
    coord = SkyCoord(strings, frame=FK4, unit=(u.deg, u.hourangle),
                     obstime="J1992.21")
    assert_roundtrip_tree({'coord': coord}, tmpdir)
@pytest.mark.parametrize('coord', [
    SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic),  # Units from string
    SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
])
def test_skycoord_galactic(coord, tmpdir):
    """Round-trip Galactic-frame coordinates through asdf."""
    assert_roundtrip_tree({'coord': coord}, tmpdir)
def test_skycoord_ra_dec(tmpdir):
    """Round-trip SkyCoords built from Longitude/Quantity component arrays."""
    ra = Longitude([1, 2, 3], unit=u.deg)  # Could also use Angle
    dec = np.array([4.5, 5.2, 6.3]) * u.deg  # Astropy Quantity
    assert_roundtrip_tree({'coord': SkyCoord(ra, dec, frame='icrs')}, tmpdir)
    coord = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
    assert_roundtrip_tree({'coord': coord}, tmpdir)
def test_skycoord_override_defaults(tmpdir):
    """Round-trip a SkyCoord overriding the FK4 obstime/equinox defaults."""
    frame = FK4(1 * u.deg, 2 * u.deg)  # Uses defaults for obstime, equinox
    coord = SkyCoord(frame, obstime='J2010.11', equinox='B1965')  # Override defaults
    assert_roundtrip_tree({'coord': coord}, tmpdir)
def test_skycoord_cartesian(tmpdir):
    """Round-trip a Cartesian-representation galactic SkyCoord."""
    coord = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
                     representation_type='cartesian')
    assert_roundtrip_tree({'coord': coord}, tmpdir)
def test_skycoord_vector_frames(tmpdir):
    """Round-trip a SkyCoord assembled from a list of ICRS frames."""
    frames = [ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)]
    assert_roundtrip_tree({'coord': SkyCoord(frames)}, tmpdir)
@pytest.mark.xfail(reason='Velocities are not properly serialized yet')
def test_skycoord_radial_velocity(tmpdir):
    """Round-trip a SkyCoord carrying a radial velocity (expected failure)."""
    coord = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
    assert_roundtrip_tree({'coord': coord}, tmpdir)
@pytest.mark.xfail(reason='Velocities are not properly serialized yet')
def test_skycoord_proper_motion(tmpdir):
    """Round-trip a SkyCoord carrying proper motions (expected failure)."""
    coord = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr,
                     pm_dec=1*u.mas/u.yr)
    assert_roundtrip_tree({'coord': coord}, tmpdir)
@pytest.mark.skip(reason='Apparent loss of precision during serialization')
def test_skycoord_extra_attribute(tmpdir):
    """Check the frame attribute 'equinox' survives an asdf round trip."""
    source = SkyCoord(10*u.deg, 20*u.deg, equinox="2011-01-01T00:00", frame="fk4")
    tree = {'coord': source.transform_to("icrs")}
    def check_asdf(asdffile):
        assert hasattr(asdffile['coord'], 'equinox')
    assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check_asdf)
|
"""
This program takes in the a url to an index.m3u8 file
and it will return a modified version of that file
that vlc media player can use
"""
import sys
import requests
def outputBuffer(buffer, outputFile):
    """Write *buffer* plus a trailing newline to *outputFile*.

    Returns an empty string so the caller can reset its line buffer.
    """
    outputFile.write(buffer + '\n')
    return ""
if __name__ == "__main__":
if len(sys.argv) == 2:
url = sys.argv[1]
fileToModify = requests.get(url).text #Save the Index.m3u8 into memory
urlSliceEnd = url.find("index.m3u8")
modifiedURL = url[0:(urlSliceEnd -1)]
outputFileLocation = "./Output.m3u8"
with open(outputFileLocation, "w") as outputFile:
buffer = ""
for character in fileToModify:
if character != '\n':
buffer = buffer + character
else:
if buffer[0] != '#': #This means that we are looking at a link and we need to add the url
buffer = modifiedURL + '/' + buffer
buffer = outputBuffer(buffer, outputFile)
else: #This means the buffer has a comment and we can just add it
buffer = outputBuffer(buffer, outputFile)
else:
print("invalid amount of arguments (Should be 1) (0 through 1)")
argumentNumber = 0
for argument in sys.argv:
print(f"{argumentNumber}, {argument}")
argumentNumber += 1
|
import time
import random
def timeit(func, *args):
    """Call ``func(*args)``, print the elapsed wall time in ms and return
    the function's result.

    The result was previously computed and discarded; returning it is a
    backward-compatible improvement (callers ignoring it are unaffected).
    """
    t1 = time.time()
    ret = func(*args)
    cost_time = (time.time() - t1) * 1000
    print("cost time: %sms" % cost_time)
    return ret
try:
    randint_wrap = random.randint
except AttributeError:
    # micropython: random.randint is missing. Emulate an inclusive [a, b]
    # range on top of getrandbits — the previous fallback used % (b - a),
    # which could never return b, diverging from random.randint semantics.
    def randint_wrap(a, b):
        return a + random.getrandbits(32) % (b - a + 1)
def rand_int():
    # Random int in [1, 100] via the platform-appropriate wrapper.
    return randint_wrap(1, 100)
def test_add(n):
    """Benchmark *n* additions of random ints; returns the last sum."""
    print("test_add: n=%d" % n)
    for _ in range(n):
        total = rand_int() + rand_int()
    return total
def test_sub(n):
    """Benchmark *n* subtractions of random ints; returns the last result."""
    print("test_sub: n=%d" % n)
    for _ in range(n):
        diff = rand_int() - rand_int()
    return diff
def test_mul(n):
    """Benchmark *n* multiplications of random ints; returns the last product."""
    print("test_mul: n=%d" % n)
    for _ in range(n):
        product = rand_int() * rand_int()
    return product
def test_div(n):
    """Benchmark *n* true divisions of random ints; returns the last quotient."""
    print("test_div: n=%d" % n)
    for _ in range(n):
        quotient = rand_int() / rand_int()
    return quotient
def test_modulo(n):
    """Benchmark *n* modulo operations of random ints; returns the last remainder."""
    print("test_modulo: n=%d" % n)
    for _ in range(n):
        remainder = rand_int() % rand_int()
    return remainder
# Run each micro-benchmark for 100k iterations and print its timing.
timeit(test_add, 100000)
timeit(test_sub, 100000)
timeit(test_mul, 100000)
timeit(test_div, 100000)
timeit(test_modulo, 100000)
# Sneak peek at the results.
# Import the sentiment_mod module and test it on some feeds and statements.
import sentiment_mod as senti
print(senti.sentiment("He is an incapable person. His projects are totally senseless."))
print(senti.sentiment("This movie was awesome! The acting was great, plot was wonderful !"))
print(senti.sentiment("This movie was utter junk.. I don't see what the point was at all. Horrible movie, 0/10"))
print(senti.sentiment("He is a freak."))
print(senti.sentiment("Movie was nice. Actors did very well. All together a nice experience."))
print(senti.sentiment("Are you fucking mad ?"))
print(senti.sentiment("You are dumb."))
|
from typing import Optional
import streamlit as st
import requests
import json
import spacy_streamlit
from spacy_streamlit.util import get_svg
from spacy import displacy
spacy_model = "en_core_web_sm"
def main():
    """Render the TxT demo UI: a task picker plus three NLP actions.

    Each action processes the text area's contents with spaCy and then
    forwards it to the matching backend endpoint.
    """
    # Applying styles to the buttons
    st.markdown("""<style>
    .st-eb {
        background-color:#F9786F
    } </style>""", unsafe_allow_html=True)
    # Heading
    st.header("Text Sentiment Analysis")
    st.sidebar.title("TxT")
    task = st.sidebar.selectbox("Choose Task: ", ("Sentiment Analysis", "Summarization", "Paraphrase"))
    # Text area for user input
    user_input = st.text_area("Enter your text here", "", height=200)
    if(task == "Sentiment Analysis"):
        if(st.button("Analyze")):
            with st.spinner('Analyze Text'):
                doc = spacy_streamlit.process_text(spacy_model, user_input)
                visual_pos(doc)
                output = forward_sentimentAnalysis(user_input)
                st.header("Sentiment Analysis")
                st.write(output)
            # (Removed a stray dead `pass` statement that followed here.)
    elif(task == "Summarization"):
        if(st.button("Summarize")):
            with st.spinner('Summarizing Text'):
                doc = spacy_streamlit.process_text(spacy_model, user_input)
                output = forward_summarization(user_input)
                st.header("Summarized Text")
                st.write(output)
    elif(task == "Paraphrase"):
        if(st.button("Paraphrase")):
            with st.spinner('Paraphrasing Text'):
                doc = spacy_streamlit.process_text(spacy_model, user_input)
                output = forward_paraphrase(user_input)
                st.header("Paraphrased Results")
                st.write(output)
def forward_sentimentAnalysis(sentence):
    """POST *sentence* to the sentiment endpoint and return its result."""
    payload = json.dumps({'sentence': sentence})
    response = requests.post("http://127.0.0.1:5000/run_forward",
                             headers={"content-type": "application/json"},
                             data=payload)
    return response.json()["data"]
def forward_summarization(sentence):
    """POST *sentence* to the summarizer endpoint and return its result."""
    payload = json.dumps({'sentence': sentence})
    response = requests.post("http://127.0.0.1:5000/run_forward_summarizer",
                             headers={"content-type": "application/json"},
                             data=payload)
    return response.json()["data"]
def forward_paraphrase(sentence):
    """POST *sentence* to the paraphrase endpoint and return its result."""
    payload = json.dumps({'sentence': sentence})
    response = requests.post("http://127.0.0.1:5000/run_forward_paraphrase",
                             headers={"content-type": "application/json"},
                             data=payload)
    return response.json()["data"]
def visual_pos(doc, title: Optional[str] = "Dependency Parse & Part-of-speech tags"):
    """Render a dependency-parse SVG for every sentence in *doc*."""
    if title:
        st.header(title)
    for sent_doc in (span.as_doc() for span in doc.sents):
        svg = displacy.render(sent_doc, style="dep")
        svg = svg.replace("\n\n", "\n")
        st.write(get_svg(svg), unsafe_allow_html=True)
if __name__ == "__main__":
main() |
from filelist import filelist
def functionlist(file_path):
    """Return (line, index) pairs for lines whose first word is 'function'."""
    with open(file_path) as fp:
        lines = fp.read().splitlines()
    matches = []
    for idx, text in enumerate(lines):
        if text.split(' ', 1)[0] == 'function':
            matches.append((text, idx))
    return matches
def list_functionlist(list_file_path):
    """Pair each path with its functionlist() result."""
    return [(path, functionlist(path)) for path in list_file_path]
if __name__ == "__main__":
list = list_functionlist(filelist('/home/dullin/dev/files'))
print(list) |
import argparse
import os
import json
import numpy
import PIL.Image
from . import pidfile, tally, nethook, zdataset
from . import upsample, imgviz, imgsave, proggan, segmenter
def main():
    """Dissect one GAN layer: activation quantiles, unit/segmentation IoU
    tables and top-activating image strips.

    All outputs and caches go under outdir/model/layer/seg/sample_size,
    so reruns with identical arguments reuse the cached tallies.
    """
    parser = argparse.ArgumentParser(description='quickdissect')
    parser.add_argument('--outdir', type=str, default='results')
    parser.add_argument('--model', type=str, default='church')
    parser.add_argument('--layer', type=str, default='layer4')
    parser.add_argument('--seg', type=str, default='netpqc')
    parser.add_argument('--sample_size', type=int, default=1000)
    args = parser.parse_args()
    # Claim the result directory exclusively; resfn maps a relative name
    # to a path inside it.
    resfn = pidfile.exclusive_dirfn(
        args.outdir, args.model, args.layer, args.seg, str(args.sample_size))
    import torch
    torch.backends.cudnn.profile = True
    model = nethook.InstrumentedModel(
        proggan.load_pretrained(args.model)).cuda()
    model.retain_layer(args.layer)
    zds = zdataset.z_dataset_for_model(model, size=args.sample_size, seed=1)
    # One forward pass to discover the retained layer's activation shape.
    model(zds[0][0][None].cuda())
    sample_act = model.retained_layer(args.layer).cpu()
    upfn = upsample.upsampler((64, 64), sample_act.shape[2:])
    def flat_acts(zbatch):
        # Activations upsampled to 64x64, flattened to (pixels, units).
        _ = model(zbatch.cuda())
        acts = upfn(model.retained_layer(args.layer))
        return acts.permute(0, 2, 3, 1).contiguous().view(-1, acts.shape[1])
    rq = tally.tally_quantile(flat_acts, zds, cachefile=resfn('rq.npz'))
    # Per-unit activation threshold at the 0.99 quantile.
    level_at_cutoff = rq.quantiles(0.99)[None, :, None, None].cuda()
    segmodel, seglabels = segmenter.load_segmenter(args.seg)
    def compute_cond_indicator(zbatch):
        # Binary "unit above threshold" maps, sampled per segment class.
        image_batch = model(zbatch.cuda())
        seg = segmodel.segment_batch(image_batch, downsample=4)
        acts = upfn(model.retained_layer(args.layer))
        iacts = (acts > level_at_cutoff).float()
        return tally.conditional_samples(iacts, seg)
    cmv = tally.tally_conditional_quantile(compute_cond_indicator, zds,
            cachefile=resfn('cmv.npz'), pin_memory=True)
    iou_table = tally.iou_from_conditional_indicator_mean(cmv).permute(1, 0)
    numpy.save(resfn('iou.npy'), iou_table.numpy())
    # Best-matching segmentation class (and its IoU) for every unit.
    unit_list = enumerate(zip(*iou_table.max(1)))
    unit_records = {
        'units': [{
            'unit': unit,
            'iou': iou.item(),
            'label': seglabels[segc],
            'cls': segc.item()
        } for unit, (iou, segc) in unit_list]
    }
    with open(resfn('labels.json'), 'w') as f:
        json.dump(unit_records, f)
    with open(resfn('seglabels.json'), 'w') as f:
        json.dump(seglabels, f)
    def compute_image_max(zbatch):
        # Per-image, per-unit max activation, used to rank images.
        image_batch = model(zbatch.cuda())
        return model.retained_layer(args.layer).max(3)[0].max(2)[0]
    topk = tally.tally_topk(compute_image_max, zds,
            cachefile=resfn('topk.npz'))
    def compute_acts(zbatch):
        image_batch = model(zbatch.cuda())
        acts_batch = model.retained_layer(args.layer)
        return (acts_batch, image_batch)
    # Save a masked top-5 image strip per unit.
    iv = imgviz.ImageVisualizer(128, quantiles=rq)
    unit_images = iv.masked_images_for_topk(compute_acts, zds, topk, k=5)
    imgsave.save_image_set(unit_images, resfn('imgs/unit_%d.png'))
    pidfile.mark_job_done(resfn.dir)
if __name__ == '__main__':
    main()
class DissectVis:
    '''
    Reads back the dissection results computed by the program above.

    Per layer it loads the unit labels (labels.json), the IoU table
    (iou.npy) and the segmentation label list (seglabels.json) from
    ``<outdir>/<model>/<layer>/<seg>/<sample_size>``. Unit images are
    loaded lazily on first access and cached.
    '''
    def __init__(self, outdir='results', model='church', layers=None,
                 seg='netpqc', sample_size=1000):
        # (Removed two dead locals, basedir='results/church' and
        # setting='netpqc/1000', that were never read; the real values are
        # computed from the arguments below.)
        if not layers:
            layers = ['layer%d' % i for i in range(1, 15)]
        labels = {}
        iou = {}
        images = {}
        for k in layers:
            dirname = os.path.join(outdir, model, k, seg, str(sample_size))
            with open(os.path.join(dirname, 'labels.json')) as f:
                labels[k] = json.load(f)['units']
            iou[k] = numpy.load(os.path.join(dirname, 'iou.npy'))
            # Placeholder slots for lazily-loaded unit images.
            images[k] = [None] * len(iou[k])
        # seglabels is written identically for every layer; last read wins.
        with open(os.path.join(dirname, 'seglabels.json')) as f:
            self.seglabels = json.load(f)
        self.labels = labels
        self.ioutable = iou
        self.images = images
        self.basedir = os.path.join(outdir, model)
        self.setting = os.path.join(seg, str(sample_size))
    def label(self, layer, unit):
        """Best-matching segmentation label for a unit."""
        return self.labels[layer][unit]['label']
    def iou(self, layer, unit):
        """IoU score of the unit's best label."""
        return self.labels[layer][unit]['iou']
    def top_units(self, layer, seglabel, k=20):
        """Indices of the k units with highest IoU against *seglabel*."""
        return self.ioutable[layer][:, self.seglabels.index(seglabel)
                ].argsort()[::-1][:k].tolist()
    def image(self, layer, unit):
        """Top-activating image strip for a unit (lazy-loaded, cached)."""
        result = self.images[layer][unit]
        # Lazy loading of images.
        if result is None:
            result = PIL.Image.open(os.path.join(
                self.basedir, layer,
                self.setting, 'imgs/unit_%d.png' % unit))
            result.load()
            self.images[layer][unit] = result
        return result
|
from PyQt5 import uic, QtWidgets
from datetime import date
import win32com.client as win32
from openpyxl import Workbook
import pandas as pd
class SistemaGeral:
    ### INIT
    def __init__(self):
        """Load every .ui screen, wire all buttons and start the Qt loop."""
        ### APP LAYOUT SETUP
        # Create the Qt application
        app = QtWidgets.QApplication([])
        # Main screens
        self.tela_login = uic.loadUi('./pyqt5-templates/login.ui')
        self.tela_geral = uic.loadUi('./pyqt5-templates/geral.ui')
        self.tela_projetos = uic.loadUi('./pyqt5-templates/projetos.ui')
        self.tela_email = uic.loadUi('./pyqt5-templates/email.ui')
        self.tela_calculo = uic.loadUi('./pyqt5-templates/calculo.ui')
        self.tela_analise = uic.loadUi('./pyqt5-templates/analise.ui')
        # Projects sub-screens
        self.tela_projetos_resumo = uic.loadUi('./pyqt5-templates/projetos-resumo.ui')
        # Button wiring, grouped by screen
        ### LOGIN SCREEN BUTTONS
        self.tela_login.btn_login.clicked.connect(self.logar_login)  # login button
        self.tela_login.btn_cadastrar.clicked.connect(self.cadastrar_login)  # sign-up button
        ### MAIN SCREEN BUTTONS
        ### Row 1
        self.tela_geral.btn_projetos.clicked.connect(self.projetos_geral)  # projects button
        self.tela_geral.btn_email.clicked.connect(self.email_geral)  # e-mail button
        self.tela_geral.btn_calc.clicked.connect(self.calculo_geral)  # calculation button
        ### Row 2
        self.tela_geral.btn_analise.clicked.connect(self.analise_geral)  # analysis button
        self.tela_geral.btn_voltar.clicked.connect(self.voltar_geral)  # back button
        ### PROJECTS SCREEN BUTTONS
        self.tela_projetos.btn_resumo.clicked.connect(self.resumo_projetos)  # summary button
        self.tela_projetos.btn_situacao.clicked.connect(self.situacao_projetos)  # status button
        self.tela_projetos.btn_cadastro.clicked.connect(self.cadastros_projetos)  # registration button
        self.tela_projetos.btn_anterior.clicked.connect(self.anteriores_projetos)  # previous button
        self.tela_projetos.btn_entrega.clicked.connect(self.entrega_projetos)  # delivery button
        self.tela_projetos.btn_voltar.clicked.connect(self.voltar_projetos)  # back button
        ### SUMMARY SCREEN BUTTONS
        self.tela_projetos_resumo.btn_voltar.clicked.connect(self.voltar_projetos_resumo)  # back button
        ### E-MAIL SCREEN BUTTONS
        self.tela_email.btn_voltar.clicked.connect(self.voltar_email)  # back button
        self.tela_email.btn_enviar.clicked.connect(self.enviar_email)  # send e-mail
        ### CALCULATION SCREEN BUTTONS
        self.tela_calculo.btn_calcular.clicked.connect(self.calcular_estatisticas)  # calculate button
        self.tela_calculo.btn_voltar.clicked.connect(self.voltar_calculo)  # back button
        ### ANALYSIS SCREEN BUTTONS
        self.tela_analise.btn_voltar.clicked.connect(self.voltar_analise)  # back button
        ### STARTUP
        # Show the login screen first
        self.tela_login.show()
        # Run the Qt event loop (blocks until the app exits)
        app.exec_()
    ### TODO: CREATE DATABASE
    ### TODO: CREATE USER TABLE
    ### LOGIN SYSTEM
    ### LOGIN
    def logar_login(self):
        """Validate the login form and switch to the main screen."""
        # Read the form inputs
        login = self.tela_login.input_login.text()
        senha = self.tela_login.input_senha.text()
        print(f'Login: {login}\nSenha: {senha}')
        # NOTE(review): placeholder check — access is granted only when BOTH
        # fields are empty and no credential store is consulted. Confirm this
        # is a development stub before shipping.
        if login == '' and senha == '':
            print('Login Autorizado')
            self.tela_login.close()
            self.tela_geral.show()
        else:
            print('Erro no login')
    def cadastrar_login(self):
        # Sign-up flow not implemented yet; only logs the click.
        print('Cadastrar')
    ### MAIN SCREEN
    def projetos_geral(self):
        """Open the projects screen, preloading its table and CSV data."""
        print('Projetos')
        # Summary-screen table widget and the projects database
        self.tabela = self.tela_projetos_resumo.tabelaResumo
        self.base_de_dados = pd.read_csv('./projetos/csv/DB_PMCM.csv')
        # Close the main screen and open Projects
        self.tela_geral.close()
        self.tela_projetos.show()
    def email_geral(self):
        """Switch from the main screen to the e-mail screen."""
        print('Email')
        # Close the main screen and open E-mail
        self.tela_geral.close()
        self.tela_email.show()
    def calculo_geral(self):
        """Switch from the main screen to the statistics-calculation screen."""
        print('Calculo de Estatistica')
        # Close the main screen and open Calculation
        self.tela_geral.close()
        self.tela_calculo.show()
    def analise_geral(self):
        """Switch from the main screen to the analysis screen."""
        print('Analise')
        # Close the main screen and open Analysis
        self.tela_geral.close()
        self.tela_analise.show()
    def voltar_geral(self):
        """Log out: return from the main screen to the login screen."""
        print('Deslogar')
        # Close the main screen and open Login
        self.tela_geral.close()
        self.tela_login.show()
### PROJETOS
def resumo_projetos(self):
print('Resumo')
self.tela_projetos_resumo.tabelaResumo
# Fecha Projetos e abre Resumo
self.tela_projetos.close()
self.tela_projetos_resumo.show()
    # The four actions below are placeholders: each only logs the click.
    def situacao_projetos(self):
        print('Situação')
    def cadastros_projetos(self):
        print('Cadastros')
    def anteriores_projetos(self):
        print('Anteriores')
    def entrega_projetos(self):
        print('Entrega')
    def voltar_projetos(self):
        """Return from the projects screen to the main screen."""
        print('Voltar')
        # Close Projects and open the main screen
        self.tela_projetos.close()
        self.tela_geral.show()
    ### PROJECT - SUMMARY
    def voltar_projetos_resumo(self):
        """Return from the summary screen to the projects screen."""
        print('Voltar')
        # Close Summary and open Projects
        self.tela_projetos_resumo.close()
        self.tela_projetos.show()
### EMAIL
def enviar_email(self):
self.tela_email = uic.loadUi('./pyqt5-templates/email.ui')
print('E-Mail')
# criar integração com o outlook
outlook = win32.Dispatch('outlook.application')
# criar um email
email = outlook.CreateItem(0)
# configurar as informações
email.To = self.tela_email.input_email.text()
email.Subject = 'Prefeitura Municipal de Campo Magro - Modelo Carimbo'
# variváveis
self.nome_email = self.tela_email.input_nome.text()
self.sobrenome_email = self.tela_email.input_snome.text()
# adicionando anexo
anexo = r'D:\Development\Python\app-pyqt5\app\emaildef\anexos\legenda-pmcm.dwg'
try:
email.Attachments.Add(anexo)
print('Anexado documento...')
except:
email.Attachments.Add(anexo)
print('Sem Anexo...')
css = '''
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
width: 100%;
overflow-x: hidden;
}
.email {
margin: 2%;
}
.topo {
padding: 0.3rem 0;
background-color: brown;
width: 100%;
text-align: center !important;
margin-bottom: 35px;
font-family: Arial, Helvetica, sans-serif;
}
.topo h2 {
font-size: 28px;
color: white;
padding-top: 25px;
}
img {
width: 80px;
height: 80px;
}
.capa {
text-align: center;
width: 100%;
padding: 0.5rem 0;
background-color: cadetblue;
}
.capa p {
font-size: 16px;
font-family: Arial, Helvetica, sans-serif;
color: white;
text-align: left !important;
padding: 0 30px;
}
.conteudo {
text-align: center;
width: 100%;
padding: 2rem;
background-color: gainsboro;
}
.conteudo p{
font-size: 20px;
font-family: Arial, Helvetica, sans-serif;
}
.conteudo p a {
text-decoration: none;
color: red;
}
.assinatura {
background-color: royalblue;
color: white;
font-family: Arial, Helvetica, sans-serif;
margin-top: 20px;
padding: 2%;
}
</style>
'''
assinatura = '''
<div class="assinatura">
<p>Atenciosamente,</p>
<br>
<br>
<h3>Jose Marinho</h3>
<br>
<p><a style="color: white; text-decoration: underline overline wavy green" href="https://wa.me/qr/LQM5O2QPPRDOH1">Whatsapp: (41) 9 9272-5388</a></p>
<p>Telefone: (41) 3677-4000 - Central Prefeitura</p>
<p>Telefone: (41) 3677-4050 - SEDUA</p>
<p style="color: white; text-decoration: none;">jose.marinho56@gmail.com</p>
<p>Prefeitura Municipal de Campo Magro / PR</p>
</div>
'''
topo = '''
<div class="topo">
<img src="https://leismunicipais.com.br/img/cidades/pr/campo-magro.png" alt="campo-magro">
<h2>Prefeitura Municipal de Campo Magro - SEDUA</h2>
</div> <!--topo-->
'''
conteudo = f'''
<div class="conteudo">
<p>Bom dia {self.nome_email} {self.sobrenome_email},</p>
<br>
<p>Entro em contato para atender a sua solicitação</p>
<br>
<p>Está anexado a esse mensagem, um arquivo em dwg, onde o mesmo contém a estrutura de carimbo padrão da prefeitura, logo, a tabela de estatística está junto.</p>
<br>
<p>Nome do arquivo: <b>legenda-pmcm.dwg</b></p>
<p>Tamanho do arquivo: <b>75,3 KB</b></p>
<p>Caso eu não tenha esclarecido totalmente a sua dúvida, estou à disposição</p>
</div>
'''
capa = '''
<div class="capa">
<p>Atendimento via E-mail - A/C: <b>José Marinho - Estagiário</b></p>
</div> <!--capa-->
'''
email.HTMLBody = f'''
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
{css}
</head>
<body>
<section class="email">
{topo}
{capa}
{conteudo}
{assinatura}
</section>
</body>
</html>
'''
# body
# finalizando email
try:
email.Send()
print('E-mail encaminhado com sucesso')
except Exception as error:
print(f'Algo deu errado\nErro: {error}')
def voltar_email(self):
    """Leave the e-mail screen and return to the general screen."""
    print('Voltar')
    # Window swap: close the e-mail dialog, then show the general one.
    for transicao in (self.tela_email.close, self.tela_geral.show):
        transicao()
### CALCULO
def calcular_estatisticas(self):
    """Read the areas typed into the calculation screen, derive the urban
    statistics (occupation rate, permeability rate, exploitation coefficient
    and area totals) and save them as an .xlsx report under ./calc/relatorios/.

    Reads every input widget of ``self.tela_calculo``; raises ValueError if a
    numeric field cannot be parsed by float()/int().
    """
    print('Calcular')
    # Initial setup: a fresh workbook and today's date for the header row.
    arquivo_excel = Workbook()
    planilha1 = arquivo_excel.active
    data_atual = date.today()
    # Collect the form inputs and store them in local variables.
    area_do_terreno = float(self.tela_calculo.input_area_do_terreno.text())
    area_anteriormente_construido = float(self.tela_calculo.input_area_anteriormente_construido.text())
    area_computavel_subsolo = float(self.tela_calculo.input_area_computavel_a_construir_no_subsolo.text())
    area_nao_computavel_subsolo = float(self.tela_calculo.input_area_nao_computavel_a_construir_no_subsolo.text())
    area_computavel_terreo = float(self.tela_calculo.input_area_computavel_a_construir_terreo.text())
    area_nao_computavel_terreo = float(self.tela_calculo.input_area_nao_computavel_a_construir_terreo.text())
    area_computavel_superior = float(self.tela_calculo.input_area_computavel_a_construir_pavSup.text())
    area_nao_computavel_superior = float(self.tela_calculo.input_area_nao_computavel_a_construir_pavSup.text())
    area_nao_computavel_a_construir_atico = float(self.tela_calculo.input_area_nao_computavel_a_construir_atico.text())
    area_livre = float(self.tela_calculo.input_area_livre.text())
    numero_de_pavimento = int(self.tela_calculo.input_numero_de_pavimento.text())
    altura_total = float(self.tela_calculo.input_altura_total.text())
    area_de_cobertura = float(self.tela_calculo.input_area_de_cobertura.text())
    # Derived statistics.
    projecao_edificacao = area_anteriormente_construido + area_computavel_terreo + area_nao_computavel_terreo
    taxa_ocupacao = (projecao_edificacao / area_do_terreno) * 100
    taxa_permeabilidade = (area_livre / area_do_terreno) * 100
    area_total_contruir_subsolo = area_computavel_subsolo + area_nao_computavel_subsolo
    area_total_contruir_terreo = area_computavel_terreo + area_nao_computavel_terreo
    area_total_contruir_superior = area_computavel_superior + area_nao_computavel_superior
    area_total_contruir_computavel = area_computavel_subsolo + area_computavel_terreo + area_computavel_superior
    area_total_contruir_nao_computavel = area_nao_computavel_subsolo + area_nao_computavel_terreo + area_nao_computavel_superior
    coeficiente_aproveitamento = ((area_total_contruir_computavel + area_anteriormente_construido)/area_do_terreno)
    area_total_construida_liberada = area_total_contruir_computavel + area_total_contruir_nao_computavel
    # Ordered mapping: each entry becomes one spreadsheet row (3..25).
    dicionario_com_resultado = {
        'Area do terreno': area_do_terreno,
        'Area anteriormente construido' : area_anteriormente_construido,
        'Area computavel a construir no subsolo': area_computavel_subsolo,
        'Area nao computavel a construir no subsolo': area_nao_computavel_subsolo,
        'Area computavel a construir no pavimento terreo': area_computavel_terreo,
        'Area nao computavel a construir no no pavimento terreo': area_nao_computavel_terreo,
        'Area computavel a construir no no pavimento superior': area_computavel_superior,
        'Area nao computavel a construir no no pavimento superior': area_nao_computavel_superior,
        'Area nao computavel a construir no atico': area_nao_computavel_a_construir_atico,
        'Area livre': area_livre,
        'Numero de pavimento': numero_de_pavimento,
        'Altura total': altura_total,
        'Projecao da edificacao': projecao_edificacao,
        'Taxa de ocupacao': taxa_ocupacao,
        'Taxa de permeabilidade': taxa_permeabilidade,
        'Area total a contruir no subsolo': area_total_contruir_subsolo,
        'Area total a contruir no pavimneto terreo': area_total_contruir_terreo,
        'Area total a contruir no pavimneto superior': area_total_contruir_superior,
        'Area total a contruir computavel': area_total_contruir_computavel,
        'Area total a contruir nao computavel': area_total_contruir_nao_computavel,
        'Coeficiente de aproveitamento': coeficiente_aproveitamento,
        'Area de cobertura': area_de_cobertura,
        'Area total a ser construida': area_total_construida_liberada
    }
    # Protocol/requester identification typed by the user.
    numero_do_protocolo = str(self.tela_calculo.input_protocolo.text())
    interessado_projeto = str(self.tela_calculo.input_interessado.text())
    # Header row (row 1) and column titles (row 2).
    planilha1['A2'] = 'Item'
    planilha1['B2'] = 'Descrição'
    planilha1['C2'] = 'Dado'
    planilha1['D2'] = 'Unidade'
    planilha1['A1'] = 'Registro:'
    planilha1['B1'] = data_atual
    planilha1['C1'] = f'Protocolo {numero_do_protocolo}'
    planilha1['D1'] = f'Interessado {interessado_projeto}'
    # One row per dictionary entry: item number, description, value.
    for item, descricao in enumerate(dicionario_com_resultado):
        linha = (item+1, descricao, dicionario_com_resultado[descricao])
        planilha1.append(linha)
    # Units for items 1..23, in the same order as dicionario_com_resultado
    # (filling column D of rows 3..25); replaces 23 hand-written assignments.
    unidades = (
        ['M²'] * 10             # items 1-10: input areas
        + ['Pavimentos', 'M']   # items 11-12: storeys, total height
        + ['M²', '%', '%']      # items 13-15: projection, occupation, permeability
        + ['M²'] * 5            # items 16-20: area totals
        + ['', 'M²', 'M²']      # items 21-23: coefficient (dimensionless), roof, grand total
    )
    for numero_linha, unidade in enumerate(unidades, start=3):
        planilha1[f'D{numero_linha}'] = unidade
    try:
        arquivo_excel.save(f"./calc/relatorios/Relatorio {numero_do_protocolo} - Estatístico.xlsx")
        print('Relatorio gerado com sucesso')
    except Exception as error:
        # Narrowed from a bare 'except:' and now reports the cause,
        # matching the error style used by the e-mail sender above.
        print(f"Erro ao salvar o Relatório.\nErro: {error}")
def voltar_calculo(self):
    """Leave the calculation screen and return to the general screen."""
    print('Voltar')
    # Window swap: close the calculation dialog, then show the general one.
    for transicao in (self.tela_calculo.close, self.tela_geral.show):
        transicao()
### ANALISES
def voltar_analise(self):
    """Leave the analysis screen and return to the general screen."""
    print('Voltar')
    # Window swap: close the analysis dialog, then show the general one.
    for transicao in (self.tela_analise.close, self.tela_geral.show):
        transicao()
# Module-level entry point: constructing SistemaGeral starts the application.
# NOTE(review): this also runs on *import* — an `if __name__ == '__main__':`
# guard would make the module importable without side effects; confirm no
# caller depends on the current behavior before adding one.
SistemaGeral()
### jose.marinho56@gmail.com
|
import tkinter as tk
class WelcomePage(tk.Frame):
    """Landing screen: a title panel on the left and three navigation
    buttons (Pure NE / Mixed NE / AiO) on the right, each switching
    frames through the controller's ``show_frame``."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller

        _bgcolor = '#d9d9d9'  # X11 color: 'gray85' (unused; kept from GUI-builder output)
        _fgcolor = '#000000'  # X11 color: 'black' (unused; kept from GUI-builder output)
        font13 = "-family {Tw Cen MT} -size 18"
        font14 = "-family {Tw Cen MT} -size 22"

        self.configure(background="#000")

        # Left panel: application title.
        self.Frame1 = tk.Frame(self)
        self.Frame1.place(relx=0.034, rely=0.182, relheight=0.549,
                          relwidth=0.529)
        self.Frame1.configure(relief="groove", background="#000")

        self.Label1 = tk.Label(self.Frame1)
        self.Label1.place(relx=-0.064, rely=0.275, height=74, width=368)
        self.Label1.configure(background="#000000",
                              disabledforeground="#a3a3a3",
                              font=font13,
                              foreground="#fff",
                              text='''2x2 Nash Equiliberia ''')

        self.Label1_1 = tk.Label(self.Frame1)
        self.Label1_1.place(relx=0.278, rely=0.441, height=44, width=233)
        self.Label1_1.configure(activebackground="#f9f9f9",
                                activeforeground="black",
                                background="#000",
                                disabledforeground="#a3a3a3",
                                font=font14,
                                foreground="#00ff00",
                                highlightbackground="#d9d9d9",
                                highlightcolor="black",
                                text='''Solver''')

        # Right panel: the three navigation buttons share every cosmetic
        # option except position, caption, highlight color and target page,
        # so they are produced by one helper.
        self.Frame2 = tk.Frame(self)
        self.Frame2.place(relx=0.566, rely=0.242, relheight=0.514,
                          relwidth=0.292)
        self.Frame2.configure(relief="groove", background="#000")

        self.btnPureNE = self._make_nav_button(
            0.059, '''Pure NE''', "#ffff00", font13,
            lambda: controller.show_frame("PureNEPage"))
        self.btnMixedNE = self._make_nav_button(
            0.265, '''Mixed NE''', "#d9d9d9", font13,
            lambda: controller.show_frame("MixedNEPage"))
        self.btnAiONE = self._make_nav_button(
            0.471, '''AiO''', "#d9d9d9", font13,
            lambda: controller.show_frame("AiOPage"))

    def _make_nav_button(self, rely, caption, highlight, font, command):
        """Create one navigation button inside Frame2 with the shared look."""
        btn = tk.Button(self.Frame2)
        btn.place(relx=0.078, rely=rely, height=53, width=206)
        btn.configure(activebackground="#80ff00",
                      activeforeground="#000000",
                      background="#000",
                      borderwidth="3",
                      cursor="hand2",
                      disabledforeground="#a3a3a3",
                      font=font,
                      foreground="#fff",
                      highlightbackground=highlight,
                      highlightcolor="black",
                      pady="0",
                      relief="ridge",
                      text=caption,
                      command=command)
        return btn
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''abcat.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Apr 2018
License: MIT - see the LICENSE file for the full text.
This contains functions to interface with astrobase checkplots, lcproc object
catalogs and generate useful files and database tables for use with the LC
server.
'''
#############
## LOGGING ##
#############
import logging
from lccserver import log_sub, log_fmt, log_date_fmt
# Pick the log level once at import time; flip DEBUG to True for verbose logs.
DEBUG = False

if DEBUG:
    level = logging.DEBUG
else:
    level = logging.INFO

# Module-level logger, configured with the project-wide substitution style,
# format, and date format imported from the lccserver package.
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
    level=level,
    style=log_sub,
    format=log_fmt,
    datefmt=log_date_fmt,
)

# Short aliases for the logger methods, used throughout this module.
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import os.path
import pickle
import sqlite3
import json
import sys
import os
import importlib
import glob
from functools import reduce
from operator import getitem
from textwrap import indent, dedent
import gzip
import operator
from functools import partial
from itertools import count as icounter
from datetime import datetime
import numpy as np
from scipy.spatial import cKDTree
from tqdm import tqdm
from tornado.escape import squeeze
#########################################
## DEFAULT COLUMNS THAT ARE RECOGNIZED ##
#########################################
from .abcat_columns import COLUMN_INFO, COMPOSITE_COLUMN_INFO
#####################################################
## FUNCTIONS TO BREAK OUT INFO FROM LCPROC RESULTS ##
#####################################################
def kdtree_from_lclist(lclistpkl, outfile):
    '''Extract the kdtree and object coordinates from an LC catalog pickle.

    Parameters
    ----------
    lclistpkl : str
        The catalog pickle file produced by the
        ``astrobase.lcproc.catalogs.make_lclist`` function.

    outfile : str
        The output pickle file to write.

    Returns
    -------
    str or None
        The path of the generated pickle containing the kdtree and the object
        list, or None if the input pickle had no usable kdtree.

    '''
    with open(lclistpkl, 'rb') as infd:
        lclist = pickle.load(infd)

    # Guard clause: bail out early if there is no kdtree to extract.
    if 'kdtree' not in lclist or not isinstance(lclist['kdtree'], cKDTree):
        LOGERROR("no kdtree present in %s, can't continue" % lclistpkl)
        return None

    objects = lclist['objects']
    outdict = {
        'kdtree': lclist['kdtree'],
        'objectid': objects['objectid'],
        'ra': objects['ra'],
        'decl': objects['decl'],
        'lclistpkl': os.path.abspath(lclistpkl),
    }

    with open(outfile, 'wb') as outfd:
        pickle.dump(outdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)

    LOGINFO('wrote kdtree from %s to %s' % (lclistpkl, outfile))
    return outfile
# Maps the operator symbols that appear in COMPOSITE_COLUMN_INFO 'from'
# specs to the corresponding Python operator functions ('/' is true
# division, giving float results).
OPERATORS = {
    '+': operator.add,
    '-': operator.sub,
    '*': operator.mul,
    '/': operator.truediv,
}
def objectinfo_to_sqlite(augcatpkl,
                         outfile,
                         lcc_name,
                         lcc_desc=None,
                         lcc_project=None,
                         lcc_datarelease=None,
                         lcc_citation=None,
                         lcc_owner=1,
                         lcc_visibility='public',
                         lcc_sharedwith=None,
                         colinfo=None,
                         indexcols=None,
                         ftsindexcols=None,
                         overwrite_existing=False):
    '''This writes the object information for an LCC to an SQLite file.

    NOTE: This function requires FTS5 to be available in SQLite because we don't
    want to mess with text-search ranking algorithms to be implemented for FTS4.

    lcc_name must be provided. It will be used as name of the DB in several
    frontend LCC server controls. This is a string, e.g. 'HATNet DR0: Kepler
    Field'.

    The other lcc_* kwargs set some metadata for the project. this is used by
    the top-level lcc-index.sqlite database for all LC collections. The frontend
    will use these to render HTML descriptions, etc.

    If colinfo is not None, it should be either a dict or JSON with elements
    that are of the form::

        'column_name':{'title':'column title',
                       'dtype':numpy dtype of the column,
                       'format':string format specifier for this column,
                       'description':'a long description of the column',
                       'index': True if this should be indexed, False otherwise,
                       'ftsindex': True if this should be FTS indexed},

    where column_name should be each column in the augcatpkl file. Any column
    that doesn't have a key in colinfo won't have any extra information
    associated with it.

    If colinfo is not provided, this function will use the column definitions
    provided in abcat.COLUMN_INFO and abcat.COMPOSITE_COLUMN_INFO. These are
    fairly extensive and should cover all of the data that the upstream
    astrobase tools can generate for object information.

    This function makes indexes for fast look up by objectid by default and any
    columns included in indexcols. also makes a full-text search index for any
    columns in ftsindexcols. If either of these are not provided, will look for
    and make indices as specified in abcat_columns.COLUMN_INFO and
    COMPOSITE_COLUMN_INFO.

    If overwrite_existing is True, any existing catalog DB in the target
    directory will be overwritten.

    Returns the path of the generated (or pre-existing, when not overwriting)
    SQLite file.

    '''

    if os.path.exists(outfile):
        LOGWARNING('An existing objectinfo catalog DB exists at: %s' % outfile)

    if overwrite_existing and os.path.exists(outfile):
        LOGWARNING('overwrite_existing = True, removing old DB: %s' % outfile)
        os.remove(outfile)
    elif not overwrite_existing and os.path.exists(outfile):
        # early exit: reuse the existing DB untouched
        LOGWARNING('not overwriting existing catalog DB and returning it')
        return outfile

    with open(augcatpkl, 'rb') as infd:
        augcat = pickle.load(infd)

    # pull the info columns out of the augcat
    cols = list(augcat['objects'].keys())

    # get the magnitude columns
    magcols = augcat['magcols']

    # separate the info cols into columns that are independent of magcol and
    # those affiliated with each magcol (affiliation = the magcol name appears
    # as a substring of the column name)
    mag_affil_cols = []

    for mc in magcols:
        for col in cols:
            if mc in col:
                mag_affil_cols.append(col)

    unaffiliated_cols = list(set(cols) - set(mag_affil_cols))

    # get the dtypes for each column to generate the create statement
    coldefs = []
    colnames = []

    LOGINFO('collecting column information from %s' % augcatpkl)

    defaultcolinfo = {}

    # go through the unaffiliated columns first
    for col in unaffiliated_cols:

        # SQLite column names can't contain '.', so dotted keys are flattened
        thiscol_name = col.replace('.','_')
        thiscol_dtype = augcat['objects'][col].dtype
        colnames.append(thiscol_name)

        # set up the default info element
        defaultcolinfo[thiscol_name] = {'title':None,
                                        'description':None,
                                        'dtype':None,
                                        'format':None,
                                        'index':False,
                                        'ftsindex':False}

        colinfo_key = col

        #
        # now go through the various formats
        #

        # strings
        if thiscol_dtype.type is np.str_ or thiscol_dtype.type is np.unicode_:

            coldefs.append(('%s text' % thiscol_name, str))

            # NOTE(review): this branch (and the composite branch below) binds
            # the shared COLUMN_INFO dict directly instead of a .copy() as the
            # magcol branches do — later mutation of defaultcolinfo entries
            # would alias the module-level dict; confirm this is intended.
            if colinfo_key in COLUMN_INFO:
                defaultcolinfo[thiscol_name] = COLUMN_INFO[colinfo_key]
            else:
                defaultcolinfo[thiscol_name]['format'] = '%s'

            # this gets the string representation of the numpy dtype object
            defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

        # floats
        elif thiscol_dtype.type is np.float64:

            coldefs.append(('%s double precision' % thiscol_name, float))

            if colinfo_key in COLUMN_INFO:
                defaultcolinfo[thiscol_name] = COLUMN_INFO[colinfo_key]
            else:
                defaultcolinfo[thiscol_name]['format'] = '%.7f'

            defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

        # integers
        elif thiscol_dtype.type is np.int64:

            coldefs.append(('%s integer' % thiscol_name, int))

            if colinfo_key in COLUMN_INFO:
                defaultcolinfo[thiscol_name] = COLUMN_INFO[colinfo_key]
            else:
                defaultcolinfo[thiscol_name]['format'] = '%i'

            defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

        # everything else is coerced into a string
        else:

            coldefs.append(('%s text' % thiscol_name, str))

            if colinfo_key in COLUMN_INFO:
                defaultcolinfo[thiscol_name] = COLUMN_INFO[colinfo_key]
            else:
                defaultcolinfo[thiscol_name]['format'] = '%s'

            defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

    # go through the composite unaffiliated columns next (these are generated
    # from two key:val pairs in the objectinfo dict
    composite_cols = COMPOSITE_COLUMN_INFO.keys()

    # make sure not to get columns that are already in the augcat
    composite_cols = list(set(composite_cols) - set(augcat['objects'].keys()))

    for col in composite_cols:

        #
        # actually generate the column
        #
        col_opstr, col_operand1, col_operand2 = (
            COMPOSITE_COLUMN_INFO[col]['from']
        )
        col_op = OPERATORS[col_opstr]

        try:

            # this should magically work because of numpy arrays (hopefully)
            augcat['objects'][col] = col_op(augcat['objects'][col_operand1],
                                            augcat['objects'][col_operand2])
            augcat['columns'].append(col)

            LOGINFO('generated composite column: %s '
                    'using operator: %r on cols: %s and %s' % (col,
                                                               col_op,
                                                               col_operand1,
                                                               col_operand2))

            thiscol_name = col.replace('.','_')
            thiscol_dtype = augcat['objects'][col].dtype
            colnames.append(thiscol_name)

            # set up the default info element
            defaultcolinfo[thiscol_name] = {'title':None,
                                            'description':None,
                                            'dtype':None,
                                            'format':None,
                                            'index':False,
                                            'ftsindex':False}

            colinfo_key = col

            #
            # now go through the various formats
            #

            # strings
            if thiscol_dtype.type is np.str_:

                coldefs.append(('%s text' % thiscol_name, str))

                if colinfo_key in COMPOSITE_COLUMN_INFO:
                    defaultcolinfo[thiscol_name] = COMPOSITE_COLUMN_INFO[
                        colinfo_key
                    ]
                else:
                    defaultcolinfo[thiscol_name]['format'] = '%s'

                # this gets the string representation of the numpy dtype
                defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

            # floats
            elif thiscol_dtype.type is np.float64:

                coldefs.append(('%s double precision' % thiscol_name, float))

                if colinfo_key in COMPOSITE_COLUMN_INFO:
                    defaultcolinfo[thiscol_name] = COMPOSITE_COLUMN_INFO[
                        colinfo_key
                    ]
                else:
                    defaultcolinfo[thiscol_name]['format'] = '%.7f'

                defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

            # integers
            elif thiscol_dtype.type is np.int64:

                coldefs.append(('%s integer' % thiscol_name, int))

                if colinfo_key in COMPOSITE_COLUMN_INFO:
                    defaultcolinfo[thiscol_name] = COMPOSITE_COLUMN_INFO[
                        colinfo_key
                    ]
                else:
                    defaultcolinfo[thiscol_name]['format'] = '%i'

                defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

            # everything else is coerced into a string
            else:

                coldefs.append(('%s text' % thiscol_name, str))

                if colinfo_key in COMPOSITE_COLUMN_INFO:
                    defaultcolinfo[thiscol_name] = COMPOSITE_COLUMN_INFO[
                        colinfo_key
                    ]
                else:
                    defaultcolinfo[thiscol_name]['format'] = '%s'

                defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

        # the operand columns aren't present in the augcat, skip
        except Exception:
            pass

    # finally, go though the mag affiliated columns, per magcol
    for mc in magcols:

        for col in mag_affil_cols:

            # see if this is a magcol affiliated column
            if mc in col:
                sub_mc = mc
            else:
                continue

            thiscol_name = col.replace('.','_')
            thiscol_dtype = augcat['objects'][col].dtype
            colnames.append(thiscol_name)

            # set up the default info element
            defaultcolinfo[thiscol_name] = {'title':None,
                                            'description':None,
                                            'dtype':None,
                                            'format':None,
                                            'index':False,
                                            'ftsindex':False}

            # this gets the correct substitution for the magcol: the generic
            # '{magcol}.<suffix>' key in COLUMN_INFO is used for lookup
            if sub_mc is not None:
                colinfo_key = '{magcol}.%s' % col.split('.')[-1]
            else:
                colinfo_key = col

            #
            # now go through the various formats
            #

            # strings
            if thiscol_dtype.type is np.str_:

                coldefs.append(('%s text' % thiscol_name, str))

                if colinfo_key in COLUMN_INFO:
                    # .copy() here because the '{magcol}' template entry is
                    # shared between magcols and gets format()-ed below
                    defaultcolinfo[thiscol_name] = (
                        COLUMN_INFO[colinfo_key].copy()
                    )
                else:
                    defaultcolinfo[thiscol_name]['format'] = '%s'

                defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

                if sub_mc is not None:
                    defaultcolinfo[thiscol_name]['title'] = (
                        defaultcolinfo[thiscol_name]['title'].format(
                            magcol=sub_mc
                        )
                    )
                    defaultcolinfo[thiscol_name]['description'] = (
                        defaultcolinfo[thiscol_name]['description'].format(
                            magcol=sub_mc
                        )
                    )

            # floats
            elif thiscol_dtype.type is np.float64:

                coldefs.append(('%s double precision' % thiscol_name, float))

                if colinfo_key in COLUMN_INFO:
                    defaultcolinfo[thiscol_name] = (
                        COLUMN_INFO[colinfo_key].copy()
                    )
                else:
                    defaultcolinfo[thiscol_name]['format'] = '%.7f'

                defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

                if sub_mc is not None:
                    defaultcolinfo[thiscol_name]['title'] = (
                        defaultcolinfo[thiscol_name]['title'].format(
                            magcol=sub_mc
                        )
                    )
                    defaultcolinfo[thiscol_name]['description'] = (
                        defaultcolinfo[thiscol_name]['description'].format(
                            magcol=sub_mc
                        )
                    )

            # integers
            elif thiscol_dtype.type is np.int64:

                coldefs.append(('%s integer' % thiscol_name, int))

                if colinfo_key in COLUMN_INFO:
                    defaultcolinfo[thiscol_name] = (
                        COLUMN_INFO[colinfo_key].copy()
                    )
                else:
                    defaultcolinfo[thiscol_name]['format'] = '%i'

                defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

                if sub_mc is not None:
                    defaultcolinfo[thiscol_name]['title'] = (
                        defaultcolinfo[thiscol_name]['title'].format(
                            magcol=sub_mc
                        )
                    )
                    defaultcolinfo[thiscol_name]['description'] = (
                        defaultcolinfo[thiscol_name]['description'].format(
                            magcol=sub_mc
                        )
                    )

            # everything else is coerced into a string
            else:

                coldefs.append(('%s text' % thiscol_name, str))

                if colinfo_key in COLUMN_INFO:
                    defaultcolinfo[thiscol_name] = (
                        COLUMN_INFO[colinfo_key].copy()
                    )
                else:
                    defaultcolinfo[thiscol_name]['format'] = '%s'

                defaultcolinfo[thiscol_name]['dtype'] = thiscol_dtype.str

                if sub_mc is not None:
                    defaultcolinfo[thiscol_name]['title'] = (
                        defaultcolinfo[thiscol_name]['title'].format(
                            magcol=sub_mc
                        )
                    )
                    defaultcolinfo[thiscol_name]['description'] = (
                        defaultcolinfo[thiscol_name]['description'].format(
                            magcol=sub_mc
                        )
                    )

    #
    # now, we'll generate the create statement
    #

    # now these are all cols
    all_available_cols = (unaffiliated_cols +
                          list(composite_cols) +
                          mag_affil_cols)

    # test if these cols are all in the augcat
    cols = []
    for col in all_available_cols:
        if col in augcat['objects']:
            cols.append(col)

    # generate the final column list for insertion
    column_and_type_list = ', '.join([x[0] for x in coldefs])

    # this is the final SQL used to create the database.
    # extra columns we create:
    # extra_info_json
    # object_owner
    # object_visibility
    # object_sharedwith
    sqlcreate = dedent(
        '''create table object_catalog ({column_type_list},
        extra_info_json text default
        '{{"bibcode": null, "parent": null, "comments": null}}',
        object_owner integer default 1,
        object_visibility text default 'public',
        object_sharedwith text,
        primary key (objectid))'''
    )
    sqlcreate = sqlcreate.format(column_type_list=column_and_type_list)

    # add in the owner, visibility, and sharedwith columns. set these values for
    # each object to the same as the provided values for this collection in
    # lcc_owner, lcc_visibility, and lcc_sharedwith

    # this is the insert statement
    column_list = ', '.join(colnames +
                            ['object_owner',
                             'object_visibility',
                             'object_sharedwith'])
    placeholders = ','.join(['?']*(len(cols) + 3))
    sqlinsert = ("insert into object_catalog ({column_list}) "
                 "values ({placeholders})")
    sqlinsert = sqlinsert.format(column_list=column_list,
                                 placeholders=placeholders)

    LOGINFO('objects in %s: %s, generating SQLite database...' %
            (augcatpkl, augcat['nfiles']))
    LOGINFO('CREATE TABLE statement will be: "%s"' % sqlcreate)
    LOGINFO('INSERT statement will be: "%s"' % sqlinsert)

    # connect to the database
    db = sqlite3.connect(outfile)
    cur = db.cursor()

    # per-column Python type callables (str/float/int) used to coerce each
    # numpy value before insertion
    colformatters = [x[1] for x in coldefs]

    #
    # start the transaction
    #
    cur.executescript('pragma journal_mode = wal; '
                      'pragma journal_size_limit = 52428800;')
    cur.execute('begin')

    #
    # create the version info table that we can use for migrations later
    #
    cur.executescript(
        "create table object_catalog_vinfo (dbver integer, "
        "lccserver_vtag text, "
        "vdate date); "
        "insert into object_catalog_vinfo values (2, 'v0.2', '2018-08-31');"
    )

    #
    # create the main object_catalog table
    #
    cur.execute(sqlcreate)

    LOGINFO('object_catalog table created successfully, '
            'now inserting objects...')

    # now we'll insert things into the table
    for rowind in tqdm(range(augcat['objects'][cols[0]].size)):

        instance_markers = icounter(start=1)
        insert_done = False
        this_marker = 1

        # keep trying to insert until we succeed. this is to overcome multiple
        # objects with the same objectid (a common occurence when we have
        # multiple planets for a single transiting system for example). in this
        # case, we'll insert the objects with markers indicating how many
        # repeats there are
        while not insert_done:

            try:

                thisrow = [
                    y(augcat['objects'][x][rowind]) for
                    x,y in zip(cols, colformatters)
                ]

                if this_marker > 1:
                    # duplicate objectid: disambiguate as '<objectid>-<n>'
                    new_objectid = '%s-%s' % (
                        augcat['objects']['objectid'][rowind],
                        this_marker
                    )
                    thisrow[cols.index('objectid')] = new_objectid

                # add in the per-object permissions using the LCC permissions as
                # base
                thisrow.extend([lcc_owner,
                                lcc_visibility,
                                lcc_sharedwith])

                # normalize missing values to SQL NULL: non-finite numbers,
                # empty strings, and the literal string 'nan'
                for ind, rowelem in enumerate(thisrow):

                    if (isinstance(rowelem, (float, int)) and
                        not np.isfinite(rowelem)):
                        thisrow[ind] = None
                    elif isinstance(rowelem, str) and len(rowelem) == 0:
                        thisrow[ind] = None
                    elif isinstance(rowelem, str) and rowelem.strip() == 'nan':
                        thisrow[ind] = None

                cur.execute(sqlinsert, tuple(thisrow))
                instance_markers = icounter(start=1)
                insert_done = True
                this_marker = 1

            except sqlite3.IntegrityError:

                # NOTE(review): the first retry yields this_marker == 1 (the
                # counter starts at 1), so the identical row is attempted once
                # more before the '-2' suffix kicks in; harmless but wasteful.
                this_marker = next(instance_markers)

                if this_marker > 1:
                    LOGERROR(
                        "objectid: %s already exists in the DB, "
                        "will tag this objectid instance with marker '%s'"
                        % (augcat['objects']['objectid'][rowind],
                           this_marker)
                    )

    # get the column information if there is any
    if isinstance(colinfo, dict):
        overridecolinfo = colinfo
    elif isinstance(colinfo, str) and os.path.exists(colinfo):
        with open(colinfo,'r') as infd:
            overridecolinfo = json.load(infd)
    elif isinstance(colinfo, str):
        # not a file path: try to parse it as a JSON string
        try:
            overridecolinfo = json.loads(colinfo)
        except Exception:
            LOGERROR('could not understand colinfo argument, skipping...')
            overridecolinfo = None
    else:
        overridecolinfo = None

    if overridecolinfo:

        for col in defaultcolinfo:

            if col in overridecolinfo:
                if overridecolinfo[col]['title'] is not None:
                    defaultcolinfo[col]['title'] = overridecolinfo[col]['title']

                if overridecolinfo[col]['dtype'] is not None:
                    defaultcolinfo[col]['dtype'] = overridecolinfo[col]['dtype']

                if overridecolinfo[col]['format'] is not None:
                    defaultcolinfo[col]['format'] = (
                        overridecolinfo[col]['format']
                    )

                if overridecolinfo[col]['description'] is not None:
                    defaultcolinfo[col]['description'] = (
                        overridecolinfo[col]['description']
                    )

    # now create any indexes we want
    if indexcols:

        LOGINFO('creating indexes on columns %s' % repr(indexcols))

        for indexcol in indexcols:

            # the user gives the name of the col in the augcat pickle, which we
            # convert to the database column name
            indexcolname = indexcol.replace('.','_')
            sqlindex = ('create index %s_idx on object_catalog (%s)' %
                        (indexcolname, indexcolname))
            cur.execute(sqlindex)

    else:

        # no explicit indexcols: index whatever the column info marks for it
        indexcols = []

        for icol in defaultcolinfo.keys():

            if defaultcolinfo[icol]['index']:
                sqlindex = ('create index %s_idx on object_catalog (%s)' %
                            (icol, icol))
                cur.execute(sqlindex)
                indexcols.append(icol)

    # make an index on the object_owner and object_status columns
    # this doesn't show up in the column or indexes lists
    cur.execute('create index object_owner_idx '
                'on object_catalog (object_owner)')
    cur.execute('create index object_visibility_idx '
                'on object_catalog (object_visibility)')
    cur.execute('create index object_sharedwith_idx '
                'on object_catalog (object_sharedwith)')

    # create any full-text-search indices we want
    if ftsindexcols:

        LOGINFO('creating an FTS index on columns %s' % repr(ftsindexcols))

        #
        # generate the FTS table structure
        #

        # the extra_info_json will be a JSON column for storing stuff that
        # doesn't really belong in the other columns like comments, object
        # cluster membership, and object histories, etc.
        ftscreate = ("create virtual table catalog_fts "
                     "using fts5({column_list}, extra_info_json, "
                     "content=object_catalog)")
        fts_column_list = ', '.join(
            [x.replace('.','_') for x in ftsindexcols]
        )
        ftscreate = ftscreate.format(column_list=fts_column_list)

        # create the FTS index
        cur.execute(ftscreate)

        # create triggers to update the FTS indices automatically if the main
        # object_catalog table gets updated
        new_fts_column_list = ', '.join(
            ['new.%s' % x.replace('.','_') for x in ftsindexcols]
        )
        fts_triggers = (
            "create trigger fts_beforeupdate before update on object_catalog "
            "begin "
            "delete from catalog_fts where rowid=old.rowid; "
            "end; "
            "create trigger fts_afterupdate after update on object_catalog "
            "begin "
            "insert into catalog_fts(rowid, {column_list}, extra_info_json) "
            "values (new.rowid, {new_column_list}, new.extra_info_json); "
            "end;"
        ).format(column_list=fts_column_list,
                 new_column_list=new_fts_column_list)

        LOGINFO('FTS trigger create statement will be: %s' % fts_triggers)
        cur.executescript(fts_triggers)

        # execute the rebuild command to activate the indices
        cur.execute("insert into catalog_fts(catalog_fts) values ('rebuild')")

    else:

        # no explicit ftsindexcols: use whatever the column info marks for FTS.
        # NOTE(review): the FTS creation code below duplicates the branch
        # above verbatim — a candidate for extraction into a helper.
        ftsindexcols = []

        for icol in defaultcolinfo.keys():

            if defaultcolinfo[icol]['ftsindex']:
                ftsindexcols.append(icol)

        LOGINFO('creating an FTS index on columns %s' % repr(ftsindexcols))

        #
        # generate the FTS table structure
        #

        # the extra_info_json will be a JSON column for storing stuff that
        # doesn't really belong in the other columns like comments, object
        # cluster membership, and object histories, etc.
        ftscreate = ("create virtual table catalog_fts "
                     "using fts5({column_list}, extra_info_json, "
                     "content=object_catalog)")
        fts_column_list = ', '.join(
            [x.replace('.','_') for x in ftsindexcols]
        )
        ftscreate = ftscreate.format(column_list=fts_column_list)

        # create the FTS index
        cur.execute(ftscreate)

        # create triggers to update the FTS indices automatically if the main
        # object_catalog table gets updated
        new_fts_column_list = ', '.join(
            ['new.%s' % x.replace('.','_') for x in ftsindexcols]
        )
        fts_triggers = (
            "create trigger fts_beforeupdate before update on object_catalog "
            "begin "
            "delete from catalog_fts where rowid=old.rowid; "
            "end; "
            "create trigger fts_afterupdate after update on object_catalog "
            "begin "
            "insert into catalog_fts(rowid, {column_list}, extra_info_json) "
            "values (new.rowid, {new_column_list}, new.extra_info_json); "
            "end;"
        ).format(column_list=fts_column_list,
                 new_column_list=new_fts_column_list)

        LOGINFO('FTS trigger create statement will be: %s' % fts_triggers)
        cur.executescript(fts_triggers)

        # execute the rebuild command to activate the indices
        cur.execute("insert into catalog_fts(catalog_fts) values ('rebuild')")

    # turn the column info into a JSON
    columninfo_json = json.dumps(defaultcolinfo)

    # add some metadata to allow reading the LCs correctly later
    m_indexcols = indexcols if indexcols is not None else []
    m_ftsindexcols = ftsindexcols if ftsindexcols is not None else []

    metadata = {
        'basedir':augcat['basedir'],
        'lcformat':augcat['lcformat'],
        'fileglob':augcat['fileglob'],
        'nobjects':augcat['nfiles'],
        'magcols':augcat['magcols'],
        'catalogcols':sorted(colnames),
        'indexcols':sorted([x.replace('.','_') for x in m_indexcols]),
        'ftsindexcols':sorted([x.replace('.','_') for x in m_ftsindexcols]),
        'lcc_name':lcc_name,
        'lcc_desc':lcc_desc,
        'lcc_project':lcc_project,
        'lcc_datarelease':lcc_datarelease,
        'lcc_citation':lcc_citation,
        'lcc_owner':lcc_owner,
        'lcc_visibility':lcc_visibility,
        'lcc_sharedwith':lcc_sharedwith
    }
    metadata_json = json.dumps(metadata)

    cur.execute(
        'create table catalog_metadata (metadata_json text, column_info text)'
    )
    cur.execute('insert into catalog_metadata values (?, ?)',
                (metadata_json, columninfo_json))

    # commit and close the database
    db.commit()
    db.close()

    return outfile
def objectinfo_to_postgres_table(lclistpkl,
                                 table,
                                 pghost=None,
                                 pguser=None,
                                 pgpass=None,
                                 pgport=None):
    '''Write the object information in *lclistpkl* to the Postgres *table*.

    Placeholder stub: not implemented yet, so this currently does nothing
    and implicitly returns None. The pghost/pguser/pgpass/pgport keyword
    arguments are reserved for the eventual Postgres connection settings.
    '''
##############################################
## LIGHT CURVE FORMAT MODULES AND FUNCTIONS ##
##############################################
def check_extmodule(module, formatkey):
    '''Import *module*, given either as a file path or a qualified name.

    If *module* points at an existing file, its directory is appended to
    sys.path and the bare module name (minus any '.py' extension) is
    imported; otherwise *module* is treated as a fully qualified module
    name. Returns the imported module object, or False on failure.
    '''
    try:
        if os.path.exists(module):
            # file path given: make its directory importable first
            sys.path.append(os.path.dirname(module))
            modname = os.path.basename(module.replace('.py', ''))
            return importlib.import_module(modname)
        return importlib.import_module(module)
    except Exception:
        LOGEXCEPTION('could not import the module: %s for LC format: %s. '
                     'check the file path or fully qualified module name?'
                     % (module, formatkey))
        return False
##################################################
## FUNCTIONS THAT DEAL WITH LIGHT CURVE FORMATS ##
##################################################
def dict_get(datadict, keylist):
    '''Walk the nested dict *datadict* following the keys in *keylist*.

    Equivalent to datadict[k0][k1]... for the keys in order; an empty
    *keylist* returns *datadict* itself. Missing keys raise the usual
    KeyError/TypeError from indexing.
    '''
    current = datadict
    for key in keylist:
        current = current[key]
    return current
def _substitute_lcc_dirs(value, descpath):
    '''Apply the {{collection_dir}}/{{home_dir}} template substitutions.

    *value* is returned unchanged when it is not a string. str.replace is a
    no-op when a pattern is absent, so both substitutions can always run.
    '''
    if not isinstance(value, str):
        return value
    value = value.replace('{{collection_dir}}',
                          os.path.abspath(os.path.dirname(descpath)))
    value = value.replace('{{home_dir}}',
                          os.path.abspath(os.path.expanduser('~')))
    return value


def get_lcformat_description(descpath):
    '''Read an lcformat-description.json file and return an LC format dict.

    The description file is a JSON located under the collection's
    collection_id directory/lcformat-description.json.

    For the values in the JSON keys lc_readermodule, lc_readerfunc_kwargs,
    lc_normalizemodule, lc_normalizefunc_kwargs, you can use automatic
    substitutions for your user home and the current LC collection
    directories by using:

    {{home_dir}} -> substitute this pattern for the user's home directory
    {{collection_dir}} -> substitute this pattern for the current LC
    collection's directory

    See an example at:
    https://github.com/waqasbhatti/lcc-server/docs/lcformat-desc-example.json

    This example LC format description JSON is associated with the
    lcc-server-setup.ipynb notebook at:
    https://github.com/waqasbhatti/astrobase-notebooks

    Returns a dict with the parsed format info: the imported reader/norm
    modules, the reader/norm functions (with their kwargs already partially
    applied), column and metadata descriptions, and a 'parsed_formatinfo'
    sub-dict holding the raw (unimported) names for later re-use.

    Raises ValueError if lc_measurements_flux_or_mag is not 'flux' or 'mag'.
    '''

    # read the JSON
    with open(descpath, 'rb') as infd:
        formatdesc = json.load(infd)

    formatkey = formatdesc['lc_formatkey']

    # 1. generate the metadata info dict
    metadata_info = {}

    for key in formatdesc['metadata_keys']:
        desc, textform, caster = formatdesc['metadata_keys'][key]
        metadata_info[key] = {
            # 'deref' is the key path used to walk an lcdict (see dict_get)
            'deref': key.split('.'),
            'desc': desc,
            'format': textform,
            'caster': caster,
        }

    # 2. get the column info
    column_info = {}
    column_keys = []

    # 2a. first, get the unaffiliated (non-aperture) columns
    for key in formatdesc['unaffiliated_cols']:
        desc, textform, dtype = formatdesc['column_keys'][key]
        column_info[key] = {'desc': desc,
                            'format': textform,
                            'dtype': dtype}
        column_keys.append(key)

    # 2b. next, expand the per-aperture columns into one column per aperture
    apertures = formatdesc['mag_apertures']
    aperturejoiner = formatdesc['aperture_joiner']

    for key in formatdesc['per_aperture_cols']:
        for ap in apertures:
            fullkey = '%s%s%s' % (key, aperturejoiner, ap)
            desc, textform, dtype = formatdesc['column_keys'][key]
            # the column description may carry a %s slot for the aperture
            if '%s' in desc:
                desc = desc % ap
            column_info[fullkey] = {'desc': desc,
                                    'format': textform,
                                    'dtype': dtype}
            column_keys.append(fullkey)

    # 3. load the reader module and get the reader and normalize functions
    reader_module_name = formatdesc['lc_readermodule']
    reader_func_name = formatdesc['lc_readerfunc']
    reader_func_kwargs = formatdesc['lc_readerfunc_kwargs']
    norm_module_name = formatdesc['lc_normalizemodule']
    norm_func_name = formatdesc['lc_normalizefunc']
    norm_func_kwargs = formatdesc['lc_normalizefunc_kwargs']

    # do the directory-name substitutions in the module import paths and in
    # any string-valued function kwargs.
    # FIX: the original used if/elif chains whose elif condition duplicated
    # the if condition (`elif isinstance(reader_func_kwargs, dict)` after
    # `if isinstance(reader_func_kwargs, dict)`), so the {{home_dir}}
    # substitution in the kwargs dicts was dead code and never ran.
    reader_module_name = _substitute_lcc_dirs(reader_module_name, descpath)
    norm_module_name = _substitute_lcc_dirs(norm_module_name, descpath)

    if isinstance(reader_func_kwargs, dict):
        for kwarg in reader_func_kwargs:
            reader_func_kwargs[kwarg] = _substitute_lcc_dirs(
                reader_func_kwargs[kwarg], descpath
            )

    if isinstance(norm_func_kwargs, dict):
        for kwarg in norm_func_kwargs:
            norm_func_kwargs[kwarg] = _substitute_lcc_dirs(
                norm_func_kwargs[kwarg], descpath
            )

    # see if we can import the reader module
    readermodule = check_extmodule(reader_module_name, formatkey)

    if norm_module_name:
        normmodule = check_extmodule(norm_module_name, formatkey)
    else:
        normmodule = None

    # then, get the function we need to read the lightcurve
    readerfunc_in = getattr(readermodule, reader_func_name)

    if norm_module_name and norm_func_name:
        normfunc_in = getattr(normmodule, norm_func_name)
    else:
        normfunc_in = None

    # add in any optional kwargs that need to be there for readerfunc
    if isinstance(reader_func_kwargs, dict):
        readerfunc = partial(readerfunc_in, **reader_func_kwargs)
    else:
        readerfunc = readerfunc_in

    # add in any optional kwargs that need to be there for normfunc
    if normfunc_in is not None:
        if isinstance(norm_func_kwargs, dict):
            normfunc = partial(normfunc_in, **norm_func_kwargs)
        else:
            normfunc = normfunc_in
    else:
        normfunc = None

    # get whether the measurements are in mags or fluxes
    flux_or_mag = formatdesc['lc_measurements_flux_or_mag']

    if flux_or_mag == 'flux':
        magsarefluxes = True
    elif flux_or_mag == 'mag':
        magsarefluxes = False
    else:
        # FIX: the original left magsarefluxes unbound here, producing a
        # confusing NameError later; fail fast with an explicit message
        raise ValueError(
            "lc_measurements_flux_or_mag must be 'flux' or 'mag', "
            "got: %r" % (flux_or_mag,)
        )

    # this is the final metadata dict
    returndict = {
        'formatkey': formatkey,
        'fileglob': formatdesc['lc_fileglob'],
        'readermodule': readermodule,
        'normmodule': normmodule,
        'readerfunc': readerfunc,
        'normfunc': normfunc,
        'columns': column_info,
        'colkeys': column_keys,
        'metadata': metadata_info,
        'magsarefluxes': magsarefluxes,
        # raw, unimported format info so this description can be re-parsed
        'parsed_formatinfo': {
            'formatkey': formatkey,
            'fileglob': formatdesc['lc_fileglob'],
            'readermodule': reader_module_name,
            'readerfunc': reader_func_name,
            'readerfunc_kwargs': reader_func_kwargs,
            'normmodule': norm_module_name,
            'normfunc': norm_func_name,
            'normfunc_kwargs': norm_func_kwargs,
            'magsarefluxes': magsarefluxes,
        }
    }

    return returndict
def _csvlc_outpath(objectid, lcfile):
    '''Return the output path for the gzipped CSV LC, next to *lcfile*.'''
    outfile = '%s-csvlc.gz' % squeeze(objectid).replace(' ', '-')
    return os.path.join(os.path.dirname(lcfile), outfile)


def convert_to_csvlc(lcfile,
                     objectid,
                     lcformat_dict,
                     normalize_lc=False,
                     csvlc_version=1,
                     comment_char='#',
                     column_separator=',',
                     skip_converted=False):
    '''Convert any readable LC to a common-format gzipped CSV LC.

    The first 3 lines of the file are always:

    LCC-CSVLC-<csvlc_version>
    <comment_char>
    <column_separator>

    so reader functions can recognize it automatically (like
    astrobase.hatsurveys.hatlc.py). The next lines are offset with
    comment_char and are JSON formatted descriptions of: (i) the object
    metadata, (ii) the column info. Finally, we have the columns separated
    with column_separator.

    This will normalize the light curve as specified in the
    lcformat-description.json file if normalize_lc is True. If this is
    False, will leave the light curve alone.

    Returns the path of the written (or already-existing, when
    skip_converted is True) CSV LC file.
    '''

    # use the lcformat_dict to get the reader and normalization functions
    readerfunc = lcformat_dict['readerfunc']
    normfunc = lcformat_dict['normfunc'] if normalize_lc else None

    # if the objectid is known up front, we can return early without reading
    # the original-format LC when the output exists and skip_converted=True
    if objectid is not None:
        outpath = _csvlc_outpath(objectid, lcfile)
        if skip_converted and os.path.exists(outpath):
            LOGWARNING(
                '%s exists already and skip_converted = True, skipping...' %
                outpath
            )
            return outpath

    # now read in the light curve (some readers return (lcdict, ...) tuples)
    lcdict = readerfunc(lcfile)
    if isinstance(lcdict, (tuple, list)) and isinstance(lcdict[0], dict):
        lcdict = lcdict[0]

    # if the objectid wasn't given, get it from the lcdict, then generate
    # the output path and apply the same skip_converted early return
    if objectid is None:
        objectid = lcdict['objectid']
        outpath = _csvlc_outpath(objectid, lcfile)
        if skip_converted and os.path.exists(outpath):
            LOGWARNING(
                '%s exists already and skip_converted = True, skipping...' %
                outpath
            )
            return outpath

    # normalize the lcdict if we have to
    if normfunc:
        lcdict = normfunc(lcdict)

    # extract the metadata keys; keys missing from this lcdict are skipped
    meta = {}
    for key in lcformat_dict['metadata']:
        try:
            thismetainfo = lcformat_dict['metadata'][key]
            val = dict_get(lcdict, thismetainfo['deref'])
            meta[thismetainfo['deref'][-1]] = {
                'val': val,
                'desc': thismetainfo['desc'],
            }
        except Exception:
            pass

    # extract the column info for the columns actually present in this
    # lcdict; 'colnum' records each surviving column's output position
    columns = {}
    available_keys = []
    ki = 0

    for key in lcformat_dict['colkeys']:
        try:
            dict_get(lcdict, key.split('.'))
            thiscolinfo = lcformat_dict['columns'][key]
            columns[key] = {
                'colnum': ki,
                'dtype': thiscolinfo['dtype'],
                'desc': thiscolinfo['desc']
            }
            available_keys.append(key)
            ki = ki + 1
        except Exception:
            pass

    # generate the comment-prefixed JSON header bits
    metajson = indent(json.dumps(meta, indent=2), '%s ' % comment_char)
    coljson = indent(json.dumps(columns, indent=2), '%s ' % comment_char)

    # now, put together everything
    with gzip.open(outpath, 'wb') as outfd:

        # first, write the format spec
        outfd.write(('LCC-CSVLC-V%s\n' % csvlc_version).encode())
        outfd.write(('%s\n' % comment_char).encode())
        outfd.write(('%s\n' % column_separator).encode())

        # second, write the metadata JSON
        outfd.write(('%s OBJECT METADATA\n' % comment_char).encode())
        outfd.write(('%s\n' % metajson).encode())
        outfd.write(('%s\n' % (comment_char,)).encode())

        # third, write the column JSON
        outfd.write(('%s COLUMN DEFINITIONS\n' % comment_char).encode())
        outfd.write(('%s\n' % coljson).encode())

        # finally, prepare to write the LC columns
        outfd.write(('%s\n' % (comment_char,)).encode())
        outfd.write(('%s LIGHTCURVE\n' % comment_char).encode())

        # FIX: measure the row count via dict_get on the first *available*
        # column; the original indexed lcdict[colkeys[0]] directly, which
        # breaks for nested (dotted) keys and for lcdicts missing that column
        nlines = len(dict_get(lcdict, available_keys[0].split('.')))

        for lineind in range(nlines):
            thisline = []
            for x in available_keys:
                # we need to check if any col vals conflict with the
                # specified formatter. in that case, fall back to str() so
                # we don't fail here (usually nan provided to %i)
                try:
                    thisline.append(
                        lcformat_dict['columns'][x]['format'] %
                        dict_get(lcdict, x.split('.'))[lineind]
                    )
                except Exception:
                    thisline.append(
                        str(dict_get(lcdict, x.split('.'))[lineind])
                    )
            formline = '%s\n' % column_separator.join(thisline)
            outfd.write(formline.encode())

    return outpath
##############################################
## COLLECTING METADATA ABOUT LC COLLECTIONS ##
##############################################
# Schema for the top-level lcc-index.sqlite database:
# - a version-info table (dbver/tag/date) plus WAL journal settings,
# - the main lcc_index table (one row per light-curve collection),
# - an FTS5 virtual table over the searchable text columns, and
# - triggers keeping the FTS index in sync with lcc_index on
#   update/delete/insert, activated with a final 'rebuild'.
SQLITE_LCC_CREATE = '''\
create table lcc_index_vinfo (dbver integer,
lccserver_vtag text,
vdate date);
insert into lcc_index_vinfo values (2, 'v0.2', '2018-08-31');
pragma journal_mode = wal;
pragma journal_size_limit = 52428800;
-- make the main table
create table lcc_index (
collection_id text not null,
lcformat_key text not null,
lcformat_desc_path text not null,
lcformat_magcols text not null,
object_catalog_path text not null,
kdtree_pkl_path text not null,
lightcurves_dir_path text not null,
periodfinding_dir_path text,
checkplots_dir_path text,
ra_min real not null,
ra_max real not null,
decl_min real not null,
decl_max real not null,
nobjects integer not null,
catalog_columninfo_json text not null,
columnlist text,
indexedcols text,
ftsindexedcols text,
name text,
description text,
project text,
citation text,
datarelease integer default 0,
last_updated datetime,
last_indexed datetime,
collection_owner integer default 1,
collection_visibility integer default 2,
collection_sharedwith text,
primary key (collection_id, name, project, datarelease)
);
-- fts indexes below
create virtual table lcc_index_fts using fts5(
collection_id,
columnlist,
name,
description,
project,
citation,
content=lcc_index
);
-- triggers for updating FTS index when things get changed
create trigger fts_before_update before update on lcc_index begin
delete from lcc_index_fts where rowid=old.rowid;
end;
create trigger fts_before_delete before delete on lcc_index begin
delete from lcc_index_fts where rowid=old.rowid;
end;
create trigger fts_after_update after update on lcc_index begin
insert into lcc_index_fts(rowid, collection_id, columnlist, name,
description, project, citation)
values (new.rowid, new.collection_id, new.columnlist, new.name,
new.description, new.project, new.citation);
end;
create trigger fts_after_insert after insert on lcc_index begin
insert into lcc_index_fts(rowid, collection_id, columnlist, name,
description, project, citation)
values (new.rowid, new.collection_id, new.columnlist, new.name,
new.description, new.project, new.citation);
end;
-- activate the fts indexes
insert into lcc_index_fts(lcc_index_fts) values ('rebuild');
'''

# Upsert statement for one collection row in lcc_index. Takes 27 bound
# parameters (all listed columns except last_indexed, which is filled by
# sqlite's datetime('now') inline) -- see sqlite_collect_lcc_info for the
# matching items tuple.
SQLITE_LCC_INSERT = '''\
insert or replace into lcc_index (
collection_id,
lcformat_key, lcformat_desc_path, lcformat_magcols,
object_catalog_path, kdtree_pkl_path, lightcurves_dir_path,
periodfinding_dir_path, checkplots_dir_path,
ra_min, ra_max, decl_min, decl_max,
nobjects,
catalog_columninfo_json,
columnlist, indexedcols, ftsindexedcols,
name, description, project, citation, datarelease,
last_updated, last_indexed,
collection_owner, collection_visibility, collection_sharedwith
) values (
?,
?,?,?,
?,?,?,
?,?,
?,?,?,?,
?,
?,
?,?,?,
?,?,?,?,?,
?,datetime('now'),
?,?,?
)
'''
def sqlite_make_lcc_index_db(lcc_basedir):
    '''Create the lcc-index.sqlite database under *lcc_basedir*.

    Runs the standard SQLITE_LCC_CREATE schema script against a new (or
    existing) lcc-index.sqlite file and returns its path.
    '''

    # find the root DB
    lccdb = os.path.join(lcc_basedir, 'lcc-index.sqlite')

    database = sqlite3.connect(
        lccdb,
        detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
    )
    try:
        cursor = database.cursor()
        cursor.executescript(SQLITE_LCC_CREATE)
        database.commit()
    finally:
        # FIX: the original left the connection open (resource leak);
        # always close it once the schema script has run
        database.close()

    return lccdb
def sqlite_collect_lcc_info(
        lcc_basedir,
        collection_id,
        raiseonfail=False,
):
    '''Write or update this collection's row in lcc-index.sqlite.

    Each LC collection is identified by its subdirectory name under
    *lcc_basedir*. The following files must be present in each LC collection
    subdirectory:

    - lclist-catalog.pkl
    - catalog-kdtree.pkl
    - catalog-objectinfo.sqlite
      - this must contain lcc_* metadata for the collection, so we can give
        it a name, description, project name, last time of update,
        datarelease number
    - lcformat-description.json
      - this contains the basic information for the LC format recognition

    Each LC collection must also have the subdirectories: lightcurves/,
    periodfinding/, and checkplots/.

    Returns the path of the lcc-index.sqlite database on success, or None on
    failure. When raiseonfail=True, reader-import and catalog-read failures
    re-raise instead of returning None.
    '''

    # find the root DB; if it doesn't exist yet, create it with the
    # standard schema (the connect call itself creates the file)
    lccdb = os.path.join(lcc_basedir, 'lcc-index.sqlite')
    lccdb_exists = os.path.exists(lccdb)

    database = sqlite3.connect(
        lccdb,
        detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
    )
    cursor = database.cursor()

    if not lccdb_exists:
        cursor.executescript(SQLITE_LCC_CREATE)
        database.commit()

    try:

        #
        # now we're ready to operate on the given lcc-collection basedir
        #

        # 1. get the various paths
        collection_dir = os.path.join(lcc_basedir, collection_id)
        object_catalog_path = os.path.abspath(
            os.path.join(collection_dir, 'lclist-catalog.pkl'))
        catalog_kdtree_path = os.path.abspath(
            os.path.join(collection_dir, 'catalog-kdtree.pkl'))
        catalog_objectinfo_path = os.path.abspath(
            os.path.join(collection_dir, 'catalog-objectinfo.sqlite'))
        lightcurves_dir_path = os.path.abspath(
            os.path.join(collection_dir, 'lightcurves'))
        periodfinding_dir_path = os.path.abspath(
            os.path.join(collection_dir, 'periodfinding'))
        checkplots_dir_path = os.path.abspath(
            os.path.join(collection_dir, 'checkplots'))
        lcformat_desc_path = os.path.abspath(
            os.path.join(collection_dir, 'lcformat-description.json'))

        # check that all of these exist; bail out on the first missing one
        required = (
            (object_catalog_path, 'an object catalog pkl'),
            (catalog_kdtree_path, 'a catalog kdtree pkl'),
            (catalog_objectinfo_path, 'a catalog objectinfo sqlite DB'),
            (lightcurves_dir_path, 'the expected light curves directory'),
            (periodfinding_dir_path,
             'the expected period-finding results directory'),
            (checkplots_dir_path,
             'the expected checkplot pickles directory'),
        )
        for checkpath, checkdesc in required:
            if not os.path.exists(checkpath):
                LOGERROR('could not find %s: %s '
                         'for collection: %s, cannot continue' %
                         (checkdesc, checkpath, collection_id))
                return None

        if not os.path.exists(lcformat_desc_path):
            # FIX: the original message had two %s placeholders but only a
            # 1-tuple of args (TypeError at runtime), and also fell through
            # instead of returning as the message promised
            LOGERROR('no lcformat-description.json file '
                     'found in collection directory: %s '
                     'for collection: %s, cannot continue' %
                     (lcformat_desc_path, collection_id))
            return None

        # 2. check if we can successfully import the lcformat reader func
        try:

            # read the lcformat-description.json to get the reader function
            lcformat_dict = get_lcformat_description(lcformat_desc_path)
            readerfunc = lcformat_dict['readerfunc']
            normmodule = lcformat_dict['normmodule']
            normfunc = lcformat_dict['normfunc']
            lcformat_fileglob = lcformat_dict['fileglob']
            lcformat_key = lcformat_dict['formatkey']

            # use the lcformat_fileglob to find light curves in the LC dir
            lcformat_lcfiles = glob.glob(os.path.join(lightcurves_dir_path,
                                                      lcformat_fileglob))
            if len(lcformat_lcfiles) == 0:
                LOGERROR('no light curves for lcformat key: %s, '
                         'matching provided fileglob: %s '
                         'found in expected light curves directory: %s, '
                         'cannot continue' % (lcformat_key, lcformat_fileglob,
                                              lightcurves_dir_path))
                return None

            # finally, read in a light curve to see if it works as expected
            lcdict = readerfunc(lcformat_lcfiles[0])
            if isinstance(lcdict, (tuple, list)) and len(lcdict) == 2:
                lcdict = lcdict[0]

            LOGINFO('imported provided LC reader module and function, '
                    'and test-read a %s light curve successfully from %s...' %
                    (lcformat_key, lightcurves_dir_path))

            # now test the normalization function
            if normmodule and normfunc:
                normfunc(lcdict)
                LOGINFO('normalization function tested and works OK')

        except Exception:
            LOGEXCEPTION('could not import provided LC reader '
                         'module/function or '
                         'could not read in a light curve from the expected '
                         'LC directory, cannot continue')
            if raiseonfail:
                raise
            return None

        # 3. open the catalog sqlite and then:
        #    - get the minra, maxra, mindecl, maxdecl,
        #    - get the nobjects
        #    - get the magcols from the metadata
        #    - get the column, index, and ftsindex information,
        #    - get the name, desc, project, citation, datarelease,
        #      last_updated
        try:

            objectinfo = sqlite3.connect(
                catalog_objectinfo_path,
                detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
            )
            ocur = objectinfo.cursor()

            # this is the nobjects and footprint (or a poor approximation of
            # one). FIXME: maybe use a convex hull or alpha shape here
            query = ("select count(*), min(ra), max(ra), "
                     "min(decl), max(decl) from "
                     "object_catalog")
            ocur.execute(query)
            nobjects, minra, maxra, mindecl, maxdecl = ocur.fetchone()

            # get the column info out
            query = ("select metadata_json, column_info "
                     "from catalog_metadata")
            ocur.execute(query)
            metadata, column_info = ocur.fetchone()

            # from the metadata, we collect the collection's name,
            # description, project, citation, datarelease, columnlist,
            # indexedcols, ftsindexedcols
            metadata = json.loads(metadata)

            # this is the time when the catalog-objectinfo.sqlite was last
            # created (stat_result.st_ctime). we assume this is the same
            # time at which the collection was updated
            last_updated = datetime.fromtimestamp(
                os.stat(catalog_objectinfo_path).st_ctime
            )

            # close the objectinfo-catalog.sqlite file
            ocur.close()
            objectinfo.close()

            # prepare the query items; order must match SQLITE_LCC_INSERT
            items = (
                collection_id,
                lcformat_key,
                lcformat_desc_path,
                ','.join(metadata['magcols']),
                catalog_objectinfo_path,
                catalog_kdtree_path,
                lightcurves_dir_path,
                periodfinding_dir_path,
                checkplots_dir_path,
                minra, maxra, mindecl, maxdecl,
                nobjects,
                column_info,
                ','.join(metadata['catalogcols']),
                ','.join(metadata['indexcols']),
                ','.join(metadata['ftsindexcols']),
                metadata['lcc_name'],
                metadata['lcc_desc'],
                metadata['lcc_project'],
                metadata['lcc_citation'],
                metadata['lcc_datarelease'],
                last_updated,
                metadata['lcc_owner'],
                metadata['lcc_visibility'],
                metadata['lcc_sharedwith']
            )

            # 4. execute the query to put all of this stuff into the
            # lcc_index table and commit
            cursor.execute(SQLITE_LCC_INSERT, items)
            database.commit()

            # all done!
            LOGINFO('added light curve collection: '
                    '%s with %s objects, LCs at: %s to the light curve '
                    'collection index database: %s' %
                    (lcformat_key, nobjects, lightcurves_dir_path, lccdb))

            # return the path of the lcc-index.sqlite database
            return lccdb

        except Exception:
            LOGEXCEPTION('could not get collection data from the object '
                         'catalog SQLite database: %s, cannot continue' %
                         catalog_objectinfo_path)
            if raiseonfail:
                raise
            return None

    finally:
        # FIX: always close the index DB connection; the original leaked it
        # on every early return and on the success path
        database.close()
|
# --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings

warnings.filterwarnings('ignore')

# Code starts here
# load the insurance dataset; ``path`` is supplied by the grader environment
df = pd.read_csv(path)
print(df.head())

# features = everything but the target column
# FIX: pass axis by keyword -- positional ``axis`` for DataFrame.drop was
# deprecated and then removed in pandas 2.0, so df.drop([...], 1) breaks
X = df.drop(columns=['insuranceclaim'])
y = df['insuranceclaim']

# 80/20 train/test split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=6)
# Code ends here
# --------------
import matplotlib.pyplot as plt

# Code starts here
# 95th percentile of BMI -- presumably intended as an outlier cap; it is
# computed but not used in this section (TODO confirm against the grader)
q_value = X_train['bmi'].quantile(.95)
# visualize the BMI distribution/outliers
plt.boxplot(X_train['bmi'])
# class balance of the target in the training split
print(y_train.value_counts())
# Code ends here
# --------------
# Code starts here
# pairwise correlation matrix of the training features
relation = X_train.corr()
print(relation)
# scatter-matrix of all feature pairs to eyeball relationships
sns.pairplot(X_train)
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt

# Code starts here
# categorical predictors to inspect against the claim target
cols = ['children', 'sex', 'region', 'smoker']
fig, axes = plt.subplots(nrows=2, ncols=2)

# one count plot per column, filling the 2x2 grid in row-major order
for ax, col in zip(axes.ravel(), cols):
    sns.countplot(x=X_train[col], hue=y_train, ax=ax)
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# parameters for grid search
parameters = {'C': [0.1, 0.5, 1, 5]}

# Code starts here
# grid-search the inverse regularization strength of a logistic regression
lr = LogisticRegression()
grid = GridSearchCV(lr, param_grid=parameters)
grid.fit(X_train, y_train)

# score the best found model on the held-out test split
y_pred = grid.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score
from sklearn import metrics

# Code starts here
# AUC computed from the hard label predictions (kept for comparison)
score = roc_auc_score(y_test, y_pred)

# probability of the positive class (insuranceclaim == 1)
y_pred_proba = grid.predict_proba(X_test)[:, 1]
print(y_test.head())

# FIX: the target is binary 0/1, so the positive label is 1 (sklearn's
# default); pos_label=2 made roc_curve fail because 2 never occurs in y_test
fpr, tpr, threshold = metrics.roc_curve(y_test, y_pred_proba)
roc_auc = roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr, tpr, label="Logistic model, auc=" + str(roc_auc))
# Code ends here
|
from enum import Enum


class Months(Enum):
    """Calendar months; values are the Portuguese display names."""
    janeiro = "Janeiro"
    fevereiro = "Fevereiro"
    marco = "Março"
    abril = "Abril"
    maio = "Maio"
    junho = "Junho"
    julho = "Julho"
    agosto = "Agosto"
    setembro = "Setembro"
    outubro = "Outubro"
    novembro = "Novembro"
    dezembro = "Dezembro"
|
__all__ = ["LightningModelAdapter"]
import pytorch_lightning as pl
from icevision.imports import *
from icevision.metrics import *
class LightningModelAdapter(pl.LightningModule, ABC):
    """Lightning module base that accumulates and finalizes icevision metrics."""

    def __init__(self, metrics: List[Metric] = None):
        super().__init__()
        # default to an empty metric list when none is supplied
        self.metrics = metrics or []

    def accumulate_metrics(self, records, preds):
        """Feed one batch of records/predictions to every attached metric."""
        for m in self.metrics:
            m.accumulate(records=records, preds=preds)

    def finalize_metrics(self) -> dict:
        """Finalize all metrics; return their logs keyed as '<metric name>/<key>'."""
        combined = {}
        for m in self.metrics:
            for key, value in m.finalize().items():
                combined[f"{m.name}/{key}"] = value
        return combined
|
import os
import socket
import struct
import sys
import threading
import time
class Driver(object):
def __init__(self, topo):
"""
constructor
"""
self.topo = topo
self.host = '192.168.10.100' # driver ip address
self.port = 4747 # port number must match the one in router.py
self.hosts = self.populate_hosts() # populate hosts
self.clk = 1 # number of sync clocks
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # open socket
self.s.bind((self.host, self.port)) # bind to socket
def populate_hosts(self):
"""
populates list of hosts
"""
temp = []
with open(self.topo) as f:
for line in f.readlines():
segments = line.split()
if segments[0] not in temp:
temp.append(segments[0])
if segments[1] not in temp:
temp.append(segments[1])
return temp
def show_hosts(self):
"""
shows a list of hosts
"""
for host in self.hosts: # iterate over hosts
print(host) # print host
def show_help(self):
"""
shows help menu
"""
print('list of commands')
print('----------------')
print('help - shows a list of available commands')
print('hosts - lists all hosts')
print('cost <ip1> <ip2> <cost> - updates link cost between ip1 and ip2')
print('down <ip1> <ip2> - deactivates link between ip1 and ip2')
print('up <ip1> <ip2> - reactivates link between ip1 and ip2')
print('send <ip1> <ip2> <message-length> <message> - instruct ip1 to send message to ip2')
print('show <ip> - instruct ip to show its routing table')
print('clear - clears the screen')
print('exit - terminates the driver')
def update_cost(self, command):
"""
send cost to relevant routers
Note: sending to relevant routers only
"""
# 1 -> h1, 2 -> h2, 3 -> cost
segments = command.split() # split command on spaces
if len(segments) != 4 or segments[1] not in self.hosts or segments[2] not in self.hosts: # sanity check
print('invalid arguments')
return
else:
h1 = map(int, segments[1].split('.'))
h2 = map(int, segments[2].split('.'))
for host in [segments[1], segments[2]]: # iterate over hosts
print('sending cost update to {0}'.format(host)) # print a message before sending to each host
buf = struct.pack('4s4B4Bh', segments[0], h1[0], h1[1], h1[2], h1[3], h2[0], h2[1], h2[2], h2[3], int(segments[3]))
self.s.sendto(buf, (host, self.port)) # send to each host
def send(self, command):
"""
sends message from source to destination
"""
# 1 -> src, 2 -> dest, 3:end -> message
segments = command.split() # split command on spaces
if len(segments) < 5 or segments[1] not in self.hosts or segments[2] not in self.hosts: # sanity check
print('invalid arguments')
return
else:
h1 = map(int, segments[1].split('.'))
h2 = map(int, segments[2].split('.'))
fmt = '4s4B4Bh' + segments[3] + 's'
buf = struct.pack(fmt, segments[0], h1[0], h1[1], h1[2], h1[3], h2[0], h2[1], h2[2], h2[3], int(segments[3]), ' '.join(segments[4:]))
print('sending to {0}'.format(segments[1])) # print a message before sending to source
self.s.sendto(buf, (segments[1], self.port)) # send to source
#self.s.sendto(command, (segments[1], self.port)) # send to source
def show_rt(self, command):
"""
instructs a router to show its routing table
"""
# 1 -> router
segments = command.split()
if len(segments) != 2 or segments[1] not in self.hosts:
print('invalid arguments')
return
else:
router = map(int,segments[1].split('.'))
fmt = '4s4B';
buf = struct.pack(fmt, segments[0], router[0], router[1], router[2], router[3])
print('sending to {0}'.format(segments[1]))
self.s.sendto(buf, (segments[1], self.port))
def link_down(self, command):
"""
deactivates link between two routers
Note: should be done both ways
"""
# 1 -> h1, 2 -> h2
segments = command.split()
if len(segments) != 3 or segments[1] not in self.hosts or segments[2] not in self.hosts: # sanity check
print('invalid arguments')
return
else:
os.system('sudo iptables -I OUTPUT -s {0} -d {1} -j DROP'.format(segments[1], segments[2])) # drop from h1 to h2
os.system('sudo iptables -I OUTPUT -s {0} -d {1} -j DROP'.format(segments[2], segments[1])) # drop from h2 to h1
def link_up(self, command):
"""
reactivates link between two routers
Note: should be done both ways
"""
# 1 -> h1, 2 -> h2
segments = command.split()
if len(segments) != 3 or segments[1] not in self.hosts or segments[2] not in self.hosts: # sanity check
print('invalid arguments')
return
else:
os.system('sudo iptables -I OUTPUT -s {0} -d {1} -j ACCEPT'.format(segments[1], segments[2])) # accept from h1 to h2
os.system('sudo iptables -I OUTPUT -s {0} -d {1} -j ACCEPT'.format(segments[2], segments[1])) # accept from h2 to h1
def send_clock(self):
"""
sends clock to routers to exchange routing table
"""
for host in self.hosts: # iterate over hosts
self.s.sendto('clk {0}'.format(self.clk), (host, self.port)) # send to each host
self.clk += 1 # increment clock
t = threading.Timer(5, self.send_clock) # get a reference to timer
t.daemon = True # mark it daemonic
t.start() # start timer
def run(self):
"""
runs in an infinite loop
"""
self.send_clock()
print('type help to see a list of commands')
while True:
self.command = raw_input('> ')
if self.command == 'help':
self.show_help()
elif self.command == 'hosts':
self.show_hosts()
elif self.command.startswith('cost'):
self.update_cost(self.command)
elif self.command.startswith('down'):
self.link_down(self.command)
elif self.command.startswith('up'):
self.link_up(self.command)
elif self.command.startswith('send'):
self.send(self.command)
elif self.command.startswith('show'):
self.show_rt(self.command)
elif self.command == 'clear':
os.system('clear')
elif self.command == 'exit':
break
def __del__(self):
    """
    Destructor: close the UDP socket when the driver is garbage-collected.
    """
    self.s.close()
if __name__ == '__main__':
    # Usage: python driver.py <topo> — a topology file argument is required
    if len(sys.argv) < 2:
        print('python driver.py <topo>')
        sys.exit(0)
    driver = Driver(sys.argv[1])
    driver.run()
|
print(42 if False == False else 777)
|
import numpy as np
from cereal import log, messaging
from common.filter_simple import FirstOrderFilter
from common.numpy_fast import interp, clip, mean
from common.realtime import DT_MDL
from selfdrive.hardware import EON, TICI
from selfdrive.swaglog import cloudlog
from common.params import Params
from decimal import Decimal
ENABLE_ZORROBYTE = False  # Neokii with Zorrobyte
ENABLE_INC_LANE_PROB = False  # Neokii
TRAJECTORY_SIZE = 33  # number of points in the model's lane-line arrays
# camera offset is meters from center car to camera
# model path is in the frame of the camera. Empirically
# the model knows the difference between TICI and EON
# so a path offset is not needed
# NOTE(review): the Params() reads below run at import time; the keys must
# already be provisioned or importing this module raises — confirm ordering.
PATH_OFFSET = -(float(Decimal(Params().get("PathOffsetAdj", encoding="utf8")) * Decimal('0.001')))  # default 0.0
if EON:
    CAMERA_OFFSET = -(float(Decimal(Params().get("CameraOffsetAdj", encoding="utf8")) * Decimal('0.001')))  # m from center car to camera
    CAMERA_OFFSET_A = CAMERA_OFFSET + 0.15
elif TICI:
    CAMERA_OFFSET = 0.04
    CAMERA_OFFSET_A = CAMERA_OFFSET + 0.15
else:
    CAMERA_OFFSET = 0.0
    CAMERA_OFFSET_A = CAMERA_OFFSET + 0.15
class LanePlanner:
    """Fuses the model's lane-line predictions into a single lateral path.

    Maintains filtered estimates of lane width and applies the various
    user-configurable offsets (camera, path, curve lean, routine-drive
    road offset, road-edge hugging, speed-camera offset).
    """

    def __init__(self, wide_camera=False):
        self.ll_t = np.zeros((TRAJECTORY_SIZE,))
        self.ll_x = np.zeros((TRAJECTORY_SIZE,))
        self.lll_y = np.zeros((TRAJECTORY_SIZE,))
        self.rll_y = np.zeros((TRAJECTORY_SIZE,))
        self.params = Params()
        # LaneWidth param is stored in decimeters; convert to meters
        self.lane_width_estimate = FirstOrderFilter(float(Decimal(self.params.get("LaneWidth", encoding="utf8")) * Decimal('0.1')), 9.95, DT_MDL)
        self.lane_width_certainty = FirstOrderFilter(1.0, 0.95, DT_MDL)
        self.lane_width = float(Decimal(self.params.get("LaneWidth", encoding="utf8")) * Decimal('0.1'))
        self.spd_lane_width_spd = list(map(float, self.params.get("SpdLaneWidthSpd", encoding="utf8").split(',')))
        self.spd_lane_width_set = list(map(float, self.params.get("SpdLaneWidthSet", encoding="utf8").split(',')))
        self.lll_prob = 0.
        self.rll_prob = 0.
        self.d_prob = 0.
        self.lll_std = 0.
        self.rll_std = 0.
        self.l_lane_change_prob = 0.
        self.r_lane_change_prob = 0.
        # wide camera mounts mirror the offsets
        self.camera_offset = -CAMERA_OFFSET if wide_camera else CAMERA_OFFSET
        self.path_offset = -PATH_OFFSET if wide_camera else PATH_OFFSET
        self.left_curv_offset = int(self.params.get("LeftCurvOffsetAdj", encoding="utf8"))
        self.right_curv_offset = int(self.params.get("RightCurvOffsetAdj", encoding="utf8"))
        self.drive_routine_on_co = self.params.get_bool("RoutineDriveOn")
        if self.drive_routine_on_co:
            # routine drive only applies when option '0' is enabled
            option_list = list(self.params.get("RoutineDriveOption", encoding="utf8"))
            self.drive_routine_on_co = '0' in option_list
        self.drive_close_to_edge = self.params.get_bool("CloseToRoadEdge")
        # edge offsets are stored in centimeters; convert to meters
        self.left_edge_offset = float(Decimal(self.params.get("LeftEdgeOffset", encoding="utf8")) * Decimal('0.01'))
        self.right_edge_offset = float(Decimal(self.params.get("RightEdgeOffset", encoding="utf8")) * Decimal('0.01'))
        self.speed_offset = self.params.get_bool("SpeedCameraOffset")
        self.road_edge_offset = 0.0
        self.lp_timer = 0
        self.lp_timer2 = 0
        self.lp_timer3 = 0
        self.sm = messaging.SubMaster(['liveMapData'])
        self.total_camera_offset = self.camera_offset
        self.readings = []
        self.frame = 0

    def parse_model(self, md, sm, v_ego):
        """Ingest a model message: update lane-line state and offsets.

        :param md: model output message (laneLines, probs, stds, meta)
        :param sm: SubMaster with controlsState / carState
        :param v_ego: ego speed [m/s]
        """
        curvature = sm['controlsState'].curvature
        mode_select = sm['carState'].cruiseState.modeSel
        if self.drive_routine_on_co:
            self.sm.update(0)
            current_road_offset = -self.sm['liveMapData'].roadCameraOffset
        else:
            current_road_offset = 0.0
        # right lane is minus
        lane_differ = round(abs(self.lll_y[0] + self.rll_y[0]), 2)
        if int(mode_select) == 4:
            lean_offset = 0.15
        else:
            lean_offset = 0
        if (self.left_curv_offset != 0 or self.right_curv_offset != 0) and v_ego > 8:
            if curvature > 0.0008 and self.left_curv_offset < 0 and lane_differ >= 0:  # left curve
                if lane_differ > 0.6:
                    lane_differ = 0.6
                lean_offset = -round(abs(self.left_curv_offset) * lane_differ * 0.05, 3)  # move to left
            elif curvature > 0.0008 and self.left_curv_offset > 0 and lane_differ <= 0:
                if lane_differ > 0.6:
                    lane_differ = 0.6
                lean_offset = +round(abs(self.left_curv_offset) * lane_differ * 0.05, 3)  # move to right
            elif curvature < -0.0008 and self.right_curv_offset < 0 and lane_differ >= 0:  # right curve
                if lane_differ > 0.6:
                    lane_differ = 0.6
                lean_offset = -round(abs(self.right_curv_offset) * lane_differ * 0.05, 3)  # move to left
            elif curvature < -0.0008 and self.right_curv_offset > 0 and lane_differ <= 0:
                if lane_differ > 0.6:
                    lane_differ = 0.6
                lean_offset = +round(abs(self.right_curv_offset) * lane_differ * 0.05, 3)  # move to right
            else:
                lean_offset = 0
        # refresh live-tunable params once a second
        self.lp_timer += DT_MDL
        if self.lp_timer > 1.0:
            self.lp_timer = 0.0
            self.speed_offset = self.params.get_bool("SpeedCameraOffset")
            if self.params.get_bool("OpkrLiveTunePanelEnable"):
                self.camera_offset = -(float(Decimal(self.params.get("CameraOffsetAdj", encoding="utf8")) * Decimal('0.001')))
        if self.drive_close_to_edge:  # opkr
            left_edge_prob = np.clip(1.0 - md.roadEdgeStds[0], 0.0, 1.0)
            left_nearside_prob = md.laneLineProbs[0]
            left_close_prob = md.laneLineProbs[1]
            right_close_prob = md.laneLineProbs[2]
            right_nearside_prob = md.laneLineProbs[3]
            right_edge_prob = np.clip(1.0 - md.roadEdgeStds[1], 0.0, 1.0)
            # re-evaluate the edge-hugging offset every 3 seconds
            self.lp_timer3 += DT_MDL
            if self.lp_timer3 > 3.0:
                self.lp_timer3 = 0.0
                if right_nearside_prob < 0.1 and left_nearside_prob < 0.1:
                    self.road_edge_offset = 0.0
                elif right_edge_prob > 0.35 and right_nearside_prob < 0.2 and right_close_prob > 0.5 and left_nearside_prob >= right_nearside_prob:
                    self.road_edge_offset = -self.right_edge_offset
                elif left_edge_prob > 0.35 and left_nearside_prob < 0.2 and left_close_prob > 0.5 and right_nearside_prob >= left_nearside_prob:
                    self.road_edge_offset = -self.left_edge_offset
                else:
                    self.road_edge_offset = 0.0
        else:
            self.road_edge_offset = 0.0
        if self.speed_offset:
            speed_offset = -interp(v_ego, [0, 11.1, 16.6, 22.2, 31], [0.10, 0.05, 0.02, 0.01, 0.0])
        else:
            speed_offset = 0.0
        self.total_camera_offset = self.camera_offset + lean_offset + current_road_offset + self.road_edge_offset + speed_offset
        lane_lines = md.laneLines
        if len(lane_lines) == 4 and len(lane_lines[0].t) == TRAJECTORY_SIZE:
            self.ll_t = (np.array(lane_lines[1].t) + np.array(lane_lines[2].t)) / 2
            # left and right ll x is the same
            self.ll_x = lane_lines[1].x
            self.lll_y = np.array(lane_lines[1].y) + self.total_camera_offset
            self.rll_y = np.array(lane_lines[2].y) + self.total_camera_offset
            self.lll_prob = md.laneLineProbs[1]
            self.rll_prob = md.laneLineProbs[2]
            self.lll_std = md.laneLineStds[1]
            self.rll_std = md.laneLineStds[2]
        desire_state = md.meta.desireState
        if len(desire_state):
            self.l_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeLeft]
            self.r_lane_change_prob = desire_state[log.LateralPlan.Desire.laneChangeRight]

    def get_d_path(self, v_ego, path_t, path_xyz):
        """Blend the laneline-derived path into path_xyz and return it.

        :param v_ego: ego speed [m/s]
        :param path_t: time points to interpolate the lane path onto
        :param path_xyz: model path, modified in place (column 1 shifted)
        :returns: the blended path_xyz
        """
        self.lp_timer2 += DT_MDL
        if self.lp_timer2 > 1.0:
            self.lp_timer2 = 0.0
            if self.params.get_bool("OpkrLiveTunePanelEnable"):
                self.path_offset = -(float(Decimal(self.params.get("PathOffsetAdj", encoding="utf8")) * Decimal('0.001')))
        # Reduce reliance on lanelines that are too far apart or
        # will be in a few seconds
        path_xyz[:, 1] += self.path_offset
        l_prob, r_prob = self.lll_prob, self.rll_prob
        width_pts = self.rll_y - self.lll_y
        prob_mods = []
        for t_check in (0.0, 1.5, 3.0):
            width_at_t = interp(t_check * (v_ego + 7), self.ll_x, width_pts)
            prob_mods.append(interp(width_at_t, [4.0, 5.0], [1.0, 0.0]))
        mod = min(prob_mods)
        l_prob *= mod
        r_prob *= mod
        # Reduce reliance on uncertain lanelines.
        # BUGFIX: this std-based attenuation was previously pasted twice,
        # which squared the modifiers; it is now applied exactly once.
        l_std_mod = interp(self.lll_std, [.15, .3], [1.0, 0.0])
        r_std_mod = interp(self.rll_std, [.15, .3], [1.0, 0.0])
        l_prob *= l_std_mod
        r_prob *= r_std_mod
        if ENABLE_ZORROBYTE:
            # zorrobyte code
            if l_prob > 0.5 and r_prob > 0.5:
                self.frame += 1
                if self.frame > 20:
                    self.frame = 0
                    current_lane_width = clip(abs(self.rll_y[0] - self.lll_y[0]), 2.5, 3.5)
                    self.readings.append(current_lane_width)
                    self.lane_width = mean(self.readings)
                    if len(self.readings) >= 30:
                        self.readings.pop(0)
            # zorrobyte
            # Don't exit dive
            if abs(self.rll_y[0] - self.lll_y[0]) > self.lane_width:
                r_prob = r_prob / interp(l_prob, [0, 1], [1, 3])
        else:
            # Find current lanewidth
            self.lane_width_certainty.update(l_prob * r_prob)
            current_lane_width = abs(self.rll_y[0] - self.lll_y[0])
            self.lane_width_estimate.update(current_lane_width)
            speed_lane_width = interp(v_ego, self.spd_lane_width_spd, self.spd_lane_width_set)
            self.lane_width = self.lane_width_certainty.x * self.lane_width_estimate.x + \
                (1 - self.lane_width_certainty.x) * speed_lane_width
        clipped_lane_width = min(4.0, self.lane_width)
        path_from_left_lane = self.lll_y + clipped_lane_width / 2.0
        path_from_right_lane = self.rll_y - clipped_lane_width / 2.0
        self.d_prob = l_prob + r_prob - l_prob * r_prob
        # neokii
        if ENABLE_INC_LANE_PROB and self.d_prob > 0.65:
            self.d_prob = min(self.d_prob * 1.3, 1.0)
        lane_path_y = (l_prob * path_from_left_lane + r_prob * path_from_right_lane) / (l_prob + r_prob + 0.0001)
        safe_idxs = np.isfinite(self.ll_t)
        if safe_idxs[0]:
            lane_path_y_interp = np.interp(path_t, self.ll_t[safe_idxs], lane_path_y[safe_idxs])
            path_xyz[:, 1] = self.d_prob * lane_path_y_interp + (1.0 - self.d_prob) * path_xyz[:, 1]
        else:
            cloudlog.warning("Lateral mpc - NaNs in laneline times, ignoring")
        return path_xyz
"""
File utilities
"""
import sys
import os
from os.path import join
import stat
from glob import glob
from pprint import pprint
import shutil
import distutils
import pathlib
from pathlib import Path
import json
import csv
import pickle
import threading
from queue import Queue
import time
import logging
import itertools
import collections
import hashlib
import pymediainfo
import click
from tqdm import tqdm
import cv2 as cv
from PIL import Image
import imutils
from src.settings import app_cfg as cfg
from src.settings import types
log = logging.getLogger(cfg.LOGGER_NAME)
# ------------------------------------------
# File I/O read/write little helpers
# ------------------------------------------
def get_file_list(fp_in, exts=('jpg', 'png'), recursive=False):
    '''Returns a list of files or a list of one file.

    :param fp_in: a directory (globbed for *exts*) or a single file path
    :param exts: file extensions to match when fp_in is a directory
        (tuple default fixes the old mutable-list default argument)
    :param recursive: glob directories recursively
    :returns: list of matching filepaths; [] if fp_in does not exist
    '''
    fpp_in = Path(fp_in)
    if fpp_in.is_dir():
        return glob_multi(fp_in, exts, recursive=recursive)
    if fpp_in.is_file():
        return [fp_in]  # use single file
    return []
def glob_multi(dir_in, exts, recursive=False):
    """Glob *dir_in* once per extension and return all matches.

    :param dir_in: directory to search
    :param exts: iterable of extensions without the dot (e.g. 'jpg')
    :param recursive: if True, search subdirectories via '**'
    :returns: list of matching filepaths
    """
    matches = []
    if recursive:
        for ext in exts:
            fp_glob = join(dir_in, '**/*.{}'.format(ext))
            log.info(f'glob {fp_glob}')
            matches.extend(glob(fp_glob, recursive=True))
    else:
        for ext in exts:
            matches.extend(glob(join(dir_in, '*.{}'.format(ext))))
    return matches
def get_ext(fpp, lower=True):
    """Retuns the file extension w/o dot
    :param fpp: (Pathlib.path) filepath
    :param lower: (bool) force lowercase
    :returns: (str) file extension (ie 'jpg')
    """
    suffix = ensure_posixpath(fpp).suffix
    # suffix is either '' or starts with a single dot; strip it
    ext = suffix[1:] if suffix.startswith('.') else suffix
    if lower:
        return ext.lower()
    return ext
# ---------------------------------------------------------------------
# Filepath utilities
# ---------------------------------------------------------------------
def ensure_posixpath(fp):
"""Ensures filepath is pathlib.Path
:param fp: a (str, LazyFile, PosixPath)
:returns: a PosixPath filepath object
"""
if type(fp) == str:
fpp = Path(fp)
elif type(fp) == click.utils.LazyFile:
fpp = Path(fp.name)
elif type(fp) == pathlib.PosixPath:
fpp = fp
else:
raise TypeError('{} is not a valid filepath type'.format(type(fp)))
return fpp
def mkdirs(fp):
    """Ensure parent directories exist for a filepath
    :param fp: string, Path, or click.File
    """
    path = ensure_posixpath(fp)
    # a suffix means fp looks like a file: create its parent instead
    target = path.parent if path.suffix else path
    target.mkdir(parents=True, exist_ok=True)
def sha256(fp_in, block_size=65536):
    """Generates SHA256 hash for a file
    :param fp_in: (str) filepath
    :param block_size: (int) byte size of block
    :returns: (str) hash
    """
    digest = hashlib.sha256()
    with open(fp_in, 'rb') as stream:
        while True:
            chunk = stream.read(block_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
|
# from downscale import utils
# from downscale.downscale import * |
from clearbit.resource import Resource
from clearbit.error import (ParamsInvalidError)
class NameToDomain(Resource):
    """Clearbit Name-to-Domain API resource.

    Resolves a company name to its domain via the
    ``https://company.clearbit.com/v1/domains`` endpoint.
    """
    endpoint = 'https://company.clearbit.com/v1/domains'

    @classmethod
    def find(cls, **options):
        """Issue a GET to /find; keyword options are forwarded to Resource.get."""
        return cls.get('/find', **options)
|
"""Unit tests for the outbox."""
import unittest
from unittest.mock import patch
from models.metric_notification_data import MetricNotificationData
from models.notification import Notification
from outbox import Outbox
class OutboxTestCase(unittest.TestCase):
    """Unit tests for the outbox."""

    def setUp(self):
        """Set variables for the other testcases: a data model, a report, and two notifications."""
        self.reason1 = "status_changed"
        self.data_model = dict(
            metrics=dict(metric_type=dict(name="type")),
            sources=dict(
                quality_time=dict(
                    parameters=dict(
                        status=dict(
                            api_values={"target met (green)": "target_met", "target not met (red)": "target_not_met"}
                        )
                    )
                )
            ),
        )
        self.report_url = "https://report1"
        self.report = dict(title="report_title", url=self.report_url)
        metric1 = self.create_metric("default metric 1")
        metric2 = self.create_metric("default metric 2")
        self.metric_notification_data1 = MetricNotificationData(metric1, self.data_model, self.reason1)
        self.metric_notification_data2 = MetricNotificationData(metric2, self.data_model, self.reason1)
        metrics1 = [self.metric_notification_data1, self.metric_notification_data2]
        metric3 = self.create_metric("default metric 3")
        metric4 = self.create_metric("default metric 4")
        metric_notification_data3 = MetricNotificationData(metric3, self.data_model, self.reason1)
        metric_notification_data4 = MetricNotificationData(metric4, self.data_model, self.reason1)
        metrics2 = [metric_notification_data3, metric_notification_data4]
        # two notifications for the same report but different webhooks/uuids
        self.notifications = [
            Notification(self.report, metrics1, "uuid1", dict(webhook="https://url/1")),
            Notification(self.report, metrics2, "uuid2", dict(webhook="https://url/2")),
        ]

    @staticmethod
    def create_metric(name):
        """Create a metric fixture."""
        return dict(
            type="metric_type",
            name=name,
            unit="units",
            scale="count",
            recent_measurements=[
                dict(count=dict(value=10, status="target_met")),
                dict(count=dict(value=20, status="target_not_met")),
            ],
        )

    def test_merge_notifications_into_nothing(self):
        """Test that notifications are merged, even if the outbox is currently empty."""
        outbox = Outbox()
        outbox.add_notifications(self.notifications)
        self.assertEqual(outbox.notifications, self.notifications)

    def test_merge_nothing_into_notifications(self):
        """Test that notifications are merged, even if no new metrics are given to be added."""
        outbox = Outbox(self.notifications)
        outbox.add_notifications([])
        self.assertEqual(outbox.notifications, self.notifications)

    def test_merge_nothing_into_nothing(self):
        """Test that notifications are merged, even if the destination is empty."""
        outbox = Outbox()
        outbox.add_notifications([])
        self.assertEqual(outbox.notifications, [])

    def test_merge_notifications_with_same_destination_but_different_report(self):
        """Test that metrics for a different report are NOT merged into the existing notification."""
        report = dict(title="different_title", url="https://differentreport")
        metric1 = dict(metric_name="new_metric 1")
        metric2 = dict(metric_name="new_metric 2")
        metrics1 = [metric1, metric2]
        new_notifications = [Notification(report, metrics1, "uuid1", {})]
        outbox = Outbox(self.notifications)
        outbox.add_notifications(new_notifications)
        # the existing notification keeps only its original metrics
        self.assertEqual(
            outbox.notifications[0].metrics,
            [self.metric_notification_data1, self.metric_notification_data2],
        )

    def test_merge_notifications_with_same_destination(self):
        """Test that the metrics are merged into the correct notification."""
        report = dict(title="report_title", url="https://differentreport")
        metric1 = dict(metric_name="new metric 1")
        metric2 = dict(metric_name="new metric 2")
        metrics1 = [metric1, metric2]
        new_notifications = [Notification(report, metrics1, "uuid1", dict(webhook="https://url/1"))]
        outbox = Outbox(self.notifications)
        outbox.add_notifications(new_notifications)
        self.assertEqual(
            outbox.notifications[0].metrics,
            [
                self.metric_notification_data1,
                self.metric_notification_data2,
                dict(metric_name="new metric 1"),
                dict(metric_name="new metric 2"),
            ],
        )

    @patch("outbox.send_notification")
    def test_send_notifications(self, mocked_send):
        """Test that notifications can be sent."""
        outbox = Outbox(self.notifications)
        self.assertEqual(2, outbox.send_notifications())
        # outbox is emptied after sending
        self.assertEqual([], outbox.notifications)
        mocked_send.assert_called()

    def test_notifications_without_destination(self):
        """Test that notifications without a destination aren't sent."""
        notifications = self.notifications
        notifications[0].destination["webhook"] = None
        notifications[1].destination["webhook"] = None
        outbox = Outbox(notifications)
        self.assertEqual(0, outbox.send_notifications())
|
# %-*- coding: utf-8 -*-
"""
=============================================================================
Title : Compressive sensing framework
Project : Simulation environment for BckTrk app
File : CSNN_Framework.py
-----------------------------------------------------------------------------
Description :
This file contains the main function which is the core of the simulation.
More details here
References :
-
-----------------------------------------------------------------------------
Revisions :
Date Version Name Description
25-Apr-2018 1.0 Rami File created
=============================================================================
"""
# Python library import
import collections
import collections.abc  # Mapping lives here; collections.Mapping was removed in Python 3.10
import datetime
import json
import logging
import os
import platform
import sys

import numpy as np
# User-defined library import
from Helper_functions.proc_results import process_data, get_pickle_file
from Helper_functions.transforms import transforms
from Navigation.Random_walker import random_2d_path_generator
import Navigation.Coordinates as cord
from Navigation.AWGN import noise_generator
from Reconstruction_algorithms.Master_reconstruction import reconstructor, identify_algorithms
from Helper_functions.csv_interpreter import munge_csv
from Helper_functions.framework_error import CFrameworkError
from Helper_functions.framework_error import CErrorTypes
# Use the OS-native path separator for building file paths below
if platform.system() == "Windows":
    direc_ident = "\\"
else:
    direc_ident = "/"
def update(dictionary, updateDict):
    """Recursively merge ``updateDict`` into ``dictionary``.

    Nested mappings are merged key by key instead of being replaced
    wholesale; non-mapping values from ``updateDict`` overwrite existing
    entries.

    :param dictionary: dict updated in place
    :param updateDict: dict whose entries take precedence
    :returns: the updated ``dictionary``
    """
    for k, v in updateDict.items():
        # collections.Mapping was removed in Python 3.10; use collections.abc
        if isinstance(v, collections.abc.Mapping):
            dictionary[k] = update(dictionary.get(k, {}), v)
        else:
            dictionary[k] = v
    return dictionary
class cFramework:
    """Core simulation framework: loads config, sets up logging, runs the
    compressive-sensing path reconstruction pipeline and collects errors."""

    # Constructor
    def __init__(self):
        """Load the default JSON config, ensure output dirs exist and configure logging."""
        # Parameters / Config files handling
        workingDir = os.getcwd()
        self.paramPath = workingDir + direc_ident
        self.local_struct = json.load(
            open(self.paramPath + 'Parameter_files' + direc_ident + 'default_config.json', 'r'))
        # Ensure output directories exist. os.makedirs(..., exist_ok=True)
        # replaces the previous stat-then-mkdir guarded by a bare `except:`,
        # which was race-prone and silently swallowed unrelated errors
        # (e.g. permission problems).
        os.makedirs(self.paramPath + 'Logs' + direc_ident, exist_ok=True)
        os.makedirs(self.paramPath + 'Results' + direc_ident, exist_ok=True)
        # create logger
        self.logger = logging.getLogger('BckTrk')
        REALIZATION_log = 15  # custom level between DEBUG (10) and INFO (20)
        logLevel = REALIZATION_log  # can be logging.INFO or DEBUG
        #logLevel = logging.DEBUG
        self.logger.setLevel(logLevel)
        # create file handler which logs even debug messages
        now = datetime.datetime.now()
        self.fh = logging.FileHandler(
            self.paramPath + 'Logs' + direc_ident + 'BckTrk_Log_' + now.strftime("%Y-%m-%d") + '.log')
        self.fh.setLevel(logLevel)
        self.local_struct['currentTime'] = now
        self.local_struct['workingDir'] = workingDir
        # create console handler with same log level
        self.ch = logging.StreamHandler()
        self.ch.setLevel(logLevel)
        # create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.fh.setFormatter(formatter)
        self.ch.setFormatter(formatter)
        # add the handlers to the logger
        self.logger.addHandler(self.fh)
        self.logger.addHandler(self.ch)
        self.frameworkError_list = {"bNoErrors": True}
        self.frameworklog_list = {"bNologs": True}

    def update_framework(self, arguments):
        """Optionally overlay a user-supplied JSON parameter file onto the default config.

        :param arguments: sys.argv-style list; arguments[1] (if present) is the filename
        """
        numberOfArgument = len(arguments)
        if numberOfArgument == 1:
            self.logger.info('Default config will be used')
        elif numberOfArgument == 2:
            parameters_filename = arguments[1]  # first argument should be the parameters filename
            updated_config = json.load(open(self.paramPath + parameters_filename,
                                            'r'))  # loads a dictionary written in a JSON file as a python dictionary
            self.local_struct = update(self.local_struct, updated_config)
        else:
            self.logger.error('Invalid number of arguments %d' % numberOfArgument)
            self.exit_framework()

    # Define exit function
    # TODO Since an exception can occur in one of the sub modules, this function will never be called
    # Doubling logs bug will happen, solution 1 make all function return a value to main
    # pass the exit function somehow to all submodules
    def exit_framework(self):
        """Close and detach the logging handlers."""
        self.fh.close()
        self.ch.close()
        self.logger.removeHandler(self.fh)
        self.logger.removeHandler(self.ch)

    # Main function definition
    def mainComputation(self, local_struct):
        """Run the full simulation: generate/load paths, add noise, transform,
        reconstruct, optionally train the neural network, and process results.

        :param local_struct: configuration dict (mutated with RESULTS)
        :returns: the accumulated framework error dict
        """
        # Variables initialization
        use_random_seed = local_struct['bUse_random_seed']
        random_seed = local_struct['random_seed']
        numberOfRealizations = local_struct['realization']
        noise_level_len = len(local_struct['noise_level_meter'])
        use_csv_data = local_struct['CSV_DATA']['bUse_csv_data']
        csv_path = local_struct['CSV_DATA']['csv_path']
        path_length = local_struct['CSV_DATA']['path_length']
        # Set seed
        if use_random_seed:
            np.random.seed(random_seed)
        # Iterate over the total number of realizations
        if use_csv_data:
            # CSV data is treated as noise-free; realizations come from the file
            local_struct['noise_level_meter'] = [0]
            noise_level_len = 1
            acquisition_length = path_length
            paths_latlon_org, latlon_accuracy, latlon_interval = munge_csv(csv_path, path_length)
            local_struct['realization'] = latlon_accuracy.shape[-1]
            numberOfRealizations = local_struct['realization']
            paths_latlon_org = paths_latlon_org.reshape((2, path_length, numberOfRealizations, noise_level_len))
        else:
            acquisition_length = local_struct['gps_freq_Hz'] * local_struct['acquisition_time_sec']
            paths_latlon_org = np.zeros((2, acquisition_length, numberOfRealizations, noise_level_len))
        local_struct['acquisition_length'] = acquisition_length
        paths_wm_org = np.zeros((2, acquisition_length, numberOfRealizations, noise_level_len))
        paths_wm_noisy = np.zeros((2, acquisition_length, numberOfRealizations, noise_level_len))
        paths_latlon_noisy = np.zeros((2, acquisition_length, numberOfRealizations, noise_level_len))
        noise_vals = np.zeros((acquisition_length, numberOfRealizations, noise_level_len))
        transformed_paths = np.zeros((2, acquisition_length, numberOfRealizations, noise_level_len))
        reconstructed_latlon_paths = {}
        reconstructed_WM_paths = {}
        final_sampling_ratio = {}
        bNN_initialized = {}
        reconstruction_algorithms = identify_algorithms(local_struct)
        for algorithm in reconstruction_algorithms:
            reconstructed_latlon_paths[algorithm] = np.zeros(
                (2, acquisition_length, numberOfRealizations, noise_level_len))
            reconstructed_WM_paths[algorithm] = np.zeros(
                (2, acquisition_length, numberOfRealizations, noise_level_len))
            final_sampling_ratio[algorithm] = np.zeros((numberOfRealizations, noise_level_len))
            bNN_initialized[algorithm] = False
        if local_struct['bTrainNetwork'] and local_struct['Train_NN']['bUseGeneratedData']:
            self.logger.info('Using generated data')
            try:
                dataFileName = 'TrainingSet'
                filename = self.paramPath + 'NeuralNetworks' + direc_ident + dataFileName + '.txt'
                try:
                    loadedStruct = get_pickle_file(filename)
                    # Assign the paths to local variables
                    # TODO some other checks on some parameters can be done
                    paths_latlon_org = loadedStruct['RESULTS']['paths_latlon_org']
                    paths_latlon_noisy = loadedStruct['RESULTS']['paths_latlon_noisy']
                except FileNotFoundError as filerr:
                    self.logger.debug("Training set not found")
                    errdict = {"file": __file__, "message": filerr.args[0],
                               "errorType": CErrorTypes.range}
                    raise CFrameworkError(errdict)
            except CFrameworkError as frameErr:
                self.errorAnalyzer(frameErr, "load training set")
        else:
            self.logger.info('Starting simulation with <%d> realizations and <%d> path length', numberOfRealizations,
                             acquisition_length)
            for lvl in range(noise_level_len):
                for realization in range(numberOfRealizations):
                    # Generate random data
                    self.logger.log(15, 'Generating random data for lvl <%d> for realization <%d>',
                                    local_struct['noise_level_meter'][lvl], realization)
                    if not use_csv_data:
                        (paths_wm_org[:, :, realization, lvl], paths_latlon_org[:, :, realization, lvl]) = \
                            random_2d_path_generator(local_struct)
                        # Generate noise for each realization
                        (paths_wm_noisy[:, :, realization, lvl], paths_latlon_noisy[:, :, realization, lvl],
                         noise_vals[:, realization, lvl]) = \
                            noise_generator(local_struct, paths_wm_org[:, :, realization, lvl],
                                            local_struct['noise_level_meter'][lvl])
                    else:
                        # CSV paths are used as-is: noisy == original
                        paths_wm_org[:, :, realization, lvl] = cord.generate_WM_array(
                            paths_latlon_org[:, :, realization, lvl])
                        paths_latlon_noisy[:, :, realization, lvl] = paths_latlon_org[:, :, realization, lvl]
                        paths_wm_noisy[:, :, realization, lvl] = paths_wm_org[:, :, realization, lvl]
                    # Apply transforms
                    if not local_struct['bTrainNetwork']:
                        transformed_paths[:, :, realization, lvl] = \
                            transforms(local_struct, paths_latlon_noisy[:, :, realization, lvl])
                    # Apply reconstruction algorithms
                    if local_struct['bReconstruct']:
                        for algorithm in reconstruction_algorithms:
                            # lazily construct NN-based reconstructors once
                            if "NN" in algorithm and not bNN_initialized[algorithm]:
                                from NeuralNetworks.NN import CNeuralNetwork
                                nn_name = algorithm + "Obj"
                                try:
                                    local_struct[nn_name] = CNeuralNetwork(local_struct, algorithm)
                                    bNN_initialized[algorithm] = True
                                except CFrameworkError as frameErr:
                                    self.errorAnalyzer(frameErr, str((algorithm, lvl)))
                            try:
                                try:
                                    local_struct[algorithm]['baseline'] = local_struct[algorithm]['error_baseline'][
                                        lvl]
                                except KeyError:
                                    pass  # no per-level baseline configured for this algorithm
                                temp, final_sampling_ratio[algorithm][realization, lvl] = reconstructor(
                                    local_struct, paths_latlon_noisy[:, :, realization, lvl],
                                    algorithm, noise_vals[:, realization, lvl])
                                reconstructed_latlon_paths[algorithm][:, :, realization, lvl] = temp
                                try:
                                    reconstructed_WM_paths[algorithm][:, :, realization, lvl] = \
                                        cord.generate_WM_array(temp)
                                except ValueError as valerr:
                                    self.logger.debug("Lat/Lon out of range in degrees")
                                    errdict = {"file": __file__, "message": valerr.args[0],
                                               "errorType": CErrorTypes.range}
                                    raise CFrameworkError(errdict)
                            except CFrameworkError as frameErr:
                                self.errorAnalyzer(frameErr, str((algorithm, lvl)))
        if local_struct['bTrainNetwork']:
            from NeuralNetworks.NN import CNeuralNetwork
            # Iterate over the total number of realizations to generate training set
            modelname_lat = self.paramPath + 'NeuralNetworks' + direc_ident + 'Models' + direc_ident \
                + local_struct["Train_NN"]["modelname_lat"]
            modelname_lon = self.paramPath + 'NeuralNetworks' + direc_ident + 'Models' + direc_ident \
                + local_struct["Train_NN"]["modelname_lon"]
            nnObj = CNeuralNetwork(local_struct, "Train_NN")
            nnObj.design_nn()
            results_lat, results_lon = nnObj.train_nn(paths_latlon_org, paths_latlon_noisy)
            nnObj.save_models(modelname_lat, modelname_lon)
            # if nnObj.dump_nn_summary():
            #     self.logAnalyzer(nnObj.messageSummary_dict, modelname_lat)
            #     self.logAnalyzer(nnObj.messageSummary_dict, modelname_lon)
            if local_struct["Train_NN"]["bPlotTrainResults"]:
                nnObj.train_result_visu(results_lat, results_lon, local_struct["Train_NN"]["modelname_lat"],
                                        local_struct["Train_NN"]["modelname_lon"])
        # Store data in local struct
        local_struct['RESULTS']['paths_wm_org'] = paths_wm_org
        local_struct['RESULTS']['paths_latlon_org'] = paths_latlon_org
        local_struct['RESULTS']['paths_wm_noisy'] = paths_wm_noisy
        local_struct['RESULTS']['paths_latlon_noisy'] = paths_latlon_noisy
        local_struct['RESULTS']['transformed_paths'] = transformed_paths
        local_struct['RESULTS']['reconstructed_latlon_paths'] = reconstructed_latlon_paths
        local_struct['RESULTS']['reconstructed_WM_paths'] = reconstructed_WM_paths
        local_struct['RESULTS']['final_sampling_ratio'] = final_sampling_ratio
        local_struct['RESULTS']['noise_vals'] = noise_vals
        self.logger.debug('Generating results and plotting')
        try:
            process_data(local_struct)
        except CFrameworkError as frameErr:
            self.errorAnalyzer(frameErr, "process_data")
        self.exit_framework()
        return self.frameworkError_list

    def errorAnalyzer(self, frameErr, master_key):
        """Count framework errors per (master_key, callermessage) pair.

        :param frameErr: CFrameworkError carrying the caller message
        :param master_key: grouping key, e.g. str((algorithm, lvl))
        """
        if self.frameworkError_list["bNoErrors"]:
            self.frameworkError_list["bNoErrors"] = False
        if master_key in self.frameworkError_list.keys():
            if frameErr.callermessage in self.frameworkError_list[master_key].keys():
                self.frameworkError_list[master_key][frameErr.callermessage] += 1
            else:
                self.frameworkError_list[master_key][frameErr.callermessage] = 1
        else:
            self.frameworkError_list[master_key] = {frameErr.callermessage: 1}

    def logAnalyzer(self, message, master_key):
        """Record a log summary dict under master_key for later JSON dumping."""
        # No need to append the message to the master key for now (message is a dict in this case)
        if self.frameworklog_list["bNologs"]:
            self.frameworklog_list["bNologs"] = False
        self.frameworklog_list[master_key] = message
# Main function definition MUST BE at the END OF FILE
if __name__ == "__main__":
    # Business logic for input arguments to main function
    framework_model = cFramework()
    framework_model.update_framework(sys.argv)
    frameworkError_list = framework_model.mainComputation(framework_model.local_struct)
    # Dump the accumulated framework errors to a dated JSON next to the logs
    filename = framework_model.paramPath + 'Logs' + direc_ident + 'BckTrk_exception_' + \
        framework_model.local_struct["currentTime"].strftime("%Y-%m-%d") + '.json'
    with open(filename, "w") as data_file:
        json.dump(frameworkError_list, data_file, indent=4, sort_keys=True)
    if not framework_model.frameworklog_list["bNologs"]:
        # Dump collected log summaries only if any were recorded
        filename = framework_model.paramPath + 'Logs' + direc_ident + 'BckTrk_logDump_' + \
            framework_model.local_struct["currentTime"].strftime("%Y-%m-%d") + '.json'
        with open(filename, "w") as data_file:
            json.dump(framework_model.frameworklog_list, data_file, indent=4, sort_keys=True)
|
from brownie import reverts
def test_constructor(SwapStrategy, vault, gov, keeper):
    """Deploy SwapStrategy and verify all constructor parameters are stored."""
    strategy = gov.deploy(SwapStrategy, vault, 2400, 1200, 500, 600, keeper)
    assert strategy.vault() == vault
    assert strategy.pool() == vault.pool()
    assert strategy.baseThreshold() == 2400
    assert strategy.limitThreshold() == 1200
    assert strategy.maxTwapDeviation() == 500
    assert strategy.twapDuration() == 600
    assert strategy.keeper() == keeper
def test_constructor_checks(SwapStrategy, vault, gov, keeper):
    """Verify the constructor reverts on every invalid parameter combination."""
    # thresholds must be a multiple of the pool's tick spacing
    with reverts("threshold % tickSpacing"):
        gov.deploy(SwapStrategy, vault, 2401, 1200, 500, 600, keeper)
    with reverts("threshold % tickSpacing"):
        gov.deploy(SwapStrategy, vault, 2400, 1201, 500, 600, keeper)
    # thresholds must be strictly positive
    with reverts("threshold > 0"):
        gov.deploy(SwapStrategy, vault, 0, 1200, 500, 600, keeper)
    with reverts("threshold > 0"):
        gov.deploy(SwapStrategy, vault, 2400, 0, 500, 600, keeper)
    # thresholds must stay within the usable tick range
    with reverts("threshold too high"):
        gov.deploy(SwapStrategy, vault, 887280, 1200, 500, 600, keeper)
    with reverts("threshold too high"):
        gov.deploy(SwapStrategy, vault, 2400, 887280, 500, 600, keeper)
    # TWAP deviation must be non-negative, duration non-zero
    with reverts("maxTwapDeviation"):
        gov.deploy(SwapStrategy, vault, 2400, 1200, -1, 600, keeper)
    with reverts("twapDuration"):
        gov.deploy(SwapStrategy, vault, 2400, 1200, 500, 0, keeper)
def test_rebalance_negative_amount(SwapStrategy, vault, gov, keeper, user, pool):
    """Rebalance with a negative swapAmount (buying token0) and check balances move."""
    strategy = gov.deploy(SwapStrategy, vault, 2400, 1200, 500, 600, keeper)
    vault.setStrategy(strategy, { "from": gov })
    vault.deposit(1e16, 1e18, 0, 0, user, { "from": user })
    # zeroToOne true means selling token0, false means buying token0
    print('Case swapAmount is negative\n\n*************\n')
    swapAmount = -2400
    #minSQRTPrice = int(1.0001 ** (pool.slot0()[1] * 0.5) * (2 ** 96))
    minSQRTPrice = 2 ** 96
    maxSQRTPrice = 2 ** 159
    # negative amount -> use the max price bound
    SQRTPrice = minSQRTPrice if (swapAmount > 0) else maxSQRTPrice
    assert (maxSQRTPrice == SQRTPrice)
    balance0 = vault.getBalance0()
    balance1 = vault.getBalance1()
    tx = strategy.rebalance(swapAmount, SQRTPrice, { "from": keeper })
    tx.wait(1)
    # buying token0: total amount0 grows, amount1 shrinks
    newAmount0, newAmount1 = vault.getTotalAmounts()
    assert(balance0 < newAmount0)
    assert(balance1 > newAmount1)
    newBalance0 = vault.getBalance0()
    newBalance1 = vault.getBalance1()
    assert newBalance0 != balance0
    assert newBalance1 != balance1
    # a strategy deployed with zero thresholds must revert on rebalance
    with reverts("threshold > 0"):
        newStrategy = gov.deploy(SwapStrategy, vault, 0, 0, 500, 600, keeper)
        vault.setStrategy(newStrategy, { "from": gov })
        tx = newStrategy.rebalance(swapAmount, SQRTPrice, { "from": keeper })
        tx.wait(1)
def test_rebalance_positive_amount(SwapStrategy, vault, gov, keeper, user, pool):
    """A rebalance with a positive swapAmount should decrease the vault's
    token0 holdings and increase token1 (asserted below)."""
    strategy = gov.deploy(SwapStrategy, vault, 2400, 1200, 500, 600, keeper)
    vault.setStrategy(strategy, { "from": gov })
    vault.deposit(1e16, 1e18, 0, 0, user, { "from": user })
    # zeroToOne true means selling token0; false means buying token0
    print('case swapAmount is positive\n\n\n**************\n')
    swapAmount = 2400
    minSQRTPrice = 2 ** 96
    maxSQRTPrice = 2 ** 159
    # A positive amount selects the minimum sqrt price limit.
    SQRTPrice = minSQRTPrice if (swapAmount > 0) else maxSQRTPrice
    assert (minSQRTPrice == SQRTPrice)
    balance0 = vault.getBalance0()
    balance1 = vault.getBalance1()
    tx = strategy.rebalance(swapAmount, SQRTPrice, { "from": keeper })
    tx.wait(1)
    # Totals shift toward token1; both idle balances must have changed.
    newAmount0, newAmount1 = vault.getTotalAmounts()
    assert(balance0 > newAmount0)
    assert(balance1 < newAmount1)
    newBalance0 = vault.getBalance0()
    newBalance1 = vault.getBalance1()
    assert newBalance0 != balance0
    assert newBalance1 != balance1
|
import sys
import process_sequence_fasta as pro_seq_fasta
import sequence2vector as s2v_tools
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from keras.layers import LSTM, Dense
from keras.models import Sequential
from keras.layers import Embedding
from keras.preprocessing import sequence
import batchgenerator as bg
"""
Author: Carlos Garcia-Perez
Date: 25.06.2019 final version of model setting.
add new option to train all data set no splitting.
save the model and the weight separated
17.06.2019 fix the model definition
14.06.2019 split data set in train, validation and test sets in option 1
13.06.2019 create data set in one-hot-encoding and save as object.pkl in option 2
first version of the script
"""
# Processing mode: 1 = train on the full padded dataset, 2 = train from
# pre-built text files through batch generators, 3 = build those datasets.
opt = int(sys.argv[1])
# Sequence alphabet selector ('nuc' here; the comment suggests 'aa' as the
# alternative). NOTE(review): shadows the builtin ``type``.
type = 'nuc'  # aa

print('running option = ', opt)

if opt == 1:
    print('processing all...')
    # Pickled sequences, labels and class count produced by earlier runs.
    x_data_name = '/data/sequence_dataset.pkl'
    y_data_name = '/data/label_dataset.pkl'
    X = pickle.load(open(x_data_name, 'rb'))
    Y = pickle.load(open(y_data_name, 'rb'))
    classes = pickle.load(open("/data/classes.pkl", 'rb'))

    print('defining model:')
    features = 20  # embedding dimension
    # presumably an integer class count — TODO confirm against the pickle
    num_classes = classes
    print('features: ', features)
    print('clases: ', num_classes)
    print('nodes: ', 128)
    print('bacth size: ', 2000)
    print('epochs: ', 50)

    print('reshaping data...')
    # Pad every sequence to the length of the longest one.
    max_len = max([len(s) for s in X])
    X_train = sequence.pad_sequences(X, maxlen=max_len)
    print('training dataset: ', X_train.shape)
    print('max_length:', max_len)

    model = Sequential()
    # NOTE(review): the embedding vocabulary size is the number of
    # sequences, not the number of distinct tokens — verify.
    model.add(Embedding(len(X_train), features, input_length=max_len))
    model.add(LSTM(128))  # 32
    model.add(Dense(num_classes, activation='softmax'))

    print('compiling the model...')
    # compile the model
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print('training the model...')
    # metric
    model.fit(X_train, Y,
              epochs=50,
              batch_size=2000)
    # Evaluation runs on the training data itself (no held-out split here).
    results_eval = model.evaluate(X_train, Y, batch_size=2000)
    print("%s: %.2f%%" % (model.metrics_names[1], results_eval[1] * 100))
    pickle.dump(results_eval, open("/data/results_eval.pkl", 'wb'), protocol=4)

    # serialize model to JSON
    model_json = model.to_json()
    with open("/data/model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("/data/model.h5")
    print("Saved model to disk...")

elif opt == 2:
    print('loading data...')
    # info = (classes, max_len, n_train, n_val, n_test), written by option 3.
    info = pickle.load(open("/data/info.pkl", 'rb'))
    fname_train = '/data/train.txt'
    fname_val = '/data/val_train.txt'
    fname_test = '/data/test.txt'

    print('defining model:')
    ly = 128  # layer
    btch_size = 250
    epch = 20
    features = 20
    num_classes = info[0]
    max_len = info[1]
    nsamples_train = info[2]
    nsamples_val = info[3]
    nsamples_test = info[4]
    train_steps_per_epoch = np.ceil(nsamples_train / btch_size)
    val_steps_per_epoch = np.ceil(nsamples_val / btch_size)
    test_steps_per_epoch = np.ceil(nsamples_test / btch_size)
    print('features: ', features)
    print('clases: ', num_classes)
    print('max_length', max_len)
    print('layer nodes: ', ly)  # 128
    print('bacth size: ', btch_size)  # 2000
    print('epochs: ', epch)
    print('train steps per epoch: ', train_steps_per_epoch)
    print('val steps per epoch: ', val_steps_per_epoch)
    print('test steps per epoch: ', test_steps_per_epoch)

    model = Sequential()
    # NOTE(review): the embedding vocabulary size is set to max_len —
    # verify against the actual token alphabet size.
    model.add(Embedding(max_len, features, input_length=max_len))
    model.add(LSTM(ly))  # 128
    model.add(Dense(num_classes, activation='softmax'))

    print('compiling the model...')
    # compile the model
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print('training the model...')
    # metric
    train_generator = bg.generator(fname_train, max_len, btch_size)
    validation_generator = bg.generator(fname_val, max_len, btch_size)
    network = model.fit_generator(train_generator,
                                  steps_per_epoch=train_steps_per_epoch,
                                  epochs=epch,
                                  validation_data=validation_generator,
                                  validation_steps=val_steps_per_epoch)  # 0.2
    test_generator = bg.generator(fname_test, max_len, btch_size)
    results_eval = model.evaluate_generator(test_generator, steps=test_steps_per_epoch)
    print('training the model... done!!!')
    print('savinig the history...')
    # NOTE(review): this pickles the whole History object returned by
    # fit_generator, which references the model and may not round-trip —
    # consider ``network.history`` instead.
    pickle.dump(network, open("/data/history.pkl", 'wb'), protocol=4)
    pickle.dump(results_eval, open("/data/results_eval.pkl", 'wb'), protocol=4)
    print('done...')

elif opt == 3:
    """
    Create training and testing shuffled datasets
    """
    fname = '/data/subdataset_RDP_nucl.fasta'
    sequence_df = pro_seq_fasta.process_fasta(fname, type)
    Y = np.array(sequence_df['bacteria'])
    X = np.array(sequence_df['sequence'])
    max_len = max([len(s) for s in X])
    classes = len(np.unique(Y))
    # One-hot encode labels before splitting.
    Y = s2v_tools.label2one_hot_encoding(Y)
    # 67/33 train/test, then 80/20 train/validation.
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33)
    X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.20)
    nsamples_train = len(X_train)
    nsamples_val = len(X_val)
    nsamples_test = len(X_test)
    # Each line is "<one-hot label>\t<sequence>", comma-separated inside.
    with open('/data/train.txt', 'w') as f:
        for i in range(len(X_train)):
            line = str(list(Y_train[i])).strip('[]').replace(', ', ',') + '\t' + str(X_train[i]).strip('[]').replace(', ', ',') + '\n'
            f.write(line)
    # redundant: the with-statement already closed the file
    f.close()
    with open('/data/val_train.txt', 'w') as f:
        for i in range(len(X_val)):
            line = str(list(Y_val[i])).strip('[]').replace(', ', ',') + '\t' + str(X_val[i]).strip('[]').replace(', ', ',') + '\n'
            f.write(line)
    f.close()
    with open('/data/test.txt', 'w') as f:
        for i in range(len(X_test)):
            line = str(list(Y_test[i])).strip('[]').replace(', ', ',') + '\t' + str(X_test[i]).strip('[]').replace(', ', ',') + '\n'
            f.write(line)
    f.close()
    # Metadata consumed by option 2.
    info = (classes, max_len, nsamples_train, nsamples_val, nsamples_test)
    pickle.dump(info, open("/data/info.pkl", 'wb'), protocol=4)
|
# iterative approach to find the number of set
# bits in binary representation of positive integer n
def count_set_bits(n):
    """Return the number of 1-bits in the binary representation of ``n``.

    Args:
        n: a non-negative integer (arbitrary precision).

    Returns:
        The population count of ``n``.

    Raises:
        ValueError: if ``n`` is negative. (The previous recursive version
            recursed forever on negative input and hit the recursion limit
            for integers wider than ~1000 bits.)
    """
    if n < 0:
        raise ValueError("n must be a non-negative integer")
    count = 0
    while n:
        n &= n - 1  # clear the lowest set bit (Kernighan's trick)
        count += 1
    return count


# Get value from user
n = 41

# Function calling
print(count_set_bits(n))
|
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Lambda
def euclidean_distance(vects):
    """Batch-wise Euclidean distance between a pair of embedding tensors."""
    left, right = vects
    squared_diff = K.square(left - right)
    summed = K.sum(squared_diff, axis=1, keepdims=True)
    # Clamp with epsilon so sqrt never sees an exact zero.
    return K.sqrt(K.maximum(summed, K.epsilon()))
def contrastive_loss1(y_true, y_pred):
    """Contrastive loss with a fixed margin of 1.0.

    Pairs labeled 0 are penalized by their squared distance; pairs
    labeled 1 are penalized when closer than the margin.
    """
    margin = 1.0
    similar_term = (1 - y_true) * K.square(y_pred)
    dissimilar_term = y_true * K.square(K.maximum(margin - y_pred, 0))
    return K.mean(similar_term + dissimilar_term)
def pair_accuracy(y_true, y_pred):
    """Fraction of pairs whose thresholded prediction (> 0.5) matches the label."""
    thresholded = K.cast(y_pred > 0.5, y_true.dtype)
    return K.mean(K.equal(y_true, thresholded))
def create(base_model):
    """Build a siamese distance model around ``base_model``.

    Returns:
        (siam_model, emb_model): the two-input distance model and a
        single-input model exposing the shared embedding.
    """
    input_a = Input(shape=base_model.input_shape[1:], name='i1')
    input_b = Input(shape=base_model.input_shape[1:], name='i2')
    embedding_a = base_model(input_a)
    embedding_b = base_model(input_b)
    distance = Lambda(euclidean_distance)([embedding_a, embedding_b])
    siam_model = Model([input_a, input_b], distance)
    emb_model = Model(input_a, embedding_a)
    return siam_model, emb_model
def compile(siam_model, loss, optimizer):
    """Compile the siamese model, tracking pair accuracy as the metric.

    NOTE(review): this function shadows the builtin ``compile`` at module
    level.
    """
    tracked_metrics = [pair_accuracy]
    siam_model.compile(loss=loss, optimizer=optimizer, metrics=tracked_metrics)
|
from argparse import ArgumentParser
from enum import Enum
from more_itertools import sliced
from pathlib import Path
from robber_baron import Bot
class Difficulty(Enum):
    """Puzzle difficulty levels offered by the Sudoku site."""

    EASY = "easy"
    MEDIUM = "medium"
    DIFFICULT = "difficult"
    INSANE = "insane"

    def __str__(self) -> str:
        """Render as the bare difficulty word (e.g. for argparse choices)."""
        return str(self.value)
# Single-letter difficulty code used in the site's init.php query string
# (the first letter of each difficulty name).
DIFFICULTY_TO_ID = {
    Difficulty.EASY: "e",
    Difficulty.MEDIUM: "m",
    Difficulty.DIFFICULT: "d",
    Difficulty.INSANE: "i",
}
class SudokuBot(Bot):
    """Bot that plays Sudoku on sudoku.puzzlebaron.com via a MiniZinc solver."""

    def play(self, difficulty: Difficulty):
        """Play a Sudoku game.

        Starts a new game at the requested difficulty, scrapes the 81-cell
        grid, solves the instance with the MiniZinc model, writes the
        solution back into the page and submits it.
        """
        difficulty_id = DIFFICULTY_TO_ID[difficulty]
        new_game_url = f"https://sudoku.puzzlebaron.com/init.php?d={difficulty_id}"
        print(f"Loading new game URL: {new_game_url} ...")
        self.browser.get(new_game_url)
        print("Starting game ...")
        self.browser.find_element("td > a.button_orange").click()
        # Read all 81 cell texts; cells are named box1..box81, row-major.
        boxes = [
            self.browser.find_element(f"div#box{i+1}").get_attribute("innerText")
            for i in range(81)
        ]
        # Empty cells become 0; reshape the flat list into 9 rows of 9.
        initial_state = list(sliced([int(b) if len(b) > 0 else 0 for b in boxes], 9))
        print(f"Extracted initial game state: {initial_state}")
        print("Solving instance with MiniZinc ...")
        model_file = Path(__file__).parent / "models" / "sudoku.mzn"
        result = self.solver.solve(model_file, {"start": initial_state})
        solution = result["puzzle"]
        print(f"Found solution: {solution}")
        # TODO: reverse engineer encoding logic instead of filling in grid manually
        print("Filling in grid ...")
        # Only write cells that were blank in the initial state.
        for j in range(9):
            for i in range(9):
                if initial_state[j][i] == 0:
                    self.browser.execute_script(
                        "document.getElementById('box' + arguments[0]).innerText = arguments[1]",
                        str((j * 9) + i + 1),
                        solution[j][i],
                    )
        print("Submitting game ...")
        self.browser.execute_script("window.xmlhttpPost2('check.php')")
        print("Verifying submission ...")
        # Locating the "new game" link in the response widget serves as the
        # success check — presumably raises if it never appears; confirm
        # Bot.find_element semantics.
        _ = self.browser.find_element('div#widgetresponse a[href="init.php"]')
def parse_args():
    """Parse command-line arguments."""
    arg_parser = ArgumentParser(description="Play a Sudoku game")
    arg_parser.add_argument(
        "-d",
        "--difficulty",
        type=Difficulty,
        choices=list(Difficulty),
        default=Difficulty.EASY,
        help="Puzzle difficulty; default 'easy'",
    )
    arg_parser.add_argument(
        "--login", action="store_true", help="Login to Puzzle Baron account"
    )
    return arg_parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    bot = SudokuBot()
    # Optional account login before starting the game.
    if args.login:
        bot.login()
    bot.play(args.difficulty)
    # Keep the browser window open until the user confirms.
    input("Press enter to quit: ")
    bot.browser.quit()
|
from flask_wtf import Form
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired, EqualTo, Length
# Set your classes here.
class RegisterForm(Form):
    """Account sign-up form: user name, e-mail and double-entered password.

    NOTE(review): ``TextField`` was renamed ``StringField`` and removed in
    WTForms 3 — this module requires an older WTForms; confirm the pin.
    """
    name = TextField(
        'User Name', validators=[DataRequired(), Length(min=6, max=25)]
    )
    email = TextField(
        'Email', validators=[DataRequired(), Length(min=6, max=40)]
    )
    password = PasswordField(
        'Password', validators=[DataRequired(), Length(min=6, max=40)]
    )
    # Must match the ``password`` field exactly.
    confirm = PasswordField(
        'Password Confirmation', validators=[DataRequired(),
        EqualTo('password', message='Password does not match')]
    )
class LoginForm(Form):
    """Credentials form for signing in (no length constraints)."""
    name = TextField('User Name', [DataRequired()])
    password = PasswordField('Password', [DataRequired()])
class ForgotForm(Form):
    """Password-reset request form (e-mail only)."""
    email = TextField(
        'Email', validators=[DataRequired(), Length(min=6, max=40)]
    )
class AddTeamForm(Form):
    """Form for registering a team within a league and division."""
    team_name = TextField(
        'Team Name', validators=[DataRequired(), Length(min=6, max=25)]
    )
    league = TextField(
        'League', validators=[DataRequired(), Length(min=6, max=40)]
    )
    # Label typo fixed: was 'Devision'.
    division = TextField(
        'Division', validators=[DataRequired(), Length(min=1, max=40)]
    )
from iap2022.utils.coordinate_transformer import CoordinateTransformer
from iap2022.utils.coordinate_transformer import InternalCoordinate, InternalCoordinateAndOrientation
from iap2022.utils.functional import get_bonded_atoms
|
# NOTE(review): ``patterns()`` and string view references were deprecated in
# Django 1.8 and removed in 1.10, so this module requires Django < 1.10 —
# confirm the project's pinned Django version before modernizing.
from django.conf.urls import patterns, url

# Admin-side browse view of the inspect contrib app.
urlpatterns = patterns(
    '',
    url(r'^admin/inspect/browse/$', 'upy.contrib.inspect.views.browse', name='inspect_browse'),
)
|
import tensorflow as tf
import numpy as np
import gym
class Config(object):
    """Hyper-parameters for the PPO network and agent.

    ``input_size`` / ``output_size`` are filled in from the gym
    environment's spaces by ``Agent.__init__``.
    """
    input_size = None
    output_size = None
    num_layer = 2            # hidden layers in the shared MLP
    num_hidden = 128         # units per hidden layer
    dtype = tf.float32
    epsilon = 0.2            # PPO probability-ratio clip range
    c1 = 0.5                 # value-loss weight in the total loss
    c2 = 0.01                # entropy-bonus weight in the total loss
    gamma = 0.99             # discount factor
    normalize_advantages = True
    learning_rate = 3e-4
    epoch = 10               # gradient steps per training batch
    num_env = 16             # parallel environment copies
    max_path_length = 1000   # step cap before an episode is cut off
class Nn_estimator(object):
    """TF1 actor-critic MLP trained with the PPO clipped objective.

    A shared MLP trunk feeds a scalar value head and a softmax policy
    head; the optimizer minimizes ``-L_clip + c1 * value_MSE - c2 * entropy``.
    """

    def __init__(self, config):
        self._config = config
        input_size = config.input_size
        output_size = config.output_size
        dtype = config.dtype
        with tf.name_scope('input_layer'):
            self._input_states = tf.placeholder(dtype, [None,input_size], 'input_states')
            self._action_indices = tf.placeholder(tf.int32, [None], 'action_indices')
            # Probabilities the behavior (old) policy assigned to the taken
            # actions. NOTE(review): graph tensor is named 'action_index'.
            self._old_action_prob = tf.placeholder(dtype, [None], 'action_index')
            self._advantages = tf.placeholder(dtype, [None], 'advantages')
        with tf.name_scope('labels'):
            self._label_value = tf.placeholder(dtype, [None,1], 'label_value')
            # NOTE(review): fed by train() but not referenced in the loss.
            self._label_distribution = tf.placeholder(tf.int32, [None], 'label_distribution')
        with tf.name_scope('MLP'):
            layer_out = self._input_states
            for i in range(config.num_layer):
                layer_out = tf.layers.dense(layer_out, config.num_hidden, tf.nn.relu, name='MLP_layer_{}'.format(i))
        with tf.name_scope('value_header'):
            self._prediction_value = tf.layers.dense(layer_out, 1, name='value_layer')
        with tf.name_scope('distribution_header'):
            self._logits = tf.layers.dense(layer_out, output_size, name='logits')
            self._prediction_distribution = tf.nn.softmax(self._logits)
        with tf.name_scope('loss'):
            # clip loss: probability ratio of the taken action, clipped to
            # [1 - epsilon, 1 + epsilon] (PPO surrogate objective).
            action_onehot = tf.one_hot(self._action_indices, config.output_size)
            p_prob = tf.reduce_sum(self._prediction_distribution * action_onehot, axis=1)
            r_t = p_prob / self._old_action_prob
            clipped = tf.clip_by_value(r_t, clip_value_min=1-self._config.epsilon, clip_value_max=1+self._config.epsilon)
            l_clip = tf.minimum(r_t*self._advantages, clipped*self._advantages)
            l_clip = tf.reduce_mean(l_clip)
            self._l_clip = l_clip
            # value loss
            l_vf = tf.losses.mean_squared_error(labels=self._label_value, predictions=self._prediction_value)
            self._l_vf = l_vf
            # entropy: cross-entropy of the predicted distribution with its
            # own logits equals the policy's entropy.
            l_s = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self._prediction_distribution, logits=self._logits)
            l_s = tf.reduce_mean(l_s)
            # total loss: maximize L_clip and entropy, minimize value error
            self._loss = - l_clip + self._config.c1 * l_vf - self._config.c2 * l_s
        with tf.name_scope('train'):
            optimizer = tf.train.AdamOptimizer(learning_rate=config.learning_rate)
            self._train_op = optimizer.minimize(self._loss)
        init = tf.global_variables_initializer()
        self._saver = tf.train.Saver()
        self._sess = tf.Session()
        # Resume from the latest checkpoint when available; otherwise
        # initialize fresh variables and write an initial checkpoint.
        try:
            self.restore()
        except ValueError as e:
            print(e)
            self._sess.run(init)
            self.save()
        # writer = tf.summary.FileWriter("./tensorboard/log/", self._sess.graph)
        # writer.close()

    def train(self, states, actions, act_prob, advs, values):
        """Run ``config.epoch`` gradient steps on one batch, then checkpoint."""
        feed_dict = {
            self._input_states:states,
            self._action_indices:actions,
            self._old_action_prob:act_prob,
            self._advantages:advs,
            self._label_distribution:actions,
            self._label_value:values
        }
        for i in range(self._config.epoch):
            l_clip, l_vf, _ = self._sess.run([self._l_clip, self._l_vf, self._train_op], feed_dict=feed_dict)
            # print('clip_loss:', l_clip, 'value_loss', l_vf)
        self.save()

    def predict(self, states):
        """Return (state values, action probability distributions) for a batch."""
        feed_dict = {self._input_states:states}
        value_p, distribution_p = self._sess.run([self._prediction_value,self._prediction_distribution],feed_dict=feed_dict)
        return value_p, distribution_p

    def save(self, path="./model/latest.ckpt"):
        """Write the current weights to ``path``."""
        self._saver.save(self._sess, path)

    def restore(self, path="./model/latest.ckpt"):
        """Load weights from ``path`` (caller handles the missing-file error)."""
        self._saver.restore(self._sess, path)

    def close(self):
        """Release the underlying TensorFlow session."""
        self._sess.close()
class Agent(object):
    """PPO agent that samples several gym environments in lockstep.

    Trajectories are flushed to the network in 5-step windows while
    rollouts are still in progress.
    """

    def __init__(self, env_name, config=Config()):
        # NOTE(review): mutable default — all agents built without an
        # explicit config share one Config instance.
        env = gym.make(env_name)
        # Derive network I/O sizes from the environment's spaces.
        config.input_size = env.observation_space.shape[0]
        config.output_size = env.action_space.n
        self._nn = Nn_estimator(config)
        self._sshape = config.input_size
        self._adim = config.output_size
        self._max_path_length = config.max_path_length
        self._gamma = config.gamma
        self._normalize_advantages = config.normalize_advantages
        self._env_name = env_name
        self._num_env = config.num_env

    def make_envs(self):
        """Create fresh environment copies with empty trajectory buffers."""
        self._env_dics = []
        for _ in range(self._num_env):
            dic = {
                'env':gym.make(self._env_name),
                'obs':[],
                'acs':[],
                'probs':[],
                'rewards':[],
                'next_obs':[],
                'terminals':[],
                'done':False
            }
            self._env_dics.append(dic)

    def sample_trajectory(self):
        """Roll out all envs, training on the trailing window every 5 steps.

        Finished environments contribute zero observations as placeholders
        so the batched prediction keeps a fixed shape.
        """
        envs_ob = []
        for dic in self._env_dics:
            ob = dic['env'].reset()
            dic['obs'].append(ob)
            envs_ob.append(ob)
        envs_ob = np.stack(envs_ob)
        steps = 0
        while True:
            # Current policy's action distribution for every environment.
            _, envs_prob = self._nn.predict(envs_ob)
            envs_ob = []
            for i, dic in enumerate(self._env_dics):
                if dic['done']:
                    envs_ob.append(np.zeros(shape=self._sshape))
                    continue
                # Sample an action from the predicted distribution.
                act_idx = np.random.choice(self._adim,p=envs_prob[i])
                dic['acs'].append(act_idx)
                dic['probs'].append(envs_prob[i,act_idx])
                ob, rew, done, _ = dic['env'].step(act_idx)
                envs_ob.append(ob)
                dic['next_obs'].append(ob)
                dic['rewards'].append(rew)
                # Mark terminal on env termination or when the step cap
                # is exceeded; only continuing envs record the next obs.
                if done or steps > self._max_path_length:
                    dic['terminals'].append(1)
                    dic['done'] = True
                else:
                    dic['terminals'].append(0)
                    dic['obs'].append(ob)
            steps += 1
            is_done = True
            for dic in self._env_dics:
                if not dic['done']:
                    is_done = False
            if is_done:
                # Flush the final (possibly partial) window, report the
                # average episode reward, and stop.
                offset = steps % 5 if steps % 5 != 0 else 5
                obs_t, next_obs_t, rewards_t, terminals_t, acs_t, probs_t = [], [], [], [], [], []
                for dic in self._env_dics:
                    obs_t.extend(dic['obs'][steps-offset:steps])
                    next_obs_t.extend(dic['next_obs'][steps-offset:steps])
                    rewards_t.extend(dic['rewards'][steps-offset:steps])
                    terminals_t.extend(dic['terminals'][steps-offset:steps])
                    acs_t.extend(dic['acs'][steps-offset:steps])
                    probs_t.extend(dic['probs'][steps-offset:steps])
                obs_t = np.stack(obs_t)
                next_obs_t = np.stack(next_obs_t)
                rewards_t = np.stack(rewards_t)
                terminals_t = np.stack(terminals_t)
                acs_t = np.stack(acs_t)
                probs_t = np.stack(probs_t)
                self.update_nn(obs_t, next_obs_t, rewards_t, terminals_t, acs_t, probs_t)
                total_reward = 0
                for dic in self._env_dics:
                    total_reward += np.sum(dic['rewards'])
                print('average reward', total_reward/len(self._env_dics), '\n')
                break
            # NOTE(review): this flush duplicates the block above except for
            # the fixed offset — a candidate for extraction into a helper.
            if steps % 5 == 0:
                offset = 5
                obs_t, next_obs_t, rewards_t, terminals_t, acs_t, probs_t = [], [], [], [], [], []
                for dic in self._env_dics:
                    obs_t.extend(dic['obs'][steps-offset:steps])
                    next_obs_t.extend(dic['next_obs'][steps-offset:steps])
                    rewards_t.extend(dic['rewards'][steps-offset:steps])
                    terminals_t.extend(dic['terminals'][steps-offset:steps])
                    acs_t.extend(dic['acs'][steps-offset:steps])
                    probs_t.extend(dic['probs'][steps-offset:steps])
                obs_t = np.stack(obs_t)
                next_obs_t = np.stack(next_obs_t)
                rewards_t = np.stack(rewards_t)
                terminals_t = np.stack(terminals_t)
                acs_t = np.stack(acs_t)
                probs_t = np.stack(probs_t)
                self.update_nn(obs_t, next_obs_t, rewards_t, terminals_t, acs_t, probs_t)
            envs_ob = np.stack(envs_ob)

    def estimate_advantage(self, obs, next_obs, rewards, terminals):
        """One-step TD advantages: q = r + gamma*(1-T)*V(s'); adv = q - V(s)."""
        v0, _ = self._nn.predict(obs)
        v1, _ = self._nn.predict(next_obs)
        qs = rewards + (1 - terminals) * self._gamma * v1.flatten()
        advs = qs - v0.flatten()
        if self._normalize_advantages:
            # Standardize advantages; epsilon avoids division by zero.
            adv_mean = np.mean(advs)
            adv_std = np.std(advs)
            advs = (advs-adv_mean)/(adv_std+1e-8)
        return advs, qs.reshape([-1,1])

    def update_nn(self, obs, next_obs, rewards, terminals, acs, probs):
        """Compute advantages and value targets, then run one PPO update."""
        advs, values = self.estimate_advantage(obs, next_obs, rewards, terminals)
        self._nn.train(obs, acs, probs, advs, values)
if __name__ == '__main__':
    # Train on LunarLander-v2 for 100 sampling iterations; each iteration
    # rebuilds the parallel environments and collects one set of rollouts.
    lunar_agent = Agent('LunarLander-v2')
    for iteration in range(100):
        lunar_agent.make_envs()
        lunar_agent.sample_trajectory()
|
'''
Created on Jan 3, 2022
@author: immanueltrummer
'''
import argparse
import codexdb.catalog
import codexdb.code
import codexdb.engine
import contextlib
import json
import os
import openai
import pandas as pd
import time
def extract_samples(catalog, path_to_results):
    """ Extracts completion examples from prior results file.

    Args:
        catalog: database catalog with schema information
        path_to_results: path to prior results file

    Returns:
        list of extracted examples
    """
    with open(path_to_results) as in_file:
        prior_results = json.load(in_file)
    # Keep only results that matched the reference output exactly.
    examples = [
        result
        for case_results in prior_results.values()
        for result in case_results
        if result['similarity'] == 1.0
    ]
    # Older result files lack schema information; backfill from the catalog.
    for example in examples:
        if 'schema' not in example:
            db_id = example['db_id']
            example['schema'] = catalog.schema(db_id)
            example['files'] = catalog.files(db_id)
    return examples
def result_cmp(ref_output, cmp_output, reorder):
    """ Compares query result output against reference.

    Both frames are normalized before a cell-wise comparison: string cast,
    integer-style floats stripped of '.0', singleton lists unwrapped, and
    (optionally) rows reordered. Note: mutates the column labels — and with
    ``reorder`` the row order — of both input frames.

    Args:
        ref_output: reference query result
        cmp_output: compare this against reference
        reorder: whether to consider reordering

    Returns:
        Comparable flag, number of differences, similarity
    """
    print(f'-- CodexDB output:\n{cmp_output}\n--\n')
    print(f'CodexDB Index: {cmp_output.index}')
    print(f'CodexDB info: {cmp_output.info()}')
    print(f'-- Reference output:\n{ref_output}\n--\n')
    print(f'Reference Index: {ref_output.index}')
    print(f'Reference info: {ref_output.info()}')
    # Positional column labels so the frames compare by position, not name.
    ref_output.columns = range(ref_output.shape[1])
    cmp_output.columns = range(cmp_output.shape[1])
    try:
        print('Casting all columns to string type ...')
        ref_output = ref_output.astype(str)
        cmp_output = cmp_output.astype(str)
        print('Normalizing representation of integers ...')
        def to_int(float_str):
            """ Transforms rounded float values into integers. """
            if float_str.endswith('.0'):
                return float_str[:-2]
            else:
                return float_str
        ref_output = ref_output.applymap(to_int)
        cmp_output = cmp_output.applymap(to_int)
        print('Normalizing representation of lists ...')
        def unwrap(cell):
            """ Unwrap elements from singleton lists. """
            if isinstance(cell, list) and len(cell) == 1:
                return cell[0]
            else:
                return cell
        ref_output = ref_output.applymap(unwrap)
        cmp_output = cmp_output.applymap(unwrap)
        if reorder:
            # Sort both frames by all columns so row order is irrelevant.
            print('Reordering Rows Before Comparison')
            nr_columns = len(ref_output.columns)
            column_idxs = list(range(nr_columns))
            ref_output.sort_values(by=column_idxs, inplace=True)
            cmp_output.sort_values(by=column_idxs, inplace=True)
            ref_output.reset_index(drop=True, inplace=True)
            cmp_output.reset_index(drop=True, inplace=True)
        print(f'--- CodexDB column types:\n{cmp_output.dtypes}')
        print(f'--- CodexDB normalized output:\n{cmp_output}\n--\n')
        print(f'--- Reference column types:\n{ref_output.dtypes}')
        print(f'--- Normalized reference output:\n{ref_output}\n--\n')
        nr_ref_rows = ref_output.shape[0]
        nr_cmp_rows = cmp_output.shape[0]
        if nr_ref_rows == 0 and nr_cmp_rows == 0:
            diffs = pd.DataFrame()
        else:
            # NOTE(review): DataFrame.compare raises on shape mismatch,
            # which lands in the except branch below ("incomparable").
            diffs = ref_output.compare(cmp_output, align_axis=0)
        print(f'-- Differences:\n{diffs}\n--\n')
        nr_diffs = diffs.shape[0]
        # Similarity decays with the number of differing entries.
        return True, nr_diffs, 1.0/(nr_diffs+1)
    except Exception as e:
        print('(Incomparable)')
        print(f'Exception: {e}')
        return False, -1, 0
def solve(catalog, test_case, coder, engine,
          termination, max_tries, max_temperature):
    """ Solve given test case by generating code.

    Args:
        catalog: database catalog
        test_case: a natural language query
        coder: code generator to use
        engine: execution engine for code
        termination: criterion to advance to next case
        max_tries: maximal number of tries
        max_temperature: maximal temperature

    Returns:
        list of dictionaries with generated code and statistics
    """
    db_id = test_case['db_id']
    schema = catalog.schema(db_id)
    files = catalog.files(db_id)
    question = test_case['question']
    query = test_case['query']
    # Row order only matters when the reference query specifies it.
    reorder = False if 'order by' in query.lower() else True
    # Sampling temperature ramps linearly from 0 toward max_temperature.
    temperature_step = max_temperature / max_tries
    print(f'Treating query {query}, question {question}.')
    results = []
    for try_idx in range(max_tries):
        print("Waiting due to OpenAI's rate limit ...")
        time.sleep(3)
        print(f'Starting try number {try_idx} ...')
        gen_start_s = time.time()
        temperature = try_idx * temperature_step
        gen_stats, code = coder.generate(test_case, temperature)
        print(f'Generated code:\n-------\n{code}\n-------\n')
        print(f'Reference Query: "{query}"')
        gen_total_s = time.time() - gen_start_s
        # Execute the generated code with a 30 second limit.
        executed, codb_result, elapsed_s = engine.execute(db_id, code, 30)
        print(f'CodexDB executed: {executed} in {elapsed_s}s')
        ref_output = pd.DataFrame(test_case['results'])
        comparable, nr_diffs, similarity = result_cmp(
            ref_output, codb_result, reorder)
        nr_tries = try_idx + 1
        # One statistics record per attempt.
        results.append({
            'nr_tries':nr_tries, 'executed':executed, 'comparable':comparable,
            'nr_diffs':nr_diffs, 'similarity':similarity,
            'outsize':len(codb_result),
            'question':question, 'query':query,
            'db':db_id, 'schema':schema, 'files':files,
            'code':code, 'gen_stats':gen_stats, 'gen_total_s':gen_total_s,
            'execution_s':elapsed_s})
        # Stop early once the configured criterion is met.
        if (termination == 'executed' and executed) or \
            (termination == 'solved' and similarity >= 1.0):
            print('Termination Criterion Satisfied.')
            break
    return results
def main(
        data_dir, test_path, language, model_id, prompt_style, id_case,
        mod_start, mod_between, mod_end, sample_path, nr_samples,
        test_start, test_step, test_end, termination, max_tries,
        max_temperature, log_path, result_path):
    """ Try solving given test cases and write results to file.

    Args:
        data_dir: directory containing database
        test_path: path to file with test cases
        language: generate code in this language
        model_id: OpenAI engine for code generation
        prompt_style: choose prompt template
        id_case: whether to consider letter case of identifiers
        mod_start: modification at plan start
        mod_between: modifications between steps
        mod_end: modification at plan end
        sample_path: path to example library
        nr_samples: number of examples in prompt
        test_start: index of first test case
        test_step: gap between test case indexes
        test_end: index of last test case + 1
        termination: termination criterion
        max_tries: maximal tries per test case
        max_temperature: maximal temperature
        log_path: path for logging output
        result_path: path to result .json file
    """
    catalog = codexdb.catalog.DbCatalog(data_dir)
    os.environ['KMP_DUPLICATE_LIB_OK']='True'
    with open(test_path) as file:
        test_cases = json.load(file)
    # Validate the user-provided choices before doing any work.
    if language not in ['python', 'sql']:
        raise ValueError(f'Unknown implementation language: {language}!')
    examples = []
    if sample_path:
        # NOTE(review): this handle is unused — extract_samples reopens
        # the path itself; the with-statement only checks readability.
        with open(sample_path) as file:
            examples = extract_samples(catalog, sample_path)
    if prompt_style not in ['question', 'query', 'plan', 'data']:
        raise ValueError(f'Unknown prompt style: {prompt_style}!')
    if termination not in ['executed', 'solved']:
        raise ValueError(f'Unknown termination criterion: {termination}')
    with open(log_path, 'w') as log_file:
        # All prints below are redirected into the log file.
        with contextlib.redirect_stdout(log_file):
            if language == 'python':
                coder = codexdb.code.PythonGenerator(
                    catalog, examples, nr_samples,
                    prompt_style, model_id,
                    id_case=id_case,
                    mod_start=mod_start,
                    mod_between=mod_between,
                    mod_end=mod_end)
                engine = codexdb.engine.PythonEngine(
                    catalog, id_case)
            elif language == 'sql':
                coder = codexdb.code.SqlGenerator(
                    catalog, examples, nr_samples,
                    prompt_style, model_id)
                engine = codexdb.engine.SqliteEngine(catalog)
            idx_to_results = {}
            for i in range(test_start, test_end, test_step):
                print(f'Starting test case nr. {i} ...')
                test_case = test_cases[i]
                cur_results = solve(
                    catalog, test_case, coder, engine,
                    termination, max_tries, max_temperature)
                idx_to_results[i] = cur_results
                print(cur_results)
                # Rewrite the full result file after every test case so
                # progress survives an interruption.
                with open(result_path, 'w') as results_file:
                    json.dump(idx_to_results, results_file)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('ai_key', type=str, help='Key for OpenAI access')
    parser.add_argument('data_dir', type=str, help='Data directory')
    parser.add_argument('test_path', type=str, help='Path to test case file')
    parser.add_argument('language', type=str, help='Implementation language')
    parser.add_argument('model_id', type=str, help='ID of OpenAI model')
    parser.add_argument('prompt_style', type=str, help='Style of prompt')
    parser.add_argument('mod_start', type=str, help='Instructions at start')
    parser.add_argument('mod_between', type=str, help='Execute between steps')
    parser.add_argument('mod_end', type=str, help='Instructions at end')
    parser.add_argument('sample_path', type=str, help='Path to sample file')
    parser.add_argument('nr_samples', type=int, help='Number of samples in prompt')
    parser.add_argument('nr_tests', type=int, help='Number of test cases')
    parser.add_argument('termination', type=str, help='Termination criterion')
    parser.add_argument('max_tries', type=int, help='Maximal number of tries')
    parser.add_argument('log_path', type=str, help='Redirect output here')
    parser.add_argument('result_path', type=str, help='Contains results')
    args = parser.parse_args()
    openai.api_key = args.ai_key
    # Fixed: the previous call passed 16 positional arguments to main(),
    # which takes 19 parameters (id_case, test_step and test_end were never
    # supplied), so the script crashed with a TypeError before running any
    # test case. Keyword arguments make the mapping explicit; nr_tests maps
    # onto the [0, nr_tests) index range with step 1.
    main(
        data_dir=args.data_dir, test_path=args.test_path,
        language=args.language, model_id=args.model_id,
        prompt_style=args.prompt_style,
        id_case=True,  # TODO(review): confirm default; consider a CLI flag
        mod_start=args.mod_start, mod_between=args.mod_between,
        mod_end=args.mod_end, sample_path=args.sample_path,
        nr_samples=args.nr_samples,
        test_start=0, test_step=1, test_end=args.nr_tests,
        termination=args.termination, max_tries=args.max_tries,
        max_temperature=0.5, log_path=args.log_path,
        result_path=args.result_path)
import setuptools

# Build & upload:
# -$ python setup.py sdist bdist_wheel
# -$ twine upload dist/* -u VicWang -p PYPI******

setuptools.setup(
    name="leaptask",
    version="2020.4.2.14",
    author="Vic Wang",
    author_email="305880887@qq.com",
    description='leap task',
    long_description="leap tasks",
    long_description_content_type="text/markdown",
    url='http://github.com/XRDX/pyleap',
    packages=setuptools.find_packages(),
    # ``classifiers`` must be a list — newer setuptools rejects tuples.
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
    ],
)
|
import sys
import os.path
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir,)))
from graph.arango_utils import aql_query, connect_to_mim_database
from graphviz import Digraph
def main():
    """Render a slice of the UK SIC class hierarchy plus member links to PNG.

    Queries the ArangoDB graph for paths rooted at the MANUFACTURING sector
    and draws them with Graphviz.
    """
    db = connect_to_mim_database()
    # Earlier query experiments, kept for reference:
    # query = f'''
    # FOR m IN Members
    # FILTER m.name=='ABB Robotics UK'
    # FOR u, e, p IN 2 ANY m MembersToClass OPTIONS {{uniqueVertices: true}}
    # FILTER IS_SAME_COLLECTION(u, 'Members')
    # LIMIT 5
    # RETURN p
    # '''
    # query = f'''
    # FOR m IN Members
    # FILTER m.name=='ABB Robotics UK'
    # FOR u, e, p IN 5 ANY m MembersToClass, ANY MemberMemberCommerce OPTIONS {{uniqueVertices: true}}
    # FILTER IS_SAME_COLLECTION(u, 'Members')
    # LIMIT 1
    # RETURN p
    # '''
    # query = f'''
    # FOR c IN UKClasses
    # FILTER c.type=='sector'
    # FILTER c.identifier == 'MANUFACTURING'
    # LIMIT 1
    # FOR u, e, p IN 3 INBOUND c UKClassHierarchy
    # LIMIT 5
    # RETURN p
    # '''
    # Paths of length 4 into the MANUFACTURING sector via the class
    # hierarchy and member-to-class edges.
    query = f'''
    FOR c IN UKClasses
    FILTER c.type=='sector'
    FILTER c.identifier == 'MANUFACTURING'
    LIMIT 1
    FOR u, e, p IN 4 INBOUND c UKClassHierarchy, INBOUND MembersToClass
    LIMIT 4
    RETURN p
    '''
    arango_graph = aql_query(db, query)
    graph_name = 'SIC_hierarchy_members'
    g = Digraph(graph_name, filename=graph_name, format='png', engine='dot')
    g.attr(scale='3', label='', fontsize='9', #size="10,10!"
           rankdir="LR",
           # rankdir="BT",
           )
    g.attr('node', width='2')
    for item in arango_graph:
        # Class vertices carry an 'identifier'; member vertices a 'name'.
        # The root sector and members are drawn as boxes, classes as circles.
        for vertex in item['vertices']:
            if "identifier" in vertex:
                name = vertex["identifier"]
                if name == "MANUFACTURING":
                    shape = "box"
                else:
                    shape="circle"
            else:
                name = vertex["name"]
                shape="box"
            g.node(vertex['_id'], label="\n".join(name.split()), shape=shape)
        for edge in item['edges']:
            g.edge(edge['_from'], edge['_to'], )
        # for i, edge in enumerate(item['edges']):
        #     if i == 0 or i == 3:
        #         g.edge(edge['_from'], edge['_to'], )
        #     elif i == 1 or i == 4:
        #         g.edge(edge['_to'], edge['_from'], )
        #     elif i == 2:
        #         g.edge(edge['_from'], edge['_to'], )
        #         g.edge(edge['_to'], edge['_from'], )
    # Render to file into some directory
    g.render(directory='.', filename=graph_name)
    # Or just show rendered file using system default program
    # g.view()


if __name__ == "__main__":
    main()
#from django.urls import path
# Consolidated: ``url`` and ``include`` were previously imported three
# times across duplicate ``django.conf.urls`` import lines.
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers

from . import views

# The empty pattern '' matches every URL as a prefix, so the home page
# view handles all paths.
urlpatterns = [
    url('', views.homePageView, name='home')
]

'''
router = routers.DefaultRouter()
router.register(r'', views.homePageView, name='home')

urlpatterns = [
    url(r'', include(router.urls)),
]
'''
# Copyright 2020, 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dpctl
import numpy as np
import numba_dppy as dppy
# Element-wise vector sum kernel: c[i] = a[i] + b[i], one work-item per element.
# debug=True compiles the kernel with debug info for stepping in gdb; the
# trailing comments look like breakpoint anchors used by the debugging
# walkthrough, so they are kept verbatim.
@dppy.kernel(debug=True)
def data_parallel_sum(a_in_kernel, b_in_kernel, c_in_kernel):
    i = dppy.get_global_id(0)  # numba-kernel-breakpoint
    l1 = a_in_kernel[i]  # second-line
    l2 = b_in_kernel[i]  # third-line
    c_in_kernel[i] = l1 + l2  # fourth-line
def driver(a, b, c, global_size):
    """Print the inputs, launch the sum kernel over ``global_size`` work items,
    then print the result array ``c`` (filled in place by the kernel)."""
    print("before : ", a)
    print("before : ", b)
    print("before : ", c)
    data_parallel_sum[global_size, dppy.DEFAULT_LOCAL_SIZE](a, b, c)
    print("after : ", c)
def main():
    """Allocate input/output arrays, pick the default SYCL device, run the demo."""
    global_size = 10
    N = global_size
    a = np.arange(N, dtype=np.float32)
    b = np.arange(N, dtype=np.float32)
    c = np.empty_like(a)  # output buffer, written by the kernel
    # Use the environment variable SYCL_DEVICE_FILTER to change the default device.
    # See https://github.com/intel/llvm/blob/sycl/sycl/doc/EnvironmentVariables.md#sycl_device_filter.
    device = dpctl.select_default_device()
    print("Using device ...")
    device.print_device_info()
    # Offload the kernel launch to the selected device.
    with dpctl.device_context(device):
        driver(a, b, c, global_size)
    print("Done...")
if __name__ == "__main__":
    main()
|
"""Module for server."""
import logging
import os
from typing import Any
from aiohttp import web
from aiohttp_middlewares import cors_middleware, error_middleware
from dotenv import load_dotenv
from .routes import (
delete_ontology,
get_ontology,
get_ontology_type,
get_slash,
ping,
put_ontology,
put_ontology_type,
ready,
)
load_dotenv()  # read configuration overrides from a local .env file
# Server configuration, each overridable via environment variables:
LOGGING_LEVEL = os.getenv("LOGGING_LEVEL", "INFO")
SERVER_ROOT = os.getenv("SERVER_ROOT", "/srv/www/static-rdf-server")
DATA_ROOT = os.getenv("DATA_ROOT", os.path.join(SERVER_ROOT, "data"))
STATIC_ROOT = os.getenv("STATIC_ROOT", os.path.join(SERVER_ROOT, "static"))
DEFAULT_LANGUAGE = "nb"  # fallback language code
async def create_app() -> web.Application:
    """Create and configure the aiohttp web application.

    Installs CORS and error middleware, configures logging, registers all
    routes, and attaches a cleanup context that seeds the app's config values.
    """
    application = web.Application(
        middlewares=[
            cors_middleware(allow_all=True),
            error_middleware(),  # default error handler for whole application
        ],
    )
    # Logging:
    logging.basicConfig(level=LOGGING_LEVEL)
    logging.getLogger("chardet.charsetprober").setLevel(LOGGING_LEVEL)
    # Routes, registered in order as (http method, path, handler):
    route_table = [
        ("get", "/ready", ready),
        ("get", "/ping", ping),
        ("get", "/", get_slash),
        ("get", "/{ontology_type}", get_ontology_type),
        ("put", "/{ontology_type}", put_ontology_type),
        ("get", "/{ontology_type}/{ontology}", get_ontology),
        ("put", "/{ontology_type}/{ontology}", put_ontology),
        ("delete", "/{ontology_type}/{ontology}", delete_ontology),
    ]
    for method, path, handler in route_table:
        # use add_get/add_put/add_delete so aiohttp's per-method semantics
        # (e.g. implicit HEAD for GET) are identical to explicit calls
        getattr(application.router, "add_" + method)(path, handler)

    async def app_context(application: Any) -> Any:
        """Seed the application mapping with config values; no teardown needed."""
        application["SERVER_ROOT"] = SERVER_ROOT
        application["DATA_ROOT"] = DATA_ROOT
        application["STATIC_ROOT"] = STATIC_ROOT
        application["DEFAULT_LANGUAGE"] = DEFAULT_LANGUAGE
        yield

    application.cleanup_ctx.append(app_context)
    return application
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import functools
import numpy as np
import dask.array as da
import xarray as xr
import skimage as ski
from scipy.ndimage.filters import convolve
#import bottlenack as bn
def view(offset_y, offset_x, size_y, size_x, step=1):
    """
    Calculate paired slice tuples for windowed operations on arrays.

    Instead of looping over every element and then over the window, the whole
    array is shifted against itself while looping over the window offsets.
    To avoid padding, the "inner" and "outer" views are swapped whenever a
    shift would fall outside the array bounds.

    Parameters
    ----------
    offset_y : int
        Row offset of the current window row index from the center point.
    offset_x : int
        Column offset of the current window column index from the center point.
    size_y : int
        Number of rows of the array.
    size_x : int
        Number of columns of the array.
    step : int, optional
        Slice step, defaults to 1.

    Returns
    -------
    tuple of 2D slice tuples
        ``(window_view, main_view)`` usable as ``arr[window_view]`` /
        ``arr[main_view]``.

    Notes
    -----
    Source: https://landscapearchaeology.org/2018/numpy-loops/
    """
    shift_y = abs(offset_y)
    shift_x = abs(offset_x)
    inner_x = slice(shift_x, size_x, step)
    outer_x = slice(0, size_x - shift_x, step)
    inner_y = slice(shift_y, size_y, step)
    outer_y = slice(0, size_y - shift_y, step)
    # swap when the shift points in the negative direction
    if offset_x < 0:
        inner_x, outer_x = outer_x, inner_x
    if offset_y < 0:
        inner_y, outer_y = outer_y, inner_y
    # window view (in) first, main view (out) second
    return (inner_y, inner_x), (outer_y, outer_x)
def num_neighbours(lag=1):
    """
    Calculate the number of neighbour pixels for a given lag.

    The neighbours form the outermost ring of a (2*lag + 1)^2 window,
    i.e. the full window minus the next smaller window.

    Parameters
    ----------
    lag : int
        Lag distance, defaults to 1.

    Returns
    -------
    int
        Number of neighbours.
    """
    outer = (2 * lag + 1) ** 2
    inner = (2 * (lag - 1) + 1) ** 2
    return outer - inner
def create_kernel(n=5, geom="square", kernel=None):
    """
    Create a kernel of size n.

    Parameters
    ----------
    n : int, optional
        Kernel size, defaults to 5.
    geom : {"square", "round"}
        Geometry of the kernel. Defaults to square.
    kernel : np.array, optional
        Custom kernel to convolve with. If kernel argument is given
        parameters n and geom are ignored.

    Returns
    -------
    np.array

    Raises
    ------
    ValueError
        If ``geom`` is neither "square" nor "round" (previously this case
        crashed with an UnboundLocalError).
    """
    if kernel is None:
        if geom == "square":
            k = np.ones((n, n))
        elif geom == "round":
            xind, yind = np.indices((n, n))
            c = n // 2
            center = (c, c)
            radius = c / 2
            # boolean disk strictly inside `radius` around the centre pixel
            circle = (xind - center[0])**2 + (yind - center[1])**2 < radius**2
            # np.int was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin int is the documented replacement.
            k = circle.astype(int)
        else:
            raise ValueError("geom must be 'square' or 'round', got %r" % (geom,))
    else:
        k = kernel
    return k
def nd_variogram(x, y):
    """
    Inner most calculation step of variogram and pseudo-cross-variogram.

    This function is used in the inner most loop of `neighbour_diff_squared`.

    Parameters
    ----------
    x, y : np.array

    Returns
    -------
    np.array
        Element-wise squared difference (x - y)**2.
    """
    diff = x - y
    return diff * diff
def nd_madogram(x, y, *args):
    """
    Inner most calculation step of madogram.

    This function is used in the inner most loop of `neighbour_diff_squared`.
    Extra positional arguments are accepted (and ignored) so the call
    signature is interchangeable with the other nd_* step functions.

    Parameters
    ----------
    x, y : np.array

    Returns
    -------
    np.array
        Element-wise absolute difference |x - y|.
    """
    return np.absolute(x - y)
def nd_rodogram(x, y, *args):
    """
    Inner most calculation step of rodogram.

    This function is used in the inner most loop of `neighbour_diff_squared`.

    Parameters
    ----------
    x, y : np.array

    Returns
    -------
    np.array
        Square root of the absolute difference, |x - y| ** 0.5.
    """
    # Fixed: np.sqr does not exist (the original raised AttributeError at
    # runtime); the rodogram uses the square root of the absolute difference.
    res = np.sqrt(np.abs(x - y))
    return res
def nd_cross_variogram(x1, y2, x2, y1):
    """
    Inner most calculation step of cross-variogram.

    This function is used in the inner most loop of `neighbour_diff_squared`.

    Parameters
    ----------
    x1, y2, x2, y1 : np.array
        The two variables (x, y) sampled at the two shifted positions (1, 2).

    Returns
    -------
    np.array
        Element-wise product of the two differences, (x1 - x2) * (y1 - y2).
    """
    dx = x1 - x2
    dy = y1 - y2
    return dx * dy
def neighbour_diff_squared(arr1, arr2=None, lag=1, func="nd_variogram"):
    """
    Calculates the squared difference between a pixel and its neighbours
    at the specified lag.
    If only one array is supplied variogram is calculated
    for itself (same array is used as the second array).
    Parameters
    ----------
    arr1 : np.array
    arr2 : np.array, optional
    lag : int, optional
        The lag distance for the variogram, defaults to 1.
    func : {nd_variogram, nd_pseudo_cross_variogram, nd_madogram, nd_rodogram, nd_cross_variogram}
        Name of the inner-step function, resolved via ``globals()``.
        NOTE(review): ``nd_pseudo_cross_variogram`` is not defined in the
        visible part of this module — verify it exists before using it.
    Returns
    -------
    np.array
        Variogram
    """
    method = globals()[func]  # resolve the inner-step function by name
    win = 2*lag + 1
    radius = win // 2
    rows, cols = arr1.shape
    if arr2 is None:
        arr2 = arr1.copy()  # self-variogram: compare the array with itself
    out_arr = np.zeros_like(arr1)
    r = list(range(win))
    for y in r:
        y_off = y - radius
        # Top and bottom window rows visit every column; the middle rows only
        # visit the outermost columns, so exactly the ring of neighbours at
        # the given lag is accumulated (and the centre pixel is skipped).
        if y == min(r) or y == max(r):
            x_r = r
        else:
            x_r = [max(r), min(r)]
        for x in x_r:
            x_off = x - radius
            view_in, view_out = view(y_off, x_off, rows, cols)
            if func == "nd_cross_variogram":
                # the cross-variogram needs both variables at both positions
                out_arr[view_out] += method(arr1[view_out], arr2[view_in], arr1[view_in], arr2[view_out])
            else:
                out_arr[view_out] += method(arr1[view_out], arr2[view_in])
            #out_arr[view_out] += method(arr1[view_out], arr2[view_in])
            #a1 = arr1[view_out]
            #a2 = arr2[view_in]
            #out_arr[view_out] += (a1 - a2)**2
    return out_arr
def _dask_neighbour_diff_squared(x, y=None, lag=1, func="nd_variogram"):
    """
    Calculate squared difference between pixel and its
    neighbours at specified lag for dask arrays.

    Each chunk is expanded by ``lag`` pixels of reflected halo so that
    neighbour lookups at the chunk edges see the adjacent chunk's data;
    the halo is trimmed off again after the block-wise computation.

    Parameters
    ----------
    x : dask.array
    y : dask.array, optional
        Defaults to None (x is compared with itself).
    lag : int, optional
    func : {nd_variogram, nd_madogram, nd_rodogram, nd_cross_variogram}
        Calculation method of innermost step of different variogram methods.

    Returns
    -------
    dask.array
        Difference part of variogram calculations.
    """
    pvario = functools.partial(neighbour_diff_squared, lag=lag, func=func)
    # overlap parameters were previously duplicated in both branches
    depth = {0: lag, 1: lag}
    boundary = {0: "reflect", 1: "reflect"}
    x = da.overlap.overlap(x, depth=depth, boundary=boundary)
    y = x if y is None else da.overlap.overlap(y, depth=depth, boundary=boundary)
    res = da.map_blocks(pvario, x, y)
    res = da.overlap.trim_internal(res, depth)
    return res
def window_sum(x, lag, win_size, win_geom):
    """
    Calculate the window sum for the various textures.
    Parameters
    ----------
    x : array like
        Input array (numpy or dask).
    lag : int
        Lag distance for the variogram normalisation.
    win_size : int
        Length of one side of window. Window will be of size win_size*win_size.
    win_geom : {"square", "round"}
        Geometry of the kernel.
    Returns
    -------
    array like
        Array where each element is the variogram of the window around the element
    """
    k = create_kernel(n=win_size, geom=win_geom)
    #create convolve function with reduced parameters for map_overlap
    pcon = functools.partial(convolve, weights=k)
    if isinstance(x, da.core.Array):
        # dask arrays need a halo of half the window so chunk edges convolve
        # against their neighbours' data
        conv_padding = int(win_size//2)
        res = x.map_overlap(pcon, depth={0: conv_padding, 1: conv_padding})
    else:
        res = pcon(x)
    #calculate 1/2N part of variogram
    neighbours = num_neighbours(lag)
    num_pix = np.sum(k)
    factor = 2 * num_pix * neighbours
    return res / factor
def _win_view_stat(x, win_size=5, stat="nanmean"):
    """
    Calculates specified basic statistical measure for a moving window
    over an array.
    Parameters
    ----------
    x : np.array
        2D input array.
    win_size : int, optional
        Window size, defaults to 5.
    stat : {"nanmean", "nanmax", "nanmin", "nanmedian", "nanstd"}
        Statistical measure to calculate; resolved as an attribute of numpy,
        so any ``np.<stat>`` reduction accepting an ``axis`` argument works.
    Returns
    -------
    np.array
    """
    #if x.shape == (1, 1):
    #return x
    measure = getattr(np, stat)  # resolve e.g. np.nanmean by name
    pad = int(win_size//2)
    # NaN-pad the borders so the windowed output keeps the input's shape;
    # the nan* measures then ignore the padding values.
    data = np.pad(x, (pad, pad), mode="constant", constant_values=(np.nan))
    #sh = np.asarray(x).shape
    #mask = np.zeros_like(x)
    #mask[pad:sh[0]-pad, pad:sh[1]-pad] = 1
    #data = np.where(mask==1, x, np.nan)
    #get windowed view of array
    windowed = ski.util.view_as_windows(data, (win_size, win_size))
    #calculate measure over last two axes (the window dimensions)
    res = measure(windowed, axis=(2, 3))
    return res
def xr_wrapper(fun):
    """Decorator that lets plain-array functions accept xarray DataArrays.

    When the first argument is a ``DataArray`` the wrapped function runs on
    the underlying ``.data`` and the result is wrapped back into a copy of
    that DataArray, with a derived ``name`` plus lag/window bookkeeping
    attributes. Non-DataArray inputs pass straight through unchanged.
    """
    @functools.wraps(fun)
    def wrapped_fun(*args, **kwargs):
        if isinstance(args[0], xr.core.dataarray.DataArray):
            out = args[0].copy()
            if len(args) == 2:
                # two-array variant (e.g. cross-variogram): combine both names
                out.data = fun(args[0].data, args[1].data, **kwargs)
                out.attrs["name"] = fun.__name__ + "_{}_{}".format(args[0].attrs["name"], args[1].attrs["name"])
            else:
                out.data = fun(args[0].data, **kwargs)
                out.attrs["name"] = fun.__name__ + "_{}".format(args[0].attrs["name"])
            # record which parameters produced this result
            if fun.__name__ == "window_statistic":
                out.attrs["statistic"] = kwargs.get("func")
            else:
                out.attrs["lag_distance"] = kwargs.get("lag")
            out.attrs["window_geometry"] = kwargs.get("win_geom")
            out.attrs["window_size"] = kwargs.get("win_size")
            out.name = out.attrs["name"]
        else:
            if len(args) == 2:
                out = fun(args[0], args[1], **kwargs)
            else:
                out = fun(args[0], **kwargs)
        return out
    return wrapped_fun
##########################
#alternative version test
##########################
#def neighbour_diff_squared1(arr1, arr2=None, lag=1):
#"""
#Calculates the (pseudo-) variogram between two arrays.
#If only one array is supplied variogram is calculated
#for itself (same array is used as the second array).
#Parameters
#----------
#arr1 : np.array
#arr2 : np.array, optional
#lag : int, optional
#The lag distance for the variogram, defaults to 1.
#Returns
#-------
#np.array
#Variogram
#"""
#twoband = False
#win = 2*lag + 1
#radius = int(win/2)
##if arr2 is None:
## arr2 = arr1.copy()
#inshape0 = arr1.shape[0]
#if len(arr1.shape) == 3:# and inshape0 == 2:
#input1 = arr1[0,:,:]
#input2 = arr1[1,:,:]
#twoband = True
##elif len(arr1.shape) == 2:
## print(arr1.shape)
## input1 = arr1
## input2 = arr1.copy()
#elif arr2 is not None:
##Raise error only two bands are allowed
##pass
#input1 = arr1
#input2 = arr2
#input1 = np.asarray(input1)
#rows, cols = input1.shape
#out_arr = np.zeros(input1.shape, dtype=input1.dtype.name)
#r = list(range(win))
#for x in r:
#x_off = x - radius
#if x == min(r) or x == max(r):
#y_r = r
#else:
#y_r = [max(r), min(r)]
#for y in y_r:
#y_off = y - radius
##view_in, view_out = view(y_off, x_off, rows, cols)
#x_in = slice(abs(x_off) , cols, 1)
#x_out = slice(0, cols - abs(x_off), 1)
#y_in = slice(abs(y_off), rows, 1)
#y_out = slice(0, rows - abs(y_off), 1)
## the swapping trick
#if x_off < 0: x_in, x_out = x_out, x_in
#if y_off < 0: y_in, y_out = y_out, y_in
## return window view (in) and main view (out)
##return np.s_[y_in, x_in], np.s_[y_out, x_out]
#out_arr[y_out, x_out] += (input1[y_out, x_out] - input2[y_in, x_in])**2
#if twoband:
#arr1[0,:,:] = out_arr
#return arr1
#else:
#return out_arr
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Configuration consumed by the apptrace profiling tool:
apptrace_URL_PATTERNS = ['top50']
apptrace_TRACE_MODULES = ['models.py', 'app.py', 'views.py']
def webapp_add_wsgi_middleware(app):
    """Wrap the WSGI app with App Engine's appstats recording middleware."""
    from google.appengine.ext.appstats import recording
    app = recording.appstats_wsgi_middleware(app)
    return app
|
"""Python setup.py for project_name package"""
import io
import os
from setuptools import find_packages, setup
def read(*paths, **kwargs):
    """Read the contents of a text file safely.

    The path segments are joined relative to this file's directory and the
    file's stripped contents are returned.

    >>> read("project_name", "VERSION")
    '0.1.0'
    >>> read("README.md")
    ...
    """
    target = os.path.join(os.path.dirname(__file__), *paths)
    encoding = kwargs.get("encoding", "utf8")
    with io.open(target, encoding=encoding) as open_file:
        return open_file.read().strip()
def take_package_name(name):
    """Extract the bare package name from a requirements line.

    Editable lines ("-e ...#egg=<name>-<version>") yield the <name> part
    between the first "=" and the last "-"; any other line is returned
    stripped of surrounding whitespace.
    """
    if not name.startswith("-e"):
        return name.strip()
    return name[name.find("=") + 1 : name.rfind("-")]
def read_requirements(path):
    """Parse a requirements file into a list of stripped requirement strings,
    skipping quoted lines, comments, option/editable lines and VCS links."""
    skip_prefixes = ('"', "#", "-", "git+")
    lines = read(path).split("\n")
    return [entry.strip() for entry in lines if not entry.startswith(skip_prefixes)]
# def load_requires_from_file(filepath):
# with open(filepath) as fp:
# return [take_package_name(pkg_name) for pkg_name in fp.readlines()]
def load_links_from_file(filepath):
    """Collect dependency links from editable ("-e <link>") requirement lines.

    Parameters
    ----------
    filepath : str
        Path to the requirements file.

    Returns
    -------
    list of str
        The link part of every "-e" line. Fixed: the link is now stripped —
        previously ``split(" ")[1]`` kept the trailing newline, producing
        malformed dependency links.
    """
    res = []
    with open(filepath) as fp:
        for pkg_name in fp.readlines():
            if pkg_name.startswith("-e"):
                res.append(pkg_name.split(" ")[1].strip())
    return res
# Package metadata and dependency wiring; requirements are read from the
# checked-in requirements files at build time.
setup(
    name="spotify_dlx",
    version=read("spotify_dlx", "VERSION"),
    description="Download songs from Spotify like youtube-dl",
    url="https://github.com/ozora-ogino/spotify_dlx/",
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    author="ozora-ogino",
    packages=find_packages(exclude=["tests", ".github"]),
    install_requires=read_requirements("requirements.txt"),
    dependency_links=load_links_from_file("requirements.txt"),
    # Fixed: the console script still pointed at the "project_name" template
    # placeholder although the package is spotify_dlx.
    entry_points={"console_scripts": ["spotify_dlx = spotify_dlx.__main__:main"]},
    extras_require={"test": read_requirements("requirements-test.txt")},
)
|
#!/usr/bin/python3
import os
import time
from lsb import Lsb
from util import cmd_parser
def main():
    """Embed a secret message into a BMP image, or extract one, via LSB steganography.

    All behaviour is driven by the parsed CLI arguments: ``--embed`` selects
    embedding mode (otherwise the tool extracts); the message comes from
    ``--message`` or from a secret file.
    """
    args = cmd_parser()
    coding = args.coding
    secret_message = args.message
    source_file = args.source_file
    result_file = args.result_file
    secret_file = args.secret_file
    # Input check: embedding needs a message from one of the two sources
    if args.embed is True and args.message is None and args.secret_file is None:
        print("ERROR: Embedding requires a secret message!")
        return
    # Read the secret file, if it's specified (newlines are flattened out)
    if args.secret_file is not None:
        with open(secret_file, 'r') as text_file:
            secret_message = text_file.read().replace('\n', '')
    # Coding must be an integer; defaults to 8 when not given
    if coding is not None:
        try:
            coding = int(coding)
        except ValueError:
            print("ERROR: unsupported encoding format '" + coding + "'")
            return
    else:
        coding = 8
    # Embedding
    if args.embed:
        # Set the result file if it's not specified
        if not args.result_file:
            dir_name = os.path.dirname(os.path.normpath(source_file))
            result_file = dir_name + "/secret_image.bmp"
        # Ensure the output file always carries a .bmp extension
        res_name, res_extension = os.path.splitext(result_file)
        if res_extension != ".bmp":
            result_file = res_name + ".bmp"
        s_time = time.time()
        is_embedded = Lsb(source_file, coding).embed(secret_message, result_file)
        e_time = time.time()
        if is_embedded:
            print("Secret message was successfully embedded!\n")
            print("Time: " + f'{(e_time - s_time):.2f}' + " s")
        else:
            print("Something went wrong...")
    # Extracting
    else:
        s_time = time.time()
        secret = Lsb(source_file, coding).extract()
        e_time = time.time()
        print("Embedded message:\n" + secret + "\n")
        print("Time: " + f'{(e_time - s_time):.2f}' + " s")
if __name__ == '__main__':
    main()
|
import argparse

# Command-line interface: employee identity, payment amount, and a mutually
# exclusive output-format switch. (Fixed: stray trailing semicolon removed.)
parser = argparse.ArgumentParser()
parser.add_argument("--first", default="Admin", help='First Name')
parser.add_argument("--last", default="Admin", help='Last Name')
# dest='EPAY' means the parsed value is read back as args.EPAY, not args.pay
parser.add_argument("-p", "--pay", default=1000, dest='EPAY', metavar='PAY', type=int, help='Payment')
parser.add_argument("--secret", help=argparse.SUPPRESS)  # hidden from --help
mgrp = parser.add_mutually_exclusive_group()
mgrp.add_argument('--xml', action='store_true', help="Get xml data")
mgrp.add_argument('--html', action='store_true', help="Get html data")
mgrp.add_argument('--yaml', action='store_true', help="Get yaml data")
mgrp.add_argument('--json', action='store_true', help="Get json data")
args = parser.parse_args()
# args.first, args.last, args.EPAY
class Employee:
    """A sample Employee class."""

    # Multiplier used by apply_raise (a 5% raise).
    raise_amt = 1.05

    def __init__(self, first, last, pay):
        # Fixed: removed leftover debug `print(self.__dict__)` that spammed
        # stdout on every construction.
        self.first = first
        self.last = last
        self.pay = pay

    @property
    def email(self):
        """Derived e-mail address, '<first>.<last>@email.com'."""
        return '{}.{}@email.com'.format(self.first, self.last)

    @property
    def fullname(self):
        """Full name, '<first> <last>'."""
        return '{} {}'.format(self.first, self.last)

    def apply_raise(self):
        """Raise pay in place by raise_amt, truncating to an int."""
        self.pay = int(self.pay * self.raise_amt)

    def monthly_schedule(self, month):
        """Placeholder for fetching the monthly schedule; currently a no-op."""
        # response = requests.get(f'http://company.com/{self.last}/{month}')
        # if response.ok:
        #     return response.text
        # else:
        #     return 'Bad Response!'
        pass
# --- end of class definition ---
if __name__ == "__main__":
    # Build an Employee from the parsed CLI values; the payment is stored
    # under dest='EPAY', hence args.EPAY rather than args.pay.
    first, last, pay = args.first, args.last, args.EPAY
    emp = Employee(first, last, pay)
|
import os
import htmlPy
from PyQt4 import QtGui
#initial configuration
#we get the absolute path of the initialization script to manage all the assets and backend
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
#GUI initialization
app = htmlPy.AppGUI(title=u"PepperPresenter Application")
app.maximized = True
#GUI configuration: static assets and HTML templates live next to this script
app.static_path = os.path.join(BASE_DIR, "static/")
app.template_path = os.path.join(BASE_DIR, "template/")
#app.window.setWindowIcon(QtGui.QIcon(BASE_DIR + "/static/img/icon.png"))
#Bind back end with front end
#import functionalities
from back_end.BackEnd import PepperApp
#register functionalities: expose PepperApp's methods to the HTML front end
app.bind(PepperApp(app))
app.template = ("index.html", {})
#run the app (start the Qt event loop)
if __name__ == "__main__":
    app.start()
|
"""
The Quantum Graph Recurrent Neural Network
===========================================
.. meta::
:property="og:description": Using a quantum graph recurrent neural network to learn quantum dynamics.
:property="og:image": https://pennylane.ai/qml/_images/qgrnn_thumbnail.png
*Author: Jack Ceroni*
"""
######################################################################
# This demonstration investigates quantum graph
# recurrent neural networks (QGRNN), which are the quantum analogue of a
# classical graph recurrent neural network, and a subclass of the more
# general quantum graph
# neural network ansatz. Both the QGNN and QGRNN were introduced in
# `this paper (2019) <https://arxiv.org/abs/1909.12264>`__.
######################################################################
# The Idea
# --------
#
######################################################################
# A graph is defined as a set of *nodes* along with a set of
# **edges**, which represent connections between nodes.
# Information can be encoded into graphs by assigning numbers
# to nodes and edges, which we call **weights**.
# It is usually convenient to think of a graph visually:
#
# .. image:: ../demonstrations/qgrnn/graph.png
# :width: 70%
# :align: center
#
# In recent years, the concept of a
# `graph neural network <https://arxiv.org/abs/1812.08434>`__ (GNN) has been
# receiving a lot of attention from the machine learning community.
# A GNN seeks
# to learn a representation (a mapping of data into a
# low-dimensional vector space) of a given graph with feature vectors assigned
# to nodes and edges. Each of the vectors in the learned
# representation preserves not only the features, but also the overall
# topology of the graph, i.e., which nodes are connected by edges. The
# quantum graph neural network attempts to do something similar, but for
# features that are quantum-mechanical; for instance, a
# collection of quantum states.
#
######################################################################
# Consider the class of qubit Hamiltonians that are *quadratic*, meaning that
# the terms of the Hamiltonian represent either interactions between two
# qubits, or the energy of individual qubits.
# This class of Hamiltonians is naturally described by graphs, with
# second-order terms between qubits corresponding to weighted edges between
# nodes, and first-order terms corresponding to node weights.
#
# A well known example of a quadratic Hamiltonian is the transverse-field
# Ising model, which is defined as
#
# .. math::
#
# \hat{H}_{\text{Ising}}(\boldsymbol\theta) \ = \ \displaystyle\sum_{(i, j) \in E}
# \theta_{ij}^{(1)} Z_{i} Z_{j} \ + \ \displaystyle\sum_{i} \theta_{i}^{(2)} Z_{i} \ + \
# \displaystyle\sum_{i} X_{i},
#
# where :math:`\boldsymbol\theta \ = \ \{\theta^{(1)}, \ \theta^{(2)}\}`.
# In this Hamiltonian, the set :math:`E` that determines which pairs of qubits
# have :math:`ZZ` interactions can be represented by the set of edges for some graph. With
# the qubits as nodes, this graph is called the *interaction graph*.
# The :math:`\theta^{(1)}` parameters correspond to the edge weights and
# the :math:`\theta^{(2)}`
# parameters correspond to weights on the nodes.
#
######################################################################
# This result implies that we can think about *quantum circuits* with
# graph-theoretic properties. Recall that the time-evolution operator
# with respect to some Hamiltonian :math:`H` is defined as:
#
# .. math:: U \ = \ e^{-it H}.
#
# Thus, we have a clean way of taking quadratic Hamiltonians and turning
# them into unitaries (quantum circuits) that preserve the same correspondence to a graph.
# In the case of the Ising Hamiltonian, we have:
#
# .. math::
#
# U_{\text{Ising}} \ = \ e^{-it \hat{H}_{\text{Ising}} (\boldsymbol\theta)} \ = \ \exp \Big[ -it
# \Big( \displaystyle\sum_{(i, j) \in E} \theta_{ij}^{(1)} Z_{i} Z_{j} \ + \
# \displaystyle\sum_{i} \theta_{i}^{(2)} Z_{i} \ + \ \displaystyle\sum_{i} X_{i} \Big) \Big]
#
# In general, this kind of unitary is very difficult to implement on a quantum computer.
# However, we can approximate it using the `Trotter-Suzuki decomposition
# <https://en.wikipedia.org/wiki/Time-evolving_block_decimation#The_Suzuki-Trotter_expansion>`__:
#
# .. math::
#
# \exp \Big[ -it \Big( \displaystyle\sum_{(i, j) \in E} \theta_{ij}^{(1)} Z_{i} Z_{j} \ + \
# \displaystyle\sum_{i} \theta_{i}^{(2)} Z_{i} \ + \ \displaystyle\sum_{i} X_{i} \Big) \Big]
# \ \approx \ \displaystyle\prod_{k \ = \ 1}^{t / \Delta} \Bigg[ \displaystyle\prod_{j \ = \
# 1}^{Q} e^{-i \Delta \hat{H}_{\text{Ising}}^{j}(\boldsymbol\theta)} \Bigg]
#
# where :math:`\hat{H}_{\text{Ising}}^{j}(\boldsymbol\theta)` is the :math:`j`-th term of the
# Ising Hamiltonian and :math:`\Delta` is some small number.
#
# This circuit is a specific instance of the **Quantum Graph
# Recurrent Neural Network**, which in general is defined as a variational ansatz of
# the form
#
# .. math::
#
# U_{H}(\boldsymbol\mu, \ \boldsymbol\gamma) \ = \ \displaystyle\prod_{i \ = \ 1}^{P} \Bigg[
# \displaystyle\prod_{j \ = \ 1}^{Q} e^{-i \gamma_j H^{j}(\boldsymbol\mu)} \Bigg],
#
# for some parametrized quadratic Hamiltonian, :math:`H(\boldsymbol\mu)`.
######################################################################
# Using the QGRNN
# ^^^^^^^^^^^^^^^^
#
######################################################################
# Since the QGRNN ansatz is equivalent to the
# approximate time evolution of some quadratic Hamiltonian, we can use it
# to learn the dynamics of a quantum system.
#
# Continuing with the Ising model example, let's imagine we have some system
# governed by :math:`\hat{H}_{\text{Ising}}(\boldsymbol\alpha)` for an unknown set of
# target parameters,
# :math:`\boldsymbol\alpha` and an unknown interaction graph :math:`G`. Let's also
# suppose we have access to copies of some
# low-energy, non-ground state of the target Hamiltonian, :math:`|\psi_0\rangle`. In addition,
# we have access to a collection of time-evolved states,
# :math:`\{ |\psi(t_1)\rangle, \ |\psi(t_2)\rangle, \ ..., \ |\psi(t_N)\rangle \}`, defined by:
#
# .. math:: |\psi(t_k)\rangle \ = \ e^{-i t_k \hat{H}_{\text{Ising}}(\boldsymbol\alpha)} |\psi_0\rangle.
#
# We call the low-energy states and the collection of time-evolved states *quantum data*.
# From here, we randomly pick a number of time-evolved states
# from our collection. For any state that we choose, which is
# evolved to some time :math:`t_k`, we compare it
# to
#
# .. math::
#
# U_{\hat{H}_{\text{Ising}}}(\boldsymbol\mu, \ \Delta) |\psi_0\rangle \ \approx \ e^{-i t_k
# \hat{H}_{\text{Ising}}(\boldsymbol\mu)} |\psi_0\rangle.
#
# This is done by feeding one of the copies of :math:`|\psi_0\rangle` into a quantum circuit
# with the QGRNN ansatz, with some guessed set of parameters :math:`\boldsymbol\mu`
# and a guessed interaction graph, :math:`G'`.
# We then use a classical optimizer to maximize the average
# "similarity" between the time-evolved states and the states prepared
# with the QGRNN.
#
# As the QGRNN states become more similar to
# each time-evolved state for each sampled time, it follows that
# :math:`\boldsymbol\mu \ \rightarrow \ \boldsymbol\alpha`
# and we are able to learn the unknown parameters of the Hamiltonian.
#
# .. figure:: ../demonstrations/qgrnn/qgrnn3.png
# :width: 90%
# :align: center
#
# A visual representation of one execution of the QGRNN for one piece of quantum data.
#
######################################################################
# Learning an Ising Model with the QGRNN
# ---------------------------------------
#
######################################################################
# We now attempt to use the QGRNN to learn the parameters corresponding
# to an arbitrary transverse-field Ising model Hamiltonian.
#
######################################################################
# Getting Started
# ^^^^^^^^^^^^^^^^
#
######################################################################
# We begin by importing the necessary dependencies:
#
import pennylane as qml
from matplotlib import pyplot as plt
import numpy as np
import scipy
import networkx as nx
import copy
######################################################################
# We also define some fixed values that are used throughout
# the simulation.
#
qubit_number = 4  # number of qubits = number of nodes in the interaction graph
qubits = range(qubit_number)
######################################################################
# In this
# simulation, we don't have quantum data readily available to pass into
# the QGRNN, so we have to generate it ourselves. To do this, we must
# have knowledge of the target interaction graph and the target Hamiltonian.
#
# Let us use the following cyclic graph as the target interaction graph
# of the Ising Hamiltonian:
#
ising_graph = nx.cycle_graph(qubit_number)  # target interaction graph: a 4-cycle
print(f"Edges: {ising_graph.edges}")
nx.draw(ising_graph)
######################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Edges: [(0, 1), (0, 3), (1, 2), (2, 3)]
#
#######################################################################
# .. figure:: ../demonstrations/qgrnn/graph1.png
# :width: 70%
# :align: center
#
######################################################################
# We can then initialize the “unknown” target parameters that describe the
# target Hamiltonian, :math:`\boldsymbol\alpha \ = \ \{\alpha^{(1)}, \ \alpha^{(2)}\}`.
# Recall from the introduction that we have defined our parametrized
# Ising Hamiltonian to be of the form:
#
# .. math::
#
# \hat{H}_{\text{Ising}}(\boldsymbol\theta) \ = \ \displaystyle\sum_{(i, j) \in E}
# \theta_{ij}^{(1)} Z_{i} Z_{j} \ + \ \displaystyle\sum_{i} \theta_{i}^{(2)} Z_{i} \ + \
# \displaystyle\sum_{i} X_{i},
#
# where :math:`E` is the set of edges in the interaction graph, and
# :math:`X_i` and :math:`Z_i` are the Pauli-X and Pauli-Z on the
# :math:`i`-th qubit.
#
# For this tutorial, we choose the target parameters by sampling from
# a uniform probability distribution ranging from :math:`-2` to :math:`2`, with
# two-decimal precision.
#
matrix_params = [[0.56, 1.24, 1.67, -0.79], [-1.44, -1.43, 1.18, -0.93]]
######################################################################
# In theory, these parameters can
# be any value we want, provided they are reasonably small enough that the QGRNN can reach them
# in a tractable number of optimization steps.
# In `matrix_params`, the first list represents the :math:`ZZ` interaction parameters and
# the second list represents the single-qubit `Z` parameters.
#
# Finally,
# we use this information to generate the matrix form of the
# Ising model Hamiltonian in the computational basis:
#
def create_hamiltonian_matrix(n, graph, params):
    """Build the 2**n x 2**n matrix of the transverse-field Ising Hamiltonian.

    Parameters
    ----------
    n : int
        Number of qubits.
    graph : networkx.Graph
        Interaction graph; each edge contributes a weighted ZZ term.
    params : list[list[float]]
        ``params[0]`` holds the ZZ edge weights (ordered like ``graph.edges``),
        ``params[1]`` the single-qubit Z biases. The transverse X field has
        fixed unit weight.
    """
    matrix = np.zeros((2 ** n, 2 ** n))
    # Creates the interaction component of the Hamiltonian
    for count, i in enumerate(graph.edges):
        m = 1
        for j in range(0, n):
            # Kronecker product: Z on the edge's two qubits, identity elsewhere
            if i[0] == j or i[1] == j:
                m = np.kron(m, qml.PauliZ.matrix)
            else:
                m = np.kron(m, np.identity(2))
        matrix += params[0][count] * m
    # Creates the bias components of the matrix
    for i in range(0, n):
        m1 = m2 = 1
        for j in range(0, n):
            if j == i:
                m1 = np.kron(m1, qml.PauliZ.matrix)  # weighted Z bias term
                m2 = np.kron(m2, qml.PauliX.matrix)  # fixed transverse X field
            else:
                m1 = np.kron(m1, np.identity(2))
                m2 = np.kron(m2, np.identity(2))
        matrix += (params[1][i] * m1 + m2)
    return matrix
# Prints a visual representation of the Hamiltonian matrix
ham_matrix = create_hamiltonian_matrix(qubit_number, ising_graph, matrix_params)
plt.matshow(ham_matrix, cmap='hot')
#plt.show()
plt.draw()
plt.pause(0.001)  # give the GUI event loop a moment to render the figure
input("Open Ports --> Open Preview or Browser --> push enter to continue")
######################################################################
# .. figure:: ../demonstrations/qgrnn/ising_hamiltonian.png
# :width: 50%
# :align: center
#
######################################################################
# Preparing Quantum Data
# ^^^^^^^^^^^^^^^^^^^^^^
#
######################################################################
# The collection of quantum data needed to run the QGRNN has two components:
# (i) copies of a low-energy state, and (ii) a collection of time-evolved states, each of which are
# simply the low-energy state evolved to different times.
# The following is a low-energy state of the target Hamiltonian:
#
# Precomputed amplitudes of a 16-dimensional (4-qubit) low-energy state of the
# target Hamiltonian, obtained beforehand with a deliberately restricted VQE
# run (see the discussion below) — close to, but not exactly, the ground state.
low_energy_state = [
    (-0.054661080280306085+0.016713907320174026j),
    (0.12290003656489545-0.03758500591109822j),
    (0.3649337966440005-0.11158863596657455j),
    (-0.8205175732627094+0.25093231967092877j),
    (0.010369790825776609-0.0031706387262686003j),
    (-0.02331544978544721+0.007129899300113728j),
    (-0.06923183949694546+0.0211684344103713j),
    (0.15566094863283836-0.04760201916285508j),
    (0.014520590919500158-0.004441887836078486j),
    (-0.032648113364535575+0.009988590222879195j),
    (-0.09694382811137187+0.02965579457620536j),
    (0.21796861485652747-0.06668776658411019j),
    (-0.0027547112135013247+0.0008426289322652901j),
    (0.006193695872468649-0.0018948418969390599j),
    (0.018391279795405405-0.005625722994009138j),
    (-0.041350974715649635+0.012650711602265649j)
]
######################################################################
# This state can be obtained by using a decoupled version of the
# :doc:`Variational Quantum Eigensolver </demos/tutorial_vqe>` algorithm (VQE).
# Essentially, we choose a
# VQE ansatz such that the circuit cannot learn the exact ground state,
# but it can get fairly close. Another way to arrive at the same result is
# to perform VQE with a reasonable ansatz, but to terminate the algorithm
# before it converges to the ground state. If we used the exact ground state
# :math:`|\psi_0\rangle`, the time-dependence would be trivial and the
# data would not provide enough information about the Hamiltonian parameters.
#
# We can verify that this is a low-energy
# state by numerically finding the lowest eigenvalue of the Hamiltonian
# and comparing it to the energy expectation of this low-energy state:
#
# Energy expectation <psi|H|psi> of the candidate low-energy state
res = np.vdot(low_energy_state, (ham_matrix @ low_energy_state))
energy_exp = np.real_if_close(res)
print(f"Energy Expectation: {energy_exp}")
# True ground-state energy: the smallest eigenvalue of the Hamiltonian matrix
ground_state_energy = np.real_if_close(min(np.linalg.eig(ham_matrix)[0]))
print(f"Ground State Energy: {ground_state_energy}")
######################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Energy Expectation: -7.244508985189101
# Ground State Energy: -7.330689661291242
#
######################################################################
# We have in fact found a low-energy, non-ground state,
# as the energy expectation is slightly greater than the energy of the true ground
# state. This, however, is only half of the information we need. We also require
# a collection of time-evolved, low-energy states.
# Evolving the low-energy state forward in time is fairly straightforward: all we
# have to do is multiply the initial state by a time-evolution unitary. This operation
# can be defined as a custom gate in PennyLane:
#
def state_evolve(hamiltonian, qubits, time):
    """Apply the time-evolution unitary exp(-iHt) to the given wires.

    Args:
        hamiltonian (numpy.ndarray): matrix form of the Hamiltonian H.
        qubits (list[int]): wires the evolution acts on.
        time (float): evolution time t.
    """
    evolution_unitary = scipy.linalg.expm(complex(0, -1) * hamiltonian * time)
    qml.QubitUnitary(evolution_unitary, wires=qubits)
######################################################################
# We don't actually generate time-evolved quantum data quite yet,
# but we now have all the pieces required for its preparation.
#
######################################################################
# Learning the Hamiltonian
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
######################################################################
# With the quantum data defined, we are able to construct the QGRNN and
# learn the target Hamiltonian.
# Each of the exponentiated
# Hamiltonians in the QGRNN ansatz,
# :math:`\hat{H}^{j}_{\text{Ising}}(\boldsymbol\mu)`, are the
# :math:`ZZ`, :math:`Z`, and :math:`X` terms from the Ising
# Hamiltonian. This gives:
#
def qgrnn_layer(param1, param2, qubits, graph, trotter_step):
    """Apply one Trotter layer of the QGRNN ansatz.

    Each layer exponentiates the three Ising terms in sequence: ZZ couplings,
    single-qubit Z biases, and the fixed transverse X field.

    Args:
        param1 (list[float]): ZZ weights, one per edge of ``graph``.
        param2 (list[float]): Z biases, one per wire in ``qubits``.
        qubits (list[int]): wires the layer acts on.
        graph (networkx.Graph): guessed interaction graph.
        trotter_step (float): Trotter step size Delta.
    """
    # ZZ couplings -> MultiRZ (RZZ) gates along the graph edges
    for idx, edge in enumerate(graph.edges):
        qml.MultiRZ(2 * param1[idx] * trotter_step, wires=[edge[0], edge[1]])
    # Z biases -> single-qubit RZ rotations
    for idx, wire in enumerate(qubits):
        qml.RZ(2 * param2[idx] * trotter_step, wires=wire)
    # Transverse field -> RX rotations with a fixed (unit) weight
    for wire in qubits:
        qml.RX(2 * trotter_step, wires=wire)
######################################################################
# As was mentioned in the first section, the QGRNN has two
# registers. In one register, some piece of quantum data
# :math:`|\psi(t)\rangle` is prepared and in the other we have
# :math:`U_{H}(\boldsymbol\mu, \ \Delta) |\psi_0\rangle`. We need a
# way to measure the similarity between these states.
# This can be done by using the fidelity, which is
# simply the modulus squared of the inner product between the states,
# :math:`| \langle \psi(t) | U_{H}(\Delta, \ \boldsymbol\mu) |\psi_0\rangle |^2`.
# To calculate this value, we use a `SWAP
# test <https://en.wikipedia.org/wiki/Swap_test>`__ between the registers:
#
def swap_test(control, register1, register2):
    """Perform a SWAP test comparing two equally sized qubit registers.

    After this circuit, <Z> on the ``control`` wire encodes the fidelity
    between the states held in ``register1`` and ``register2``.

    Args:
        control (int): wire index of the ancilla (control) qubit.
        register1 (list[int]): wires of the first register.
        register2 (list[int]): wires of the second register.
    """
    qml.Hadamard(wires=control)
    # Controlled-SWAP each corresponding pair of qubits
    for idx in range(len(register1)):
        qml.CSWAP(wires=[int(control), register1[idx], register2[idx]])
    qml.Hadamard(wires=control)
######################################################################
# After performing this procedure, the value returned from a measurement of the circuit is
# :math:`\langle Z \rangle`, with respect to the ``control`` qubit.
# The probability of measuring the :math:`|0\rangle` state
# in this control qubit is related to both the fidelity
# between registers and :math:`\langle Z \rangle`. Thus, with a bit of algebra,
# we find that :math:`\langle Z \rangle` is equal to the fidelity.
#
# Before creating the full QGRNN and the cost function, we
# define a few more fixed values. Among these is a "guessed"
# interaction graph, which we set to be a complete graph. This choice
# is motivated by the fact that any target interaction graph will be a subgraph
# of this initial guess. Part of the idea behind the QGRNN is that
# we don’t know the interaction graph, and it has to be learned. In this case, the graph
# is learned *automatically* as the target parameters are optimized. The
# :math:`\boldsymbol\mu` parameters that correspond to edges that don't exist in
# the target graph will simply approach :math:`0`.
#
# Defines some fixed values
reg1 = list(range(qubit_number)) # First qubit register
reg2 = list(range(qubit_number, 2 * qubit_number)) # Second qubit register
control = 2 * qubit_number # Index of control qubit
trotter_step = 0.01 # Trotter step size
# Defines the interaction graph for the new qubit system: the complete graph
# on the second register's qubits, so any target interaction graph is
# guaranteed to be a subgraph of this guess
new_ising_graph = nx.Graph()
new_ising_graph.add_nodes_from(range(qubit_number, 2 * qubit_number))
new_ising_graph.add_edges_from([(4, 5), (5, 6), (6, 7), (4, 6), (7, 4), (5, 7)])
print(f"Edges: {new_ising_graph.edges}")
nx.draw(new_ising_graph)
######################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Edges: [(4, 5), (4, 6), (4, 7), (5, 6), (5, 7), (6, 7)]
#
# .. figure:: ../demonstrations/qgrnn/graph2.png
# :width: 70%
# :align: center
#
######################################################################
# With this done, we implement the QGRNN circuit for some given time value:
#
def qgrnn(params1, params2, time=None):
    """QGRNN circuit: quantum data vs. Trotterized ansatz, compared by SWAP test.

    Args:
        params1 (list[float]): ZZ weights of the guessed Hamiltonian.
        params2 (list[float]): single-qubit Z biases of the guessed Hamiltonian.
        time (float): evolution time of the quantum-data state.

    Returns:
        Expectation of PauliZ on the control wire (equals the fidelity
        between the two registers after the SWAP test).
    """
    # Initialize both registers in the (fixed) low-energy state
    qml.QubitStateVector(np.kron(low_energy_state, low_energy_state), wires=reg1+reg2)
    # Register 1: exact time evolution prepares the quantum data |psi(t)>
    state_evolve(ham_matrix, reg1, time)
    # Register 2: P = t / Delta Trotter layers of the QGRNN ansatz
    n_layers = int(time / trotter_step)
    for _ in range(n_layers):
        qgrnn_layer(params1, params2, reg2, new_ising_graph, trotter_step)
    # Compare the two registers with a SWAP test on the control qubit
    swap_test(control, reg1, reg2)
    return qml.expval(qml.PauliZ(control))
######################################################################
# We have the full QGRNN circuit, but we still need to define a cost function.
# We know that
# :math:`| \langle \psi(t) | U_{H}(\boldsymbol\mu, \ \Delta) |\psi_0\rangle |^2`
# approaches :math:`1` as the states become more similar and approaches
# :math:`0` as the states become orthogonal. Thus, we choose
# to minimize the quantity
# :math:`-| \langle \psi(t) | U_{H}(\boldsymbol\mu, \ \Delta) |\psi_0\rangle |^2`.
# Since we are interested in calculating this value for many different
# pieces of quantum data, the final cost function is the average
# *negative fidelity* between registers:
#
# .. math::
#
# \mathcal{L}(\boldsymbol\mu, \ \Delta) \ = \ - \frac{1}{N} \displaystyle\sum_{i \ = \ 1}^{N} |
# \langle \psi(t_i) | \ U_{H}(\boldsymbol\mu, \ \Delta) \ |\psi_0\rangle |^2,
#
# where we use :math:`N` pieces of quantum data.
#
# Before creating the cost function, we must define a few more fixed
# variables:
#
# Hyperparameters of the cost function defined below
N = 15 # The number of pieces of quantum data that are used for each step
max_time = 0.1 # The maximum value of time that can be used for quantum data
######################################################################
# We then define the negative fidelity cost function:
#
def cost_function(params):
    """Average negative fidelity between quantum data and the QGRNN output.

    Args:
        params: flat trainable parameter vector; the first six entries are
            the ZZ weights of the guessed (complete) graph, the last four are
            the single-qubit Z biases.

    Returns:
        The cost -(1/N) * sum_i fidelity(t_i), to be minimized.
    """
    global iterations  # step counter shared with the optimization loop below
    # Separates the parameter list
    weight_params = params[0:6]
    bias_params = params[6:10]
    # Randomly samples times at which the QGRNN runs
    times_sampled = [np.random.uniform() * max_time for i in range(0, N)]
    # Cycles through each of the sampled times and calculates the cost
    total_cost = 0
    for i in times_sampled:
        result = qnode(weight_params, bias_params, time=i)
        total_cost += -1 * result
    # Prints the value of the cost function every 5 steps; ._value unwraps the
    # autograd ArrayBox so raw numbers can be printed mid-differentiation
    if iterations % 5 == 0:
        print(
            "Fidelity at Step " + str(iterations) + ": " + str((-1 * total_cost / N)._value)
        )
        print("Parameters at Step " + str(iterations) + ": " + str(params._value))
        print("---------------------------------------------")
    iterations += 1
    return total_cost / N
######################################################################
# The last step is to define the new device and QNode, and execute the
# optimizer. We use Adam,
# with a step-size of :math:`0.5`:
#
# Defines the new device (both registers plus one control qubit)
qgrnn_dev = qml.device("default.qubit", wires=2 * qubit_number + 1)
# Defines the new QNode
qnode = qml.QNode(qgrnn, qgrnn_dev)
iterations = 0
optimizer = qml.AdamOptimizer(stepsize=0.5)
steps = 300
# Random initial guess: ten values on a 0.02 grid in [-0.4, 0.38]
qgrnn_params = list([np.random.randint(-20, 20)/50 for i in range(0, 10)])
# Snapshot of the initial parameters — presumably compared against the
# optimized result later in the demo; confirm against the following sections
init = copy.copy(qgrnn_params)
# Executes the optimization method
for i in range(0, steps):
    qgrnn_params = optimizer.step(cost_function, qgrnn_params)
######################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Fidelity at Step 0: 0.9918346272467008
# Parameters at Step 0: [-0.18, 0.3, 0.32, -0.08, 0.22, 0.28, 0.0, 0.04, -0.26, -0.34]
# ---------------------------------------------
# Fidelity at Step 1: 0.9982958958675725
# Parameters at Step 1: [-0.6799981741676951, 0.7999982222290656, 0.8199988800891312, 0.4199985008189975, 0.7199987506613634, -0.2199988652959371, -0.4999965632247722, -0.4599971332618679, 0.2399970101066845, 0.1599984088859147]
# ---------------------------------------------
# Fidelity at Step 2: 0.990490615156777
# Parameters at Step 2: [-1.0098221172022992, 1.1336805430938313, 1.1430272204750902, 0.7644358118180916, 1.0421203687079919, -0.5468227660845358, -0.8209215857161252, -0.8014895139877191, 0.5869815972734128, 0.47692466959999924]
# ---------------------------------------------
# Fidelity at Step 3: 0.9762351051354551
# Parameters at Step 3: [-1.2299363744628509, 1.3661666433666075, 1.3415230886864924, 1.030456044669323, 1.2378781697230516, -0.7575172669172114, -1.0130201555167162, -1.0581775588121731, 0.8610525437144596, 0.6563901663908569]
# ---------------------------------------------
# Fidelity at Step 4: 0.9784260543002337
# Parameters at Step 4: [-1.3467580918064672, 1.5070467394602267, 1.4191992723432134, 1.2363074986621367, 1.3105574652271725, -0.8578216277740998, -1.0787194096943145, -1.2453915798444228, 1.082957803098034, 0.7007286251069664]
# ---------------------------------------------
# Fidelity at Step 5: 0.9760924766644138
# Parameters at Step 5: [-1.3975050155126143, 1.5874154907714464, 1.4253477829297985, 1.3972088582056272, 1.310768053020758, -0.8907455190190542, -1.0697339005273567, -1.3825354197935542, 1.264473178391723, 0.6691484100078874]
# ---------------------------------------------
# Fidelity at Step 6: 0.9791342696605839
# Parameters at Step 6: [-1.3916054226388823, 1.6158640373620028, 1.3729978909141598, 1.5194931625164307, 1.2515225501166842, -0.8679964251616565, -0.9987099125166444, -1.4761909187915563, 1.4117656894362285, 0.5767242683721397]
# ---------------------------------------------
# Fidelity at Step 7: 0.9787421089376458
# Parameters at Step 7: [-1.3419795434786161, 1.6037009979053718, 1.2793404242059645, 1.6096566051847816, 1.1498502520134959, -0.8049890167819886, -0.8822831819835857, -1.5340642754199918, 1.530248443597091, 0.44241619983839797]
# ---------------------------------------------
# Fidelity at Step 8: 0.9820315749997621
# Parameters at Step 8: [-1.2488997936415513, 1.5514040173101722, 1.1474714836863231, 1.6683325278610117, 1.0085180485340484, -0.7045659446371773, -0.7220052474377402, -1.5561429872624357, 1.6212374403240934, 0.2699385653926958]
# ---------------------------------------------
# Fidelity at Step 9: 0.9906449268083709
# Parameters at Step 9: [-1.1168651248534744, 1.4629872578303456, 0.9855814851930917, 1.6969881487826513, 0.8353653676865135, -0.57428268457325, -0.524589866529066, -1.5440633182793588, 1.68617934363009, 0.06842824811303153]
# ---------------------------------------------
# Fidelity at Step 10: 0.99582195327501
# Parameters at Step 10: [-0.964031092107903, 1.354674684306868, 0.814393606152222, 1.7042986703019174, 0.650940374604191, -0.43328323068490904, -0.31101171334022504, -1.5090252857025748, 1.731326080636684, -0.1402782545374336]
# ---------------------------------------------
# Fidelity at Step 11: 0.9961926541501808
# Parameters at Step 11: [-0.8036684420752858, 1.238468601757792, 0.648795252860385, 1.6970845839900839, 0.46994924470041266, -0.29523086642904206, -0.0966467937581893, -1.4596190993696592, 1.7617532916742076, -0.34043090654836505]
# ---------------------------------------------
# Fidelity at Step 12: 0.9983158950218793
# Parameters at Step 12: [-0.6312660733884548, 1.1116800858652522, 0.4972845049612349, 1.6699268716653712, 0.2975512423191221, -0.16726719667887802, 0.11884361341846977, -1.3889433232896733, 1.773276350359844, -0.5223183946029325]
# ---------------------------------------------
# Fidelity at Step 13: 0.9967000138255077
# Parameters at Step 13: [-0.4676461753735439, 0.9917792527825551, 0.36890033998622973, 1.6368149409927473, 0.1458560667857181, -0.058079424182490846, 0.3155504611747129, -1.3145273809480482, 1.7766599241241958, -0.6769959196369855]
# ---------------------------------------------
# Fidelity at Step 14: 0.9945001638616079
# Parameters at Step 14: [-0.31794007513498357, 0.8838593182597574, 0.274152383590202, 1.5986807415684292, 0.023998424267510157, 0.02334325807987936, 0.484463374084153, -1.2376354781826886, 1.7724156182238344, -0.792919523723297]
# ---------------------------------------------
# Fidelity at Step 15: 0.9913345835898904
# Parameters at Step 15: [-0.18832820993560723, 0.7934498472478128, 0.2192041202380814, 1.5582912583375383, -0.06231146312915761, 0.07191800841202414, 0.6173058799196036, -1.1617573784335757, 1.7624859746860937, -0.8642178255228796]
# ---------------------------------------------
# Fidelity at Step 16: 0.9889565034466684
# Parameters at Step 16: [-0.08560961522854411, 0.7266587755855446, 0.21178056237429993, 1.5174343908488523, -0.10602854111569737, 0.08120012393307255, 0.7038563895141848, -1.089491099254555, 1.7477885167776666, -0.8832941887432142]
# ---------------------------------------------
# Fidelity at Step 17: 0.9925729160049567
# Parameters at Step 17: [-0.01530712818391683, 0.688186047241436, 0.2546346524352683, 1.478007577277352, -0.10461799053446165, 0.04908916100233172, 0.7372715330783467, -1.023558091459499, 1.72924671045939, -0.8487271410430316]
# ---------------------------------------------
# Fidelity at Step 18: 0.9929504851720798
# Parameters at Step 18: [0.030032491434869538, 0.6705379228419254, 0.32736077326483304, 1.4416928946464962, -0.07563674255845146, -0.007189452883322685, 0.7339963088570366, -0.965283352682299, 1.7091399150791209, -0.7831498975610103]
# ---------------------------------------------
# Fidelity at Step 19: 0.9965405348346449
# Parameters at Step 19: [0.049351359919739754, 0.6743590703990037, 0.4278756421344065, 1.4090284612866684, -0.02163430171942262, -0.08514460113615971, 0.693373791202934, -0.9156335387084984, 1.6874429857641309, -0.6900461171429496]
# ---------------------------------------------
# Fidelity at Step 20: 0.9966665201201759
# Parameters at Step 20: [0.05278414037474171, 0.690320672688792, 0.5389740191005465, 1.3798222192408045, 0.04246657361378194, -0.1704614872588347, 0.6327107569705728, -0.8732384948075647, 1.665370037015241, -0.5873476637991509]
# ---------------------------------------------
# Fidelity at Step 21: 0.9985601378649517
# Parameters at Step 21: [0.036744860702025185, 0.7211548377193117, 0.6624797948658447, 1.3541418526093136, 0.11662055186990422, -0.2630293857462455, 0.5463270732728669, -0.8391813354875706, 1.641691390380928, -0.47517726904059726]
# ---------------------------------------------
# Fidelity at Step 22: 0.9989726690473872
# Parameters at Step 22: [0.0099853113729, 0.7590819662221863, 0.786559829395425, 1.331306986027622, 0.19078421742491747, -0.3533747180286354, 0.44798226535748187, -0.811516385783778, 1.6171600388645548, -0.3653610979369781]
# ---------------------------------------------
# Fidelity at Step 23: 0.999047032083147
# Parameters at Step 23: [-0.026725503966848224, 0.8028384497425541, 0.9074123764373417, 1.3108986407404344, 0.2601589904988729, -0.43682334045593063, 0.3386032585919626, -0.7903088349898605, 1.5906634543837104, -0.2636408610946702]
# ---------------------------------------------
# Fidelity at Step 24: 0.9987705154428619
# Parameters at Step 24: [-0.07086221861622898, 0.8494889117930519, 1.0190044786807104, 1.292312641664198, 0.31796110307647885, -0.5068103469275773, 0.22176642815390943, -0.7752768448722682, 1.561023616126524, -0.1781655177903615]
# ---------------------------------------------
# Fidelity at Step 25: 0.9988100329375428
# Parameters at Step 25: [-0.11779944704122991, 0.8944605594186567, 1.1143459278348542, 1.2748222812428165, 0.35783783243731665, -0.5572620061886935, 0.10437661028468458, -0.765460064780762, 1.52771962327713, -0.11647520632647335]
# ---------------------------------------------
# Fidelity at Step 26: 0.9985757201454402
# Parameters at Step 26: [-0.1620749955989192, 0.9340651187861451, 1.1928230771213455, 1.2582183861869087, 0.3819403680851732, -0.5908063527356947, -0.0052458861753093555, -0.7587107231668222, 1.4928816938483498, -0.07549378112681823]
# ---------------------------------------------
# Fidelity at Step 27: 0.9976302566535219
# Parameters at Step 27: [-0.20132070244284592, 0.9664369998249582, 1.2536060950940575, 1.2422878678254046, 0.3904514403341251, -0.6078223076765568, -0.10356163561282461, -0.7542507212062988, 1.4569850261955124, -0.05477860988229539]
# ---------------------------------------------
# Fidelity at Step 28: 0.9983882356575454
# Parameters at Step 28: [-0.23255417027769784, 0.987815950434486, 1.2909889004408277, 1.2265674025512432, 0.37752274884372017, -0.6025799670865759, -0.1859803393905809, -0.752257031132375, 1.4186113777017493, -0.06138955414451268]
# ---------------------------------------------
# Fidelity at Step 29: 0.9978671373289247
# Parameters at Step 29: [-0.2566856271475478, 1.0006492046651103, 1.3125780415283235, 1.2115530344170848, 0.3520816340739027, -0.5841604105400039, -0.2540974192141527, -0.7518168751351452, 1.380097816028637, -0.0842201282781858]
# ---------------------------------------------
# Fidelity at Step 30: 0.9974929437984331
# Parameters at Step 30: [-0.27124830423752155, 1.001805037820717, 1.3148505271380608, 1.1972028099405516, 0.3108355440242823, -0.5493333047969007, -0.30386761874823015, -0.7534487198975702, 1.340572547803093, -0.12730767306292412]
# ---------------------------------------------
# Fidelity at Step 31: 0.9987904515719516
# Parameters at Step 31: [-0.27331253281188894, 0.9875366027856964, 1.2947325486214651, 1.183881268184288, 0.2512304445465742, -0.49559813556132276, -0.3302261854169147, -0.7582194250788556, 1.2993209544142166, -0.1938896756224774]
# ---------------------------------------------
# Fidelity at Step 32: 0.9990327822738706
# Parameters at Step 32: [-0.26795379123456736, 0.9646777852539683, 1.2642320526274793, 1.172242080195256, 0.18538030083281698, -0.4350974227693908, -0.3416288874251854, -0.7653120532473738, 1.2589997312286312, -0.26913506648646457]
# ---------------------------------------------
# Fidelity at Step 33: 0.9996630784088545
# Parameters at Step 33: [-0.25498616785829376, 0.9326639083646915, 1.2250032532283779, 1.1631299925830865, 0.11566158561604338, -0.370123583270212, -0.3372023926341056, -0.7759491224912733, 1.219973281394287, -0.35046354300650573]
# ---------------------------------------------
# Fidelity at Step 34: 0.9995646137376791
# Parameters at Step 34: [-0.23931125925929084, 0.8979390115443661, 1.18517083195646, 1.1562392191477264, 0.049407091226932126, -0.3080683093766753, -0.3256263558112948, -0.7885433711096579, 1.1837377130938038, -0.42860701069958995]
# ---------------------------------------------
# Fidelity at Step 35: 0.9996465904308229
# Parameters at Step 35: [-0.21955712185077875, 0.8579684028917567, 1.1461751770792745, 1.153596712136502, -0.010617573336647104, -0.25156592871335065, -0.3032907108221233, -0.806211033906433, 1.150742645372461, -0.5010224120719031]
# ---------------------------------------------
# Fidelity at Step 36: 0.9994156035961047
# Parameters at Step 36: [-0.19868629481528227, 0.8165858084509231, 1.1121003293501985, 1.1548201771770399, -0.06107266557805362, -0.20402041326536352, -0.2755913764320364, -0.827857105634376, 1.1216160185109565, -0.5633578965665793]
# ---------------------------------------------
# Fidelity at Step 37: 0.9993165244122285
# Parameters at Step 37: [-0.1779889897836405, 0.7749703004823995, 1.0870344529373086, 1.1611755563451476, -0.09753314112151815, -0.16984814116498834, -0.24407479905269586, -0.8549288607994256, 1.0972908304783051, -0.6106194470473059]
# ---------------------------------------------
# Fidelity at Step 38: 0.9996276075039349
# Parameters at Step 38: [-0.15915307597784445, 0.7354708885031871, 1.072332703887529, 1.1720124226103679, -0.11964806996345806, -0.14955951002483486, -0.2122772329988582, -0.8862271155143, 1.0778211101348336, -0.6420226972112081]
# ---------------------------------------------
# Fidelity at Step 39: 0.9990796004168293
# Parameters at Step 39: [-0.142525631080849, 0.6994887039221028, 1.0634937491532876, 1.1842620087197029, -0.13411912965193842, -0.13670846657035737, -0.1829062246986572, -0.9176867201800266, 1.0615334788002615, -0.6645525202471699]
# ---------------------------------------------
# Fidelity at Step 40: 0.9991081504737204
# Parameters at Step 40: [-0.12978142843589885, 0.6682722355308486, 1.067173079974738, 1.2008548828199337, -0.13307343789244414, -0.13917897231406814, -0.15733943829829156, -0.9527752044623491, 1.0504893600171643, -0.6695713382045293]
# ---------------------------------------------
# Fidelity at Step 41: 0.9994006109153413
# Parameters at Step 41: [-0.12115567316443504, 0.6424397976328438, 1.0824788700855221, 1.2211349307434558, -0.11825805101753337, -0.15543553765641152, -0.1367627010490742, -0.9905091080053223, 1.0443773039911841, -0.6588951948060155]
# ---------------------------------------------
# Fidelity at Step 42: 0.9994774988967781
# Parameters at Step 42: [-0.11571425014381993, 0.6214062734740367, 1.1051772812625795, 1.2432828086933019, -0.09506646644529715, -0.1802470006258153, -0.12088226665017486, -1.0287060346219037, 1.0418592834612284, -0.6384405334852695]
# ---------------------------------------------
# Fidelity at Step 43: 0.9994472365267485
# Parameters at Step 43: [-0.11343136573993991, 0.6053750054673392, 1.1345586980326912, 1.2670816384279806, -0.0645914205975793, -0.21272564092891239, -0.11021940656600411, -1.0669698557220983, 1.042885163548047, -0.6093818606417603]
# ---------------------------------------------
# Fidelity at Step 44: 0.9997444727808593
# Parameters at Step 44: [-0.11493137902455217, 0.5956040002841007, 1.1720296957398517, 1.2933199126033281, -0.025840937747204952, -0.25441678362223674, -0.10664260949985985, -1.1056396577415977, 1.0485202738504023, -0.5705427079314583]
# ---------------------------------------------
# Fidelity at Step 45: 0.99988159413535
# Parameters at Step 45: [-0.1181641198295314, 0.5899525287207167, 1.2117280803719108, 1.3197972842747712, 0.014517460349924967, -0.29859722278693634, -0.10769621831761414, -1.1424715828989647, 1.0567394027897858, -0.5293778103982675]
# ---------------------------------------------
# Fidelity at Step 46: 0.9998573514345785
# Parameters at Step 46: [-0.12175656133105636, 0.5867654275419255, 1.2499010203636594, 1.3451356909319643, 0.05248875951969164, -0.3410987405398154, -0.11141149487295963, -1.176254626328141, 1.0661176290234677, -0.4903933522447272]
# ---------------------------------------------
# Fidelity at Step 47: 0.9998292007097693
# Parameters at Step 47: [-0.12487131421130249, 0.5859454109604079, 1.284613548937081, 1.3689706154338066, 0.0852664799312279, -0.379774628371075, -0.11801000957728758, -1.2060029474462826, 1.0767548088559875, -0.45664961742963583]
# ---------------------------------------------
# Fidelity at Step 48: 0.9997365697977294
# Parameters at Step 48: [-0.12654857023039606, 0.586429393841211, 1.313585556214431, 1.3905244254034577, 0.11041058221068717, -0.4121675022983979, -0.12627431817292728, -1.230996950201297, 1.087855998062673, -0.43089443071099853]
# ---------------------------------------------
# Fidelity at Step 49: 0.9997528113932673
# Parameters at Step 49: [-0.12584831617269926, 0.5874242095681329, 1.3349964372480259, 1.4091922470968539, 0.12563466677970891, -0.43619264265287366, -0.13540569126088883, -1.2504754631024455, 1.0988857079981462, -0.41566649516961174]
# ---------------------------------------------
# Fidelity at Step 50: 0.9995771492573322
# Parameters at Step 50: [-0.12308220081986324, 0.5882956053892081, 1.3498112566839737, 1.4250593957050575, 0.1327111946665708, -0.45293666365919977, -0.14442696202432756, -1.2653186413203268, 1.109248000229557, -0.4091049260651626]
# ---------------------------------------------
# Fidelity at Step 51: 0.9996582070356129
# Parameters at Step 51: [-0.1167464756855651, 0.5885180846568769, 1.3553538821219644, 1.437311180522153, 0.12773708284845237, -0.45934041737317816, -0.153191885507882, -1.2738828621497664, 1.1187160226528092, -0.41542403253932575]
# ---------------------------------------------
# Fidelity at Step 52: 0.9996955432159077
# Parameters at Step 52: [-0.10771034556633162, 0.5880448359118233, 1.3537658157861867, 1.4464530329091365, 0.11384863623479319, -0.45786035704962497, -0.16146420142746504, -1.2774690460612694, 1.1271196329598958, -0.43122725720747185]
# ---------------------------------------------
# Fidelity at Step 53: 0.9997991619034833
# Parameters at Step 53: [-0.09627329672367939, 0.5868963942580755, 1.3462380053806309, 1.4527280567240575, 0.09247903186228626, -0.44976296196126986, -0.1693346453161187, -1.2765780952688226, 1.1344036427582234, -0.45493168233995446]
# ---------------------------------------------
# Fidelity at Step 54: 0.9996928121478973
# Parameters at Step 54: [-0.08345955687710035, 0.5853498150099569, 1.3350713003972525, 1.456813442186792, 0.06682602891064508, -0.43769031387004786, -0.1769697735889833, -1.2725521174090646, 1.1406837825893554, -0.48306702794086753]
# ---------------------------------------------
# Fidelity at Step 55: 0.9998899365649595
# Parameters at Step 55: [-0.06751148109701582, 0.583214665299736, 1.3183722482806561, 1.4575330258805324, 0.033384228036375245, -0.41943921471437035, -0.18540640162811964, -1.2629340228252863, 1.1456369135708773, -0.5191713211994088]
# ---------------------------------------------
# Fidelity at Step 56: 0.9998624340125136
# Parameters at Step 56: [-0.05131752850714959, 0.5811990289809753, 1.3013023835929207, 1.4569478769178683, -0.0004006580174740204, -0.4007625392880318, -0.19430310803813716, -1.251647479542463, 1.1498364761138966, -0.5553743416586863]
# ---------------------------------------------
# Fidelity at Step 57: 0.9999132721039935
# Parameters at Step 57: [-0.034655781202251414, 0.5797721835560955, 1.284992930419999, 1.4547640992807522, -0.03367428060470313, -0.3831140718715586, -0.20534639523532677, -1.2379072945156397, 1.1532505168291682, -0.5904570588092755]
# ---------------------------------------------
# Fidelity at Step 58: 0.9998775148230059
# Parameters at Step 58: [-0.018859726755114874, 0.579075691631346, 1.271532927647943, 1.4521537374570472, -0.06335771928892965, -0.36867465578447683, -0.21781255421386045, -1.2238816583685157, 1.1562215101775397, -0.6213031578896728]
# ---------------------------------------------
# Fidelity at Step 59: 0.9998457347810404
# Parameters at Step 59: [-0.0043922861721972534, 0.5793702002250531, 1.2627489076560674, 1.4496008479846716, -0.087487791151152, -0.3593321319413558, -0.2321843903928341, -1.2102636747738713, 1.1588836805134917, -0.6457585646371806]
# ---------------------------------------------
# Fidelity at Step 60: 0.9997727108398051
# Parameters at Step 60: [0.008381004961572968, 0.5806993140477965, 1.2595896924677998, 1.447526804103137, -0.10478843190020545, -0.35617567351770024, -0.2484759318134681, -1.1977686151876175, 1.1613353047857047, -0.6624979557651268]
# ---------------------------------------------
# Fidelity at Step 61: 0.999780827709714
# Parameters at Step 61: [0.01909797144686237, 0.5832292737274423, 1.2640247275721554, 1.4463805516185642, -0.1129425547030803, -0.36150916550403545, -0.267404289385826, -1.187080581646848, 1.1635990684701452, -0.6689774147180123]
# ---------------------------------------------
# Fidelity at Step 62: 0.999808168158736
# Parameters at Step 62: [0.027905032228909082, 0.5864669710827904, 1.2750805933139595, 1.4463033399265706, -0.11250744283286447, -0.37457023406410855, -0.2881605783982173, -1.1786398687238406, 1.1656020826645257, -0.666008871067185]
# ---------------------------------------------
# Fidelity at Step 63: 0.9998548091256995
# Parameters at Step 63: [0.035299063031327474, 0.5897168777071272, 1.2912297644365056, 1.4472746060795163, -0.10510414420013081, -0.39368682521839066, -0.3096536254084067, -1.1725501854957112, 1.1671566218333023, -0.6555791753225414]
# ---------------------------------------------
# Fidelity at Step 64: 0.9999037069964792
# Parameters at Step 64: [0.0418815267714097, 0.5922150299101131, 1.3102058029478096, 1.449115390383419, -0.09285188170899843, -0.416680689996342, -0.33075519781109286, -1.168752259973914, 1.168045873571082, -0.640236123403879]
# ---------------------------------------------
# Fidelity at Step 65: 0.9999246406433501
# Parameters at Step 65: [0.04829108705674348, 0.5933961363308191, 1.3295024801701152, 1.451494818076525, -0.07868535063518896, -0.44066890713809964, -0.3503195623475613, -1.1667777631995717, 1.1681448292091399, -0.6233418461259689]
# ---------------------------------------------
# Fidelity at Step 66: 0.9998866529728345
# Parameters at Step 66: [0.054990646400591335, 0.5929714866395723, 1.3475420631521589, 1.4541780082209224, -0.06478406516638432, -0.4635982065859475, -0.367668914093558, -1.1661843607147455, 1.1674078817134135, -0.6073574766751283]
# ---------------------------------------------
# Fidelity at Step 67: 0.9999098513016205
# Parameters at Step 67: [0.06305970317875366, 0.5894425173204019, 1.362279806065894, 1.457201816175589, -0.05293376636334743, -0.4837715235887784, -0.38173243002587237, -1.1674220982195875, 1.1651062924666915, -0.594733597363039]
# ---------------------------------------------
# Fidelity at Step 68: 0.9998660737935083
# Parameters at Step 68: [0.07207131811893552, 0.5836763740576104, 1.3731394847850424, 1.4602536559593045, -0.04417940302564178, -0.500178959137041, -0.39288037868101516, -1.169729608023709, 1.161707190111347, -0.5863686970050932]
# ---------------------------------------------
# Fidelity at Step 69: 0.9999129411159435
# Parameters at Step 69: [0.08262588608079113, 0.574810479604329, 1.3780673500067586, 1.4633174442657115, -0.04043810439157602, -0.5109593138014746, -0.40059235668512994, -1.1733561658529568, 1.156749837842348, -0.5847647387158366]
# ---------------------------------------------
# Fidelity at Step 70: 0.9999204175249918
# Parameters at Step 70: [0.09353640799320935, 0.5647304323063623, 1.3794737376476547, 1.4663251015063614, -0.03994340760723002, -0.5179451077951711, -0.40634844089445255, -1.1775732993830843, 1.151165123449546, -0.5873654574983501]
# ---------------------------------------------
# Fidelity at Step 71: 0.9999188294218071
# Parameters at Step 71: [0.10439909428941072, 0.5539946810211575, 1.3778815045617863, 1.4692807280461864, -0.04216929238300462, -0.521672517131992, -0.4107648593458895, -1.1822576620042096, 1.1451991959499215, -0.5934612963667989]
# ---------------------------------------------
# Fidelity at Step 72: 0.9998604104737938
# Parameters at Step 72: [0.1150711419134814, 0.5428094649719107, 1.3736920817157159, 1.4723117761807147, -0.047000405497170064, -0.5223555994970089, -0.414287638179804, -1.187491318807621, 1.1388891215693562, -0.6029561557441782]
# ---------------------------------------------
# Fidelity at Step 73: 0.9999430308973932
# Parameters at Step 73: [0.1256137433970921, 0.5304311299324498, 1.364379429769975, 1.4758445198215686, -0.05652412917964197, -0.5178998350909616, -0.4174842021241018, -1.1944011422191803, 1.131549276053809, -0.618992065967143]
# ---------------------------------------------
# Fidelity at Step 74: 0.9999164917980096
# Parameters at Step 74: [0.1352240671048545, 0.5186721462999183, 1.3544791514439096, 1.4795190877049202, -0.06662586038443774, -0.5124582922772923, -0.42092281691525507, -1.2016505027993059, 1.124376089573319, -0.6358352457562463]
# ---------------------------------------------
# Fidelity at Step 75: 0.9999447368621411
# Parameters at Step 75: [0.1433517389997682, 0.5077377878377025, 1.3439833619291048, 1.4839036749660373, -0.07720888813729462, -0.5061759141332505, -0.4260363440256178, -1.2102020610623234, 1.1171661798181443, -0.6538662573020569]
# ---------------------------------------------
# Fidelity at Step 76: 0.9999371167409344
# Parameters at Step 76: [0.1500140190045107, 0.4980752316911107, 1.3345300738606132, 1.4887111518450673, -0.08677048568017348, -0.5005964210616465, -0.43256554322458635, -1.219315335276509, 1.1103510852672205, -0.6708789782047512]
# ---------------------------------------------
# Fidelity at Step 77: 0.9999422443337805
# Parameters at Step 77: [0.1548386249560046, 0.4900488617967078, 1.3268507273328314, 1.49410275596857, -0.09430363221716345, -0.4967256792177868, -0.4413051029923251, -1.2291959841357525, 1.1040339321643222, -0.6857726663067597]
# ---------------------------------------------
# Fidelity at Step 78: 0.9999238559846356
# Parameters at Step 78: [0.15816023191744086, 0.4835880087905452, 1.3215248542660816, 1.499899606624128, -0.09948259802800598, -0.4949866314201788, -0.45179307232821825, -1.239408282708757, 1.0983243101263098, -0.6979516019681357]
# ---------------------------------------------
# Fidelity at Step 79: 0.9998947527867448
# Parameters at Step 79: [0.1598049053610818, 0.47891485857613925, 1.3193948259446773, 1.5062519491417448, -0.10136287159746196, -0.49640463581192196, -0.4647400306369159, -1.2500852220050942, 1.0932619266576435, -0.7063956771055033]
# ---------------------------------------------
# Fidelity at Step 80: 0.9999239374484553
# Parameters at Step 80: [0.15968640176034107, 0.47623857742848363, 1.3215516035038206, 1.5133445108042078, -0.09869941981715959, -0.5023790625052149, -0.4810894704739998, -1.2613481265326896, 1.0888831966015242, -0.709782922818542]
# ---------------------------------------------
# Fidelity at Step 81: 0.999948470725103
# Parameters at Step 81: [0.15903121689591235, 0.47455604185793715, 1.3263353473424122, 1.5205295668382561, -0.09350921353968285, -0.5109460782478329, -0.4986121270465538, -1.2722717718240208, 1.085019857104728, -0.7100977773081497]
# ---------------------------------------------
# Fidelity at Step 82: 0.9999366087435407
# Parameters at Step 82: [0.15850219731430015, 0.4732269015028527, 1.3323677097014557, 1.5273815258143946, -0.08721313304564966, -0.5206595953771328, -0.5159691422708877, -1.2823461352567127, 1.08150103237917, -0.7087956210664609]
# ---------------------------------------------
# Fidelity at Step 83: 0.9999578753805831
# Parameters at Step 83: [0.15876122870928364, 0.4717539889208967, 1.3389858240280035, 1.5336558651468708, -0.07983759313470387, -0.531645417340478, -0.5331916992279223, -1.2912136020041343, 1.078157462787459, -0.7059802614905673]
# ---------------------------------------------
# Fidelity at Step 84: 0.9999562859848723
# Parameters at Step 84: [0.15998950538828352, 0.46987222594640976, 1.3453745056781694, 1.5392301575813734, -0.0727606140942332, -0.5424516612950055, -0.5491990471098366, -1.2987740326465576, 1.0749304304852945, -0.7031076480354954]
# ---------------------------------------------
# Fidelity at Step 85: 0.9999228784481042
# Parameters at Step 85: [0.16259932393819493, 0.46723784978087973, 1.3510020439148975, 1.5439781690404197, -0.06661231505507895, -0.5525379692017052, -0.5636186962362523, -1.3048278506884046, 1.0717015957679998, -0.7009024320958638]
# ---------------------------------------------
# Fidelity at Step 86: 0.9999267299255452
# Parameters at Step 86: [0.16836953998618337, 0.4625743331186897, 1.354370824497011, 1.547342987670039, -0.06270296629337258, -0.5610776835106112, -0.5757496874720254, -1.3083583567906822, 1.068061298085744, -0.7009301098350672]
# ---------------------------------------------
# Fidelity at Step 87: 0.9999539062194203
# Parameters at Step 87: [0.17706182486991237, 0.4559909932922185, 1.3550043012605686, 1.5493683852536209, -0.06147149350529153, -0.567534538603623, -0.5855838960013352, -1.3095163493047912, 1.0640384884320284, -0.7037221397941468]
# ---------------------------------------------
# Fidelity at Step 88: 0.9999375340731136
# Parameters at Step 88: [0.1868411682003187, 0.4487769452876402, 1.354198171678452, 1.5506991362680713, -0.06179202566244289, -0.572554209006431, -0.5939668641662594, -1.3094725665544014, 1.0600401284357561, -0.708003630693759]
# ---------------------------------------------
# Fidelity at Step 89: 0.9999452209763122
# Parameters at Step 89: [0.19823455064505652, 0.44058748194066133, 1.3509939304430245, 1.5511797992868592, -0.06428899198213606, -0.5757041510507358, -0.601117863559753, -1.3079073427777828, 1.0559608562613465, -0.7146251024356468]
# ---------------------------------------------
# Fidelity at Step 90: 0.9999533531894214
# Parameters at Step 90: [0.21043439829521546, 0.4320595524880997, 1.3463316808943995, 1.5512027849227317, -0.06823345854195782, -0.5776080355311207, -0.6077936351302407, -1.305379521016447, 1.0519900966765465, -0.7228294616693073]
# ---------------------------------------------
# Fidelity at Step 91: 0.9999482377985015
# Parameters at Step 91: [0.22258131884660334, 0.42383575872142953, 1.3410192677440258, 1.5511322451252496, -0.07274782428256824, -0.5789636125943975, -0.614715204002582, -1.302483776213029, 1.0483402298473565, -0.7316818371041457]
# ---------------------------------------------
# Fidelity at Step 92: 0.9999576342273339
# Parameters at Step 92: [0.23412628512023217, 0.41639544583068705, 1.3358218745171693, 1.5513108208713804, -0.07713588301552442, -0.5804647359056426, -0.6227699371318457, -1.29963212390933, 1.0451605613454913, -0.7405203785739566]
# ---------------------------------------------
# Fidelity at Step 93: 0.9999727698188148
# Parameters at Step 93: [0.24453212095330634, 0.4100441400134776, 1.3315228065798237, 1.5519269263778077, -0.08074493839523109, -0.5825878464036203, -0.6319817828330716, -1.2972190092792801, 1.0425036708669353, -0.7485460033992009]
# ---------------------------------------------
# Fidelity at Step 94: 0.9999536250599137
# Parameters at Step 94: [0.2536722703488573, 0.4047163590726245, 1.3280973887080771, 1.5528955787872132, -0.08347035077885467, -0.5852439420251464, -0.6417777542876416, -1.2953414143018134, 1.0403125102601947, -0.755512034118745]
# ---------------------------------------------
# Fidelity at Step 95: 0.9999374137250309
# Parameters at Step 95: [0.26103507537725984, 0.4009139089583318, 1.3261420351258584, 1.5546937868438435, -0.08446635447884701, -0.5893556788424157, -0.65363447984888, -1.2945073876821842, 1.0388087430656394, -0.7607797218924264]
# ---------------------------------------------
# Fidelity at Step 96: 0.9999374430735666
# Parameters at Step 96: [0.26627778353324005, 0.3989196213921769, 1.3262941049535335, 1.5577437040216018, -0.0830341263130707, -0.5956816237748449, -0.668542648219778, -1.2951902905794082, 1.0381178975629617, -0.7638221259352528]
# ---------------------------------------------
# Fidelity at Step 97: 0.9999649962536792
# Parameters at Step 97: [0.2697847785808088, 0.39826938809292944, 1.3281747322189905, 1.561947624046751, -0.07949687637201738, -0.6038947845882106, -0.6859370315702844, -1.2973315413297546, 1.038061526677841, -0.7650278039241306]
# ---------------------------------------------
# Fidelity at Step 98: 0.9999680100655207
# Parameters at Step 98: [0.27271796356922473, 0.39790234793181534, 1.330559862331582, 1.566421385013026, -0.07551557234134834, -0.6122839897830252, -0.7031685695024871, -1.2999467407306893, 1.0381884951626206, -0.7657447634063109]
# ---------------------------------------------
# Fidelity at Step 99: 0.9999595967720151
# Parameters at Step 99: [0.2754978092770754, 0.39740785939847073, 1.3329187656288328, 1.5709769654558308, -0.07156215758556388, -0.6203943327686694, -0.719667344436063, -1.3028301018713266, 1.038348575369489, -0.7664758729296259]
# ---------------------------------------------
# Fidelity at Step 100: 0.9999691823205074
# Parameters at Step 100: [0.2787195506309466, 0.39613177135110544, 1.3345447334346552, 1.5756188983879296, -0.06806192154371864, -0.627881145467017, -0.7351851429420309, -1.3059974283184748, 1.0383453956803301, -0.7679358743646278]
# ---------------------------------------------
# Fidelity at Step 101: 0.9999619187430177
# Parameters at Step 101: [0.28242894636354865, 0.3941413617317143, 1.335479267494555, 1.5800706763364583, -0.06526765767770006, -0.6344739933622472, -0.7492072726913137, -1.3091003914506294, 1.038150294608112, -0.7701324583381439]
# ---------------------------------------------
# Fidelity at Step 102: 0.9999527247255329
# Parameters at Step 102: [0.2871335910640331, 0.39093393639234225, 1.3353433022708152, 1.584278472971393, -0.06355716737124557, -0.6398872952154574, -0.7614264174470093, -1.3120450306286773, 1.0375848139552188, -0.7736042137182007]
# ---------------------------------------------
# Fidelity at Step 103: 0.9999515087413056
# Parameters at Step 103: [0.2933050376932996, 0.3860404487541233, 1.3337755584108504, 1.5881898954764302, -0.06324796246744374, -0.6438856506025266, -0.7716056939368332, -1.3147448766603498, 1.0364772154865878, -0.7788250794420518]
# ---------------------------------------------
# Fidelity at Step 104: 0.9999675825710751
# Parameters at Step 104: [0.3008025878139133, 0.37959472310447573, 1.3309036751336876, 1.5917705693945272, -0.06415186981343302, -0.6466373568076824, -0.7799700701153659, -1.317171006296594, 1.0348254855853776, -0.7855574595540972]
# ---------------------------------------------
# Fidelity at Step 105: 0.999963746275596
# Parameters at Step 105: [0.3088450633120314, 0.3724504767796725, 1.3275806019833747, 1.5951023660493973, -0.06558320660968625, -0.6487570170855949, -0.7872103416969299, -1.3194046097416412, 1.032895159344219, -0.7928421744279232]
# ---------------------------------------------
# Fidelity at Step 106: 0.9999700820140445
# Parameters at Step 106: [0.3171910993892471, 0.3648639835104649, 1.3243046328861554, 1.5982874179327842, -0.0671603503119517, -0.6506556187610064, -0.7938392326798476, -1.3215284432992322, 1.0307296748903971, -0.8002770965620059]
# ---------------------------------------------
# Fidelity at Step 107: 0.9999719230711291
# Parameters at Step 107: [0.32535216077061707, 0.3573429169358276, 1.3214957744448017, 1.6013913477063242, -0.06846711483943341, -0.6527000200317139, -0.8003590156529284, -1.3236232324916415, 1.028486597834076, -0.8073183277096091]
# ---------------------------------------------
# Fidelity at Step 108: 0.9999704259176454
# Parameters at Step 108: [0.3329986920706661, 0.3501863547749545, 1.3192680004739101, 1.6044694211790305, -0.06917601723569679, -0.6551678922869517, -0.8072809087079148, -1.3257835416202473, 1.0262475057816136, -0.813620320598846]
# ---------------------------------------------
# Fidelity at Step 109: 0.9999707168742463
# Parameters at Step 109: [0.33998041479735225, 0.34364365212175774, 1.3181363309815315, 1.6076105586239462, -0.06917365171601826, -0.658241855114755, -0.8147859085270887, -1.3280243362252648, 1.0241020915541021, -0.8190015935969763]
# ---------------------------------------------
# Fidelity at Step 110: 0.9999620589656709
# Parameters at Step 110: [0.3460423583913361, 0.3378693854411911, 1.317849107573066, 1.6108044671696549, -0.06823790133041444, -0.662061454074891, -0.8233949240935231, -1.3304100784557746, 1.0220571285441684, -0.8232926159057777]
# ---------------------------------------------
# Fidelity at Step 111: 0.9999584134890723
# Parameters at Step 111: [0.35107705015323165, 0.3329995731612195, 1.318762522003613, 1.6141968846358399, -0.06612681776667563, -0.6669555529780533, -0.8338466286064476, -1.3330303389384777, 1.020113758621977, -0.8263798910593158]
# ---------------------------------------------
# Fidelity at Step 112: 0.9999738155717663
# Parameters at Step 112: [0.3551015929094727, 0.3289772288315669, 1.320341831355963, 1.6177230807833847, -0.06299787239164033, -0.6727464025727425, -0.8464442930459265, -1.3358252556034642, 1.0182343928369553, -0.8285450055481617]
# ---------------------------------------------
# Fidelity at Step 113: 0.9999756646761303
# Parameters at Step 113: [0.35874167432563875, 0.3253652359278932, 1.3222239301261025, 1.6211422839553296, -0.05984117723587664, -0.6784929360424193, -0.8592066116829397, -1.338460774651548, 1.0164419406180722, -0.8305134681878302]
# ---------------------------------------------
# Fidelity at Step 114: 0.9999632639053777
# Parameters at Step 114: [0.36224136399520773, 0.3219381474441058, 1.3240473921763805, 1.6243697990828445, -0.05697943055729214, -0.6838898111277703, -0.8717711089033766, -1.3408279751056573, 1.0146925419723878, -0.8326343910256307]
# ---------------------------------------------
# Fidelity at Step 115: 0.9999668306911149
# Parameters at Step 115: [0.3661666312383166, 0.31816758562783154, 1.3251122730719682, 1.6273587352316352, -0.054927480071705025, -0.6885799157666435, -0.8845328813242739, -1.3427311562359716, 1.012805347737836, -0.8358068058694881]
# ---------------------------------------------
# Fidelity at Step 116: 0.9999754262812004
# Parameters at Step 116: [0.37067123661742024, 0.3139181266791284, 1.3252926209140343, 1.63006555267173, -0.05381331395093721, -0.6924085884467933, -0.8968698569941312, -1.3441423791478109, 1.0108210366991226, -0.8400656201548946]
# ---------------------------------------------
# Fidelity at Step 117: 0.9999695765369236
# Parameters at Step 117: [0.37551193859655563, 0.30942234244863465, 1.3250715263661343, 1.632541596793348, -0.053322135969472426, -0.6956156209933725, -0.9083586665283568, -1.3451943259101402, 1.008852882232944, -0.8448540224550405]
# ---------------------------------------------
# Fidelity at Step 118: 0.9999704499616886
# Parameters at Step 118: [0.3809867404554024, 0.30431566381658504, 1.3237609869734193, 1.6347345280858476, -0.05357966866203419, -0.6980657635836122, -0.9190687419076997, -1.3458179041827725, 1.0068498778842818, -0.8504909559059793]
# ---------------------------------------------
# Fidelity at Step 119: 0.9999755406629498
# Parameters at Step 119: [0.38704494386526944, 0.2987017809134776, 1.322100112216846, 1.6368192018235117, -0.054239309681938824, -0.7001794349389332, -0.9290719847797951, -1.3461376589728842, 1.0048849088674854, -0.8565799861007863]
# ---------------------------------------------
# Fidelity at Step 120: 0.9999789011267138
# Parameters at Step 120: [0.39331096352645406, 0.29294291177961673, 1.320681871596173, 1.6389160289170315, -0.054877407594790976, -0.7023326927126207, -0.938493381656362, -1.3463437842275554, 1.0030652486348512, -0.8625479083263631]
# ---------------------------------------------
# Fidelity at Step 121: 0.9999805974747978
# Parameters at Step 121: [0.3994724184276103, 0.28723095564053364, 1.3192844618119195, 1.6410257256243213, -0.055214914824290494, -0.7046612022543123, -0.9475386902958111, -1.3465682713464002, 1.001459711273059, -0.868078671789324]
# ---------------------------------------------
# Fidelity at Step 122: 0.9999776972201694
# Parameters at Step 122: [0.4053697020523372, 0.2817518952184357, 1.3183268871909553, 1.643233147188186, -0.05510610404915501, -0.7073342903646016, -0.9563234811807184, -1.3468978140778394, 1.0001022397220145, -0.872971845704736]
# ---------------------------------------------
# Fidelity at Step 123: 0.9999730869531723
# Parameters at Step 123: [0.41099360416098013, 0.27661557993975416, 1.3183903011479048, 1.6456545435677146, -0.05453484459865441, -0.710475460425631, -0.9649009871180091, -1.3473801603951079, 0.9990075498368671, -0.8771861687839271]
# ---------------------------------------------
# Fidelity at Step 124: 0.9999757043274963
# Parameters at Step 124: [0.4162510926470969, 0.2717324710945155, 1.3191867431897601, 1.6484648379221867, -0.053046171510678926, -0.7144703154559339, -0.9739679574720589, -1.3482207595415412, 0.9982798148899359, -0.8805092467341217]
# ---------------------------------------------
# Fidelity at Step 125: 0.9999831071267143
# Parameters at Step 125: [0.4212157294059932, 0.2671278073829876, 1.3208485576256803, 1.6515742056714695, -0.051072873534062604, -0.7189571892077739, -0.9831871286985219, -1.3492992985000976, 0.9978150247357753, -0.8832925936026619]
# ---------------------------------------------
# Fidelity at Step 126: 0.9999800502741776
# Parameters at Step 126: [0.4258304803154631, 0.2628015348419753, 1.322576103509064, 1.6547282217328532, -0.04901727259077763, -0.7234002955708773, -0.9922258325738191, -1.3504893545077803, 0.9975241476585869, -0.8858653921420795]
# ---------------------------------------------
# Fidelity at Step 127: 0.999984253640506
# Parameters at Step 127: [0.43014442630061056, 0.25868690873059363, 1.3240528613827867, 1.657857631389491, -0.047087685568584815, -0.7275751147813105, -1.00102221378504, -1.3517377997881197, 0.9973592845901739, -0.8884676881460879]
# ---------------------------------------------
# Fidelity at Step 128: 0.9999815621781017
# Parameters at Step 128: [0.4342474947003735, 0.25468250766471245, 1.3250608082262136, 1.6609254323711848, -0.0454602413512257, -0.7313106186446107, -1.0095491550284497, -1.353004449699596, 0.9972786993423773, -0.8913329456660455]
# ---------------------------------------------
# Fidelity at Step 129: 0.9999749203279624
# Parameters at Step 129: [0.43828868033187524, 0.2507339797713747, 1.3259657360952608, 1.6639692251407665, -0.04423732639627555, -0.7346198312241684, -1.017741785522529, -1.3542354623451425, 0.997236371907943, -0.8945655022370658]
# ---------------------------------------------
# Fidelity at Step 130: 0.9999834155837806
# Parameters at Step 130: [0.44249513928258005, 0.24653040306162236, 1.3262773111368178, 1.6670270506989011, -0.04370634050950086, -0.7372511491001004, -1.0258454978354004, -1.3554121508254215, 0.9971815425965311, -0.8987263635407147]
# ---------------------------------------------
# Fidelity at Step 131: 0.9999843047093948
# Parameters at Step 131: [0.44662627031508617, 0.2423288408400945, 1.3261078794234142, 1.6699493390380813, -0.04356251616217691, -0.7393949056552869, -1.0336202982832152, -1.3565045133903642, 0.9971185609488726, -0.9032365078623498]
# ---------------------------------------------
# Fidelity at Step 132: 0.9999805708115902
# Parameters at Step 132: [0.4506341367367542, 0.23819936793492752, 1.3256677578018876, 1.6727233225737634, -0.043638344726336875, -0.7412146163040286, -1.0410454472963724, -1.3575100642862223, 0.9970466843308186, -0.9078502956878469]
# ---------------------------------------------
# Fidelity at Step 133: 0.9999848139657832
# Parameters at Step 133: [0.4545912429808756, 0.23404123760127696, 1.324996072175742, 1.6753963110904075, -0.043834865568968186, -0.7428366929831622, -1.048332836540861, -1.3584348303868834, 0.9969536660917491, -0.9125615675357505]
# ---------------------------------------------
# Fidelity at Step 134: 0.9999825718980218
# Parameters at Step 134: [0.4584279371592802, 0.2300414525837428, 1.3247252928222295, 1.677991163691852, -0.043977558780503384, -0.7444909542643103, -1.0553028345072406, -1.3592844893711566, 0.9968544812772046, -0.9170150826462645]
# ---------------------------------------------
# Fidelity at Step 135: 0.9999838951828618
# Parameters at Step 135: [0.4621382229469667, 0.22606720373351205, 1.3244691931108554, 1.6804903156912678, -0.04388265097535359, -0.7462825168234908, -1.062277085779582, -1.360073745565299, 0.9967386980878402, -0.9211551550856614]
# ---------------------------------------------
# Fidelity at Step 136: 0.9999824784588653
# Parameters at Step 136: [0.46573094404052606, 0.22218648245433192, 1.3246036871228646, 1.6829350170970572, -0.043495549937033674, -0.7483250061268732, -1.0691916011324232, -1.3608082940377562, 0.9966148553885258, -0.924864944920965]
# ---------------------------------------------
# Fidelity at Step 137: 0.9999844204058552
# Parameters at Step 137: [0.4691665762480434, 0.21829418758173436, 1.3246903022951173, 1.6852722136598322, -0.04263885474266841, -0.7506944700595976, -1.0763112061299038, -1.3614873941276366, 0.9964682866850386, -0.928060163579988]
# ---------------------------------------------
# Fidelity at Step 138: 0.999981074378395
# Parameters at Step 138: [0.4724835716645587, 0.2145049377725422, 1.3251259128289934, 1.6875346505803719, -0.041523943498307715, -0.7532784602034222, -1.0833319831342216, -1.3620989339182574, 0.9963078754896275, -0.9308430754925966]
# ---------------------------------------------
# Fidelity at Step 139: 0.9999795341590785
# Parameters at Step 139: [0.47570079563668827, 0.2106587381633647, 1.3254196683382355, 1.6896652488659856, -0.04007702689921555, -0.7560711026919471, -1.0905213768357342, -1.362611511128764, 0.9961036723792209, -0.9332696152915033]
# ---------------------------------------------
# Fidelity at Step 140: 0.9999889992016171
# Parameters at Step 140: [0.47898375000729126, 0.2067913373020297, 1.3262842079716255, 1.6917932283805504, -0.03851562036395674, -0.7590541683418879, -1.097685648625456, -1.3630140081676458, 0.9958597307579654, -0.935521013127338]
# ---------------------------------------------
# Fidelity at Step 141: 0.999987152994515
# Parameters at Step 141: [0.4820979864855765, 0.2030562123457122, 1.3268750998011958, 1.6937422986913524, -0.03705166232259487, -0.7618206345540147, -1.1045286788348445, -1.3633311333858664, 0.9956043895794627, -0.9377007330127332]
# ---------------------------------------------
# Fidelity at Step 142: 0.9999837616045216
# Parameters at Step 142: [0.4852152795745083, 0.19933276937130542, 1.3274710255573734, 1.6956023299086598, -0.03577461253637882, -0.7643978601280781, -1.1111527147563243, -1.363538821944383, 0.9953193029481325, -0.9399955381588703]
# ---------------------------------------------
# Fidelity at Step 143: 0.9999814459257635
# Parameters at Step 143: [0.48839916484759044, 0.19550491221578772, 1.3277946418636415, 1.6973704451352887, -0.034798868893791066, -0.7666648753845143, -1.1176717543209973, -1.3636173732484602, 0.9949934317787731, -0.9426268272160425]
# ---------------------------------------------
# Fidelity at Step 144: 0.9999850374815137
# Parameters at Step 144: [0.49165454993302277, 0.19157234440392285, 1.3278888659113894, 1.6990801687879322, -0.03415337895580718, -0.7686115048888433, -1.1241119264423507, -1.3635854280358073, 0.994637952532866, -0.9456455164014339]
# ---------------------------------------------
# Fidelity at Step 145: 0.9999861831973936
# Parameters at Step 145: [0.4948516637510453, 0.18771062041704265, 1.3279261867569543, 1.700750702088882, -0.03371833064936751, -0.7703351317991944, -1.130353693041737, -1.3635155517339068, 0.9943020382966017, -0.9488152011860097]
# ---------------------------------------------
# Fidelity at Step 146: 0.9999849895773791
# Parameters at Step 146: [0.4979179564776596, 0.1839432903772568, 1.3277729869065453, 1.7023684683632287, -0.033421590116644154, -0.7718574676594108, -1.1364400887029087, -1.3634364571146622, 0.9939995210390251, -0.9520625040412555]
# ---------------------------------------------
# Fidelity at Step 147: 0.9999867654127862
# Parameters at Step 147: [0.5008672133519527, 0.18029084997712289, 1.3277142937941975, 1.704005832696915, -0.03317473744913395, -0.7733145786739568, -1.1424194449958749, -1.3633827960967202, 0.9937519907295783, -0.955299499225928]
# ---------------------------------------------
# Fidelity at Step 148: 0.9999887913045802
# Parameters at Step 148: [0.503651682297394, 0.17679445321469484, 1.3277508023942615, 1.7056658715580102, -0.032895307435838114, -0.774767538847392, -1.1482963708561775, -1.3633806485383175, 0.9935767702098116, -0.958419192994982]
# ---------------------------------------------
# Fidelity at Step 149: 0.999985035956237
# Parameters at Step 149: [0.506274433952535, 0.1734509832221617, 1.327941771421969, 1.7073681258447921, -0.032527136133335424, -0.7762749830110723, -1.154080538510611, -1.3634484542330767, 0.9934845033090591, -0.9613654595093701]
# ---------------------------------------------
# Fidelity at Step 150: 0.9999878877045175
# Parameters at Step 150: [0.508749462189302, 0.17018243939953917, 1.328248119010844, 1.7091637132690014, -0.03196923377526107, -0.7779311273700311, -1.1599394135997274, -1.363615635769636, 0.9934987056822612, -0.9641197343895163]
# ---------------------------------------------
# Fidelity at Step 151: 0.9999872199533634
# Parameters at Step 151: [0.5110306887058557, 0.16704257626956867, 1.328412707125246, 1.7109354590868575, -0.03128792053631156, -0.7796047782485971, -1.1656706186958778, -1.3638443013173356, 0.9935892840802129, -0.9666568943461408]
# ---------------------------------------------
# Fidelity at Step 152: 0.9999846672973843
# Parameters at Step 152: [0.5132716374928659, 0.16400649350758884, 1.3290566977033718, 1.7127763720480835, -0.030506251567427905, -0.7814239682976281, -1.171216767607195, -1.3641084371106356, 0.9937345066138964, -0.9689872541512644]
# ---------------------------------------------
# Fidelity at Step 153: 0.9999867351528788
# Parameters at Step 153: [0.5154380642884647, 0.16101081197847836, 1.329681664784569, 1.7146132461957844, -0.029644046624950387, -0.7832743982470529, -1.1766086526335258, -1.3643966310971727, 0.9939355408878404, -0.9711722947871008]
# ---------------------------------------------
# Fidelity at Step 154: 0.9999862241484375
# Parameters at Step 154: [0.5174946573552897, 0.15803941295449758, 1.3298958677221986, 1.7163569021894192, -0.028766525059498282, -0.7850159870432324, -1.1817991369158976, -1.3646823960544083, 0.9941758238430728, -0.9732732065211951]
# ---------------------------------------------
# Fidelity at Step 155: 0.9999830014109053
# Parameters at Step 155: [0.5195040670339732, 0.15504745752530472, 1.329762082040617, 1.7180207634581337, -0.02790811151081738, -0.7866483216268434, -1.1867998664498791, -1.3649424237865677, 0.9944472518870449, -0.9753472278100357]
# ---------------------------------------------
# Fidelity at Step 156: 0.9999864511000253
# Parameters at Step 156: [0.521716676233046, 0.15176583730252977, 1.32957639767777, 1.719723619205503, -0.02705181275335927, -0.7883125895766997, -1.1918166235153695, -1.3651207890826964, 0.9947525014995762, -0.977549037634112]
# ---------------------------------------------
# Fidelity at Step 157: 0.9999826513650695
# Parameters at Step 157: [0.5239964399597866, 0.14844427349129943, 1.329535680851092, 1.7214165735621738, -0.026241741176397944, -0.7899683259992069, -1.1967061184083099, -1.365230019906992, 0.995068887255867, -0.979764123843502]
# ---------------------------------------------
# Fidelity at Step 158: 0.9999882498026169
# Parameters at Step 158: [0.5262861536543998, 0.14504362765356396, 1.3292303431819674, 1.7230464001713983, -0.025476035965192906, -0.7915423527347953, -1.2016220671627142, -1.365248690384345, 0.9954066878505755, -0.982048867852817]
# ---------------------------------------------
# Fidelity at Step 159: 0.9999857804291704
# Parameters at Step 159: [0.5285783161964774, 0.14174543081676313, 1.3293491757849916, 1.7246782438510426, -0.02475599728313822, -0.7931466879198291, -1.206408566689147, -1.3652104189458465, 0.995741455288656, -0.9842759850445346]
# ---------------------------------------------
# Fidelity at Step 160: 0.9999873512494414
# Parameters at Step 160: [0.5307594882904066, 0.13849537736632228, 1.3291680039841776, 1.7262214114795207, -0.024073128205469314, -0.7946463806378471, -1.2112733331631036, -1.3651025135799986, 0.9960957414433877, -0.9865214428462055]
# ---------------------------------------------
# Fidelity at Step 161: 0.9999886461524623
# Parameters at Step 161: [0.5328458040346352, 0.13532214379578952, 1.328966681660508, 1.7277096320780783, -0.023409888019261096, -0.7961063813366783, -1.2162034351667637, -1.364932886473924, 0.996457568976065, -0.9887460480457212]
# ---------------------------------------------
# Fidelity at Step 162: 0.9999902032474028
# Parameters at Step 162: [0.5348340653861403, 0.13225505344324726, 1.328847208373762, 1.7291647442219011, -0.02275233771033779, -0.7975569869396825, -1.2211816293722073, -1.3647211936191672, 0.9968308645104235, -0.9909191256609178]
# ---------------------------------------------
# Fidelity at Step 163: 0.9999889132157431
# Parameters at Step 163: [0.5366843777290922, 0.12933385256430174, 1.3286998070140525, 1.7305499865191734, -0.022101471015799637, -0.7989569513789231, -1.2260919913498218, -1.3644891300526791, 0.9972069997644573, -0.9929949238177518]
# ---------------------------------------------
# Fidelity at Step 164: 0.999989502893319
# Parameters at Step 164: [0.5385235144431655, 0.12657531222828117, 1.3291695036954272, 1.731969724717606, -0.021470826093723947, -0.8004348306344797, -1.2308069913603101, -1.3642503959184866, 0.9975698806550973, -0.9949470347889933]
# ---------------------------------------------
# Fidelity at Step 165: 0.9999843597471785
# Parameters at Step 165: [0.5402555117761144, 0.12386269780869166, 1.3294479006466213, 1.7333139145782528, -0.020846006937108762, -0.801838413403773, -1.2355098657138137, -1.3639852050723429, 0.9979320419695976, -0.9968610486132228]
# ---------------------------------------------
# Fidelity at Step 166: 0.9999892815397706
# Parameters at Step 166: [0.5419763943442601, 0.12107381862368885, 1.3296300899917965, 1.7346385301625344, -0.020234793790248003, -0.8032058316245257, -1.2402922602982387, -1.3636810725471509, 0.9982968131525182, -0.9988249045527734]
# ---------------------------------------------
# Fidelity at Step 167: 0.9999868476099629
# Parameters at Step 167: [0.5436811281664722, 0.118345847639199, 1.3299671848217822, 1.7359542632234002, -0.01966693203088115, -0.8045450432272869, -1.2448980877577691, -1.3633785403963068, 0.9986538360575402, -1.0007502576104403]
# ---------------------------------------------
# Fidelity at Step 168: 0.999988211891881
# Parameters at Step 168: [0.5453907419233148, 0.11562234491250503, 1.3303961942731146, 1.737252260673979, -0.019144809206808718, -0.8058398287593344, -1.249321291250448, -1.3630740644029284, 0.9989949914004753, -1.002662997387518]
# ---------------------------------------------
# Fidelity at Step 169: 0.9999928232199977
# Parameters at Step 169: [0.5470298089619259, 0.11294845433054324, 1.3306209946783156, 1.7384770651864752, -0.018675792225203727, -0.8070111888957845, -1.2535154412184717, -1.3627845973676904, 0.9993249606501416, -1.0045453852920951]
# ---------------------------------------------
# Fidelity at Step 170: 0.9999887321883951
# Parameters at Step 170: [0.5486670595875374, 0.11037292141096658, 1.3311378474407562, 1.739711282598617, -0.01825573027893023, -0.8081728492461591, -1.2574421460778868, -1.3625190854901312, 0.9996410514327894, -1.0063623030994178]
# ---------------------------------------------
# Fidelity at Step 171: 0.9999920762371789
# Parameters at Step 171: [0.5502349300327534, 0.10779993295101048, 1.3313410830723522, 1.7408785660065607, -0.01787842457962252, -0.8092012976753657, -1.26118384893814, -1.362280184910283, 0.9999541327857622, -1.008177363562292]
# ---------------------------------------------
# Fidelity at Step 172: 0.9999868204329156
# Parameters at Step 172: [0.5518084300106664, 0.10527005598833068, 1.3317547409661767, 1.7420676848269074, -0.017533696191392507, -0.8102232480918031, -1.2647257065399566, -1.3620720984869334, 1.0002613731708783, -1.0099566960612834]
# ---------------------------------------------
# Fidelity at Step 173: 0.9999913559311914
# Parameters at Step 173: [0.5533847676018934, 0.1026629991782988, 1.3321004972759545, 1.7432814900492706, -0.01719997151269994, -0.811207265200446, -1.2681875031077672, -1.3619056071299658, 1.0005797750452579, -1.011770234432686]
# ---------------------------------------------
# Fidelity at Step 174: 0.9999913214040246
# Parameters at Step 174: [0.5548821612889252, 0.10011060185804076, 1.3323360404668545, 1.7444747043954105, -0.01687189563828634, -0.8121357317212428, -1.2715295445300454, -1.3617802772055234, 1.000907299920358, -1.0135406958891202]
# ---------------------------------------------
# Fidelity at Step 175: 0.9999860300741775
# Parameters at Step 175: [0.5563210452484355, 0.0976058452896269, 1.3326247104410003, 1.7456829155834255, -0.01652412298005788, -0.8130683214357861, -1.2748043515034768, -1.3616955370247923, 1.0012482944684757, -1.0152569520851642]
# ---------------------------------------------
# Fidelity at Step 176: 0.9999869130137046
# Parameters at Step 176: [0.5576730167381697, 0.09506111198725749, 1.332764702975683, 1.7469248277458853, -0.016100468936757248, -0.8140193217285754, -1.2782028243333425, -1.361661260556583, 1.0016330592746054, -1.0169543314815557]
# ---------------------------------------------
# Fidelity at Step 177: 0.9999877217147968
# Parameters at Step 177: [0.5589274852929289, 0.09258159463488037, 1.3328613000011675, 1.7481529641823588, -0.015629042269218567, -0.8149771282707365, -1.2816037761035932, -1.361648318523111, 1.0020367177663734, -1.0185712748653197]
# ---------------------------------------------
# Fidelity at Step 178: 0.9999872658409166
# Parameters at Step 178: [0.5602276069540725, 0.09013573257411094, 1.3335546522621782, 1.7494895597083502, -0.015105424148538587, -0.8161053194032152, -1.2850558032987738, -1.3616335521110063, 1.0024591565155927, -1.0201225639762055]
# ---------------------------------------------
# Fidelity at Step 179: 0.9999875682031998
# Parameters at Step 179: [0.5614533265372388, 0.08765171564005021, 1.333926828438151, 1.750790180328323, -0.01456972519685228, -0.8171790361138784, -1.2886525881175384, -1.361586512781489, 1.0029150679738548, -1.0217108790652585]
# ---------------------------------------------
# Fidelity at Step 180: 0.9999901610429807
# Parameters at Step 180: [0.5626325362460528, 0.08529226687868985, 1.334338451916551, 1.75203049108332, -0.01408242264275763, -0.8181988672647009, -1.2920751971204063, -1.3615175757134808, 1.0033517777316945, -1.0232391229953068]
# ---------------------------------------------
# Fidelity at Step 181: 0.9999924934552326
# Parameters at Step 181: [0.5637724360868879, 0.08301598861453113, 1.3346625456129624, 1.7531977142841724, -0.013653694695672566, -0.8191386884772068, -1.2953676293760934, -1.3614089750352865, 1.0037750754379051, -1.0247469653569756]
# ---------------------------------------------
# Fidelity at Step 182: 0.9999896337416619
# Parameters at Step 182: [0.5648823856161473, 0.08081632764422314, 1.3349008987446196, 1.7542874802301704, -0.013285444609570781, -0.8199977576502516, -1.298518194796666, -1.3612559517700367, 1.0041812470182039, -1.02623464086704]
# ---------------------------------------------
# Fidelity at Step 183: 0.9999886552526498
# Parameters at Step 183: [0.5659564124151729, 0.07863183162668098, 1.3348109422735874, 1.7552690439422955, -0.012978665788255614, -0.8207337329914272, -1.3015909474075096, -1.361033993599378, 1.004578488805329, -1.0277434686827096]
# ---------------------------------------------
# Fidelity at Step 184: 0.9999896963669672
# Parameters at Step 184: [0.5670407395555168, 0.0764062248474207, 1.3345306186077732, 1.7561739406827335, -0.012682726906812027, -0.8214300293705896, -1.304667668449513, -1.3607179965360254, 1.0049775004301271, -1.0292546807793863]
# ---------------------------------------------
# Fidelity at Step 185: 0.9999892555185942
# Parameters at Step 185: [0.5681657453963701, 0.0742498846666968, 1.3346425503959116, 1.7570930899011632, -0.012365431398008667, -0.822222072957483, -1.3076478743542113, -1.3603657170326973, 1.0053656310008658, -1.030665191966978]
# ---------------------------------------------
# Fidelity at Step 186: 0.9999873008804145
# Parameters at Step 186: [0.5692469331809209, 0.07214852738005786, 1.334726456522285, 1.7579694445496343, -0.01199912569255848, -0.8230416451427469, -1.310601063990293, -1.359981830405764, 1.005761025868443, -1.031971760971129]
# ---------------------------------------------
# Fidelity at Step 187: 0.9999902918781893
# Parameters at Step 187: [0.5703082261411033, 0.0700651796476419, 1.334896749157116, 1.7588420377632485, -0.011550108087819182, -0.8239479011347973, -1.3136022649324255, -1.3595645721844367, 1.006177612859388, -1.0331729024355931]
# ---------------------------------------------
# Fidelity at Step 188: 0.9999885080739047
# Parameters at Step 188: [0.5713983164659022, 0.06808727518476335, 1.3355403183830272, 1.7597628961459177, -0.011100614964728651, -0.8249430455093314, -1.3164774463852682, -1.35915795659551, 1.0065828112639668, -1.0342885329375568]
# ---------------------------------------------
# Fidelity at Step 189: 0.9999862800292353
# Parameters at Step 189: [0.572434804434997, 0.06612847682904653, 1.3359856559587966, 1.7606464812958231, -0.010677275161958846, -0.825864146318007, -1.3193148628423925, -1.3587522835441175, 1.0069937130870932, -1.0354150953922756]
# ---------------------------------------------
# Fidelity at Step 190: 0.999986941039955
# Parameters at Step 190: [0.5734157712423219, 0.0642115062042154, 1.3362412133150423, 1.7614910453000487, -0.01031387695515812, -0.8266793210934649, -1.3220600557304993, -1.358363678558335, 1.0074003729498613, -1.0365720553497828]
# ---------------------------------------------
# Fidelity at Step 191: 0.9999890579779093
# Parameters at Step 191: [0.5742763144740772, 0.062338167532080394, 1.3359942272632255, 1.762246736010568, -0.01000955999141036, -0.8273153806758138, -1.3247240659672528, -1.3580029613090359, 1.0078091831397777, -1.037765435378209]
# ---------------------------------------------
# Fidelity at Step 192: 0.9999798614554044
# Parameters at Step 192: [0.5750348275406495, 0.06050912454155909, 1.3354045794993685, 1.7629382965909832, -0.009732835043158979, -0.8278312897232609, -1.3273137762418075, -1.3576734333506415, 1.0082177525611982, -1.0389580051847715]
# ---------------------------------------------
# Fidelity at Step 193: 0.9999867211457386
# Parameters at Step 193: [0.575541937870243, 0.058638268388376595, 1.333829424344813, 1.7635015625270305, -0.00933054933450848, -0.8282103517661018, -1.330068048247978, -1.3573865132373175, 1.0086873691308977, -1.040078785952071]
# ---------------------------------------------
# Fidelity at Step 194: 0.9999885173075377
# Parameters at Step 194: [0.5759330636780624, 0.05686170692563526, 1.332260716345552, 1.7640508985116936, -0.008758432038375216, -0.8287101056883912, -1.3328695949297107, -1.3571284961271388, 1.0091873394591213, -1.0409664786968056]
# ---------------------------------------------
# Fidelity at Step 195: 0.9999914414076407
# Parameters at Step 195: [0.5761698087898832, 0.05520787437158809, 1.3306213537620428, 1.764541391429165, -0.007951988821247653, -0.8293609243539548, -1.3357305590793547, -1.3568807266091802, 1.0097224300509535, -1.041523503066072]
# ---------------------------------------------
# Fidelity at Step 196: 0.9999909358323312
# Parameters at Step 196: [0.5764460927363217, 0.05365713280830934, 1.3297462583876818, 1.7650963298974642, -0.006946505580886105, -0.8303293111980891, -1.3385944472376565, -1.356607494314803, 1.0102670663121784, -1.041766147095695]
# ---------------------------------------------
# Fidelity at Step 197: 0.9999906039213291
# Parameters at Step 197: [0.5767220976396684, 0.05221967959575094, 1.3291124851999803, 1.7656255362200648, -0.005933869704823914, -0.8313403443762636, -1.341316897342956, -1.3563194698910808, 1.0107906440580432, -1.0418902860638228]
# ---------------------------------------------
# Fidelity at Step 198: 0.999989638431839
# Parameters at Step 198: [0.5770661079103651, 0.05082746036319119, 1.3287969456999924, 1.7661289073141504, -0.004936116431480546, -0.8324038971128354, -1.343923477449557, -1.355964787344698, 1.0112947775188423, -1.0419333188145385]
# ---------------------------------------------
# Fidelity at Step 199: 0.999991606617489
# Parameters at Step 199: [0.5775094191382846, 0.04945618421796187, 1.328710317887879, 1.7666051097500668, -0.004059246822290077, -0.8334229380088882, -1.3463756706953927, -1.355537991382031, 1.0117698517998235, -1.042021085504792]
# ---------------------------------------------
# Fidelity at Step 200: 0.9999887382906469
# Parameters at Step 200: [0.578030459572647, 0.048039145898378995, 1.328377894191988, 1.7669658843418832, -0.0034115315624715353, -0.8342053910707248, -1.3486503101922291, -1.3550075140083484, 1.0122104791186208, -1.0422898758521553]
# ---------------------------------------------
# Fidelity at Step 201: 0.9999925504526961
# Parameters at Step 201: [0.5786045080319537, 0.046515376553358335, 1.3274020566780673, 1.7671434184496464, -0.0030863047854294773, -0.8345862731337464, -1.350754920349443, -1.3543502995139975, 1.012623699363371, -1.0428631724087638]
# ---------------------------------------------
# Fidelity at Step 202: 0.999987255051033
# Parameters at Step 202: [0.5792017647238181, 0.04507292120345454, 1.3266199087241262, 1.7673322669051341, -0.002871042638470988, -0.8349088800492177, -1.3527100967128156, -1.3537096189628486, 1.013010919594112, -1.043485932366876]
# ---------------------------------------------
# Fidelity at Step 203: 0.9999888907573472
# Parameters at Step 203: [0.5797387075603089, 0.04364080969232375, 1.325521237782578, 1.7674533054630355, -0.0027846252235358806, -0.8350404370721993, -1.3546345976760379, -1.3530614356430213, 1.0134144707666521, -1.044209093510324]
# ---------------------------------------------
# Fidelity at Step 204: 0.9999881322007121
# Parameters at Step 204: [0.5802396206810376, 0.0422928753246885, 1.324706835881735, 1.7676251500038638, -0.0026872600106955867, -0.8352189681322226, -1.3565439873059322, -1.3524655967666366, 1.0138380231743556, -1.0448691491124078]
# ---------------------------------------------
# Fidelity at Step 205: 0.9999894671754493
# Parameters at Step 205: [0.5806914874725001, 0.04104581140421217, 1.3244634196493874, 1.7679304385405523, -0.0024719827456277303, -0.8355911829340839, -1.358539534277575, -1.3519640678407858, 1.0143193814162696, -1.0453599992552314]
# ---------------------------------------------
# Fidelity at Step 206: 0.9999887550530641
# Parameters at Step 206: [0.58112054461415, 0.03988085878168208, 1.324829539004447, 1.768372668192599, -0.0021429946527134463, -0.8361566271138334, -1.3605549445614127, -1.3515688560968233, 1.0148434248146052, -1.0456880730062577]
# ---------------------------------------------
# Fidelity at Step 207: 0.9999886259446119
# Parameters at Step 207: [0.5814592119397126, 0.038760976413281766, 1.3251588665269407, 1.7688472494478302, -0.0017760094832740304, -0.8367119264841479, -1.3625314031351388, -1.3512812908547716, 1.0154026011795458, -1.0459490260414839]
# ---------------------------------------------
# Fidelity at Step 208: 0.9999881139367572
# Parameters at Step 208: [0.5818449253018668, 0.03765135420895377, 1.325802181057237, 1.7694035248856217, -0.0014506017523894395, -0.837284168467825, -1.36437529011688, -1.351070052402854, 1.0159605514890004, -1.0462281768586885]
# ---------------------------------------------
# Fidelity at Step 209: 0.999987619645847
# Parameters at Step 209: [0.5821125428709006, 0.036466933319086334, 1.325516216879589, 1.7698410345525648, -0.0012633434400915718, -0.8375101782469347, -1.3660476637189252, -1.350942916995247, 1.0165309547632595, -1.046658068686272]
# ---------------------------------------------
# Fidelity at Step 210: 0.99999025342367
# Parameters at Step 210: [0.582340225734264, 0.03517995166803719, 1.3246358185219782, 1.7702078978309685, -0.0011893135878432212, -0.8374859194741336, -1.3675400627278114, -1.3508856113579193, 1.0171132484957168, -1.0472076010069815]
# ---------------------------------------------
# Fidelity at Step 211: 0.9999884765567478
# Parameters at Step 211: [0.5827442110195769, 0.03389662460962347, 1.3246663160569445, 1.7707286210834383, -0.001116984343657353, -0.8376557949861562, -1.3689037468637544, -1.3508563250802832, 1.0176748261746098, -1.0477187392767007]
# ---------------------------------------------
# Fidelity at Step 212: 0.9999904561856239
# Parameters at Step 212: [0.5832025352877948, 0.032631330812688694, 1.3250559757662133, 1.771294272313985, -0.0010245966853708556, -0.8379075762354566, -1.3701751164810776, -1.3508489587041526, 1.0182246556232561, -1.0481672409301965]
# ---------------------------------------------
# Fidelity at Step 213: 0.9999901858354218
# Parameters at Step 213: [0.5836605704891774, 0.031376665348989655, 1.3255095674442012, 1.7718537512835941, -0.0009259331585511649, -0.8381631597635073, -1.3713892089714161, -1.3508567602125896, 1.0187724587122158, -1.0485744541707018]
# ---------------------------------------------
# Fidelity at Step 214: 0.999988435759673
# Parameters at Step 214: [0.5841644644043431, 0.0301670059306356, 1.326257830920721, 1.7724447901025937, -0.0008609609656053234, -0.8384547421229761, -1.3725448268533085, -1.3508618728232376, 1.0192939089925728, -1.0489831592110408]
# ---------------------------------------------
# Fidelity at Step 215: 0.9999895762160621
# Parameters at Step 215: [0.5846071363430934, 0.028952547931390647, 1.3266199248264239, 1.7729538086743766, -0.0008886357076636716, -0.8385738392256091, -1.3736919164157642, -1.3508571862340193, 1.019813802841107, -1.049476081419914]
# ---------------------------------------------
# Fidelity at Step 216: 0.9999865439963951
# Parameters at Step 216: [0.5850557559413209, 0.027766459985979788, 1.327061556266125, 1.7734498368139349, -0.0009720306347003305, -0.8386553607704775, -1.3748193624390446, -1.3508389982062428, 1.020319208258268, -1.0500056998757614]
# ---------------------------------------------
# Fidelity at Step 217: 0.9999885827559089
# Parameters at Step 217: [0.5854117281207973, 0.026620914277741707, 1.327155156248737, 1.7738595553203997, -0.0010939920041759354, -0.8386143736767395, -1.3759722960439218, -1.3508104401024605, 1.0208262718879615, -1.050558252967573]
# ---------------------------------------------
# Fidelity at Step 218: 0.999988349649449
# Parameters at Step 218: [0.5858236667875478, 0.025519249618372042, 1.3277427224692333, 1.7743297264414288, -0.0011998850761463797, -0.8386915295261786, -1.3771535054686013, -1.350765659221492, 1.0213253207632875, -1.051067274653289]
# ---------------------------------------------
# Fidelity at Step 219: 0.9999927873362311
# Parameters at Step 219: [0.5862885686180744, 0.02446858506824528, 1.328768317162256, 1.7748567588791937, -0.001287037408261083, -0.8388806807238663, -1.3783355403979574, -1.3507124604760232, 1.0218139081740487, -1.051532499293655]
# ---------------------------------------------
# Fidelity at Step 220: 0.9999904472208753
# Parameters at Step 220: [0.5868062589518784, 0.023450048412156033, 1.330064725519595, 1.7754166728690355, -0.001396332986240341, -0.8391133113320853, -1.3794656098399214, -1.3506545039230118, 1.0222811084089933, -1.0520065794444358]
# ---------------------------------------------
# Fidelity at Step 221: 0.999989842546006
# Parameters at Step 221: [0.5872844947166628, 0.022425904775236594, 1.3309321801533682, 1.7758978858197463, -0.0015924064284546574, -0.8391785376958127, -1.3805132302165977, -1.3505976173625995, 1.0227283344789568, -1.0525785601205644]
# ---------------------------------------------
# Fidelity at Step 222: 0.9999870275056041
# Parameters at Step 222: [0.5877537904766912, 0.021392833563420723, 1.3315298501957273, 1.7763328917515049, -0.0018538439441622395, -0.8391290190289682, -1.3814592840869555, -1.3505514496180628, 1.0231558695945016, -1.0532272656902077]
# ---------------------------------------------
# Fidelity at Step 223: 0.9999884661453955
# Parameters at Step 223: [0.5884048263614704, 0.020321388108284218, 1.332843939475398, 1.7769053981589935, -0.0021503852953904654, -0.8392136781452616, -1.3822951490721656, -1.3505225289686806, 1.0235581477181113, -1.0539266248019814]
# ---------------------------------------------
# Fidelity at Step 224: 0.9999884532070813
# Parameters at Step 224: [0.5889900674029886, 0.01924770428584472, 1.3336934487269372, 1.7774144968686065, -0.002489047082506203, -0.8391561943730307, -1.3830499513386436, -1.3505289113292414, 1.0239430903605118, -1.0546918015277298]
# ---------------------------------------------
# Fidelity at Step 225: 0.9999898475733888
# Parameters at Step 225: [0.5895506048504998, 0.018194378508048982, 1.3344672926785306, 1.7779352447027705, -0.0028199249136769576, -0.8390843005297155, -1.3837557963187326, -1.3505831368072339, 1.024316936801018, -1.0554693272819546]
# ---------------------------------------------
# Fidelity at Step 226: 0.9999907428905219
# Parameters at Step 226: [0.5901977370756432, 0.01720730735481824, 1.3358847432853092, 1.7785804675311812, -0.0031111741874706956, -0.839195479627143, -1.3844284679651042, -1.3506652385280777, 1.0246693620289369, -1.0561995176813221]
# ---------------------------------------------
# Fidelity at Step 227: 0.9999906102497097
# Parameters at Step 227: [0.590844867359659, 0.01627999061901328, 1.3374478668238698, 1.7792678723385187, -0.0033879803093705956, -0.8393568484949371, -1.385075003542551, -1.350777094926472, 1.0249992681104725, -1.0569197423061372]
# ---------------------------------------------
# Fidelity at Step 228: 0.9999910334499799
# Parameters at Step 228: [0.591543823070571, 0.015365295171680707, 1.339289895838081, 1.7800321846247285, -0.00368734918107617, -0.8395639503160266, -1.3856726658278233, -1.3509285028095877, 1.0252961632984325, -1.057697135594466]
# ---------------------------------------------
# Fidelity at Step 229: 0.999988409038041
# Parameters at Step 229: [0.5922323634726342, 0.01449344407434168, 1.341072365846745, 1.7807940042253934, -0.004020935168399662, -0.8397365382906985, -1.386217667855764, -1.351093930735195, 1.0255610966677864, -1.058518138916406]
# ---------------------------------------------
# Fidelity at Step 230: 0.9999901039286563
# Parameters at Step 230: [0.5928132620427989, 0.013598646052713477, 1.3420267384272355, 1.7814540200052944, -0.004481787594141734, -0.8396210033567889, -1.3866913758321575, -1.3513050735492125, 1.0257866556443531, -1.059548334044546]
# ---------------------------------------------
# Fidelity at Step 231: 0.9999915196733226
# Parameters at Step 231: [0.5932533362477989, 0.012721532992385855, 1.3421873232196984, 1.782005536688833, -0.004981545274549092, -0.8393000221813757, -1.3871439407418964, -1.3515519569762011, 1.0259807639210963, -1.0606729599407296]
# ---------------------------------------------
# Fidelity at Step 232: 0.9999884483776887
# Parameters at Step 232: [0.5936264913568032, 0.011918917506283057, 1.3422479640294946, 1.7825284524150424, -0.005397205500562751, -0.8390286721940902, -1.387612788995174, -1.3518038286523277, 1.02615340247575, -1.0617009680045686]
# ---------------------------------------------
# Fidelity at Step 233: 0.9999893754195205
# Parameters at Step 233: [0.5939147481832755, 0.011224914908710732, 1.342500451027886, 1.7830627156054486, -0.005544808905161805, -0.8390145839131689, -1.3881919560065117, -1.3520723348736075, 1.026317765175582, -1.0624413935125638]
# ---------------------------------------------
# Fidelity at Step 234: 0.9999869842920284
# Parameters at Step 234: [0.5941250004028764, 0.010646979257375278, 1.343025322096422, 1.783601059347741, -0.0053556782844705235, -0.8393379560831443, -1.3889179381326244, -1.352327019736342, 1.0264676445133947, -1.0628130998079022]
# ---------------------------------------------
# Fidelity at Step 235: 0.9999893059352454
# Parameters at Step 235: [0.5942117178606191, 0.010163933474947767, 1.3432180484802942, 1.784028868450274, -0.004968590479311849, -0.8397604447145343, -1.38973655486462, -1.3525316255797117, 1.026605482439269, -1.0629541374178728]
# ---------------------------------------------
# Fidelity at Step 236: 0.9999896434642385
# Parameters at Step 236: [0.5943484019952318, 0.009694666764780353, 1.3434811896287295, 1.78436741956631, -0.004450894883286583, -0.8403327344086806, -1.390590184555642, -1.352592763454403, 1.0266775758984168, -1.0629430110938438]
# ---------------------------------------------
# Fidelity at Step 237: 0.9999901073736958
# Parameters at Step 237: [0.5945756158662212, 0.009196184216358051, 1.3436384200091407, 1.7845877212992054, -0.003988396553925689, -0.8408677281742165, -1.3913982477537676, -1.3524894743047176, 1.0266746373408528, -1.0629792281037513]
# ---------------------------------------------
# Fidelity at Step 238: 0.9999927691264081
# Parameters at Step 238: [0.594703642656405, 0.008692772398369507, 1.3427089405740413, 1.784543553512466, -0.0036730206376294873, -0.8410659516918019, -1.3921709162345033, -1.3522501730040704, 1.0266266948791514, -1.0631680015636504]
# ---------------------------------------------
# Fidelity at Step 239: 0.9999917315733722
# Parameters at Step 239: [0.5948067580090348, 0.008218803138169877, 1.3414399845389282, 1.7843878034662675, -0.0034434592965103803, -0.8411340592696075, -1.3929314198469678, -1.3519230900200534, 1.0265522515142744, -1.0634263593280144]
# ---------------------------------------------
# Fidelity at Step 240: 0.9999905424814088
# Parameters at Step 240: [0.5949800696134573, 0.007805234926347038, 1.3406572208595793, 1.784297829954337, -0.003224236022222644, -0.8413117231071137, -1.3936942408910464, -1.3515670053333104, 1.0264749143994978, -1.063658562179518]
# ---------------------------------------------
# Fidelity at Step 241: 0.9999885826045259
# Parameters at Step 241: [0.5949962815780454, 0.007512344113460753, 1.3394914506452924, 1.7841052004053317, -0.002924095968009788, -0.8414742961671783, -1.3945856033212118, -1.351167893492728, 1.0264217099709094, -1.063775584532843]
# ---------------------------------------------
# Fidelity at Step 242: 0.9999883606888723
# Parameters at Step 242: [0.5948245777087302, 0.0073659552955860266, 1.3380419916583655, 1.7838336882096821, -0.0024508502494072974, -0.8417114284919093, -1.3956439248697783, -1.3507514127294864, 1.026415173035054, -1.063672136033635]
# ---------------------------------------------
# Fidelity at Step 243: 0.9999861511179472
# Parameters at Step 243: [0.5945540945025368, 0.00734998275974998, 1.3368001062312966, 1.783572564000042, -0.0017556868828075458, -0.8421690874043849, -1.3968405142949893, -1.3503339085157124, 1.0264641452937509, -1.0632846088543018]
# ---------------------------------------------
# Fidelity at Step 244: 0.9999911580594937
# Parameters at Step 244: [0.5941391817848185, 0.0074079919451457265, 1.3352179770944113, 1.7832192624843257, -0.0008842166745967229, -0.8426835128816891, -1.3980958687205038, -1.3499101993315223, 1.0265699236460317, -1.0626600448263706]
# ---------------------------------------------
# Fidelity at Step 245: 0.9999898212508886
# Parameters at Step 245: [0.593766426167929, 0.007461086818384863, 1.3337732245594254, 1.782883782463429, -3.520519593994305e-05, -0.843206839122646, -1.399266288825347, -1.3494930655712347, 1.0266859103435684, -1.0620202439793718]
# ---------------------------------------------
# Fidelity at Step 246: 0.9999856379788754
# Parameters at Step 246: [0.5935274087548836, 0.007438726797372935, 1.3325679928963028, 1.782559960006288, 0.0007574560546551006, -0.8437377803837636, -1.4002772006975712, -1.3490425506377353, 1.0268113796108818, -1.061391656073096]
# ---------------------------------------------
# Fidelity at Step 247: 0.9999883239594803
# Parameters at Step 247: [0.5933535967481981, 0.007343159535556303, 1.3311877546850395, 1.7821986098049338, 0.001414719058850634, -0.8441183829929937, -1.4011219020749626, -1.3485820724101096, 1.0269495375873934, -1.0608717265285883]
# ---------------------------------------------
# Fidelity at Step 248: 0.9999900698059424
# Parameters at Step 248: [0.5934131491707247, 0.00715239199806403, 1.330460118591667, 1.7819551746946658, 0.0018896686076640776, -0.8444998490167378, -1.4017840398455415, -1.3481123911230533, 1.027081520629623, -1.0605114227579961]
# ---------------------------------------------
# Fidelity at Step 249: 0.9999885539374775
# Parameters at Step 249: [0.5935062758562192, 0.006879797546502684, 1.3294961145063628, 1.781681104239747, 0.002145344361266706, -0.8446309996057094, -1.4022846754026792, -1.3476699145790654, 1.027214005311259, -1.0603643602112434]
# ---------------------------------------------
# Fidelity at Step 250: 0.9999895300027376
# Parameters at Step 250: [0.5936777625599198, 0.006599806437940118, 1.3289779761618017, 1.7815249122821493, 0.002275726765265315, -0.8447460865153723, -1.402721063135856, -1.347291975458325, 1.0273548112675543, -1.060335920350216]
# ---------------------------------------------
# Fidelity at Step 251: 0.9999897825367936
# Parameters at Step 251: [0.593862963547294, 0.006332944207955403, 1.3287970691222821, 1.781478031764532, 0.0023094209749266086, -0.8448313009439179, -1.4031274797944795, -1.347011456821376, 1.0275121600687303, -1.0604008336432516]
# ---------------------------------------------
# Fidelity at Step 252: 0.9999896387188143
# Parameters at Step 252: [0.5939058088165182, 0.006097596632468126, 1.3282866949915018, 1.7814292255724553, 0.002266040513625809, -0.8447432600265276, -1.4035341399018701, -1.3468518581022193, 1.0276909463279056, -1.0605455629161815]
# ---------------------------------------------
# Fidelity at Step 253: 0.9999887068802289
# Parameters at Step 253: [0.59387067843434, 0.005906120856296461, 1.32777561050331, 1.781422298638998, 0.0022220968178467077, -0.8446309645301731, -1.4039513992891786, -1.3467865416862939, 1.0278852875637112, -1.060682381012993]
# ---------------------------------------------
# Fidelity at Step 254: 0.9999912082647311
# Parameters at Step 254: [0.5937380705822191, 0.005765497119177365, 1.3275153758191351, 1.7815164473039649, 0.0022604555415742266, -0.8445948794870505, -1.4044082361672299, -1.3468767773236676, 1.0281166962573705, -1.0607094184704775]
# ---------------------------------------------
# Fidelity at Step 255: 0.9999913261847386
# Parameters at Step 255: [0.5936578158955296, 0.005640217289234351, 1.3278276470580708, 1.7817332040436074, 0.002350305575703594, -0.8447120575987057, -1.4048494338234274, -1.3470353050263304, 1.0283499935773348, -1.0606584350011485]
# ---------------------------------------------
# Fidelity at Step 256: 0.9999884429061434
# Parameters at Step 256: [0.5937387657258045, 0.005466537421689071, 1.3289761481135578, 1.782106391856814, 0.0024479747093570704, -0.8450064258141284, -1.4051979002680197, -1.3472456516461127, 1.0285697676625802, -1.0605701923627995]
# ---------------------------------------------
# Fidelity at Step 257: 0.9999891831757101
# Parameters at Step 257: [0.5937851969295652, 0.005196756021029536, 1.3294245827802798, 1.7823535410337858, 0.0024088866250306104, -0.8450219960253053, -1.4053849083966263, -1.3474686685044446, 1.0287637259980984, -1.0606193114296032]
# ---------------------------------------------
# Fidelity at Step 258: 0.9999883857699693
# Parameters at Step 258: [0.5940953819353095, 0.004791092327584531, 1.3305076450871693, 1.7826956874110953, 0.0022010109558538555, -0.8450446134557336, -1.4053616165625418, -1.3476661487748742, 1.0289057839928066, -1.060837114189176]
# ---------------------------------------------
# Fidelity at Step 259: 0.9999904796568865
# Parameters at Step 259: [0.5944964245890103, 0.004190556706255601, 1.3309402564751298, 1.7829042001339033, 0.0016739683970465424, -0.8446549383648442, -1.4050618203636571, -1.347832416441113, 1.0289875015471048, -1.0613979396555118]
# ---------------------------------------------
# Fidelity at Step 260: 0.9999895446462769
# Parameters at Step 260: [0.5948290412691644, 0.003611728925779746, 1.3310376718637225, 1.7830523350642844, 0.0011174288273581834, -0.844163954881152, -1.4047616223944055, -1.348002212413368, 1.0290679993304543, -1.0619883701766766]
# ---------------------------------------------
# Fidelity at Step 261: 0.9999875391094621
# Parameters at Step 261: [0.5951669123789316, 0.0031310396097699476, 1.331789961880425, 1.7833168983733616, 0.000681157510524022, -0.8439073978707629, -1.4045768433706556, -1.3482025491651533, 1.0291692895659659, -1.0624320524464426]
# ---------------------------------------------
# Fidelity at Step 262: 0.9999906306246412
# Parameters at Step 262: [0.5952821276050037, 0.002775226252607156, 1.3320799556119916, 1.7834941541314508, 0.0003834410190620781, -0.8436445142949486, -1.404537104631228, -1.3484310545032105, 1.0292994237971727, -1.062717230258624]
# ---------------------------------------------
# Fidelity at Step 263: 0.9999918475423802
# Parameters at Step 263: [0.5955068278307881, 0.002537066041621898, 1.333574274178206, 1.783890447565905, 0.00023900989459528507, -0.8437857783409928, -1.4046369850364349, -1.3486683103850958, 1.0294464368228422, -1.0628231123748586]
# ---------------------------------------------
# Fidelity at Step 264: 0.9999912431486708
# Parameters at Step 264: [0.5955932148174262, 0.002386430386773306, 1.3347469769816265, 1.7842223336425767, 0.00016586603537737705, -0.8439075571128305, -1.4048322277067897, -1.3489091954001837, 1.0296144174254342, -1.0628521286989452]
# ---------------------------------------------
# Fidelity at Step 265: 0.9999872555933262
# Parameters at Step 265: [0.5956772571815188, 0.002257828793900233, 1.33578997508499, 1.7845332412487767, 5.7699104043846676e-05, -0.843988917135868, -1.4050290899242444, -1.3491239263836743, 1.029776966527158, -1.0629292663204126]
# ---------------------------------------------
# Fidelity at Step 266: 0.9999902967524766
# Parameters at Step 266: [0.5957507501519791, 0.002068783031136052, 1.3361309650648827, 1.7847050976583787, -0.00020519692950900033, -0.8438051992942112, -1.4051204013078076, -1.3492778991144794, 1.029910772991523, -1.063194799291699]
# ---------------------------------------------
# Fidelity at Step 267: 0.9999900150551528
# Parameters at Step 267: [0.5960916717608045, 0.001792095457352826, 1.3372109116920048, 1.7849799192210827, -0.0005592198163297445, -0.8437340950967995, -1.405069258993, -1.349362115887662, 1.0299998148528058, -1.0635698848235604]
# ---------------------------------------------
# Fidelity at Step 268: 0.9999901632430063
# Parameters at Step 268: [0.5964938257348139, 0.0014818649446677085, 1.3381974555803442, 1.7852420652228995, -0.0009791586947237767, -0.8436098082259162, -1.4049470696253095, -1.3494147180174358, 1.0300676204942645, -1.0640370491853084]
# ---------------------------------------------
# Fidelity at Step 269: 0.9999903697305814
# Parameters at Step 269: [0.5970118277779763, 0.0011219116396615045, 1.3393961902403144, 1.7855517098796023, -0.0014572778116721133, -0.8435008592080591, -1.4047356296715312, -1.34945268329791, 1.030103626036143, -1.064599942972532]
# ---------------------------------------------
# Fidelity at Step 270: 0.9999899367559645
# Parameters at Step 270: [0.5975015615937334, 0.0007479805980981278, 1.340286482112039, 1.785845478008108, -0.001971802490065613, -0.8433015328756424, -1.4044857384421614, -1.349519912456144, 1.030121254199464, -1.0652548452467254]
# ---------------------------------------------
# Fidelity at Step 271: 0.9999900678666692
# Parameters at Step 271: [0.5977691910068161, 0.0004258803750859548, 1.3403369377028158, 1.7860300864203622, -0.00241999652252484, -0.8429562359241124, -1.4042866622671257, -1.3496564051307558, 1.0301453455775575, -1.0658908588260874]
# ---------------------------------------------
# Fidelity at Step 272: 0.9999874390289892
# Parameters at Step 272: [0.5978043725676705, 0.00022655979324939457, 1.3399444553783821, 1.7861742417428905, -0.0026566307691544863, -0.8426747036087735, -1.4042431189145252, -1.3498703592182126, 1.030191903838473, -1.066332331123344]
# ---------------------------------------------
# Fidelity at Step 273: 0.9999852404955787
# Parameters at Step 273: [0.5976018832312114, 0.00019732576339516204, 1.3394361789334397, 1.7863019114132654, -0.002520678451761485, -0.84265540315487, -1.404425981261165, -1.3501473692207824, 1.0302708567239884, -1.0663787728334129]
# ---------------------------------------------
# Fidelity at Step 274: 0.999991202216739
# Parameters at Step 274: [0.5970193753976241, 0.00039122226824866557, 1.3383976223932375, 1.7862643127485072, -0.0018034291770251108, -0.8429631976270391, -1.404918488619218, -1.350438857017038, 1.0303917093774562, -1.0657749577970645]
# ---------------------------------------------
# Fidelity at Step 275: 0.9999868730098144
# Parameters at Step 275: [0.5964002312016697, 0.0006386686899270823, 1.3372686174563626, 1.7861038705095236, -0.0008960319344269613, -0.843421216990226, -1.4054938764832425, -1.3506018283136876, 1.0304883342850093, -1.0649185421353626]
# ---------------------------------------------
# Fidelity at Step 276: 0.9999871021993375
# Parameters at Step 276: [0.5959968024125873, 0.0008543240692973162, 1.3365676350952018, 1.78585275692967, 1.8806409563042127e-05, -0.8440244978214125, -1.406039485419777, -1.3505103596775951, 1.030531301231813, -1.0639819437643518]
# ---------------------------------------------
# Fidelity at Step 277: 0.9999881185344416
# Parameters at Step 277: [0.5957871447329421, 0.0009511001755770216, 1.3354265535238314, 1.785356367154387, 0.0006376402592192541, -0.8443413411173908, -1.4064349034510109, -1.350115097279099, 1.0304975653635302, -1.0633064271646853]
# ---------------------------------------------
# Fidelity at Step 278: 0.9999906556766786
# Parameters at Step 278: [0.5956367530266714, 0.0009847656361054722, 1.3336692706093733, 1.7846757019204984, 0.0009634945439259536, -0.8443164401766956, -1.4067434739871398, -1.3495484340103792, 1.0304263826065396, -1.062916102860791]
# ---------------------------------------------
# Fidelity at Step 279: 0.9999894442481566
# Parameters at Step 279: [0.5955525976517577, 0.001028907117684992, 1.3321084157952712, 1.78401261978164, 0.001164747539451552, -0.8442449016140025, -1.4070533814616768, -1.3489321291968648, 1.0303594700642262, -1.0626365307718746]
# ---------------------------------------------
# Fidelity at Step 280: 0.999989762379587
# Parameters at Step 280: [0.5955296466388412, 0.0011362950193639534, 1.331190926052615, 1.783475476175838, 0.001338438536987609, -0.8442954311854832, -1.407434080324662, -1.3483249178832246, 1.0303222541973678, -1.0623605557760991]
# ---------------------------------------------
# Fidelity at Step 281: 0.9999865518287322
# Parameters at Step 281: [0.5955545184217964, 0.0013231465148655075, 1.3311625356047956, 1.78314061313324, 0.0015410837043886285, -0.8445508065681544, -1.4078983402622387, -1.3477988954190034, 1.0303360583279675, -1.0620348186940016]
# ---------------------------------------------
# Fidelity at Step 282: 0.9999885658815401
# Parameters at Step 282: [0.59561242446879, 0.0015227890917823526, 1.3317033960996465, 1.7829985284014171, 0.0016868396520360817, -0.8448606401220743, -1.4083455925713972, -1.3474149162023767, 1.0303933476864877, -1.0617804808654006]
# ---------------------------------------------
# Fidelity at Step 283: 0.9999886200387803
# Parameters at Step 283: [0.5957255375318595, 0.0016497018583908577, 1.3324425509618274, 1.783020973690081, 0.001659143486992421, -0.8450490961123054, -1.40864684396356, -1.347207439471713, 1.030483142356627, -1.0617566525637303]
# ---------------------------------------------
# Fidelity at Step 284: 0.999989658576399
# Parameters at Step 284: [0.5959609043974802, 0.0015499456597282543, 1.3331254019793428, 1.7832049882435026, 0.0012922693222957348, -0.8449006011213077, -1.4085712839597364, -1.3472519572169899, 1.030580980547529, -1.0621844020557074]
# ---------------------------------------------
# Fidelity at Step 285: 0.9999894649394983
# Parameters at Step 285: [0.5960238507252131, 0.0013189631337330305, 1.332539860511835, 1.783301927654444, 0.0007433866489563661, -0.8442854509445332, -1.4082711162144352, -1.347499484338802, 1.0306917016644268, -1.0628767596250766]
# ---------------------------------------------
# Fidelity at Step 286: 0.9999849933233228
# Parameters at Step 286: [0.5960627205395083, 0.0010609043148659702, 1.3322430338627351, 1.7835243539681294, 0.00034286537921595847, -0.8438166939869268, -1.4079035520248064, -1.3479187962707286, 1.0308251683085796, -1.0634261212436664]
# ---------------------------------------------
# Fidelity at Step 287: 0.9999879639253013
# Parameters at Step 287: [0.5956899682731177, 0.0009154963726379061, 1.3311635607559102, 1.7836488028761535, 0.0004225855939777025, -0.8434963287575707, -1.407661671454534, -1.3485458555802754, 1.0310382483211102, -1.0634353812611523]
# ---------------------------------------------
# Fidelity at Step 288: 0.9999861925231072
# Parameters at Step 288: [0.5951550758551492, 0.0009283519295401207, 1.3304978498841862, 1.7837725159484372, 0.0010039543254848613, -0.8436443722100078, -1.4076374485189134, -1.3491935214571396, 1.0313008332555351, -1.0628211566014651]
# ---------------------------------------------
# Fidelity at Step 289: 0.9999899160547191
# Parameters at Step 289: [0.5943319215746347, 0.0011024983291010803, 1.3290715959896324, 1.7835802504401053, 0.001939345423603396, -0.843900829201413, -1.407855162572034, -1.3496824841474095, 1.0315948437820224, -1.0616977278993973]
# ---------------------------------------------
# Fidelity at Step 290: 0.9999899406341382
# Parameters at Step 290: [0.593879437917849, 0.0013024934116531413, 1.3292359330494528, 1.7834789968444222, 0.0028823150174223234, -0.8445658740971359, -1.408146953358147, -1.349884604304381, 1.0318369775129694, -1.060448351856046]
# ---------------------------------------------
# Fidelity at Step 291: 0.999987701120682
# Parameters at Step 291: [0.5938227296665606, 0.001395519180852045, 1.3297453114468043, 1.7831820333246586, 0.0033488197044209502, -0.8449975130724344, -1.408343231895112, -1.3496242833250376, 1.0319721784111542, -1.0595916756870527]
# ---------------------------------------------
# Fidelity at Step 292: 0.9999895591448354
# Parameters at Step 292: [0.594030472531836, 0.001275204988229672, 1.3289723092333423, 1.782426923070065, 0.002887692115341309, -0.8444637224643667, -1.4082817262878289, -1.3488816570547606, 1.031993478449861, -1.05965962408314]
# ---------------------------------------------
# Fidelity at Step 293: 0.9999892316742227
# Parameters at Step 293: [0.5944107428465928, 0.0011561190186811987, 1.3287859463486562, 1.7817881428685853, 0.002200021375265551, -0.8439060424703603, -1.408222823638352, -1.3480851698187917, 1.0320067372168447, -1.0599710816436352]
# ---------------------------------------------
# Fidelity at Step 294: 0.9999899316350848
# Parameters at Step 294: [0.5947992758125891, 0.0011229866430632221, 1.3293308466414726, 1.7813714966253529, 0.001521404070670901, -0.8435042668478548, -1.4082559739400768, -1.3474186267317105, 1.0320785139540976, -1.0602894796597422]
# ---------------------------------------------
# Fidelity at Step 295: 0.9999907505093709
# Parameters at Step 295: [0.5951524351616371, 0.0011663624769124455, 1.3305982924157018, 1.7812371317727713, 0.000988489193023588, -0.8433565124020631, -1.4083561337808042, -1.3469875194740446, 1.0322186618691107, -1.0605020314513662]
# ---------------------------------------------
# Fidelity at Step 296: 0.9999881552104773
# Parameters at Step 296: [0.5953267513068672, 0.0012511861842933924, 1.3319337927383572, 1.7813510571695326, 0.0006205158363415861, -0.8433069686202627, -1.4084495824717296, -1.3469315332264586, 1.032453125529733, -1.0606305908474725]
# ---------------------------------------------
# Fidelity at Step 297: 0.9999886313248761
# Parameters at Step 297: [0.5952913194747004, 0.001267158715709055, 1.332815657793257, 1.7816823291053976, 0.00036993673278882546, -0.8431800027361622, -1.4083585518645125, -1.3473569073207656, 1.0327821979296068, -1.0607704079399491]
# ---------------------------------------------
# Fidelity at Step 298: 0.999989473771392
# Parameters at Step 298: [0.595385647586571, 0.0011875072416154175, 1.3342622514353542, 1.7822505911816997, 0.00014488435321944392, -0.8431903850464094, -1.408104846362179, -1.34797102144586, 1.0331094484959207, -1.0609579558064077]
# ---------------------------------------------
# Fidelity at Step 299: 0.9999884925835485
# Parameters at Step 299: [0.595518478464883, 0.0009458696659425904, 1.3352816336854778, 1.7828634413147242, -0.0001669195299291341, -0.8430304233164108, -1.4076043914415428, -1.34873729802631, 1.0334059085675407, -1.0613246722884933]
# ---------------------------------------------
#
######################################################################
# With the learned parameters, we construct a visual representation
# of the Hamiltonian to which they correspond and compare it to the
# target Hamiltonian, and the initial guessed Hamiltonian:
#
# Build the Hamiltonian learned by the QGRNN and the one given by the initial
# random guess, then show them side by side with the target for comparison.
new_ham_matrix = create_hamiltonian_matrix(
    qubit_number, nx.complete_graph(qubit_number), [qgrnn_params[0:6], qgrnn_params[6:10]]
)
init_ham = create_hamiltonian_matrix(
    qubit_number, nx.complete_graph(qubit_number), [init[0:6], init[6:10]]
)

fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(6, 6))
panels = [
    (ham_matrix, "Target Hamiltonian"),
    (init_ham, "Initial Guessed Hamiltonian"),
    (new_ham_matrix, "Learned Hamiltonian"),
]
for axis, (matrix, title) in zip(axes, panels):
    axis.matshow(matrix, vmin=-7, vmax=7, cmap='hot')
    axis.set_title(title, y=1.13)

plt.subplots_adjust(wspace=0.3, hspace=0.3)
plt.draw()
plt.pause(0.001)
input("Open Ports --> Open Preview or Browser --> push enter to continue")
######################################################################
# .. figure:: ../demonstrations/qgrnn/hamiltonian_comparison.png
# :width: 100%
# :align: center
#
######################################################################
# These images look very similar, indicating that the QGRNN has done a good job
# learning the target Hamiltonian.
#
# We can also look
# at the exact values of the target and learned parameters.
# Recall how the target
# interaction graph has :math:`4` edges while the complete graph has :math:`6`.
# Thus, as the QGRNN converges to the optimal solution, the weights corresponding to
# edges :math:`(1, 3)` and :math:`(2, 0)` in the complete graph should go to :math:`0`, as
# this indicates that they have no effect, and effectively do not exist in the learned
# Hamiltonian.
# We first pick out the weights of edges (1, 3) and (2, 0)
# and then remove them from the list of target parameters
# Extract the weights the QGRNN should drive to zero: the complete graph has
# two edges, (1, 3) and (2, 0), that do not exist in the target cycle graph.
# Removing them aligns the remaining learned weights with the target list.
qgrnn_params = list(qgrnn_params)
zero_weights = [qgrnn_params[1], qgrnn_params[4]]
del qgrnn_params[1]
del qgrnn_params[3]  # originally index 4; shifted down by the first deletion
######################################################################
# Then, we print all of the weights:
#
target_params = matrix_params[0] + matrix_params[1]
print("Target parameters \tLearned parameters")
# Idiom fix: pair the two lists directly instead of indexing via range(len()).
for target, learned in zip(target_params, qgrnn_params):
    print(f"{target}\t\t\t{learned}")
print(f"\nNon-Existing Edge Parameters: {zero_weights}")
######################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Target parameters Learned parameters
# 0.56 0.5958460805811168
# 1.24 1.33650946569539
# 1.67 1.7835767870051
# -0.79 -0.8428389553658877
# -1.44 -1.4068803387384392
# -1.43 -1.3495612547209628
# 1.18 1.03363436004853
# -0.93 -1.0618808945949776
#
# Non-Existing Edge Parameters: [0.000540998047389223, -0.0005928152653526596]
#
######################################################################
# The weights of edges :math:`(1, 3)` and :math:`(2, 0)`
# are very close to :math:`0`, indicating we have learned the cycle graph
# from the complete graph. In addition, the remaining learned weights
# are fairly close to those of the target Hamiltonian.
# Thus, the QGRNN is functioning properly, and has learned the target
# Ising Hamiltonian to a high
# degree of accuracy!
#
######################################################################
# References
# ----------
#
# 1. Verdon, G., McCourt, T., Luzhnica, E., Singh, V., Leichenauer, S., &
# Hidary, J. (2019). Quantum Graph Neural Networks. arXiv preprint
# `arXiv:1909.12264 <https://arxiv.org/abs/1909.12264>`__.
#
|
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
__all__ = ['login_required', 'staff_required']
def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    """
    View decorator requiring an authenticated user whose email is confirmed.

    Redirects to the log-in page otherwise.
    Based on django.contrib.auth.decorators.login_required
    """
    check = user_passes_test(
        lambda u: u.is_authenticated and u.is_email_confirmed,
        login_url=login_url,
        redirect_field_name=redirect_field_name,
    )
    # Support both @login_required and @login_required(...) usage.
    return check(function) if function else check
def staff_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    """
    View decorator requiring a staff user, redirecting to the log-in
    page if necessary.
    """
    check = user_passes_test(
        lambda u: u.is_staff,
        login_url=login_url,
        redirect_field_name=redirect_field_name,
    )
    # Support both @staff_required and @staff_required(...) usage.
    return check(function) if function else check
|
import numpy
import math
from matplotlib import pyplot
# Two consecutive frames of the "Teddy" optical-flow dataset.
img0 = pyplot.imread('./data/Teddy/frame10.png')
img1 = pyplot.imread('./data/Teddy/frame11.png')
# NOTE(review): this unpacking assumes a single-channel (grayscale) image;
# a colour PNG would load with shape (H, W, 3) and fail here -- confirm.
M, N = img0.shape
print(img0.shape)
W = 25  # side length (pixels) of each Lucas-Kanade estimation window
def lucas_kanade(img0, img1, W):
    """
    Estimate optical flow between two grayscale frames with the
    Lucas-Kanade method and draw the result as a quiver plot over img0.

    img0, img1 -- consecutive frames as 2-D arrays of equal shape
    W          -- side length (pixels) of each square estimation window

    For every W x W window a 2x2 least-squares system A v = b is built
    from the spatial gradients (Ix, Iy) and the temporal difference It,
    giving one flow vector per window.
    """
    # Bug fix: derive the image size from the input images instead of
    # silently depending on the module-level globals M and N.
    M, N = img0.shape
    It = img1 - img0               # temporal derivative
    Ix, Iy = numpy.gradient(img0)  # spatial derivatives (row, column)
    pyplot.imshow(img0, cmap='gray')
    quiv = []
    for i in range(0, M - W // 2, W):
        for j in range(0, N - W // 2, W):
            # Arrow anchor: the centre of the current window.
            sX = i + W // 2
            sY = j + W // 2
            A = [[0., 0.], [0., 0.]]
            b = [[0.], [0.]]
            for m in range(i, min(M, i + W)):
                for n in range(j, min(N, j + W)):
                    A[0][0] += Ix[m][n] ** 2
                    A[0][1] += Ix[m][n] * Iy[m][n]
                    A[1][0] += Iy[m][n] * Ix[m][n]
                    A[1][1] += Iy[m][n] ** 2
                    b[0][0] += -Ix[m][n] * It[m][n]
                    b[1][0] += -Iy[m][n] * It[m][n]
            # NOTE(review): A is singular on textureless windows and inv()
            # raises LinAlgError there -- consider numpy.linalg.pinv.
            V = numpy.linalg.inv(A).dot(b)
            # Image rows grow downward, so negate the row component to get
            # a conventional upward-positive arrow.
            quiv.append([sY, sX, V[1][0], -V[0][0]])
    quiv = numpy.array(quiv)
    pyplot.quiver(quiv[:, 0], quiv[:, 1], quiv[:, 2], quiv[:, 3], color=(1, 0, 0))
    pyplot.show()
lucas_kanade(img0, img1, W)  # run the estimator on the loaded frame pair
import time

# Bug fix: the timestamp was stored in a variable named "noe" but printed
# via the undefined name "now", raising NameError at runtime.
now = time.time()
print(time.ctime(now))
|
from django.apps import AppConfig
class CeleryApiConfig(AppConfig):
    """Django application configuration for the ``celery_api`` app."""

    name = 'celery_api'

    def ready(self):
        # Imported purely for its side effects: loading the module at app
        # start-up registers the app's signal handlers.
        from . import signal_handler
|
# -*- coding: utf-8 -*-
from contextlib import nested
import json
import mock
import testify as T
from urlparse import urlunsplit
from py_razor_client.razor_client import RazorClient
class RazorClientTestCase(T.TestCase):
    """Shared fixture for RazorClient tests.

    Builds a client against a fully mocked ``requests`` module so no real
    HTTP traffic is possible.  Python 2 code: uses testify (``T``) and the
    ``mock`` backport.
    """

    @T.setup_teardown
    def create_razor_client(self):
        self.hostname = "some_host"
        self.port = "some_port"
        requests_tgt = "py_razor_client.razor_client.requests"
        requests_mock = mock.patch(requests_tgt)
        # The patch stays active for the whole test (testify runs the code
        # after the yield as teardown, which unwinds the with-block).
        with requests_mock as self.mock_requests:
            self.razor_client = RazorClient(self.hostname, self.port, True)
            # Discard calls made during construction; tests only assert on
            # calls they trigger themselves.
            self.mock_requests.reset_mock()
            yield

    def make_json_response(self, expected_response):
        # Stub of a requests response whose .json() returns the payload.
        mock_response = mock.Mock()
        mock_response.json.return_value = expected_response
        return mock_response

    def make_text_response(self, expected_response):
        # Stub of a requests response exposing the payload as .text.
        mock_response = mock.Mock()
        mock_response.text = expected_response
        return mock_response
class ConstructorTest(RazorClientTestCase):
    """RazorClient() stores its endpoint and honours lazy_discovery."""

    @T.setup_teardown
    def mock_discover_methods(self):
        # Stub out discovery so constructing a client makes no API calls.
        disc_tgt = "py_razor_client.razor_client.RazorClient.discover_methods"
        with mock.patch(disc_tgt) as self.mock_discover_methods:
            yield

    def test_constructor_attributes(self):
        razor_client = RazorClient(self.hostname, self.port)
        T.assert_equal(razor_client.hostname, self.hostname)
        # The port is normalised to a string.
        T.assert_equal(razor_client.port, str(self.port))

    def test_lazy_discovery_on(self):
        RazorClient(self.hostname, self.port, lazy_discovery=True)
        T.assert_equal(self.mock_discover_methods.call_count, 0)

    def test_lazy_discovery_off(self):
        RazorClient(self.hostname, self.port, lazy_discovery=False)
        self.mock_discover_methods.assert_called_once_with()
class DiscoverMethodsTest(RazorClientTestCase):
    """discover_methods() binds one method per collection and per command
    advertised by the server's discovery document."""

    def test_discover_methods(self):
        collections = ('one', 'two')
        commands = ('red', 'blue')
        mock_response = self.make_json_response({
            "collections": collections,
            "commands": commands
        })
        self.mock_requests.get.return_value = mock_response
        # contextlib.nested is Python-2-only; it enters both patches at once.
        bind_mocks = (mock.patch.object(self.razor_client, "_bind_collection"),
                      mock.patch.object(self.razor_client, "_bind_command"))
        ctxt = nested(*bind_mocks)
        with ctxt as (mock_bind_collection, mock_bind_command):
            self.razor_client.discover_methods()
        T.assert_equal(mock_bind_collection.call_count, 2)
        for collection in collections:
            mock_bind_collection.assert_any_call(collection)
        T.assert_equal(mock_bind_command.call_count, 2)
        for command in commands:
            mock_bind_command.assert_any_call(command)
class GetPathTest(RazorClientTestCase):
    """get_path() GETs a relative or absolute URL and returns either the
    JSON payload or the raw text, depending on its second argument.

    The four tests cover the {relative, absolute} x {json, text} matrix.
    """

    def test_get_path_relative_url_with_json(self):
        expected_response = mock.sentinel.response
        mock_response = self.make_json_response(expected_response)
        self.mock_requests.get.return_value = mock_response
        test_path = "/api/collections/nodes"
        # A relative path is expanded to http://<host>:<port><path>.
        expected_host = ":".join((self.hostname, self.port))
        expected_path = urlunsplit(("http", expected_host, test_path, "", ""))
        actual_response = self.razor_client.get_path(test_path, True)
        T.assert_equal(expected_response, actual_response)
        self.mock_requests.get.assert_called_once_with(expected_path)

    def test_get_path_relative_url_with_text(self):
        expected_response = mock.sentinel.response
        mock_response = self.make_text_response(expected_response)
        self.mock_requests.get.return_value = mock_response
        test_path = "/api/collections/nodes"
        expected_host = ":".join((self.hostname, self.port))
        expected_path = urlunsplit(("http", expected_host, test_path, "", ""))
        actual_response = self.razor_client.get_path(test_path, False)
        T.assert_equal(expected_response, actual_response)
        self.mock_requests.get.assert_called_once_with(expected_path)

    def test_get_path_absolute_url_with_json(self):
        expected_response = mock.sentinel.response
        mock_response = self.make_json_response(expected_response)
        self.mock_requests.get.return_value = mock_response
        # An already-absolute URL must be requested unchanged.
        test_path = "http://%s:%s/api/collections/nodes" % (self.hostname,
                                                            self.port)
        expected_path = test_path
        actual_response = self.razor_client.get_path(test_path, True)
        T.assert_equal(expected_response, actual_response)
        self.mock_requests.get.assert_called_once_with(expected_path)

    def test_get_path_absolute_url_with_text(self):
        expected_response = mock.sentinel.response
        mock_response = self.make_text_response(expected_response)
        self.mock_requests.get.return_value = mock_response
        test_path = "http://%s:%s/api/collections/nodes" % (self.hostname,
                                                            self.port)
        expected_path = test_path
        actual_response = self.razor_client.get_path(test_path, False)
        T.assert_equal(expected_response, actual_response)
        self.mock_requests.get.assert_called_once_with(expected_path)
class PostDataTest(RazorClientTestCase):
    """post_data() POSTs a JSON body (built from its kwargs) to a relative
    or absolute URL with a Content-Type: application/json header."""

    def test_relative_path(self):
        expected_response = mock.sentinel.response
        mock_response = self.make_json_response(expected_response)
        self.mock_requests.post.return_value = mock_response
        test_path = "/api/commands/delete_node"
        expected_host = ":".join((self.hostname, self.port))
        expected_path = urlunsplit(("http", expected_host, test_path, "", ""))
        expected_headers = {
            "Content-Type": "application/json"
        }
        # With no kwargs, the body is an empty JSON object.
        expected_data = "{}"
        actual_response = self.razor_client.post_data(test_path)
        T.assert_equal(expected_response, actual_response)
        self.mock_requests.post.assert_called_once_with(
            expected_path,
            headers=expected_headers,
            data=expected_data)

    def test_absolute_path(self):
        expected_response = mock.sentinel.response
        mock_response = self.make_json_response(expected_response)
        self.mock_requests.post.return_value = mock_response
        # An already-absolute URL must be posted to unchanged.
        test_path = "http://%s:%s/api/commands/delete_node" % (self.hostname,
                                                               self.port)
        expected_path = test_path
        expected_headers = {
            "Content-Type": "application/json"
        }
        expected_data = "{}"
        actual_response = self.razor_client.post_data(test_path)
        T.assert_equal(expected_response, actual_response)
        self.mock_requests.post.assert_called_once_with(
            expected_path,
            headers=expected_headers,
            data=expected_data)

    def test_data(self):
        # Keyword arguments are serialised into the JSON request body.
        data = {
            "a": 1,
            "b": 2
        }
        expected_data = json.dumps(data)
        expected_headers = {
            "Content-Type": "application/json"
        }
        test_url = "http://%s:%s/irrelevant" % (self.hostname, self.port)
        self.razor_client.post_data(test_url, **data)
        self.mock_requests.post.assert_called_once_with(
            test_url,
            headers=expected_headers,
            data=expected_data)
class SanitizeCommandNameTest(RazorClientTestCase):
    """sanitize_command_name turns dashed command names into identifiers."""

    def test_sanitizes_dashes(self):
        sanitized = self.razor_client.sanitize_command_name("something-with-dashes")
        T.assert_equal("something_with_dashes", sanitized)
class CoerceToFullUrlTest(RazorClientTestCase):
    """_coerce_to_full_url expands relative paths and leaves absolute URLs alone."""

    def test_creates_full_path(self):
        result = self.razor_client._coerce_to_full_url("/api")
        T.assert_equal("http://%s:%s/api" % (self.hostname, self.port), result)

    def test_absolute_path_untouched(self):
        absolute = "http://%s:%s/api" % (self.hostname, self.port)
        T.assert_equal(absolute, self.razor_client._coerce_to_full_url(absolute))
class MakeNetLocTest(RazorClientTestCase):
    """_make_netloc joins hostname and port into a network location string."""

    def test_make_netloc(self):
        netloc = self.razor_client._make_netloc()
        T.assert_equal("%s:%s" % (self.hostname, self.port), netloc)
class MakeRazorUrlTest(RazorClientTestCase):
    """_make_razor_url prefixes a path with the server's http base URL."""

    def test_make_razor_url(self):
        url = self.razor_client._make_razor_url("/api")
        T.assert_equal("http://%s:%s/api" % (self.hostname, self.port), url)
class BindCollectionTest(RazorClientTestCase):
    """_bind_collection attaches a method named after the collection that
    fetches it via _get_collection."""

    @T.setup_teardown
    def mock_list_collection(self):
        tgt = "py_razor_client.razor_client.RazorClient._get_collection"
        with mock.patch(tgt) as self.mock_list_collection:
            yield

    def test_bind_collection_vanilla(self):
        collection = "nodes"
        collection_id = "http://%s:%s/api/collections/%s" % (self.hostname,
                                                             self.port,
                                                             collection)
        test_collection = {
            "name": collection,
            "id": collection_id
        }
        expected_method = "nodes"
        self.razor_client._bind_collection(test_collection)
        # The bound method must exist and delegate to _get_collection with
        # the collection's id URL.
        actual_method = getattr(self.razor_client, expected_method, None)
        T.assert_not_equal(actual_method, None)
        actual_method()
        self.mock_list_collection.assert_called_once_with(collection_id)
class BindCommandTest(RazorClientTestCase):
    """_bind_command attaches a method per server command, sanitizing the
    name (dashes become underscores) and delegating to _execute_command."""

    @T.setup_teardown
    def mock_list_collection(self):
        tgt = "py_razor_client.razor_client.RazorClient._execute_command"
        with mock.patch(tgt) as self.mock_execute_command:
            yield

    def test_bind_command(self):
        command = "unbind-node"
        command_id = "http://%s:%s/api/collections/%s" % (self.hostname,
                                                          self.port,
                                                          command)
        test_command = {
            "name": command,
            "id": command_id
        }
        # "unbind-node" is exposed as the attribute "unbind_node".
        expected_command_name = "unbind_node"
        self.razor_client._bind_command(test_command)
        actual_method = getattr(self.razor_client, expected_command_name, None)
        T.assert_not_equal(actual_method, None)
        actual_method()
        self.mock_execute_command.assert_called_once_with(command_id)
class BindMethodTest(RazorClientTestCase):
    """_bind_method attaches an arbitrary callable under the given name."""

    def test_bind_method(self):
        bound_callable = lambda: None
        self.razor_client._bind_method("test", bound_callable)
        # The exact callable object must now be reachable as an attribute.
        T.assert_equal(bound_callable, getattr(self.razor_client, "test", None))
class GetListCollectionTest(RazorClientTestCase):
    """_get_collection GETs the collection URL, optionally appending a
    single item's name as an extra path segment."""

    @T.setup_teardown
    def mock_get_path(self):
        with mock.patch.object(self.razor_client, "get_path") as mock_get_path:
            self.mock_get_path = mock_get_path
            yield

    def test_get_collection(self):
        test_url = mock.sentinel.url
        self.razor_client._get_collection(test_url)
        self.mock_get_path.assert_called_once_with(test_url)

    def test_get_collection_item(self):
        test_url = "/api"
        test_item = "item"
        self.razor_client._get_collection(test_url, test_item)
        # The item is joined onto the collection URL: /api/item
        expected_url = "/".join((test_url, test_item))
        self.mock_get_path.assert_called_once_with(expected_url)
class ExecuteCommandTest(RazorClientTestCase):
    """_execute_command POSTs its kwargs to the command URL, transforming
    certain argument names on the way (e.g. iso_url -> iso-url)."""

    @T.setup_teardown
    def mock_post_data(self):
        with mock.patch.object(self.razor_client, "post_data") as mock_pd:
            self.mock_post_data = mock_pd
            yield

    def test_execute_command(self):
        url = mock.sentinel.url
        args = {
            "a": mock.sentinel.a,
            "b": mock.sentinel.b
        }
        self.razor_client._execute_command(url, **args)
        self.mock_post_data.assert_called_once_with(url, **args)

    def test_execute_command_transforms_args(self):
        url = mock.sentinel.url
        args = {
            "a": mock.sentinel.a,
            "iso_url": mock.sentinel.b
        }
        # Python identifiers cannot contain dashes, so callers pass iso_url
        # and the client rewrites it to the API's iso-url key.
        expected_args = {
            "a": mock.sentinel.a,
            "iso-url": mock.sentinel.b
        }
        self.razor_client._execute_command(url, **args)
        self.mock_post_data.assert_called_once_with(url, **expected_args)
|
import discord
from discord.ext import commands
import time
class Management:
    """Owner-only bot management commands (shutdown, chat, cog loading).

    NOTE(review): this class does not inherit commands.Cog, which matches
    pre-1.0 discord.py cog style -- confirm the library version in use.
    """

    def __init__(self, client):
        self.client = client

    @commands.is_owner()
    @commands.command()
    async def shutdown (self, ctx):
        """Shuts the bot down."""
        await ctx.send("Bot is shutting down...")
        await self.client.logout()
        await self.client.close()
        # NOTE(review): in modern aiohttp, ClientSession.close() is a
        # coroutine; if so this call should be awaited -- confirm.
        self.client.aiosession.close()

    @commands.is_owner()
    @commands.command()
    async def chat (self, ctx, channel : discord.TextChannel, *, tosay):
        """Chats a message in a channel"""
        await channel.send(tosay)

    @commands.is_owner()
    @commands.command()
    async def reloadcog(self, ctx, cog):
        """Reloads a cog (Unload & load)."""
        await ctx.send(f"Reloading {cog}...")
        self.client.unload_extension(f"Cogs.{cog}")
        self.client.load_extension(f"Cogs.{cog}")
        await ctx.send(f"{cog} reload complete :white_check_mark:")

    @commands.is_owner()
    @commands.command()
    async def loadcog(self, ctx, cog):
        """Only loads a cog."""
        await ctx.send(f"Reloading {cog}...")
        self.client.load_extension(f"Cogs.{cog}")
        await ctx.send(f"{cog} load complete :white_check_mark:")

    @commands.is_owner()
    @commands.command()
    async def unloadcog(self, ctx, cog):
        """Only unloads a cog."""
        await ctx.send(f"Reloading {cog}...")
        self.client.unload_extension(f"Cogs.{cog}")
        await ctx.send(f"{cog} unload complete :white_check_mark:")

    @commands.is_owner()
    @commands.command()
    async def reload(self, ctx):
        """Reloads all of the bot's cogs and other components."""
        # Redundant guard: commands.is_owner() already restricts this
        # command to the bot owner.
        if await self.client.is_owner(ctx.author):
            async with ctx.typing():
                done = 0
                msg = await ctx.send(f"Bot is reloading...```Command run start```({done} done)")
                await msg.edit(content = f"Bot is reloading...```Utilities reloaded```({done} done)")
                # Copy the cog names first: unloading while iterating
                # self.client.cogs would mutate the mapping mid-iteration.
                temp = []
                for cog in self.client.cogs:
                    temp.append(cog)
                    done = done + 1
                    await msg.edit(content = "Bot is reloading...```Appended to temp {}{}```({} done)".format(cog, str(temp), done))
                for cog in temp:
                    self.client.unload_extension(f"Cogs.{cog}")
                    done = done + 1
                    await msg.edit(content = f"Bot is reloading...```Unloaded {cog}```({done} done)")
                    self.client.load_extension(f"Cogs.{cog}")
                    done = done + 1
                    await msg.edit(content = f"Bot is reloading...```Loaded {cog}```({done} done)")
                await msg.edit(content = f"Bot is reloading...```Reload complete {time.time()}```({done} done)")
                await ctx.send( "Reload complete ✅")
def setup(client):
    """Extension entry point used by discord.py's load_extension."""
    # Rename the class so the cog shows up under a friendlier name.
    Management.__name__ = "Bot management"
    client.add_cog(Management(client))
|
'''
module for calculating
x to the power y (x ** y)
efficiently
'''
def big_power(x: int, y: int, MOD = None):
    '''
    Compute x ** y by binary (square-and-multiply) exponentiation.

    x   -- base
    y   -- non-negative exponent
    MOD -- optional modulus; when given, returns (x ** y) % MOD

    The two previous branches were duplicates except for the reduction;
    they are unified here.  Perf fix: the modular branch now reduces the
    squared base as well -- previously x was never taken mod MOD, so the
    intermediate values grew without bound.
    '''
    result = 1
    while y > 0:
        if y & 1:
            result *= x
            if MOD is not None:
                result %= MOD
        x *= x
        if MOD is not None:
            x %= MOD
        y >>= 1
    return result
def mod_power(x: int, y: int, MOD: int):
    '''
    Modular exponentiation: returns (x ** y) % MOD using repeated
    squaring, keeping every intermediate value below MOD ** 2.
    '''
    base = x % MOD
    if base == 0:
        return 0
    result = 1
    exponent = y
    while exponent > 0:
        if exponent & 1:
            result = (result * base) % MOD
        base = (base * base) % MOD
        exponent >>= 1
    return result
'''
PyAlgo
Devansh Singh, 2021
''' |
from typing import Dict, Optional
from asyncpraw import Reddit
from asyncpraw.models import Submission
from asyncprawcore.exceptions import OAuthException
from app.core.config import RedditConfig, settings
async def get_reddit_connection(config: RedditConfig) -> Reddit:
    """Build an asyncpraw Reddit client from the given configuration."""
    credentials = {
        "client_id": config.client_id,
        "client_secret": config.client_secret,
        "username": config.username,
        "password": config.password,
    }
    return Reddit(user_agent=f"app by /u/{config.username}", **credentials)
async def get_reddit_user(reddit: Reddit) -> str:
    """Return the authenticated user's name, verifying the credentials work."""
    try:
        me = await reddit.user.me()
    except OAuthException as err:
        raise ValueError(
            f"Connection failed with username {settings.reddit.username!r}"
        ) from err
    return str(me)
async def extract_post_info(post: Submission) -> Dict[str, Optional[str]]:
    """Extract the submission fields needed for training the model.

    The post's flair text is mapped onto the short verdict labels;
    unknown flairs yield a None label.
    """
    flair_to_label = {
        "Asshole": "YTA",
        "Not the A-hole": "NTA",
        "Everyone Sucks": "ESH",
        "No A-holes here": "NAH",
        "Not enough info": "INFO",
    }
    return {
        "id": post.id,
        "title": post.title,
        "label": flair_to_label.get(post.link_flair_text),
        "text": post.selftext,
    }
|
# Models
from hrm_api.community.models import Reaction
# Utils
import factory
from factory.django import DjangoModelFactory
from hrm_api.community.models.reaction import ReactionTypes
# Factories
from hrm_api.users.factories import ProfileFactory
from hrm_api.community.factories import FeedFactory, CommentFactory
# Factory
class FeedReactionFactory(DjangoModelFactory):
    """Builds Reaction rows attached to a feed.

    django_get_or_create on (created_by, feed) makes repeated calls with
    the same pair reuse the existing row instead of inserting duplicates.
    """
    class Meta:
        model = Reaction
        django_get_or_create = ('created_by', 'feed')
    created_by = factory.SubFactory(ProfileFactory)
    # NOTE(review): assumes ReactionTypes.RANDOM yields a reaction type --
    # confirm against the ReactionTypes definition.
    type = factory.LazyAttribute(lambda o: ReactionTypes.RANDOM)
class CommentReactionFactory(DjangoModelFactory):
    """Builds Reaction rows attached to a comment.

    django_get_or_create on (created_by, comment) deduplicates reactions
    per user and comment.
    """
    class Meta:
        model = Reaction
        django_get_or_create = ('created_by', 'comment')
    created_by = factory.SubFactory(ProfileFactory)
    # NOTE(review): assumes ReactionTypes.RANDOM yields a reaction type --
    # confirm against the ReactionTypes definition.
    type = factory.LazyAttribute(lambda o: ReactionTypes.RANDOM)
import math
import cv2
import numpy as np
from time import time
import mediapipe as mp
import matplotlib.pyplot as plt
# Initializing mediapipe pose class.
mp_pose = mp.solutions.pose
# Setting up the Pose function (static image mode, heaviest model).
pose = mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.3, model_complexity=2)
# Initializing mediapipe drawing class, useful for annotation.
mp_drawing = mp.solutions.drawing_utils
# Setup Pose function for video (tracking mode, lighter model).
pose_video = mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.5, model_complexity=1)
# Initialize the VideoCapture object to read from the webcam.
# NOTE(review): this capture (device index 1) is never released and the
# variable is re-assigned to a file-based capture further down -- confirm
# which source is intended.
video = cv2.VideoCapture(1)
def detectPose(image, pose, display=True):
    '''
    Run mediapipe pose detection on a single BGR image.

    Args:
        image:   Input image with a prominent person whose pose landmarks
                 need to be detected.
        pose:    An initialized mediapipe Pose object.
        display: If True, show the original and annotated images plus a
                 3D landmark plot and return nothing; if False, return
                 the annotated image and the landmark list instead.

    Returns:
        (output_image, landmarks) when display is False; landmarks holds
        (x_px, y_px, z) tuples scaled back to the image size.
    '''
    annotated = image.copy()
    # mediapipe expects RGB while OpenCV supplies BGR.
    results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    height, width, _ = image.shape
    landmarks = []
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(image=annotated,
                                  landmark_list=results.pose_landmarks,
                                  connections=mp_pose.POSE_CONNECTIONS)
        # Scale the normalized landmark coordinates back to pixel space
        # (z stays a float, scaled by the image width).
        landmarks = [(int(lm.x * width), int(lm.y * height), (lm.z * width))
                     for lm in results.pose_landmarks.landmark]
    if display:
        plt.figure(figsize=[22, 22])
        plt.subplot(121)
        plt.imshow(image[:, :, ::-1])
        plt.title("Original Image")
        plt.axis('off')
        plt.subplot(122)
        plt.imshow(annotated[:, :, ::-1])
        plt.title("Output Image")
        plt.axis('off')
        # Also plot the pose landmarks in 3D.
        mp_drawing.plot_landmarks(results.pose_world_landmarks, mp_pose.POSE_CONNECTIONS)
    else:
        return annotated, landmarks
# Create named window for resizing purposes
cv2.namedWindow('Pose Detection', cv2.WINDOW_NORMAL)
# Initialize the VideoCapture object to read from a video stored in the disk.
# NOTE(review): this replaces the webcam capture opened earlier without
# releasing it -- confirm which source is intended.
video = cv2.VideoCapture(r"C:\Users\roger\Downloads\Video Bonus de Calentamiento.mp4")
# Set video camera size (3 = CAP_PROP_FRAME_WIDTH, 4 = CAP_PROP_FRAME_HEIGHT)
video.set(3,1280)
video.set(4,960)
# Initialize a variable to store the time of the previous frame.
time1 = 0
# Iterate until the video is accessed successfully.
while video.isOpened():
    # Read a frame.
    ok, frame = video.read()
    # Check if frame is not read properly.
    if not ok:
        # Break the loop.
        break
    # Flip the frame horizontally for natural (selfie-view) visualization.
    # NOTE(review): flipping also mirrors pre-recorded videos -- confirm
    # this is wanted for the file-based source above.
    frame = cv2.flip(frame, 1)
    # Get the width and height of the frame
    frame_height, frame_width, _ = frame.shape
    # Resize the frame while keeping the aspect ratio (height fixed at 640).
    frame = cv2.resize(frame, (int(frame_width * (640 / frame_height)), 640))
    # Perform Pose landmark detection.
    frame, _ = detectPose(frame, pose_video, display=False)
    # Set the time for this frame to the current time.
    time2 = time()
    # Check if the difference between the previous and this frame time > 0 to avoid division by zero.
    if (time2 - time1) > 0:
        # Calculate the number of frames per second.
        frames_per_second = 1.0 / (time2 - time1)
        # Write the calculated number of frames per second on the frame.
        cv2.putText(frame, 'FPS: {}'.format(int(frames_per_second)), (10, 30),cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 3)
    # Update the previous frame time to this frame time.
    # As this frame will become previous frame in next iteration.
    time1 = time2
    # Display the frame.
    cv2.imshow('Pose Detection', frame)
    # Wait until a key is pressed.
    # Retreive the ASCII code of the key pressed
    k = cv2.waitKey(1) & 0xFF
    # Check if 'ESC' is pressed.
    if(k == 27):
        # Break the loop.
        break
# Release the VideoCapture object.
video.release()
# Close the windows.
cv2.destroyAllWindows()
import os
# Minimal Django settings used for running the test app.
# Project root: two directory levels above this file.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# In-memory SQLite keeps the test database ephemeral and fast.
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
INSTALLED_APPS = ["testapp"]
# NOTE(review): BASEDIR (one level up) differs from BASE_DIR (two levels
# up) -- confirm both spellings are intentional and actually used.
BASEDIR = os.path.dirname(__file__)
SECRET_KEY = "supersikret"  # test-only secret, not for production
ROOT_URLCONF = "testapp.urls"
ALLOWED_HOSTS = ["*"]  # accept any host header (test settings only)
|
# -*- coding: utf-8 -*-
from flask import url_for
from . import db, BaseNameMixin, MarkdownColumn
from .user import User
from .commentvote import VoteSpace, CommentSpace, SPACETYPE
__all__ = ['SPACESTATUS', 'ProposalSpace']
# --- Constants ---------------------------------------------------------------
class SPACESTATUS:
    """Lifecycle states for a ProposalSpace; stored in its integer
    `status` column (new spaces default to DRAFT)."""
    DRAFT = 0
    SUBMISSIONS = 1
    VOTING = 2
    JURY = 3
    FEEDBACK = 4
    CLOSED = 5
    WITHDRAWN = 6
# --- Models ------------------------------------------------------------------
class ProposalSpace(BaseNameMixin, db.Model):
    """An event space that collects proposals, votes and comments."""
    __tablename__ = 'proposal_space'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('spaces', cascade="all, delete-orphan"))
    tagline = db.Column(db.Unicode(250), nullable=False)
    description = MarkdownColumn('description', default=u'', nullable=False)
    datelocation = db.Column(db.Unicode(50), default=u'', nullable=False)
    date = db.Column(db.Date, nullable=True)
    date_upto = db.Column(db.Date, nullable=True)
    website = db.Column(db.Unicode(250), nullable=True)
    timezone = db.Column(db.Unicode(40), nullable=False, default=u'UTC')
    status = db.Column(db.Integer, default=SPACESTATUS.DRAFT, nullable=False)
    votes_id = db.Column(db.Integer, db.ForeignKey('votespace.id'), nullable=False)
    votes = db.relationship(VoteSpace, uselist=False)
    comments_id = db.Column(db.Integer, db.ForeignKey('commentspace.id'), nullable=False)
    comments = db.relationship(CommentSpace, uselist=False)

    def __init__(self, **kwargs):
        super(ProposalSpace, self).__init__(**kwargs)
        # Every space gets its own vote and comment containers.
        self.votes = VoteSpace(type=SPACETYPE.PROPOSALSPACE)
        self.comments = CommentSpace(type=SPACETYPE.PROPOSALSPACE)

    @property
    def rooms(self):
        """All rooms across all venues of this space."""
        return [room for venue in self.venues for room in venue.rooms]

    def permissions(self, user, inherited=None):
        """Return the set of permissions *user* holds on this space.

        Everyone can view; logged-in users can submit proposals while the
        space accepts submissions; the owner can manage everything.
        """
        perms = super(ProposalSpace, self).permissions(user, inherited)
        perms.add('view')
        if user is not None:
            if self.status == SPACESTATUS.SUBMISSIONS:
                perms.add('new-proposal')
            if user == self.user:
                perms.update([
                    'edit-space',
                    'delete-space',
                    'view-section',
                    'new-section',
                    'view-usergroup',
                    'new-usergroup',
                    'confirm-proposal',
                    'new-venue',
                    'edit-venue',
                    'delete-venue',
                    ])
        return perms

    def url_for(self, action='view', _external=False):
        """Return the URL for *action* on this space (None if unknown)."""
        if action == 'view':
            return url_for('space_view', space=self.name, _external=_external)
        elif action == 'new-proposal':
            return url_for('proposal_new', space=self.name, _external=_external)
        elif action == 'json':
            return url_for('space_view_json', space=self.name, _external=_external)
        elif action == 'csv':
            return url_for('space_view_csv', space=self.name, _external=_external)
        elif action == 'edit':
            return url_for('space_edit', space=self.name, _external=_external)
        elif action == 'sections':
            return url_for('section_list', space=self.name, _external=_external)
        elif action == 'new-section':
            return url_for('section_new', space=self.name, _external=_external)
        elif action == 'usergroups':
            return url_for('usergroup_list', space=self.name, _external=_external)
        elif action == 'new-usergroup':
            return url_for('usergroup_new', space=self.name, _external=_external)
        elif action == 'venues':
            return url_for('venue_list', space=self.name, _external=_external)
        elif action == 'new-venue':
            return url_for('venue_new', space=self.name, _external=_external)
        elif action == 'schedule':
            return url_for('schedule_view', space=self.name, _external=_external)
        elif action == 'edit-schedule':
            return url_for('schedule_edit', space=self.name, _external=_external)
        elif action == 'update-schedule':
            return url_for('schedule_update', space=self.name, _external=_external)
        elif action == 'new-session':
            return url_for('session_new', space=self.name, _external=_external)
        elif action == 'update-venue-colors':
            return url_for('update_venue_colors', space=self.name, _external=_external)
        elif action == 'json-schedule':
            return url_for('schedule_json', space=self.name, _external=_external)
        elif action == 'subscribe-schedule':
            return url_for('schedule_subscribe', space=self.name, _external=_external)
        elif action == 'ical-schedule':
            # Swap the URL scheme for webcal (webcals over TLS). The old
            # chained str.replace was broken: replacing 'http' first made
            # the 'https' replacement dead code, and an unanchored
            # substring replace could also corrupt 'http' appearing in
            # the path. Anchor the rewrite at the start instead.
            url = url_for('schedule_ical', space=self.name, _external=_external)
            if url.startswith('https://'):
                return 'webcals://' + url[len('https://'):]
            if url.startswith('http://'):
                return 'webcal://' + url[len('http://'):]
            return url
|
#python=3.6
# -*- coding: utf-8 -*-
"""
Synopsis:
Created: Sun Feb 14 18:59:11 2021
Sources:
Author: John Telfeyan
john <dot> telfeyan <at> gmail <dot> com
Distribution: MIT Open Source Copyright; Full permissions here:
https://gist.github.com/john-telfeyan/2565b2904355410c1e75f27524aeea5f#file-license-md
"""
from flask_app import flask_app |
import itertools as it
import string
import unicodedata
from unicode_math_symbols import *
JSON_SNIPPET_TEMPLATE = """\
"{0}": {{
"prefix": "{0}",
"body": ["{1}"],
"description": "{2}"
}},
"""
def json_escape(char):
    """Return the JSON \\uXXXX escape sequence for a single character.

    Characters outside the Basic Multilingual Plane are encoded as a
    UTF-16 surrogate pair, as JSON requires (RFC 8259, section 7).
    """
    codepoint = ord(char)
    if codepoint <= 0xFFFF:
        # Pad to exactly four hex digits: "\u41" is not a valid JSON
        # escape, which the old "{0:X}" format produced for any
        # codepoint below 0x1000 (e.g. Greek letters).
        return "\\u{0:04X}".format(codepoint)
    else:
        high = (codepoint - 0x10000) // 0x400 + 0xD800
        low = (codepoint - 0x10000) % 0x400 + 0xDC00
        return "\\u{0:04X}\\u{1:04X}".format(high, low)
def snippet_generator(command, symbols, args):
    """Yield one JSON snippet entry per (symbol, argument) pair.

    The snippet prefix is the LaTeX-style command applied to the
    argument (e.g. ``\\command{A}``); the body is the escaped symbol and
    the description is the symbol's Unicode name.
    """
    assert len(symbols) == len(args)
    for symbol, arg in zip(symbols, args):
        prefix = "\\\\{0}{{{1}}}".format(command, arg)
        yield JSON_SNIPPET_TEMPLATE.format(prefix,
                                           json_escape(symbol),
                                           unicodedata.name(symbol))
def naked_greek_snippet_generator():
    """Yield snippets for the bare (unstyled) Greek LaTeX commands.

    Splices nabla (U+2207) and partial differential (U+2202) into the
    italic Greek letter tuple so it lines up one-to-one with
    LATEX_GREEK_COMMANDS.
    """
    # NOTE(review): the slice indices 25/26 and 51/52 assume a specific
    # ordering of MATHEMATICAL_ITALIC_GREEK_LETTERS (from
    # unicode_math_symbols) -- confirm against that module.
    MATHEMATICAL_NAKED_GREEK_LETTERS = (
        MATHEMATICAL_ITALIC_GREEK_LETTERS[:25] +
        (chr(0x2207),) +
        MATHEMATICAL_ITALIC_GREEK_LETTERS[26:51] +
        (chr(0x2202),) +
        MATHEMATICAL_ITALIC_GREEK_LETTERS[52:58])
    for letter, command in zip(MATHEMATICAL_NAKED_GREEK_LETTERS,
                               LATEX_GREEK_COMMANDS):
        yield JSON_SNIPPET_TEMPLATE.format(
            command,
            json_escape(letter),
            unicodedata.name(letter))
def full_snippet_generator(command, capitals, smalls, greeks, digits):
    """Yield snippets for every alphabet a style command supports.

    Any of capitals/smalls/greeks/digits may be None, meaning the style
    has no variant for that alphabet.
    """
    alphabets = (
        (capitals, string.ascii_uppercase),
        (smalls, string.ascii_lowercase),
        (greeks, LATEX_GREEK_COMMANDS),
        (digits, string.digits),
    )
    for symbols, args in alphabets:
        if symbols is not None:
            yield from snippet_generator(command, symbols, args)
if __name__ == "__main__":
    # Write every style variant plus the bare Greek letters to
    # snippets.json.
    # NOTE(review): the last entry keeps its trailing comma, so the
    # output is JSONC (tolerated by VS Code snippet files) rather than
    # strict JSON -- confirm the consumer accepts that.
    with open("snippets.json", "w+") as f:
        f.write("{\n")
        for commands, (capitals, smalls,
                       greeks, digits) in LATEX_STYLE_COMMANDS.items():
            for command in commands:
                for snippet in full_snippet_generator(command, capitals, smalls,
                                                      greeks, digits):
                    f.write(snippet)
        for snippet in naked_greek_snippet_generator():
            f.write(snippet)
        f.write("}\n")
|
from Jumpscale import j
from base_test import BaseTest
from parameterized import parameterized
import random, requests, uuid, unittest
from requests import ConnectionError
# Servers whose installation is currently broken; the value is the
# tracking-issue URL, used below as the skipTest reason.
SKIPPED_INSTALLATION = {
    "capacity": "https://github.com/threefoldtech/jumpscaleX_core/issues/94",
    "sanic": "https://github.com/threefoldtech/jumpscaleX_core/issues/94",
    "flask": "https://github.com/threefoldtech/jumpscaleX_core/issues/94",
    "sockexec": "https://github.com/threefoldtech/jumpscaleX_core/issues/30",
    "errbot": "https://github.com/threefoldtech/jumpscaleX_core/issues/94",
}
class TestServers(BaseTest):
    """Smoke tests for the j.servers.* wrappers: install, start, stop.

    NOTE(review): method names start with 'Test', not 'test' -- default
    unittest discovery only collects lowercase 'test' prefixes, so these
    presumably rely on a custom runner; confirm how they are invoked.
    """
    @classmethod
    def setUpClass(cls):
        # Install each server once up front, skipping those that are
        # pre-installed or have known-broken installation.
        for server in BaseTest.SERVERS:
            if server in BaseTest.INSTALLED_SERVER or server in SKIPPED_INSTALLATION:
                continue
            getattr(j.servers, server).install()
    def setUp(self):
        pass
    @parameterized.expand(BaseTest.SERVERS)
    def Test01_install_option(self, server):
        """
        - Install server .
        - Make sure that server has been installed successfully.
        """
        if server in SKIPPED_INSTALLATION:
            self.skipTest(SKIPPED_INSTALLATION[server])
        if server in BaseTest.INSTALLED_SERVER:
            self.skipTest("server does't have install option.")
        self.info("Make sure that server has been installed successfully.")
        # Each binary prints a slightly different usage banner, so the
        # expected substring varies per server.
        if server == "threebot":
            output, error = self.os_command("openresty --help")
            self.assertIn("Usage", output.decode())
        else:
            output, error = self.os_command("{} --help".format(server))
            if server == "zdb":
                self.assertIn("Command line arguments:", output.decode())
            elif server in ["sonic", "corex"]:
                self.assertIn("USAGE:", output.decode())
            elif server in ["odoo"]:
                self.assertIn("Usage:", output.decode())
            else:
                self.assertIn("usage", output.decode())
    @parameterized.expand(BaseTest.SERVERS)
    def Test02_start_stop_options(self, server):
        """
        - Start server.
        - Make sure that server started successfully.
        - Check that server connection works successfully.
        - Stop server
        - Check that can't connect to server anymore.
        """
        skipped = {"gedis_websocket": "https://github.com/threefoldtech/jumpscaleX_core/issues/30"}
        if server in SKIPPED_INSTALLATION:
            self.skipTest(SKIPPED_INSTALLATION[server])
        elif server in skipped:
            self.skipTest(skipped[server])
        self.info("* Start Server {}".format(server))
        # etcd is managed directly by binary path; the others go through
        # a startupcmd wrapper process.
        if server in ["etcd"]:
            server_object = getattr(j.servers, server)
            server_process = "/sandbox/bin/etcd"
        else:
            server_object = getattr(j.servers, server).get()
            server_process = "startupcmd_{}".format(server)
        server_object.start()
        self.info(" * Make sure that server started successfully.")
        output, error = self.os_command(
            " ps -aux | grep -v -e grep -e tmux | grep {} | awk '{{print $2}}'".format(server_process)
        )
        self.assertTrue(output.decode())
        server_PID = int(output.decode())
        self.info(" * Check that server connection works successfully.")
        # odoo is matched in netstat by its listening port; everything
        # else by process id.
        if server == "odoo":
            server_element = server_object.port
        else:
            server_element = server_PID
        output, error = self.os_command("netstat -nltp | grep '{}' ".format(server_element))
        self.assertTrue(output)
        self.info(" * Stop server {}".format(server))
        server_object.stop()
        output, error = self.os_command("ps -aux | grep -v grep | grep {}".format(server_process))
        self.assertFalse(output.decode())
        self.info("* Check that can't connect to server anymore.")
        output, error = self.os_command("netstat -nltp | grep {}".format(server_element))
        self.assertFalse(output)
    @parameterized.expand(BaseTest.SERVERS)
    @unittest.skip("https://github.com/threefoldtech/jumpscaleX_core/issues/30")
    def Test03_port_and_name_options(self, server):
        """
        - install server with default port and get server.
        - Make sure that server started successfully.
        - Stop the server.
        """
        self.info("Install server with default port and get server.")
        getattr(j.servers, server).install()
        # NOTE(review): the parameter `server` is rebound to the server
        # object here, shadowing the server name -- confirm intended.
        server = getattr(j.servers, server).get()
        self.info("Change server port and name. ")
        new_port = random.randint(2000, 3000)
        new_name = str(uuid.uuid4()).replace("-", "")[1:10]
        server.port = new_port
        server.name = new_name
        self.info("Start the server and check the port server started with")
        server.start()
        output, error = self.os_command("ps -aux | grep {}".format(server))
        self.assertIn(new_port, output.decode())
        self.assertIn(new_name, output.decode())
        self.info(" Stop the server.")
        server.stop()
|
from collections import UserDict, OrderedDict
from collections.abc import Iterable
from _collections_abc import (
dict_keys,
dict_values,
dict_items,
) # https://github.com/python/typeshed/pull/6888
from .helpers import _is_iterable_but_not_string, _convert_slice_to_list
class kdict(UserDict):
    """
    A dict with k-dimensional keys, sliceable along any of those dimensions.
    """
    # Why we subclass UserDict instead of dict: https://stackoverflow.com/a/7148602/130164 and https://stackoverflow.com/a/64450669/130164 and similar
    def __init__(self, dict=None, **kwargs):
        """Build the mapping, enforcing that all keys share one length."""
        # key_len is the common tuple length of every key; None until the
        # first key is seen.
        self.key_len = None
        key_lengths = []
        if dict is not None:
            key_lengths.extend([len(k) for k in dict.keys()])
        if kwargs:
            key_lengths.extend([len(k) for k in kwargs.keys()])
        if len(key_lengths) >= 1:
            self.key_len = key_lengths[0]
            if not all(x == self.key_len for x in key_lengths):
                raise ValueError("All keys must have same length")
        super().__init__(dict, **kwargs)
    def _get_multiple_keys(self, key_template):
        """Resolve a key template containing slices/iterables into a sub-kdict."""
        # TODO: can we return a view into the dictionary rather than a copy?
        # see https://stackoverflow.com/q/9329537/130164
        key_transformed = []
        # Convert slices to list
        for ix, k in enumerate(key_template):
            if isinstance(k, slice):
                key_transformed.append(
                    _convert_slice_to_list(k, self.keys(dimensions=ix, unique=False))
                )
            elif _is_iterable_but_not_string(k):
                key_transformed.append(list(k))
            else:
                key_transformed.append(k)
        if len(key_transformed) != len(key_template):
            raise ValueError("something went wrong in slice eval process")
        # confirm all list lengths match
        key_list_lengths = [
            len(k) for k in key_transformed if _is_iterable_but_not_string(k)
        ]
        # print(key_template, key_transformed, key_list_lengths)
        if not all(x == key_list_lengths[0] for x in key_list_lengths):
            raise KeyError("All slices must have same length")
        # transform any remaining scalars into lists of that length
        key_transformed = [
            [k] * key_list_lengths[0] if (not _is_iterable_but_not_string(k)) else k
            for k in key_transformed
        ]
        # take subset
        # check membership against keys() to handle the following scenario:
        # suppose we have keys (1, "a"), (1, "b"), (2, "c"), and user asks for (1, :)
        # in this case, key_transformed will include (1, "c"), which does not actually exist
        # this is tested in test_none_slice_against_mixed_column()
        # TODO: is there a better way to construct key_transformed?
        subset = {k: self.data[k] for k in zip(*key_transformed) if k in self.keys()}
        # Return another kdict
        return self.__class__(dict=subset)
    def __getitem__(self, key):
        """Scalar tuple keys behave like dict; slices/iterables in any
        position return a sub-kdict of the matching entries."""
        if self.key_len is not None and len(key) != self.key_len:
            raise KeyError(key, "wrong key length")
        if any(isinstance(k, slice) or _is_iterable_but_not_string(k) for k in key):
            return self._get_multiple_keys(key)
        return super().__getitem__(key)
    def __setitem__(self, key, value):
        """Insert an item, locking in the key length on first insert."""
        if not self.key_len:
            self.key_len = len(key)
        else:
            if len(key) != self.key_len:
                raise KeyError(key, "wrong key length")
        return super().__setitem__(key, value)
    def keys(self, dimensions=None, unique=True) -> dict_keys:
        """
        Get keys: either return full tuples, or return one column of the tuples (optionally taking unique values only)
        """
        # Get a dict_keys object here to behave more like a dict does,
        # whereas calling UserDict's keys() with super().keys() would give a KeysView, which is a bit different:
        # https://github.com/python/typeshed/pull/6888
        all_keys = self.data.keys()
        if dimensions is None:
            return all_keys
        if isinstance(dimensions, Iterable):
            # requested multiple dimensions. wrap keys in tuples
            key_column = [
                tuple(key[dimension] for dimension in dimensions) for key in all_keys
            ]
        else:
            # requested a single dimension (argument was a scalar, not a list)
            # provide keys as scalars too
            key_column = [key[dimensions] for key in all_keys]
        if unique:
            # get unique values in original order, so can't use set.
            return list(OrderedDict.fromkeys(key_column))
        return key_column
    def values(self) -> dict_values:
        # Return a dict_values object just like dict, rather than UserDict's ValuesView.
        # https://github.com/python/typeshed/pull/6888
        return (
            self.data.values()
        )  # instead of return super().values() or not overriding at all
    def items(self) -> dict_items:
        # Return a dict_items object just like dict, rather than UserDict's ItemsView.
        # https://github.com/python/typeshed/pull/6888
        return (
            self.data.items()
        )  # instead of return super().items() or not overriding at all
    def eject(self):
        """Return the underlying plain dict (no copy)."""
        return self.data
|
import tensorflow as tf
from tensorflow.keras.callbacks import Callback
from matplotlib import pyplot as plt
class GANMonitor(Callback):
    """Keras callback that plots generator samples at the end of every
    epoch and saves the generator when training finishes.

    Args:
        num_img: number of sample images to plot per epoch.
        latent_dim: size of the generator's latent vector.
        vis_path: path prefix where epoch figures are written.
        model_path: path the generator model is saved to at train end.
    """
    def __init__(self, num_img, latent_dim, vis_path, model_path):
        self.num_img = num_img
        self.latent_dim = latent_dim
        self.vis_path = vis_path
        self.model_path = model_path
        # Fixed latent vectors so the same samples are tracked across
        # epochs. Was hard-coded to 16 vectors, which crashed with an
        # out-of-range index whenever num_img > 16; size it to num_img.
        self.seed = tf.random.normal([num_img, latent_dim])
    def on_epoch_end(self, epoch, logs=None):
        import math
        # Generate sample images from the fixed seed.
        generated_images = self.model.generator(self.seed)
        # NOTE(review): this maps [0, 255]-range output to roughly
        # [-1, 1]; if the generator already emits [-1, 1] (tanh output),
        # the rescale direction looks inverted -- confirm against the
        # generator's final activation.
        generated_images = (generated_images - 127.5) / 127.5
        generated_images.numpy()
        # Smallest square grid that holds num_img images (4x4 for the
        # historical default of 16).
        grid = math.isqrt(max(self.num_img - 1, 0)) + 1
        fig = plt.figure(figsize=(4, 4))
        for i in range(self.num_img):
            plt.subplot(grid, grid, i+1)
            img = tf.keras.utils.array_to_img(generated_images[i])
            plt.imshow(img, cmap="gray")
            plt.axis("off")
        # Save the figure for this epoch, then show it.
        plt.savefig(self.vis_path + "epoch_{:03d}.png".format(epoch))
        plt.show()
    def on_train_end(self, logs=None):
        # Persist only the generator; the discriminator is not needed
        # for sampling.
        self.model.generator.save(self.model_path)
|
# -*- coding: UTF-8 -*-
import json
import uuid
from datetime import datetime
from flask import current_app
from flask.ext.login import UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from werkzeug.security import generate_password_hash, check_password_hash
from app import db
from app.util.str_util import random_code
__author__ = 'Sheldon Chen'
class UserFollow(db.Model):
    """Follow relationship: follower_id follows followed_id."""
    __tablename__ = 'user_follows'
    follower_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)  # id of the follower
    followed_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)  # id of the user being followed
    # Pass the callable, not datetime.now(): calling it here would
    # evaluate once at import time and stamp every row with that moment.
    timestamp = db.Column(db.DateTime, default=datetime.now)
class User(UserMixin, db.Model):
    """User entity: student, lecturer or administrator."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    unique_code = db.Column(db.String(200), unique=True)  # unique code
    invite_code = db.Column(db.String(20), unique=True)  # invitation code
    email = db.Column(db.String(30), unique=True)  # email, login account
    username = db.Column(db.String(64), unique=True)  # nickname
    desc = db.Column(db.String(500))  # profile description
    password_hash = db.Column(db.String(128))  # hashed password
    pwd = db.Column(db.String(255))  # plain text
    mobi = db.Column(db.String(15))  # mobile number
    mobile_confirmed = db.Column(db.Boolean, default=False)  # mobile verified
    qq = db.Column(db.String(128))  # qq
    #money = db.Column(db.Float,default=0) # account balance
    balance = db.Column(db.Float, default=0)  # account balance in real currency
    coin = db.Column(db.Float, default=0)  # virtual-coin account
    frozen_capital = db.Column(db.Float, default=0)  # funds frozen while a withdrawal is pending
    alipay = db.Column(db.String(50))  # Alipay account
    real_name = db.Column(db.String(20))  # real name
    confirmed = db.Column(db.Boolean, default=False)  # account confirmed
    # Registration time; pass the callable (not datetime.now()) so each
    # row is stamped at insert time rather than at module import time.
    reg_time = db.Column(db.DateTime, default=datetime.now)
    logo_url = db.Column(db.String(255))  # avatar URL
    user_type = db.Column(db.Integer, default=1)  # account type, 1: student; 2: lecturer; 3: admin
    channel = db.Column(db.String(10), default='')  # acquisition channel
    inviter_id = db.Column(db.Integer)  # inviter's user id
    province = db.Column(db.String(255))  # province
    city = db.Column(db.String(255))  # city
    addr = db.Column(db.String(500))  # address
    company = db.Column(db.String(200))  # company / school
    job = db.Column(db.String(100))  # job title
    work_years = db.Column(db.String(255))  # years of work experience
    focus_it = db.Column(db.String(255))  # technologies of interest
    follower_count = db.Column(db.Integer)  # number of followers
    following_count = db.Column(db.Integer)  # number of users followed

    # Users I follow
    followed = db.relationship('UserFollow',
                               foreign_keys=[UserFollow.follower_id],
                               backref=db.backref('followers', lazy='joined'),
                               lazy='dynamic',
                               cascade='all, delete-orphan')
    # My followers
    followers = db.relationship('UserFollow',
                                foreign_keys=[UserFollow.followed_id],
                                backref=db.backref('followed', lazy='joined'),
                                lazy='dynamic',
                                cascade='all, delete-orphan')

    def follow_user(self, user):
        """Make *user* a follower of this user, updating both counters."""
        if not self.is_my_follower(user):
            f = UserFollow(follower_id=user.id, followed_id=self.id, timestamp=datetime.now())
            db.session.add(f)
            db.session.commit()
            # Counters may be NULL for legacy rows; treat None as 0.
            if self.follower_count is None:
                self.follower_count = 1
            else:
                self.follower_count += 1  # my follower count +1
            db.session.add(self)
            db.session.commit()
            if user.following_count is None:
                user.following_count = 1
            else:
                user.following_count += 1  # the other user's following count +1
            db.session.add(user)
            db.session.commit()

    def unfollow_user(self, user):
        """Remove *user* from this user's followers (mirror of follow_user)."""
        f = self.followers.filter_by(follower_id=user.id).first()
        if f is not None:
            db.session.delete(f)
            db.session.commit()
            # Guard against NULL counters (the old code raised TypeError
            # when decrementing None).
            self.follower_count = (self.follower_count or 1) - 1  # my follower count -1
            db.session.add(self)
            db.session.commit()
            user.following_count = (user.following_count or 1) - 1  # the other user's following count -1
            db.session.add(user)
            db.session.commit()

    def is_my_follower(self, user):
        """Return True if *user* follows this user."""
        return self.followers.filter_by(follower_id=user.id).first() is not None

    def is_followed_by(self, user):
        """Return True if this user follows *user*."""
        return self.followed.filter_by(followed_id=user.id).first() is not None

    def __unicode__(self):
        return '%s' % self.username

    @property
    def password(self):
        """The plain password must never be read back."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        """Store only a salted hash of the password."""
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Check a candidate password against the stored hash."""
        return check_password_hash(self.password_hash.encode('utf-8'), password)

    def generate_confirmation_token(self, expiration=3600):
        """Return a signed, expiring token embedding this user's id."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate a confirmation token and mark the account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            # Bad signature or expired token.
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        db.session.commit()
        return True

    def generate_auth_token(self, expiration):
        """Return a signed API auth token embedding this user's id."""
        s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration)
        return s.dumps({'id': self.id})

    @staticmethod
    def verify_auth_token(token):
        """Return the User for a valid auth token, else None."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            return None
        return User.query.get(data['id'])

    def is_super_admin(self):
        return self.user_type == 3

    def is_teacher(self):
        return self.user_type == 2

    def get_unique_code(self):
        """Lazily assign and return the user's unique code."""
        if self.unique_code is None:
            self.unique_code = str(uuid.uuid1()).replace('-', '')
        return self.unique_code

    def get_invite_code(self):
        """Lazily assign and return the user's invitation code."""
        if self.invite_code is None:
            self.invite_code = random_code(self.id, 6)
        return self.invite_code
class Teacher(db.Model):
    """Lecturer table: extra profile and payout data for a user."""
    __tablename__ = 'teacher'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    teacher_name = db.Column(db.String(50))
    id_num = db.Column(db.String(50))  # national ID number
    logo_url = db.Column(db.String(255))  # avatar URL
    rate = db.Column(db.Float,default=0)  # course revenue-share ratio, between 0 and 1
    company = db.Column(db.String(100))  # employer
    mobi=db.Column(db.String(12))
    email=db.Column(db.String(50))
    qq=db.Column(db.String(12))
    brief = db.Column(db.String(500))  # short introduction
    sort_num = db.Column(db.Integer)  # display ordering
    is_show=db.Column(db.Integer,default=1)  # whether shown on the site
    is_recommend=db.Column(db.Integer,default=0)
    teach_course_type = db.Column(db.String(100))  # teaching field
    bank_name=db.Column(db.String(100))  # bank name; use ALIPAY for Alipay
    bank_account = db.Column(db.String(100))  # bank account number
    user = db.relationship('User')
class UserIncome(db.Model):
    """User income record."""
    __tablename__ = 'user_income'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    money = db.Column(db.Float, default=0)
    title = db.Column(db.String(100))
    # Income source type: 0: coupon-code sharing income (regular user),
    # 1: coupon-code sharing income, 3: course sales income (lecturer)
    from_type = db.Column(db.Integer)
    from_id = db.Column(db.Integer)
    # Pass the callable so each row is stamped at insert time; the old
    # datetime.now() froze the import-time value for every row.
    created_time = db.Column(db.DateTime, default=datetime.now)
    user = db.relationship('User')
class UserWithdrawal(db.Model):
    """Withdrawal (cash-out) request."""
    __tablename__ = 'user_withdrawal'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    apply_money = db.Column(db.Float, default=0)  # requested amount
    # Pass the callable so each row gets its own request timestamp; the
    # old datetime.now() froze the import-time value for every row.
    created_time = db.Column(db.DateTime, default=datetime.now)
    pay_money = db.Column(db.Float, default=0)  # amount actually paid
    pay_channel = db.Column(db.String(10))  # payment channel: Alipay, bank transfer
    beneficiary_account = db.Column(db.String(50))  # receiving account
    # NOTE(review): Float(50) looks like it was meant to be String(50) or
    # a plain Float -- confirm before changing the schema.
    service_charge = db.Column(db.Float(50))  # service fee
    # NOTE(review): 'hande_' is a typo for 'handle_', kept for schema
    # compatibility.
    hande_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    hande_time = db.Column(db.DateTime)
    state = db.Column(db.Integer, default=0)  # 0: pending, 1: success, 2: rejected
    user = db.relationship("User", foreign_keys='[UserWithdrawal.user_id]')
class ProductVIP(db.Model):
    """Membership (VIP) product."""
    __tablename__ = 'products'
    id = db.Column(db.Integer, primary_key=True)
    product_name = db.Column(db.String(30), unique=True)  # membership name
    month_price = db.Column(db.Integer)  # membership price (yuan per month)
    year_price = db.Column(db.Integer)  # membership price (yuan per year)
    # Callable default: stamp each row at insert time, not import time.
    created_time = db.Column(db.DateTime, default=datetime.now)
    is_visible = db.Column(db.Boolean, default=True)  # publicly visible
    comment = db.Column(db.Text)
    state = db.Column(db.Integer, default=0)  # state: 0: enabled
    # orders = db.relationship('Order', backref='product')
    # classes = db.relationship('CourseProductRelationship')
    # product_order_list = db.relationship('ProductOrder', backref="product")
class Order(db.Model):
    """Purchase order."""
    __tablename__ = 'orders'
    id = db.Column(db.Integer, primary_key=True)
    order_num = db.Column(db.String(50), unique=True)  # serial number
    title = db.Column(db.String(200))  # order title
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    product_id = db.Column(db.Integer)
    product_type = db.Column(db.Integer, default=0)  # 0: premium membership, 1: class, 2: topic (task), 3: course
    price = db.Column(db.Float, default=0)  # unit price
    order_count = db.Column(db.Integer, default=1)  # quantity purchased
    coupon_id = db.Column(db.Integer)  # coupon id
    invite_user_id = db.Column(db.Integer)  # inviter's user id
    day = db.Column(db.Integer)  # usage period of the purchased product
    total_price = db.Column(db.Float)  # order total
    pay_channel = db.Column(db.String(20))  # payment channel: FREE for gifts, ALIPAY for Alipay, bank code for banks, COIN for virtual coins
    trade_status = db.Column(db.String(50), default='INIT', nullable=True)  # order status: INIT pending payment, TRADE_FINISHED/TRADE_SUCCESS success, CANCEL cancelled, EXPIRE expired
    # Callable default: stamp each order at insert time; the old
    # datetime.now() froze the import-time value for every row.
    created_date = db.Column(db.DateTime, default=datetime.now)  # order time
    operation = db.Column(db.Integer, default=0)  # order operation
    remark = db.Column(db.String(255))  # remarks
    user = db.relationship('User')
class ProductOrder(db.Model):
    """Subscription/entitlement record linking a user to a product."""
    __tablename__ = 'products_orders_relationship'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer)
    product_id = db.Column(db.Integer)
    product_type = db.Column(db.Integer, default=0)  # 0: premium membership, 1: class, 2: topic, 3: course
    state = db.Column(db.Integer, default=0)
    # Callable default: stamp each row at insert time, not import time.
    created_time = db.Column(db.DateTime, default=datetime.now)
    order_time = db.Column(db.DateTime, nullable=True)
    cancel_time = db.Column(db.DateTime, nullable=True)
    is_experience = db.Column(db.Integer, default=0)  # whether this is a trial-membership entitlement
class Course(db.Model):
    """Course entity."""
    __tablename__ = "classes"
    # __searchable__ = ['name', 'desc']
    # __analyzer__=ChineseAnalyzer()
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True)  # course title
    desc = db.Column(db.Text)  # full description
    brief = db.Column(db.String(500))  # short description
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))  # lecturer
    lessons_count = db.Column(db.Integer)  # total lessons
    lessons_finished_count = db.Column(db.Integer, default=0)  # lessons published so far
    lessons_time = db.Column(db.Integer)  # total duration in seconds
    lessons_played_time = db.Column(db.Integer)  # total play count
    comment_count = db.Column(db.Integer)  # total comments
    recommend_count = db.Column(db.Integer)  # recommendation score
    class_type = db.Column(db.Integer)
    second_class_type = db.Column(db.Integer, db.ForeignKey('class_types.id'))
    class_difficulty = db.Column(db.Integer, default=1)  # 1: beginner, 2: intermediate, 3: advanced
    key_words = db.Column(db.String(255))  # course keywords
    fit_to = db.Column(db.String(255))  # target audience
    img_url = db.Column(db.String(255))  # cover image URL
    is_free = db.Column(db.Integer, default=0)  # whether free (integer flag; was `default=False` on an Integer column)
    is_online = db.Column(db.Integer, default=0)  # 0: offline, 1: online
    types = db.Column(db.Integer, default=1)  # 0: free course, 1: VIP course, 2: paid item, 3: job-training course
    cost_price = db.Column(db.Float)
    now_price = db.Column(db.Float)
    qqgroup_id = db.Column(db.Integer, db.ForeignKey('qqgroup.id'))  # QQ group
    expiry_day = db.Column(db.Integer, default=365)  # study period in days; only meaningful when types == 2
    rate = db.Column(db.Float, default=0)  # revenue-share ratio, in [0, 1]
    is_hot = db.Column(db.Integer, default=0)  # featured/hot flag
    can_buy = db.Column(db.Integer, default=1)  # purchasable flag
    can_use_coupon = db.Column(db.Integer, default=1)  # coupon-allowed flag
    teacher = db.relationship('User',
                              backref=db.backref('classes', lazy='dynamic'))
    qqgroup = db.relationship('QQGroup',
                              backref=db.backref('classes', lazy='dynamic'))

    def __unicode__(self):
        return u'%s' % self.name

    def is_vip(self):
        """Return True if this course is bundled with the VIP product.

        NOTE(review): product_id == 8 is a magic constant (presumably the
        VIP product's row id) — confirm against the products table.
        """
        query = CourseProductRelationship.query.filter(
            CourseProductRelationship.product_id == 8,
            CourseProductRelationship.product_type == 0,
            CourseProductRelationship.class_id == self.id)
        return query.first() is not None
class CoursePath(db.Model):
    """Learning path: an ordered collection of course topics."""
    __tablename__ = "course_path"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50), unique=True)  # title
    desc = db.Column(db.Text)  # description
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    price = db.Column(db.Float)  # price
    logo_url = db.Column(db.String(200))
    qqgroup_id = db.Column(db.Integer, db.ForeignKey('qqgroup.id'))
    total_course = db.Column(db.Integer)  # total number of courses
    total_lesson = db.Column(db.Integer)  # total number of lessons
    total_students = db.Column(db.Integer)  # total number of learners
    status = db.Column(db.Integer)  # 0: unpublished, 1: normal
    qqgroup = db.relationship('QQGroup',
                              backref=db.backref('course_path', lazy='dynamic'))
    topics = db.relationship('CourseTopic', backref='path')


class CourseTopic(db.Model):
    """Course topic within a learning path."""
    __tablename__ = "course_topic"
    id = db.Column(db.Integer, primary_key=True)
    path_id = db.Column(db.Integer, db.ForeignKey('course_path.id'))
    title = db.Column(db.String(50), unique=True)  # topic title
    desc = db.Column(db.Text)  # description
    created_time = db.Column(db.DateTime, default=datetime.now)  # per-row timestamp (callable)
    logo_url = db.Column(db.String(200))


class CourseProductRelationship(db.Model):
    """Many-to-many link between courses and products (composite PK)."""
    __tablename__ = 'class_product_relationship'
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'), primary_key=True)
    product_id = db.Column(db.Integer, db.ForeignKey('products.id'), primary_key=True)
    product_type = db.Column(db.Integer, default=0)  # 0: premium VIP, 1: class, 2: topic
    created_time = db.Column(db.DateTime, default=datetime.now)  # per-row timestamp (callable)
class ClassType(db.Model):
    """Course category (supports one level of nesting via parent_id)."""
    __tablename__ = 'class_types'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))
    parent_id = db.Column(db.Integer, default=0)  # 0 for top-level categories
    sort = db.Column(db.Integer, default=0)
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    desc = db.Column(db.String(255))
    classes = db.relationship('Course', backref='class_item_type')

    def __unicode__(self):
        return '%s' % (self.name, )
class Recommend(db.Model):
    """Course recommendation entry (front-page promotion)."""
    __tablename__ = 'recommend'
    id = db.Column(db.Integer, primary_key=True)
    restype = db.Column(db.Integer, default=0)  # resource type
    resid = db.Column(db.Integer, default=0)  # resource id
    settime = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    sort = db.Column(db.Integer, default=0)
    status = db.Column(db.Integer, default=0)


class ClassDraws(db.Model):
    """Showcase/effect image attached to a course."""
    __tablename__ = "classes_draws"
    id = db.Column(db.Integer, primary_key=True)
    class_id = db.Column(db.Integer)
    img_url = db.Column(db.String(255))  # image URL (original comment was a copy-paste of "course title")
    remark = db.Column(db.String(255))  # remark


class ClassTypeRelationship(db.Model):
    """Link between a course and a third-level category.

    NOTE(review): `id`, `class_id` and `class_type_id` are all marked
    primary_key, yielding a composite PK across all three columns —
    confirm this matches the actual table DDL.
    """
    __tablename__ = "classe_type_relationship"
    id = db.Column(db.Integer, primary_key=True)
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'), primary_key=True)
    class_type_id = db.Column(db.Integer, db.ForeignKey('class_types.id'), primary_key=True)
    classes = db.relationship('Course')
class CourseWare(db.Model):
    """Courseware file (stored on Qiniu object storage)."""
    __tablename__ = 'coursewares'
    id = db.Column(db.Integer, primary_key=True)
    filename = db.Column(db.String(255))  # local file name
    filehash = db.Column(db.String(255))
    filekey = db.Column(db.String(100))  # object key on Qiniu
    filesize = db.Column(db.Integer)  # size in bytes
    bucketname = db.Column(db.String(50))
    domain = db.Column(db.String(255))
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))
    lesson_id = db.Column(db.Integer, db.ForeignKey('lessons.id'))
    created_time = db.Column(db.TIMESTAMP, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    is_free = db.Column(db.Integer, default=0)  # free to download; defaults to the owning Lesson's policy


class CourseWareDownload(db.Model):
    """One courseware download by one user."""
    __tablename__ = 'courseware_download'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    courseware_id = db.Column(db.Integer, db.ForeignKey('coursewares.id'))
    download_time = db.Column(db.TIMESTAMP, default=datetime.now)  # per-row timestamp (callable)
class CourseComment(db.Model):
    """Comment on a course or on a single lesson."""
    __tablename__ = 'classcomments'
    id = db.Column(db.Integer, primary_key=True)
    comment = db.Column(db.Text)  # comment body
    score = db.Column(db.Integer, default=5)  # rating
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))  # course-page comments set only the course id
    lesson_id = db.Column(db.Integer, db.ForeignKey('lessons.id'))  # player-page comments also set the lesson id
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    user = db.relationship('User',
                           backref=db.backref('comments', lazy='dynamic'))


class CourseNote(db.Model):
    """Note a user takes while watching a lesson."""
    __tablename__ = 'classnotes'
    id = db.Column(db.Integer, primary_key=True)
    note = db.Column(db.Text)  # note body
    created_time = db.Column(db.DateTime, default=datetime.now)  # per-row timestamp (callable)
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))
    lesson_id = db.Column(db.Integer, db.ForeignKey('lessons.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    position = db.Column(db.Float)  # playback position the note refers to
    img_url = db.Column(db.String(200))  # video screenshot URL
    is_open = db.Column(db.Integer, default=1)  # 1: public, 0: private
    user = db.relationship('User',
                           backref=db.backref('notes', lazy='dynamic'))
    lesson = db.relationship('Lesson')


class CourseFavorites(db.Model):
    """Course bookmarked by a user."""
    __tablename__ = 'classfavorites'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))
    created_time = db.Column(db.TIMESTAMP, default=datetime.now)  # per-row timestamp (callable)
    clazz = db.relationship('Course')
class Chapter(db.Model):
    """Chapter: an ordered group of lessons within a course."""
    __tablename__ = 'chapters'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255)) # chapter title
    # lessons come back sorted by their in-chapter sort key
    lessons = db.relationship("Lesson",order_by="Lesson.bsort")
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))
    def __unicode__(self):
        # display name used by admin/listing views
        return '%s' % self.title
class Compilation(db.Model):
    """Video compilation (an album grouping several videos)."""
    __tablename__ = 'compilations'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True)
    created_time = db.Column(db.TIMESTAMP, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    videoes = db.relationship('Video')

    def to_Json(self):
        """Dict form of this row.

        NOTE(review): 'created_time' stays a datetime object — not directly
        json.dumps-able; callers must serialise it (e.g. via format_date()).
        """
        return {
            'id': self.id,
            'name': self.name,
            'created_time': self.created_time
        }

    def format_date(self):
        """Return created_time as 'YYYY-mm-dd HH:MM:SS', or '' when unset."""
        if self.created_time is not None:
            return self.created_time.strftime("%Y-%m-%d %H:%M:%S")
        return ''
class Lesson(db.Model):
    """A single lesson inside a chapter."""
    __tablename__ = 'lessons'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))  # lesson title
    desc = db.Column(db.Text)  # lesson description
    created_time = db.Column(db.TIMESTAMP, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    state = db.Column(db.Integer, default=0)  # status
    is_free = db.Column(db.Integer, default=0)  # paid/free flag
    pub_tag = db.Column(db.Integer, default=0)  # "newly updated" marker
    img_url = db.Column(db.String(255))  # cover image URL
    video_id = db.Column(db.Integer, db.ForeignKey('videos.id'))
    chapter_id = db.Column(db.Integer, db.ForeignKey('chapters.id'))  # owning chapter
    bsort = db.Column(db.Integer, default=0)  # sort key within the chapter
    video = db.relationship('Video')

    def __unicode__(self):
        return u'%s' % self.name
class LessonPlay(db.Model):
    """One playback session of a lesson."""
    __tablename__ = 'lesson_play'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))
    lesson_id = db.Column(db.Integer, db.ForeignKey('lessons.id'))
    play_time = db.Column(db.TIMESTAMP, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    start_position = db.Column(db.Integer, default=0)  # playback start offset
    position = db.Column(db.Integer, default=0)  # current playback offset
    lesson_duration = db.Column(db.Integer, default=0)  # lesson length in seconds
    play_duration = db.Column(db.Integer, default=0)  # time actually watched in seconds
    end_time = db.Column(db.TIMESTAMP)  # playback end time
    ip_addr = db.Column(db.String(20))  # client IP during playback
    is_finished = db.Column(db.Boolean, default=False)  # finished when lesson_duration - play_duration < 2
    user = db.relationship('User',
                           backref=db.backref('lesson_study_list', lazy='dynamic'))
    lesson = db.relationship('Lesson')


class LessonStudy(db.Model):
    """Accumulated study record for a lesson."""
    __tablename__ = 'lesson_study'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))
    lesson_id = db.Column(db.Integer, db.ForeignKey('lessons.id'))
    start_time = db.Column(db.TIMESTAMP, default=datetime.now)  # per-row timestamp (callable)
    update_time = db.Column(db.TIMESTAMP)  # last update time
    learn_time = db.Column(db.Integer)  # cumulative study time, in seconds
    status = db.Column(db.Integer, default=1)  # 1: in progress, 2: finished


class CourseStudy(db.Model):
    """Per-user progress through a course."""
    __tablename__ = 'classstudy'
    id = db.Column(db.Integer, primary_key=True)
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    start_time = db.Column(db.TIMESTAMP, default=datetime.now)  # per-row timestamp (callable)
    progress = db.Column(db.Integer, default=0)  # index of the lesson reached (e.g. 5 of 10)
    is_finish = db.Column(db.Boolean, default=False)  # course completed
    clazz = db.relationship('Course')
class Video(db.Model):
    """Uploaded video file (stored on Qiniu object storage)."""
    __tablename__ = 'videos'
    id = db.Column(db.Integer, primary_key=True)
    filename = db.Column(db.String(255))  # local file name
    filehash = db.Column(db.String(255))
    filekey = db.Column(db.String(100))  # object key on Qiniu
    filesize = db.Column(db.Integer)  # size in bytes
    bucketname = db.Column(db.String(50))
    domain = db.Column(db.String(200))
    duration = db.Column(db.Integer)  # length in seconds
    createtime = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    creator = db.Column(db.Integer)  # uploader's user id
    compilation = db.Column(db.Integer, db.ForeignKey('compilations.id'))  # owning compilation

    def __unicode__(self):
        return u'%s' % (self.filename, )
class QQGroup(db.Model):
    """QQ group attached to courses/classes."""
    __tablename__ = 'qqgroup'
    id = db.Column(db.Integer, primary_key=True)
    group_name = db.Column(db.String(255))
    group_num = db.Column(db.String(20))
    group_link = db.Column(db.String(255))
    createtime = db.Column(db.TIMESTAMP, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    desc = db.Column(db.String(255))
    #classes = db.relationship('Class')
class Banner(db.Model):
    """Front-page banner."""
    __tablename__ = 'banner'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True)  # banner title
    file_name = db.Column(db.String(100))  # Qiniu object name, kept so the image file can be deleted
    img_url = db.Column(db.String(255))  # banner image URL
    redirect_url = db.Column(db.String(255))  # click-through URL
    bg_color = db.Column(db.String(10))
    is_blank = db.Column(db.Boolean, default=True)  # open link in a new window
    order_num = db.Column(db.Integer, default=0)  # sort order
    state = db.Column(db.Integer, default=0)  # 0: enabled
class FriendLink(db.Model):
    """Friendly (reciprocal) site link shown in the footer."""
    __tablename__ = 'friendlink'
    id = db.Column(db.Integer, primary_key=True)
    site_name = db.Column(db.String(100))  # site name
    site_url = db.Column(db.String(255))  # site URL
    title = db.Column(db.String(100))  # text displayed for the link
    contact = db.Column(db.String(200))  # contact info
    created_time = db.Column(db.TIMESTAMP, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    state = db.Column(db.Integer, default=0)  # 0: enabled
class PhoneMessage(db.Model):
    """Verification code sent by SMS or email."""
    __tablename__ = 'phonemessages'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    use_for = db.Column(db.String(50))  # purpose, e.g. 'buy' for purchase verification
    phone = db.Column(db.String(15))  # phone number
    code = db.Column(db.String(10))  # verification code
    message = db.Column(db.String(200))
    send_time = db.Column(db.TIMESTAMP, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    send_type = db.Column(db.Integer, default=0)  # 0: SMS, 1: email
    email = db.Column(db.String(50))  # email address; set only when send_type == 1
class SocialUser(db.Model):
    """Third-party (OAuth) account bound to a local user."""
    __tablename__ = 'social_user'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id')) # local user
    open_id=db.Column(db.String(64))
    type=db.Column(db.String(20)) # account type: qq, weibo, wechat
    # NOTE(review): 64 chars may truncate some providers' access tokens — confirm
    access_token=db.Column(db.String(64))
    expire_in = db.Column(db.String(20)) # token expiry value
    nickname=db.Column(db.String(20))
    head_url = db.Column(db.String(255)) # avatar URL
    gender=db.Column(db.String(10)) # gender
    status = db.Column(db.Integer,default=1) # 1: bound (normal), 0: unbound
class SociaUserModel(json.JSONEncoder):
    """Lightweight value object holding a social (OAuth) profile.

    NOTE(review): inheriting json.JSONEncoder without overriding default()
    looks accidental; the base class is kept so existing callers keep
    working — confirm whether it can be dropped.
    """
    # class-level defaults, mirrored by the constructor arguments
    nickname = ''
    head_url = ''
    channel = ''
    gender = ''

    def __init__(self, nickname, head_url, channel, gender):
        for attr, value in (("nickname", nickname), ("head_url", head_url),
                            ("channel", channel), ("gender", gender)):
            setattr(self, attr, value)
class UserMsg(db.Model):
    """User-to-user or system notification message."""
    __tablename__ = 'user_msg'
    id = db.Column(db.Integer, primary_key=True)
    conversation_id = db.Column(db.String(64), unique=True)  # built from from_user_id + to_user_id
    title = db.Column(db.String(100))  # subject
    msg = db.Column(db.String(500))  # message body
    send_time = db.Column(db.TIMESTAMP, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    from_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))  # sender
    to_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))  # recipient
    orientation = db.Column(db.Integer, default=0)  # 0: received, 1: sent
    msg_type = db.Column(db.Integer, default=0)  # 0: system message, 1: user message
    is_read = db.Column(db.Integer, default=0)
    read_time = db.Column(db.TIMESTAMP)  # when the message was read
    # two FKs point at users, so each relationship must be disambiguated
    to_user = db.relationship("User", foreign_keys='[UserMsg.to_user_id]')
    from_user = db.relationship("User", foreign_keys=from_user_id, primaryjoin=from_user_id == User.id)
class SysNotification(db.Model):
    """Site-wide announcement shown until end_time."""
    __tablename__ = 'sys_notification'
    id = db.Column(db.Integer, primary_key=True)
    msg = db.Column(db.String(255))  # announcement text
    create_time = db.Column(db.TIMESTAMP, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    end_time = db.Column(db.TIMESTAMP)  # stop displaying after this time
class Coupon(db.Model):
    """Coupon / discount code."""
    __tablename__ = 'coupon'
    id = db.Column(db.Integer, primary_key=True)
    val = db.Column(db.Integer) # face value
    code =db.Column(db.String(15)) # code string
    # correctly passes the callable, so the timestamp is taken per INSERT
    created_time = db.Column(db.DateTime, default=datetime.now)
    expiry_time = db.Column(db.DateTime) # expires at
    state = db.Column(db.Integer,default=1) # 1: unused, 2: claimed, 3: used
    payback = db.Column(db.Integer,default=0) # cash-back amount
    user_for_type = db.Column(db.Integer) # 0: generic discount code, 3: course
    user_for_id = db.Column(db.Integer) # id of the product it applies to
    use_for_title = db.Column(db.String(50)) # title of the product it applies to (original comment was a copy-paste of "product ID")
    giver = db.Column(db.Integer) # giver's user id; 0 when issued by the system
    owner = db.Column(db.Integer) # id of the user holding this coupon
    allow_give = db.Column(db.Boolean) # whether it may be gifted to another user
    get_time = db.Column(db.DateTime) # claimed at
    use_time = db.Column(db.DateTime) # used at
    remark = db.Column(db.String(50)) # note
    #type = db.Column(db.Integer,default=1) # 1: coupon, 2: red packet, 3: cash voucher
class ClassTypeEncoder(json.JSONEncoder):
    """JSON encoder that serialises ClassType rows as {"id", "name"} dicts."""

    def default(self, obj):
        # Only ClassType gets special treatment; anything else defers to the
        # base implementation (which raises TypeError, as usual).
        if not isinstance(obj, ClassType):
            return json.JSONEncoder.default(self, obj)
        return {"id": obj.id, "name": obj.name}
class ForumThreadRelation(db.Model):
    """Link between a forum board and a thread."""
    __tablename__ = 'forum_thread_relation'
    id = db.Column(db.Integer, primary_key=True)
    forum_id = db.Column(db.Integer, db.ForeignKey('forum.id'), primary_key=True)
    thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'), primary_key=True)
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
class Forum(db.Model):
    """Forum board."""
    __tablename__ = 'forum'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(200))  # title
    intro = db.Column(db.String(500))  # short description
    logo_url = db.Column(db.String(200))  # icon URL
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    # admin_id = db.Column(db.Integer,db.ForeignKey('users.id'))
    type = db.Column(db.Integer, default=1)  # board type: 1 normal, 2 event, 3 course board
    course_type = db.Column(db.Integer)  # 1: class, 3: course; only meaningful when type == 3
    is_online = db.Column(db.Integer, default=0)  # 0: offline, 1: online
    order_num = db.Column(db.Integer)  # sort order
    thread_count = db.Column(db.Integer)  # total number of threads
    is_thread_check = db.Column(db.Integer)  # whether new threads require moderation
    is_display = db.Column(db.Integer)  # visibility flag
    admins = db.relationship('ForumAdmin')

    def is_normal_forum(self):
        """True for a plain discussion board (type == 1)."""
        return self.type == 1

    def is_course_forum(self):
        """True for a course-bound board (type == 3)."""
        return self.type == 3
class ForumAdmin(db.Model):
    """Moderator assignment for a forum board."""
    __tablename__ = 'forum_admin'
    id = db.Column(db.Integer, primary_key=True)
    forum_id = db.Column(db.Integer, db.ForeignKey('forum.id')) # board id
    user_id = db.Column(db.Integer, db.ForeignKey('users.id')) # moderator's user id
    type =db.Column(db.Integer) # 1: chief moderator, 0: deputy moderator
    status = db.Column(db.Integer) # 0: rights revoked, 1: normal
    user = db.relationship('User')
class Thread(db.Model):
    """Forum thread / blog post / question."""
    __tablename__ = 'thread'
    # __searchable__ = ['title', 'content']
    # __analyzer__=ChineseAnalyzer()
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100))  # title
    content = db.Column(db.Text)  # body
    brief = db.Column(db.String(200))  # summary
    imgs = db.Column(db.String(2000))  # comma-separated image links
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))  # course-bound thread; 0 otherwise
    lesson_id = db.Column(db.Integer, db.ForeignKey('lessons.id'))  # lesson-bound thread; 0 otherwise
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))  # author
    ip_address = db.Column(db.String(20))  # poster's IP address
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    last_update_time = db.Column(db.DateTime, default=datetime.now)  # last edited at
    last_comment_time = db.Column(db.DateTime)  # time of the latest reply
    read_count = db.Column(db.Integer, default=0)  # total views
    reply_count = db.Column(db.Integer, default=0)  # total replies
    like_count = db.Column(db.Integer, default=0)  # total likes
    is_original = db.Column(db.Integer)  # original content: 0 no, 1 yes
    is_top = db.Column(db.Integer, default=0)  # pinned flag
    is_hot = db.Column(db.Integer, default=0)  # hot flag
    thread_type = db.Column(db.Integer, default=0)  # 0: blog, 1: thread, 2: question
    status = db.Column(db.Integer, default=0)  # 0: pending review, 1: approved, 2: rejected, -1: deleted
    user = db.relationship('User')

    def first_img(self):
        """Return the first image link, or None when the post has no images.

        Fix: an empty string used to be returned as a "link" because
        ''.split(',') yields ['']; empty/None imgs now both yield None.
        """
        if self.imgs:
            return self.imgs.split(",")[0]
        return None
class ThreadLike(db.Model):
    """A user's "like" on a thread."""
    __tablename__ = 'thread_like'
    id = db.Column(db.Integer, primary_key=True)
    thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    ip_address = db.Column(db.String(20))  # liker's IP address
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)


class ThreadRequest(db.Model):
    """Submission request asking moderators to feature a thread."""
    __tablename__ = 'thread_request'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100))  # title
    url = db.Column(db.String(200))  # URL
    thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'))
    forum_id = db.Column(db.Integer, db.ForeignKey('forum.id'))  # board id
    request_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))  # requester
    request_time = db.Column(db.DateTime, default=datetime.now)  # per-row timestamp (callable)
    hand_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))  # handling moderator (original comment was a copy-paste of "requester")
    hand_time = db.Column(db.DateTime)  # Fix: this attribute was assigned twice; duplicate removed
    msg = db.Column(db.String(500))  # feedback message from the handler
    status = db.Column(db.Integer, default=0)  # 0: pending, 1: approved, 2: rejected


class ThreadPost(db.Model):
    """Reply inside a thread."""
    __tablename__ = 'thread_post'
    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.Text)  # reply body
    thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'))
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))  # course-bound reply; 0 otherwise
    lesson_id = db.Column(db.Integer, db.ForeignKey('lessons.id'))  # lesson-bound reply; 0 otherwise
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))  # replier
    ip_address = db.Column(db.String(20))  # replier's IP address
    created_time = db.Column(db.DateTime, default=datetime.now)  # per-row timestamp (callable)
    status = db.Column(db.Integer, default=1)  # 1: normal, 0: failed moderation
    user = db.relationship('User')


class ThreadAttachment(db.Model):
    """File attached to a thread."""
    __tablename__ = 'thread_attachment'
    id = db.Column(db.Integer, primary_key=True)
    thread_id = db.Column(db.Integer, db.ForeignKey('thread.id'))
    file_name = db.Column(db.String(200))  # attachment name
    file_size = db.Column(db.Integer)  # size in KB
    file_key = db.Column(db.String(100))
    dl_url = db.Column(db.String(200))  # download URL
    created_time = db.Column(db.DateTime, default=datetime.now)  # per-row timestamp (callable)
class TrainingClass(db.Model):
    """Paid training program."""
    __tablename__ = 'training_class'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100))  # title
    intro = db.Column(db.String(500))  # short description
    logo_url = db.Column(db.String(200))  # cover image URL
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    price = db.Column(db.Float)  # price
    learn_days = db.Column(db.Integer)  # study period in days
    qqgroup_id = db.Column(db.Integer, db.ForeignKey('qqgroup.id'))
    total_course = db.Column(db.Integer)  # total number of courses
    total_lesson = db.Column(db.Integer)  # total number of lessons
    total_students = db.Column(db.Integer)  # total number of learners
    status = db.Column(db.Integer)  # 0: unpublished, 1: normal
    type = db.Column(db.String(10))  # program track: android, ios, html5, ...
    is_open = db.Column(db.Integer)  # whether open for study
    page_tilte = db.Column(db.String(100))  # SEO page title (attribute name is a typo for "title" but maps to an existing DB column — kept for compatibility)
    page_keywords = db.Column(db.String(500))
    page_description = db.Column(db.String(500))
    qqgroup = db.relationship('QQGroup',
                              backref=db.backref('trainingclass', lazy='dynamic'))
    modules = db.relationship('ClassModule', backref='training_class')
class TrainApply(db.Model):
    """Enrollment application for a training program."""
    __tablename__ = 'train_apply'
    id = db.Column(db.Integer, primary_key=True)
    class_id = db.Column(db.Integer)
    user_id = db.Column(db.Integer)  # applicant's user id
    username = db.Column(db.String(255))  # user name
    realname = db.Column(db.String(255))  # real name
    mobil = db.Column(db.String(12))  # mobile number
    qq = db.Column(db.String(15))  # QQ number
    email = db.Column(db.String(50))  # email address
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    company = db.Column(db.String(255))  # school or employer
    workyear = db.Column(db.String(255))  # years of experience
    remark = db.Column(db.String(500))  # applicant's message
    status = db.Column(db.Integer, default=0)  # 0: pending, 1: approved, 2: rejected
class ClassModule(db.Model):
    """Module inside a training program."""
    __tablename__ = 'class_module'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100))  # title
    intro = db.Column(db.String(200))  # short description
    target = db.Column(db.String(200))  # learning goal
    class_id = db.Column(db.Integer, db.ForeignKey('training_class.id'))
    learn_days = db.Column(db.Integer)  # study period in days
    total_course = db.Column(db.Integer)  # total number of courses
    total_lesson = db.Column(db.Integer)  # total number of lessons
    sort_num = db.Column(db.Integer)  # sort order
    price = db.Column(db.Float)  # price
    coin = db.Column(db.Integer)  # virtual-currency price
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    status = db.Column(db.Integer)  # 1: normal
    tasks = db.relationship('ClassTask', backref='class_module')
# Association table for the ClassTask <-> Course many-to-many relationship.
task_course_relation = db.Table('task_course_relation',
    db.Column('task_id', db.Integer, db.ForeignKey('class_task.id')),
    db.Column('course_id', db.Integer, db.ForeignKey('classes.id'))
)
class ClassTask(db.Model):
    """Study task inside a training module."""
    __tablename__ = 'class_task'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100))  # title
    intro = db.Column(db.String(200))  # short description
    class_id = db.Column(db.Integer, db.ForeignKey('training_class.id'))
    module_id = db.Column(db.Integer, db.ForeignKey('class_module.id'))
    price = db.Column(db.Float)  # price
    coin = db.Column(db.Integer)  # virtual-currency price
    pay_cur_type = db.Column(db.Integer)  # 0: coin only, 1: RMB only, 2: either
    total_course = db.Column(db.Integer)  # total number of courses
    total_lesson = db.Column(db.Integer)  # total number of lessons
    min_study_days = db.Column(db.Integer)  # minimum study time
    max_study_days = db.Column(db.Integer)  # maximum study time
    sort_num = db.Column(db.Integer)  # sort order
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    return_course_credit = db.Column(db.Integer, default=0)  # credits awarded on completion
    status = db.Column(db.Integer)  # 0: closed, 1: normal
    task_type = db.Column(db.Integer)  # 1: regular, 2: extension, 3: project task
    courses = db.relationship('Course',
                              secondary=task_course_relation,
                              primaryjoin=task_course_relation.c.task_id == id,
                              backref=db.backref('tasks', lazy='dynamic'),
                              lazy='dynamic')
class TrainUser(db.Model):
    """A user enrolled in a training program."""
    __tablename__ = 'train_user'
    id = db.Column(db.Integer, primary_key=True)
    class_id = db.Column(db.Integer, db.ForeignKey('training_class.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    ranking = db.Column(db.Integer, default=0)  # rank
    course_credit = db.Column(db.Integer, default=0)  # credits earned
    current_task_id = db.Column(db.Integer, db.ForeignKey('user_train_study_record.id'))  # current task record
    status = db.Column(db.Integer)  # 1: access granted, 0: access revoked
    current_task = db.relationship('UserTrainStudyRecord')
    user = db.relationship("User")
class UserTrainStudyRecord(db.Model):
    """Per-user study progress record for a program, module or task."""
    __tablename__ = 'user_train_study_record'
    id = db.Column(db.Integer, primary_key=True)
    class_id = db.Column(db.Integer,db.ForeignKey('training_class.id'))
    module_id = db.Column(db.Integer,db.ForeignKey('class_module.id'))
    task_id = db.Column(db.Integer, db.ForeignKey('class_task.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    should_start_time = db.Column(db.DateTime) # planned start
    should_finish_time = db.Column(db.DateTime) # planned finish
    reality_start_time = db.Column(db.DateTime) # actual start
    reality_finish_time = db.Column(db.DateTime) # actual finish
    type = db.Column(db.Integer) # record scope: 1: program, 2: module, 3: task
    status = db.Column(db.Integer) # 0: not started, 1: studying, 2: finished, 3: finished late
    module = db.relationship('ClassModule')
    task = db.relationship('ClassTask')
    claz = db.relationship('TrainingClass')
class InviteRecord(db.Model):
    """Referral record: which user invited which new registrant."""
    __tablename__ = 'invite_record'
    id = db.Column(db.Integer, primary_key=True)
    inviter_id = db.Column(db.Integer, db.ForeignKey('users.id'))  # inviter
    register_id = db.Column(db.Integer, db.ForeignKey('users.id'))  # the newly registered user
    reg_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    channel = db.Column(db.String(50))  # acquisition channel
    coin = db.Column(db.Integer)  # coins granted to the inviter
    vip_days = db.Column(db.Integer)  # VIP days granted to the inviter
    register = db.relationship("User", foreign_keys='[InviteRecord.register_id]')
class CourseCouponGoods(db.Model):
    """Shop item that sells a discount code for a course."""
    __tablename__ = 'course_coupon_goods'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100)) # title
    intro = db.Column(db.String(200)) # short description
    logo_url=db.Column(db.String(200))
    class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))
    val = db.Column(db.Integer,default=0) # face value of the code
    price = db.Column(db.Integer,default=0) # price
    expiry_date = db.Column(db.Integer) # validity period, in days
    stock = db.Column(db.Integer,default=0) # stock quantity
    is_virtual=db.Column(db.Integer,default=1) # virtual item flag (e.g. discount codes)
    content = db.Column(db.Text) # detail page content
    status = db.Column(db.Integer) # status
class GoodsOrder(db.Model):
    """Order for a shop goods item (physical or virtual)."""
    __tablename__ = 'goods_order'
    id = db.Column(db.Integer, primary_key=True)
    order_num = db.Column(db.String(50), unique=True)  # serial number
    title = db.Column(db.String(200))  # order title
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    goods_id = db.Column(db.Integer)
    total_price = db.Column(db.Float)  # order total
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    pay_type = db.Column(db.String(10))  # payment method: 'coin' (virtual currency) or 'rmb'
    receiver = db.Column(db.String(20))  # consignee name
    addr = db.Column(db.String(100))  # shipping address
    mobi = db.Column(db.String(11))  # mobile number
    user = db.relationship('User')
class PageMenu(db.Model):
    """Configurable site navigation menu entry."""
    __tablename__ = 'page_menu'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(200))  # label
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    type = db.Column(db.Integer)  # menu type
    sort = db.Column(db.Integer)  # sort order
    redirect_url = db.Column(db.String(200))  # target URL
################# Activity / events module ##########################
class ActivityPost(db.Model):
    """Post submitted as part of a site activity/event."""
    __tablename__ = 'activity_post'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('thread_post.id'))
    created_time = db.Column(db.DateTime, default=datetime.now)  # Fix: callable default, evaluated per INSERT (was frozen at import time)
    user = db.relationship('User')
### "Double 11" (November 11) sale event
class ActivityDoubleEleven(db.Model):
    """Course discounted for the "Double 11" (Nov 11) sale."""
    __tablename__ = 'activity_11'
    id = db.Column(db.Integer, primary_key=True)
    course_id = db.Column(db.Integer, db.ForeignKey('classes.id'))
    sort_num = db.Column(db.Integer)  # sort order
    type = db.Column(db.Integer)
    original_price = db.Column(db.Integer)  # price before discount
    current_price = db.Column(db.Integer)  # sale price
    course = db.relationship("Course")  # Fix: stray trailing '|' artifact removed from this line
import numpy as np
import matplotlib.pylab as mp
import scipy as sp
from scipy.integrate import odeint,dblquad
import time
from colorsys import hsv_to_rgb
from mpl_toolkits.mplot3d import Axes3D
# anim
from matplotlib import pyplot as plt
from matplotlib import animation
import twod
import fourier_2d as f2d
#import np.sin as sin
#import np.cos as cos
# Short aliases so the formulas below read like the mathematics they implement.
sin = np.sin
cos = np.cos
pi = np.pi
# NOTE(review): `sp.special` relies on scipy.special having been imported as a
# side effect of other scipy imports — prefer an explicit `import scipy.special`.
erf = sp.special.erf
exp = np.exp
sqrt = np.sqrt
def K(X, Y, sige, sigi, Kamp):
    """Exact 2-D Mexican-hat (difference-of-Gaussians) kernel.

    X, Y: (x, y) coordinate arrays/points; sige, sigi: excitatory and
    inhibitory Gaussian widths; Kamp: overall amplitude. Returns the
    kernel evaluated element-wise at (X, Y).
    """
    r2 = X**2 + Y**2
    excitatory = np.exp(-r2 / sige**2) / (np.pi * sige**2)
    inhibitory = np.exp(-r2 / sigi**2) / (np.pi * sigi**2)
    return Kamp * (excitatory - inhibitory)
def Khat(n,m,se,si):
    """
    Fourier transform of Mexican hat kernel.

    Closed-form (n, m) Fourier coefficient of the difference-of-Gaussians
    kernel on the periodic domain, expressed through error functions of
    complex argument. Returns a complex value (imaginary part ~0 for a
    real, even kernel).
    """
    # normalisations of the excitatory / inhibitory Gaussians
    A=1./(pi*se**2);B=1./(pi*si**2)
    #return (A*pi/sige**2)*np.exp(-(pi*sige)**2 * (n**2 + m**2))-\
    #    (B*pi/sigi**2)*np.exp(-(pi*sigi)**2 * (n**2 + m**2))
    # shared Gaussian prefactor of both terms
    c1 = .25*exp(-.25*(n**2 + m**2)*(se**2 + si**2))
    # excitatory term: amplitude and the two complex-erf window factors
    d1 = (se**2)*A*pi*exp(.25*(m**2 + n**2)*si**2)
    d11 = erf(pi/se - m*se*1j/2.) + erf(pi/se + m*se*1j/2.)
    d12 = erf(pi/se - n*se*1j/2.) + erf(pi/se + n*se*1j/2.)
    # inhibitory term: same structure with the roles of se/si swapped
    e1 = (si**2)*B*pi*exp(.25*(m**2 + n**2)*se**2)
    e11 = erf(pi/si - m*si*1j/2.) + erf(pi/si + m*si*1j/2.)
    e12 = erf(pi/si - n*si*1j/2.) + erf(pi/si + n*si*1j/2.)
    return c1*(d1*d11*d12 - e1*e11*e12)
    #if n==0 and m==0:
    #    return A*pi*sige**2 * erf(pi/sige)**2 - B*pi*sigi**2 * erf(pi/sigi)**2
    #else:
    #    pass
def Kapprox(X, Y, sige, sigi):
    """Truncated Fourier (cosine) approximation of the Mexican-hat kernel."""
    a_00 = Khat(0, 0, sige, sigi)
    a_10 = Khat(1, 0, sige, sigi)
    a_01 = Khat(0, 1, sige, sigi)
    a_11 = Khat(1, 1, sige, sigi)
    # second-order coefficients deliberately zeroed, as in the original fit
    a_20 = 0.  # Khat(2,0,sige,sigi)
    a_02 = 0.  # Khat(0,2,sige,sigi)
    result = a_00 + a_10 * cos(X) + a_01 * cos(Y)
    result = result + a_11 * cos(X) * cos(Y)
    return result + a_20 * cos(2 * X) + a_02 * cos(2 * Y)
def K2(X, Y, sig, Kamp):
    """2-D Ricker ("Mexican hat") wavelet scaled by Kamp.

    Implements Kamp * (1/(pi*sig^4)) * (1 - r^2/(2*sig^2)) * exp(-r^2/(2*sig^2))
    with r^2 = X^2 + Y^2 (the standard Ricker wavelet form).

    Fix: the original grouped the polynomial term as
    (1 - (X**2 + Y**2/(2*sig**2))), dividing only Y**2 by 2*sig**2.
    """
    r2 = X**2 + Y**2
    return Kamp * (1.0 / (np.pi * sig**4)) * (1.0 - r2 / (2.0 * sig**2)) * np.exp(-r2 / (2.0 * sig**2))
def K_diff(x, se, si, d=False):
    """1-D difference-of-Gaussians kernel.

    Returns the DoG value at x for widths se (excitatory) and si
    (inhibitory). The d=True branch (presumably the derivative) is not
    implemented and returns None, exactly as in the original.
    """
    if d:
        # derivative branch intentionally left unimplemented
        return None
    gauss_e = np.exp(-(x / se)**2) / (np.sqrt(np.pi) * se)
    gauss_i = np.exp(-(x / si)**2) / (np.sqrt(np.pi) * si)
    return gauss_e - gauss_i
def main():
    """Estimate and visualise Fourier coefficients of the 2-D Mexican-hat kernel.

    With dim == 1 a 1-D difference-of-Gaussians profile is plotted; with
    dim == 2 the kernel table from the `twod` module is Fourier-analysed
    and truncated reconstructions are shown as 3-D surfaces.

    Fixes: Python 2 `print` statements converted to `print()` calls, and the
    removed `figure.gca(projection='3d')` form replaced with
    `figure.add_subplot(projection='3d')` (matplotlib >= 3.6).
    """
    ## parameter estimation for mexican hat
    sige = 2; sigi = 3
    Kamp = 10
    dim = 2  # dimension (1 or 2)
    grid = 51
    if dim == 1:
        X = np.linspace(-pi, pi, grid)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(X, Kamp*K_diff(X, sige, sigi))
        #ax.plot(X,Kamp*K_ricker(X,sige,sigi))
    elif dim == 2:
        # centre the kernel table: it is stored with the origin at index [0, 0]
        kernel = np.roll(np.roll(twod.Kernel().Ktable, int(65/2), axis=-1), int(65/2), axis=-2)
        nx, ny = np.shape(kernel)
        X, Y = np.meshgrid(np.linspace(-pi, pi, nx),
                           np.linspace(-pi, pi, ny))
        f_coeffs_kernel = np.fft.fft2(kernel)
        # keep only real coefficients above the threshold (kernel is even/real)
        c, c_1d, idx = f2d.extract_coeff_all(np.real(f_coeffs_kernel), threshold=.5, return_1d=True)
        for i in range(len(c_1d)):
            # LaTeX table row: index & normalised coefficient & (n, m) index
            print(i, '&', c_1d[i]/((2*pi)**2), '&', idx[i], '\\\\')
        print('REMEMBER TO ROLL BEFORE USING THESE FOURIER COEFFICIENTS')
        fig1 = plt.figure()
        ax1 = fig1.add_subplot(projection='3d')
        # manual inverse DFT of the retained coefficients (65*65 undoes fft2's scaling)
        approx_m = np.real(f2d.idft2(c, idx))*(65*65)
        ax1.set_title('approx kernel manual')
        ax1.plot_surface(X, Y, approx_m)
        fig2 = plt.figure()
        ax2 = fig2.add_subplot(projection='3d')
        # hand-tuned low-order cosine approximation for comparison
        ax2.plot_surface(X, Y, -0.4739 + 2*(-0.1916)*cos(X) + 2*(-0.1916)*cos(Y) + 0.110515*cos(X)*cos(Y))
        ax2.set_title('another approx kernel manual')
        plt.show()
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
|
from datetime import datetime, timedelta
import oembed
from urllib2 import HTTPError
from django.contrib.admin import helpers
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.paginator import Paginator
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from tumblelog.managers import PostManager
from tumblelog.mixins import PostMetaMixin
from tumblelog.settings import OEMBED_DEFAULT_CACHE_AGE, TEXTFIELD_HELP_TEXT, \
USE_TAGGIT
class TumblelogMeta(object):
    """
    A special Meta class for BasePostType subclasses; all properties defined
    herein are ultimately added to BasePostType._tumblelog_meta

    The attributes mirror django.contrib.admin.ModelAdmin options so each
    post type can customise its admin presentation.  An optional declared
    class and/or keyword arguments override the defaults below.
    """
    # Options that default to "unset".
    raw_id_fields = None
    fields = None
    exclude = None
    fieldsets = None
    form = None
    filter_vertical = None
    filter_horizontal = None
    radio_fields = None
    prepopulated_fields = None
    formfield_overrides = None
    readonly_fields = None
    ordering = None
    # Options with concrete ModelAdmin-style defaults.  (The original source
    # assigned each of the following twice -- first None, then its real
    # default; the dead None assignments have been removed.)
    list_display = ('__str__',)
    list_display_links = ()
    list_filter = ()
    list_select_related = False
    list_per_page = 100
    list_max_show_all = 200
    list_editable = ()
    search_fields = ()
    date_hierarchy = None
    save_as = False
    save_on_top = False
    paginator = Paginator
    inlines = []
    # Admin template overrides.
    add_form_template = None
    change_form_template = None
    change_list_template = None
    delete_confirmation_template = None
    delete_selected_confirmation_template = None
    object_history_template = None
    # Admin actions.
    actions = []
    action_form = helpers.ActionForm
    actions_on_top = True
    actions_on_bottom = False
    actions_selection_counter = True

    def __init__(self, opts, **kwargs):
        """Copies attributes from the declared class *opts* (if any), then
        any keyword overrides, onto this instance."""
        if opts:
            opts = opts.__dict__.items()
        else:
            opts = []
        opts.extend(kwargs.items())
        for key, value in opts:
            setattr(self, key, value)

    def __iter__(self):
        # Iterate (name, value) pairs of everything set on the instance.
        return iter(self.__dict__.items())
class Post(PostMetaMixin, models.Model):
    """
    A generic post model, consisting of a single generic foreign key and a set
    of fields commonly used to look up posts. This is intended to be used to
    create aggregate querysets of all subclasses of BasePostType.
    """
    # Generic foreign key (content type + object id) to the concrete
    # post-type instance.
    post_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    # NOTE(review): 'fields' is an unusual name for a GenericForeignKey
    # accessor; it is relied on below (self.fields.title etc.).
    fields = generic.GenericForeignKey('post_type', 'object_id')
    author = models.ForeignKey(User, blank=True, null=True)
    slug = models.SlugField(_('Slug'),
        max_length=64,
        help_text=_('Used to construct the post\'s URL'),
        unique=True,
    )
    objects = PostManager()

    class Meta:
        app_label = 'tumblelog'
        # date_published presumably comes from PostMetaMixin -- confirm.
        ordering = ['-date_published']
        permissions = (
            ('change_author', 'Can change the author of a post'),
            ('edit_others_posts', 'Can edit others\' posts'),
        )

    def __unicode__(self):
        # e.g. "My title (PhotoPost)"
        return '%s (%s)' % (self.fields.title, self.fields.__class__.__name__,)

    @models.permalink
    def get_absolute_url(self):
        # Resolved through the 'tumblelog:detail' URL pattern.
        return ('tumblelog:detail', [], {'slug': self.fields.slug})

    @property
    def post_type_name(self):
        """Slugified class name of the concrete post type, or None when the
        generic relation is unset."""
        if self.fields:
            return slugify(self.fields.__class__.__name__)
        return None
class PostTypeMetaclass(models.base.ModelBase):
    """
    Metaclass for BasePostType models.
    """
    def __new__(cls, name, bases, attrs):
        """
        Pops the inner ``TumblelogMeta`` declaration (if any) off the class
        attributes, wraps it in a TumblelogMeta instance, and exposes it as
        obj._tumblelog_meta on any BasePostType subclass.
        """
        declared_meta = attrs.pop('TumblelogMeta', None)
        attrs['_tumblelog_meta'] = TumblelogMeta(declared_meta)

        # Public accessor so templates can reach the meta object.
        def get_tumblelog_meta(self):
            return self._tumblelog_meta
        attrs['tumblelog_meta'] = get_tumblelog_meta

        return super(PostTypeMetaclass, cls).__new__(cls, name, bases, attrs)
class BasePostType(PostMetaMixin, models.Model):
    """
    Abstract base class whose subclasses carry the constituent fields of each
    post type.
    """
    title = models.CharField(_('Title'), max_length=256)
    author = models.ForeignKey(User, blank=True, null=True)
    slug = models.SlugField(_('Slug'),
        max_length=64,
        help_text=_('Used to construct the post\'s URL'),
        unique=True
    )
    # Reverse generic relation to the aggregate Post row kept in sync by save().
    post = generic.GenericRelation(Post, content_type_field='post_type', \
        object_id_field='object_id')
    meta_description = models.TextField(_('Meta Description'),
        blank=True,
        null=True,
        editable=True,
        help_text=_('Recommended length: 150-160 characters'),
    )
    # Python 2 metaclass hook; installs _tumblelog_meta (see PostTypeMetaclass).
    __metaclass__ = PostTypeMetaclass

    class Meta:
        abstract = True
        ordering = ['-date_published']

    def __unicode__(self):
        return self.title

    def get_absolute_url(self):
        # Delegate to the mirror Post object created in save().
        return self.post.all()[0].get_absolute_url()

    def clean_fields(self, exclude):
        """
        Ensures that multiple posts do not share a slug.
        """
        super(BasePostType, self).clean_fields(exclude)
        errors = {}
        matching_post = None
        own_post = None
        SLUG_EXISTS = [_('A post with this slug already exists.')]
        try:
            matching_post = Post.objects.get(slug=self.slug)
        except Post.DoesNotExist:
            pass
        else:
            try:
                own_post = self.post.all()[0]
            except IndexError:
                # No mirror Post yet (unsaved object): any match is a collision.
                if matching_post:
                    errors['slug'] = SLUG_EXISTS
            else:
                # Saved object: only a *different* post with this slug collides.
                if matching_post != own_post:
                    errors['slug'] = SLUG_EXISTS
        if errors:
            raise ValidationError(errors)

    def save(self, *args, **kwargs):
        """
        Overrides save method to either creates or updates the correspondant
        Post object when object is saved.
        """
        super(BasePostType, self).save(*args, **kwargs)
        content_type = ContentType.objects.get_for_model(self)
        post, created = Post.objects.get_or_create(
            post_type=content_type,
            object_id=self.id
        )
        # Mirror the common lookup fields onto the aggregate Post row.
        # (status/date_* presumably come from PostMetaMixin -- confirm.)
        post.status = self.status
        post.date_added = self.date_added
        post.date_modified = self.date_modified
        post.date_published = self.date_published
        post.slug = self.slug
        post.author = self.author
        post.save()

    @property
    def post_template(self):
        # e.g. 'tumblelog/post/photopost.html'
        return 'tumblelog/post/%s.html' % slugify(self.__class__.__name__)

    @property
    def rss_template(self):
        # Try an RSS-specific template first, fall back to the post template.
        return [
            'tumblelog/rss/%s.html' % slugify(self.__class__.__name__),
            self.post_template,
        ]
# Add the django-taggit manager, if taggit is installed
# (contribute_to_class wires the manager onto BasePostType under the name
# 'tags'; the import happens only when USE_TAGGIT is enabled, so taggit is
# not required otherwise).
if USE_TAGGIT:
    from taggit.managers import TaggableManager
    taggit_manager = TaggableManager()
    taggit_manager.contribute_to_class(BasePostType, 'tags')
class BaseOembedPostType(BasePostType):
    """
    Abstract post type base classes whose subclasses retrieve data from an
    oEmbed endpoint.
    """
    caption = models.TextField(_('Caption'),
        blank=True,
        null=True,
        help_text=TEXTFIELD_HELP_TEXT
    )
    # Metadata fields populated from the oEmbed response.
    version = models.CharField(_('oEmbed Version'), max_length=3, null=True, \
        blank=True, editable=True)
    provider_name = models.CharField(_('oEmbed Provider Name'), \
        max_length=128, blank=True, null=True, editable=True)
    provider_url = models.CharField(_('oEmbed Provider URL'), max_length=512, \
        blank=True, null=True, editable=True)
    # Seconds before retrieved oEmbed data is considered stale.
    cache_age = models.IntegerField(_('Cache Age'), \
        default=OEMBED_DEFAULT_CACHE_AGE)
    date_updated = models.DateTimeField(_('Last Retrieved'), null=True, \
        blank=True, editable=True)
    # Maps oEmbed response keys to model fields; each entry is either a plain
    # name (key == field) or a (response_key, field_name) pair.
    oembed_map = (
        'version',
        'provider_name',
        'provider_url',
    )
    oembed_endpoint = None  # subclasses set the provider's endpoint URL
    oembed_schema = None    # subclasses set the URL scheme pattern(s)

    class Meta:
        abstract = True

    def __init__(self, *args, **kwargs):
        super(BaseOembedPostType, self).__init__(*args, **kwargs)
        # Refresh stale oEmbed data when a persisted instance is instantiated.
        # NOTE(review): assumes date_updated is set whenever pk is -- confirm.
        if self.pk:
            expiry = timedelta(seconds=self.cache_age) + self.date_updated
            if datetime.now() > expiry:
                self.oembed_update()

    def oembed_consumer(self):
        """Builds an OEmbedConsumer wired to this post type's endpoint."""
        consumer = oembed.OEmbedConsumer()
        endpoint = oembed.OEmbedEndpoint(
            self.oembed_endpoint,
            self.oembed_schema,
        )
        consumer.addEndpoint(endpoint)
        return consumer

    @property
    def oembed_resource(self):
        # Subclasses return the URL of the resource to embed.
        return None

    @property
    def oembed_endpoint_params(self):
        # Extra query parameters for the endpoint; none by default.
        return {}

    def oembed_update(self):
        """Fetches fresh oEmbed data and copies it onto the model fields."""
        self.date_updated = datetime.now()
        response = self.oembed_retrieve()
        self.oembed_map_values(response)

    def oembed_retrieve(self, suppress_http_errors=True):
        """Queries the endpoint; returns the oEmbed response.

        NOTE(review): when an HTTPError is suppressed this returns None,
        which oembed_map_values will then subscript -- confirm intended.
        """
        consumer = self.oembed_consumer()
        try:
            return consumer.embed(self.oembed_resource, 'json', \
                **self.oembed_endpoint_params)
        except HTTPError, e:
            if not suppress_http_errors:
                raise e

    def oembed_map_values(self, response):
        """Copies values from the oEmbed response onto mapped model fields."""
        for mapping in self.oembed_map:
            try:
                prop, field = mapping
            except ValueError:
                # Plain string entry: response key and field name coincide.
                prop, field = mapping, mapping
            finally:
                if hasattr(self, field):
                    value = self.oembed_clean_value(field, response[prop])
                    setattr(self, field, value)

    def oembed_clean_value(self, key, value):
        # Hook for subclasses to post-process individual response values.
        return value

    def save(self, *args, **kwargs):
        # Always refresh oEmbed data before persisting.
        self.oembed_update()
        super(BaseOembedPostType, self).save(*args, **kwargs)
class BaseOembedPhoto(BaseOembedPostType):
    """Abstract oEmbed post type storing photo dimensions and image URL."""
    width = models.IntegerField(_('Width'), blank=True, null=True, \
        editable=False)
    height = models.IntegerField(_('Height'), blank=True, null=True, \
        editable=False)
    image_url = models.URLField(_('Image URL'), blank=True, null=True, \
        editable=False)

    class Meta:
        abstract = True

    # The oEmbed 'url' response key is mapped onto the image_url field.
    oembed_map = (
        'version',
        'provider_name',
        'provider_url',
        'width',
        'height',
        ('url', 'image_url',)
    )
class BaseOembedVideo(BaseOembedPostType):
    """Abstract oEmbed post type storing video dimensions and embed markup."""
    width = models.IntegerField(_('Width'), blank=True, null=True, \
        editable=False)
    height = models.IntegerField(_('Height'), blank=True, null=True, \
        editable=False)
    embed = models.TextField(_('Embed Code'), blank=True, null=True, \
        editable=False)

    class Meta:
        abstract = True

    # The oEmbed 'html' response key is mapped onto the embed field.
    oembed_map = (
        'version',
        'provider_name',
        'provider_url',
        'width',
        'height',
        ('html', 'embed',),
    )
class BaseOembedLink(BaseOembedPostType):
    """
    Abstract oEmbed post type that stores only the common oEmbed metadata
    inherited from BaseOembedPostType; it declares no extra fields.
    """
    class Meta:
        abstract = True
class BaseOembedRich(BaseOembedPostType):
    """Abstract oEmbed post type for "rich" content with embed markup."""
    width = models.IntegerField(_('Width'), blank=True, null=True, \
        editable=False)
    height = models.IntegerField(_('Height'), blank=True, null=True, \
        editable=False)
    # NOTE(review): this maps the oEmbed 'html' key (markup) into a URLField,
    # while BaseOembedVideo uses a TextField for the same key -- a URLField
    # will fail validation for HTML; looks like it should be a TextField.
    # Left unchanged because altering the field type changes the DB schema.
    embed = models.URLField(_('Embed Code'), blank=True, null=True, \
        editable=False)

    class Meta:
        abstract = True

    oembed_map = (
        'version',
        'provider_name',
        'provider_url',
        'width',
        'height',
        ('html', 'embed',)
    )
|
import arcade
from .base import Base
class BaseVisible(Base):
    """Component base restricted to visible game objects (arcade sprites)."""

    def __init__(self):
        super().__init__()

    # The property below ensures that this component can only be added to an
    # arcade sprite (a visible game object).
    @property
    def parent(self):
        return self._parent

    @parent.setter
    def parent(self, value):
        if value is not None and not isinstance(value, arcade.Sprite):
            raise ValueError(f"{self.__class__.__name__} Component can only be assigned to an instance of an arcade sprite.")
        self._parent = value
import fido
from typing import List
from config import IRC
from modules.access import require_permission, Levels
from modules.nexmo import add_group, send_to_group, get_group
@require_permission(level=Levels.OP, message="Permission denied!")
async def sendmessage(bot: fido, channel: str, sender: str, args: List[str]):
    """
    Sends a SMS message to the specified group. Dear gods, you better not abuse this.
    :param bot: Bot instance
    :param channel: Channel where command is invoked
    :param sender: Sender of command
    :param args: List of arguments, should contain group name and message
    :return: Usage string when invoked without arguments, otherwise None
    """
    if len(args) == 0:
        # Bug fix: this previously echoed the usage line of the 'groupadd'
        # command (copy-paste error).
        return "Usage: " + IRC.commandPrefix + "sendmessage <groupname> <message>"
    if not get_group(args[0]):
        await bot.message(channel, f"Group not found: {args[0]}")
        return
    # First argument is the group; the remainder is the message body.
    send_to_group(args[0], ' '.join(args[1:]))
    await bot.message(channel, "Message sent.")
@require_permission(level=Levels.ADMIN, message="Nope!")
async def groupadd(bot: fido, channel: str, sender: str, args: List[str]):
    """
    Add someone to a group. If the group does not already exist, it is created.
    Phone number should contain full country code, starting with +.
    The user must be in the channel to be added to the SMS group.
    :param bot: Bot instance
    :param channel: Channel where command is invoked
    :param sender: Sender of command
    :param args: List of arguments, should contain group, nickname and phonenumber.
    :return: Usage string when invoked without arguments, otherwise None
    """
    if len(args) == 0:
        return "Usage: " + IRC.commandPrefix + "groupadd <groupname> <nickname> <phonenumber>"
    print(f"Args: {args}")
    number = ''
    nickname = ''
    group = ''
    # Classify each argument by shape: a leading '+' marks the phone number,
    # a known channel user marks the nickname, anything else is the group.
    # (Removed an unused 'lines' accumulator from the original.)
    for arg in args:
        if arg == "":
            continue  # Ignore blank args.
        print(f"Arg: [{arg.strip()}]")
        if arg.startswith('+'):
            number = arg
        elif arg in bot.users:
            nickname = arg
        else:
            group = arg
    if not group or not nickname or not number:
        await bot.message(channel, "Incorrect command usage. Ensure user is in channel, and that number has +<country code>.")
        return
    add_group(mygroup=group, nickname=nickname, number=number)
    await bot.message(channel, f"Added {nickname} to SMS group {group} with number {number}")
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2 as google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2
import google.iam.v1.iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
import google.iam.v1.policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
import google.longrunning.operations_pb2 as google_dot_longrunning_dot_operations__pb2
import google.protobuf.empty_pb2 as google_dot_protobuf_dot_empty__pb2
class DatabaseAdminStub(object):
    """Cloud Spanner Database Admin API

    The Cloud Spanner Database Admin API can be used to create, drop, and
    list databases. It also enables updating the schema of pre-existing
    databases.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Each attribute below is a unary-unary callable bound to one RPC of
        # the DatabaseAdmin service, using the generated protobuf
        # (de)serializers.  (Generated code -- do not edit by hand.)
        self.ListDatabases = channel.unary_unary(
            '/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases',
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesResponse.FromString,
        )
        self.CreateDatabase = channel.unary_unary(
            '/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase',
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.CreateDatabaseRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.GetDatabase = channel.unary_unary(
            '/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase',
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.Database.FromString,
        )
        self.UpdateDatabaseDdl = channel.unary_unary(
            '/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl',
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.DropDatabase = channel.unary_unary(
            '/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase',
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.DropDatabaseRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.GetDatabaseDdl = channel.unary_unary(
            '/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl',
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.FromString,
        )
        self.SetIamPolicy = channel.unary_unary(
            '/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy',
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.GetIamPolicy = channel.unary_unary(
            '/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy',
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.TestIamPermissions = channel.unary_unary(
            '/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions',
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
        )
class DatabaseAdminServicer(object):
    """Cloud Spanner Database Admin API

    The Cloud Spanner Database Admin API can be used to create, drop, and
    list databases. It also enables updating the schema of pre-existing
    databases.
    """
    # Generated servicer base: every method is an UNIMPLEMENTED stub that a
    # concrete server implementation overrides.  (Generated code -- do not
    # edit by hand.)

    def ListDatabases(self, request, context):
        """Lists Cloud Spanner databases.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CreateDatabase(self, request, context):
        """Creates a new Cloud Spanner database and starts to prepare it for serving.
        The returned [long-running operation][google.longrunning.Operation] will
        have a name of the format `<database_name>/operations/<operation_id>` and
        can be used to track preparation of the database. The
        [metadata][google.longrunning.Operation.metadata] field type is
        [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The
        [response][google.longrunning.Operation.response] field type is
        [Database][google.spanner.admin.database.v1.Database], if successful.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetDatabase(self, request, context):
        """Gets the state of a Cloud Spanner database.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpdateDatabaseDdl(self, request, context):
        """Updates the schema of a Cloud Spanner database by
        creating/altering/dropping tables, columns, indexes, etc. The returned
        [long-running operation][google.longrunning.Operation] will have a name of
        the format `<database_name>/operations/<operation_id>` and can be used to
        track execution of the schema change(s). The
        [metadata][google.longrunning.Operation.metadata] field type is
        [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DropDatabase(self, request, context):
        """Drops (aka deletes) a Cloud Spanner database.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetDatabaseDdl(self, request, context):
        """Returns the schema of a Cloud Spanner database as a list of formatted
        DDL statements. This method does not show pending schema updates, those may
        be queried using the [Operations][google.longrunning.Operations] API.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SetIamPolicy(self, request, context):
        """Sets the access control policy on a database resource. Replaces any
        existing policy.
        Authorization requires `spanner.databases.setIamPolicy` permission on
        [resource][google.iam.v1.SetIamPolicyRequest.resource].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetIamPolicy(self, request, context):
        """Gets the access control policy for a database resource. Returns an empty
        policy if a database exists but does not have a policy set.
        Authorization requires `spanner.databases.getIamPolicy` permission on
        [resource][google.iam.v1.GetIamPolicyRequest.resource].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def TestIamPermissions(self, request, context):
        """Returns permissions that the caller has on the specified database resource.
        Attempting this RPC on a non-existent Cloud Spanner database will result in
        a NOT_FOUND error if the user has `spanner.databases.list` permission on
        the containing Cloud Spanner instance. Otherwise returns an empty set of
        permissions.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_DatabaseAdminServicer_to_server(servicer, server):
    """Registers *servicer*'s RPC handlers for the DatabaseAdmin service on
    *server*.  (Generated code -- do not edit by hand.)
    """
    rpc_method_handlers = {
        'ListDatabases': grpc.unary_unary_rpc_method_handler(
            servicer.ListDatabases,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesRequest.FromString,
            response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesResponse.SerializeToString,
        ),
        'CreateDatabase': grpc.unary_unary_rpc_method_handler(
            servicer.CreateDatabase,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.CreateDatabaseRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        'GetDatabase': grpc.unary_unary_rpc_method_handler(
            servicer.GetDatabase,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseRequest.FromString,
            response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.Database.SerializeToString,
        ),
        'UpdateDatabaseDdl': grpc.unary_unary_rpc_method_handler(
            servicer.UpdateDatabaseDdl,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        'DropDatabase': grpc.unary_unary_rpc_method_handler(
            servicer.DropDatabase,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.DropDatabaseRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'GetDatabaseDdl': grpc.unary_unary_rpc_method_handler(
            servicer.GetDatabaseDdl,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.FromString,
            response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.SerializeToString,
        ),
        'SetIamPolicy': grpc.unary_unary_rpc_method_handler(
            servicer.SetIamPolicy,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
        ),
        'GetIamPolicy': grpc.unary_unary_rpc_method_handler(
            servicer.GetIamPolicy,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
        ),
        'TestIamPermissions': grpc.unary_unary_rpc_method_handler(
            servicer.TestIamPermissions,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'google.spanner.admin.database.v1.DatabaseAdmin', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
# https://www.codechef.com/problems/CHFMOT18
import math

# For each test case: read the amount s and the largest denomination n,
# cover the even part of s with ceil(even/n) coins and add one coin when
# the amount is odd.
for _ in range(int(input())):
    amount, denom = map(int, input().split())
    even_part = amount - amount % 2
    print(math.ceil(even_part / denom) + amount % 2)
import math
import os
import os.path
import numpy as np
import cv2 as cv
import torch
import torch.nn as nn
import torch.nn.functional as F
import imageio
from PIL import ImageFont, ImageDraw, Image
from console_progressbar import ProgressBar
import inference
from utils import ScreenSpaceShading
########################################
# CONFIGURATION
########################################
#input
RENDERER_CPU = '../bin/CPURenderer.exe'
RENDERER_GPU = '../bin/GPURenderer.exe'
MODEL_DIR = "D:/VolumeSuperResolution"
DATA_DIR_CPU = "../../data/"
DATA_DIR_GPU = "../../data/"
# One entry per volume dataset: file path (relative to the data dir),
# display name, rendering iso-value, material/lighting colours (0-255 RGB)
# and camera distance.
DATASETS = [
    {
        'file':'clouds/inputVBX/cloud-049.vbx',
        'name':'Cloud - training data',
        'iso':0.1,
        'material':[255, 76, 0],
        'ambient':[25, 25, 25],
        'diffuse':[255, 255, 255],
        'specular':[50, 50, 50],
        'distance':1.8
    },
    {
        'file':'clouds/inputVBX/den0adv_150.vbx',
        'name':'Smoke plume - training data',
        'iso':0.46,
        'material':[165, 184, 186],
        'ambient':[25, 25, 25],
        'diffuse':[255, 255, 255],
        'specular':[50, 50, 50],
        'distance':1.95
    },
    {
        'file':'volumes/vbx/snapshot_039_256.vbx',
        'name':'Ejecta - test data',
        'iso':0.40,
        'material':[138, 129, 255],
        'ambient':[0, 90, 15],
        'diffuse':[121, 119, 255],
        'specular':[50, 50, 50],
        'distance':0.9
    },
    {
        'file':'volumes/vbx/Bonsai.vbx',
        'name':'Bonsai - test data',
        'iso':0.25,
        'material':[0, 173, 0],
        'ambient':[76, 31, 31],
        'diffuse':[255, 233, 191],
        'specular':[30, 30, 30],
        'distance':2.6
    },
    #{
    # 'file':'volumes/vdb/aneurism256.vdb',
    # 'name':'Aneurism - test data',
    # 'iso':0.40,
    # 'material':[255, 255, 255],
    # 'ambient':[0, 0, 64],
    # 'diffuse':[255, 0, 0],
    # 'specular':[50, 50, 50],
    # 'distance':1.95
    #},
]
# Sentinel "paths" marking the two pseudo-models that need no network.
MODEL_INPUT = "<input>"
MODEL_GROUND_TRUTH = "<gt>"
# Models compared in the 2x2 grid; 'path' is either a sentinel above or a
# checkpoint file under MODEL_DIR.
MODELS = [
    {
        'name': 'Nearest (input)',
        'path': MODEL_INPUT,
        'temporal': False,
        'masking': False
    },
    {
        'name': 'Ground Truth',
        'path': MODEL_GROUND_TRUTH,
        'temporal': False,
        'masking': False
    },
    {
        'name': 'Unshaded Input - Perceptual + temporal L1',
        'path': 'pretrained_unshaded/gen_percNormal_tempL2_1.pth',
        'temporal': True,
        'masking': True
    },
    {
        'name': 'Unshaded Input - Temporal GAN',
        'path': 'pretrained_unshaded/gen_tgan_1.pth',
        'temporal': True,
        'masking': True
    },
]
UPSCALING = 4
# Ambient-occlusion and shading parameters.
AO_SAMPLES = 512
AO_RADIUS = 0.05
AO_STRENGTH = 1.0
SPECULAR_EXPONENT = 8
#rendering
OUTPUT_FILE = 'comparison6.mp4'
CAMERA_ORIGIN = [0, 0.8, -1.4] #will be normalized
FPS = 25
FRAMES_PER_ROTATION = 500
ROTATIONS_PER_EXAMPLE = 1
EMPTY_FRAMES = 25  # black frames inserted between datasets
#computation
MASKING = True
#layout
RESOLUTION = (1920, 1080)
BORDER = 5
FONT_FILE = "arial.ttf"
DATASET_FONT_SIZE = 25
MODEL_FONT_SIZE = 20
DatasetFont = ImageFont.truetype(FONT_FILE, DATASET_FONT_SIZE)
ModelFont = ImageFont.truetype(FONT_FILE, MODEL_FONT_SIZE)
# Derived layout: a title row above a 2x2 grid of panels, each captioned
# with its model name.
TitleHeight = DatasetFont.getsize(DATASETS[0]['name'])[1] + BORDER
ModelHeight = ModelFont.getsize(MODELS[0]['name'])[1]
CanvasWidth = (RESOLUTION[0]-3*BORDER)//2
CanvasHeight = (RESOLUTION[1]-TitleHeight - 4*BORDER) // 2
CanvasOffsets = [
    (BORDER, TitleHeight+ModelHeight + 2*BORDER),
    (CanvasWidth + 2*BORDER, TitleHeight+ModelHeight + 2*BORDER),
    (BORDER, TitleHeight+ModelHeight + CanvasHeight + 3*BORDER),
    (CanvasWidth + 2*BORDER, TitleHeight+ModelHeight + CanvasHeight + 3*BORDER),
]
CanvasHeight -= ModelHeight
# Round the panel size down to a multiple of UPSCALING so the low-res
# render scales exactly onto the panel.
CanvasWidthSmall = CanvasWidth // UPSCALING
CanvasHeightSmall = CanvasHeight // UPSCALING
CanvasWidth = CanvasWidthSmall * UPSCALING
CanvasHeight = CanvasHeightSmall * UPSCALING
# background image and layout
def createBackgroundImage(dataset):
    """Build the static frame background: black canvas, centred dataset
    title at the top, and one centred model caption above each of the four
    panels.  Returns the image as a numpy array."""
    canvas = Image.new('RGB', RESOLUTION, color=(0,0,0))
    draw = ImageDraw.Draw(canvas)
    # Dataset title, horizontally centred.
    title_w = draw.textsize(dataset, font=DatasetFont)[0]
    draw.text(((RESOLUTION[0]-title_w)//2, BORDER), dataset,
              font=DatasetFont, fill=(255,255,255))
    # One caption per model, centred over its panel.
    for idx in range(4):
        caption = MODELS[idx]['name']
        caption_w = draw.textsize(caption, font=ModelFont)[0]
        draw.text((CanvasOffsets[idx][0]+(CanvasWidth-caption_w)//2,
                   CanvasOffsets[idx][1]-ModelHeight),
                  caption, font=ModelFont, fill=(200,200,200))
    return np.asarray(canvas)
# create writer
writer = imageio.get_writer(OUTPUT_FILE, fps=FPS)
# Load models
device = torch.device("cuda")
models = [None]*len(MODELS)
for i,m in enumerate(MODELS):
    p = m['path']
    # The two pseudo-models (nearest input / ground truth) have no network to
    # load; identity comparison against the sentinel strings is intentional.
    if p is not MODEL_INPUT and p is not MODEL_GROUND_TRUTH:
        models[i] = inference.LoadedModel(os.path.join(MODEL_DIR,p), device, UPSCALING)
# Render each dataset
for i in range(len(DATASETS)):
if i>0:
#write empty frames
empty = np.zeros((RESOLUTION[1], RESOLUTION[0], 3), dtype=np.uint8)
for j in range(EMPTY_FRAMES):
writer.append_data(empty)
background = createBackgroundImage(DATASETS[i]['name'])
# create renderer
camera = inference.Camera(CanvasWidth, CanvasHeight, CAMERA_ORIGIN)
camera.currentDistance = DATASETS[i]['distance']
material = inference.Material(DATASETS[i]['iso'])
renderer_path = RENDERER_CPU if DATASETS[i]['file'].endswith('vdb') else RENDERER_GPU
data_dir = DATA_DIR_CPU if DATASETS[i]['file'].endswith('vdb') else DATA_DIR_GPU
datasetfile = os.path.join(data_dir, DATASETS[i]['file'])
print('Open', datasetfile)
renderer = inference.Renderer(renderer_path, datasetfile, material, camera)
renderer.send_command("aoradius=%5.3f\n"%float(AO_RADIUS))
# create shading
shading = ScreenSpaceShading(torch.device('cpu'))
shading.fov(30)
shading.light_direction(np.array([0.1,0.1,1.0]))
shading.ambient_light_color(np.array(DATASETS[i]['ambient'])/255.0)
shading.diffuse_light_color(np.array(DATASETS[i]['diffuse'])/255.0)
shading.specular_light_color(np.array(DATASETS[i]['specular'])/255.0)
shading.specular_exponent(SPECULAR_EXPONENT)
shading.material_color(np.array(DATASETS[i]['material'])/255.0)
shading.ambient_occlusion(AO_STRENGTH)
# draw frames
print('Render', DATASETS[i]['file'])
pg = ProgressBar(FRAMES_PER_ROTATION * ROTATIONS_PER_EXAMPLE, 'Dataset %d'%(i+1), length=50)
previous_images = [None]*len(MODELS)
for j in range(FRAMES_PER_ROTATION * ROTATIONS_PER_EXAMPLE):
pg.print_progress_bar(j)
img = np.copy(background)
#send camera to renderer
currentOrigin = camera.getOrigin()
renderer.send_command("cameraOrigin=%5.3f,%5.3f,%5.3f\n"%(currentOrigin[0], currentOrigin[1], currentOrigin[2]))
#render models
for k,m in enumerate(MODELS):
#render
shading.inverse_ao = False
p = m['path']
if p==MODEL_INPUT:
renderer.send_command("resolution=%d,%d\n"%(CanvasWidthSmall, CanvasHeightSmall))
renderer.send_command("aosamples=0\n")
renderer.send_command("render\n")
image = renderer.read_image(CanvasWidthSmall, CanvasHeightSmall)
image = np.concatenate((
image[0:3,:,:],
image[3:4,:,:]*2-1, #transform mask into -1,+1
image[4: ,:,:]), axis=0)
image_shaded_input = np.concatenate((image[3:4,:,:], image[4:8,:,:], image[10:11,:,:]), axis=0)
image_shaded = torch.clamp(shading(torch.unsqueeze(torch.from_numpy(image_shaded_input),0)), 0, 1).numpy()[0]
imageRGB = image_shaded[0:3,:,:].transpose((2, 1, 0))
imageRGB = cv.resize(imageRGB,
dsize=None,
fx=UPSCALING,
fy=UPSCALING,
interpolation=cv.INTER_NEAREST)
elif p==MODEL_GROUND_TRUTH:
renderer.send_command("resolution=%d,%d\n"%(CanvasWidth, CanvasHeight))
renderer.send_command("aosamples=%d\n"%AO_SAMPLES)
renderer.send_command("render\n")
image = renderer.read_image(CanvasWidth, CanvasHeight)
image = np.concatenate((
image[0:3,:,:],
image[3:4,:,:]*2-1, #transform mask into -1,+1
image[4: ,:,:]), axis=0)
image_shaded_input = np.concatenate((image[3:4,:,:], image[4:8,:,:], image[10:11,:,:]), axis=0)
image_shaded = torch.clamp(shading(torch.unsqueeze(torch.from_numpy(image_shaded_input),0)), 0, 1).numpy()[0]
imageRGB = image_shaded[0:3,:,:].transpose((2, 1, 0))
else:
renderer.send_command("resolution=%d,%d\n"%(CanvasWidthSmall, CanvasHeightSmall))
renderer.send_command("aosamples=0\n")
renderer.send_command("render\n")
shading.inverse_ao = models[k].inverse_ao
image = renderer.read_image(CanvasWidthSmall, CanvasHeightSmall)
image = np.concatenate((
image[0:3,:,:],
image[3:4,:,:]*2-1, #transform mask into -1,+1
image[4: ,:,:]), axis=0)
image_shaded_input = np.concatenate((image[3:4,:,:], image[4:8,:,:], image[10:11,:,:]), axis=0)
image_shaded = torch.clamp(shading(torch.unsqueeze(torch.from_numpy(image_shaded_input),0)), 0, 1).numpy()[0]
previous_image = previous_images[k]
if not m['temporal']:
previous_image = None
if models[k].unshaded:
# unshaded input
imageRaw = models[k].inference(image, previous_image)
imageRaw = torch.cat([
torch.clamp(imageRaw[:,0:1,:,:], -1, +1),
ScreenSpaceShading.normalize(imageRaw[:,1:4,:,:], dim=1),
torch.clamp(imageRaw[:,4:,:,:], 0, 1)
], dim=1)
previous_images[k] = imageRaw
imageRGB = shading(imageRaw.cpu())[0].numpy().transpose((2, 1, 0))
else:
# shaded input
image_with_color = np.concatenate((
#image[0:3,:,:],
np.clip(image_shaded, 0, 1),
image[3:4,:,:]*0.5+0.5, #transform mask back to [0,1]
image[4:,:,:]), axis=0)
imageRaw = models[k].inference(image_with_color, previous_image)
imageRaw = torch.clamp(imageRaw, 0, 1)
previous_images[k] = imageRaw
imageRGB = imageRaw[0].cpu().numpy().transpose((2, 1, 0))
if m['masking']:
DILATION = 1
KERNEL = np.ones((3,3), np.float32)
mask = cv.dilate((image[3,:,:]*0.5+0.5).transpose((1, 0)), KERNEL, iterations = DILATION)
mask = cv.resize(mask, dsize=None, fx=UPSCALING, fy=UPSCALING, interpolation=cv.INTER_LINEAR)
imageRGB = imageRGB * mask[:,:,np.newaxis]
#write into image
img[CanvasOffsets[k][1]:CanvasOffsets[k][1]+CanvasHeight,
CanvasOffsets[k][0]:CanvasOffsets[k][0]+CanvasWidth
:] = np.clip(imageRGB*255, 0, 255).transpose((1, 0, 2)).astype(np.uint8)
#rotate camera
camera.currentYaw += 2 * math.pi / FRAMES_PER_ROTATION
#send to writer
writer.append_data(img)
pg.print_progress_bar(FRAMES_PER_ROTATION * ROTATIONS_PER_EXAMPLE)
# close renderer
renderer.close()
# done
writer.close()
print("Done") |
"""
Main program for robot.
"""
from pybricks import hubs
from pybricks.geometry import Axis
class Robot(hubs.PrimeHub):
    """Robot helper built on top of the SPIKE Prime hub.

    Bug fix: the original ended in a bare ``super.`` (syntax error);
    the intended ``super().__init__`` call is completed here.
    """

    def __init__(self, name: str, front_side: Axis = Axis.X, top_side: Axis = Axis.Z):
        """Initialize the hub with the given mounting orientation.

        Args:
            name: Human-readable robot name (stored for later use).
            front_side: Hub axis pointing forward.
            top_side: Hub axis pointing up.
        """
        # NOTE(review): assumes PrimeHub accepts the orientation as
        # keyword arguments -- confirm against the pybricks version in use.
        super().__init__(top_side=top_side, front_side=front_side)
        self.name = name
|
import torch
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import PIL
from PIL import Image
from PIL import ImageFile
import cv2
import imageio
# Tolerate partially-written image files instead of raising on decode.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Inference environment params.
# NOTE(review): torch.load unpickles arbitrary objects -- only load a
# checkpoint from a trusted source.
model = torch.load("SurfaceDamageDetectionModel")
# Class index -> label; presumably matches the training label order -- TODO confirm.
classes = {0:"HighDamage",1:"LowDamage",2:"MediumDamage",3:"NoDamage"}
def detect(frame, net, transform):
    """Convert a raw ndarray frame into a PIL image, log its size, return it.

    NOTE: ``net`` and ``transform`` are currently unused by this helper.
    """
    pil_frame = PIL.Image.fromarray(frame)
    print(pil_frame.size)
    return pil_frame
# define pre-processing applied to images in a transformation object.
transform = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    # ImageNet mean/std normalization
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Device index 1: presumably an external USB camera -- verify on the target machine.
video_capture = cv2.VideoCapture(1)
video_capture.open(1)  # redundant with the constructor argument above
print(video_capture.isOpened())
while True:
    _,frame = video_capture.read()
    # pre-process the frame entered from camera feed.
    img=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
    img=Image.fromarray(img)
    img=transform(img)
    img=np.asarray(img)
    img=torch.from_numpy(img)
    img=img.view(1,3,224,224)  # add batch dimension
    # pass the image produced to the model and print the output of the predicted label.
    # NOTE(review): model.eval() could be hoisted out of the loop, and inference
    # could run under torch.no_grad() to avoid building autograd graphs.
    model.eval()
    y_ = model(img)
    _, y_label_ = torch.max(y_, 1)
    print(classes[y_label_.data.cpu().numpy()[0]])
    cv2.imshow("frame", frame)
    # provide a way for exit from the main loop program.
    if cv2.waitKey(500) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
|
#Project euler problem 10
#Problem link https://projecteuler.net/problem=10
def sumPrimes(n):
    """Return the sum of all primes strictly below ``n``.

    Uses a sieve of Eratosthenes: O(n log log n) time, O(n) space.
    Returns 0 for n < 2 (no primes below 2).
    """
    if n < 2:
        return 0
    total = 0  # renamed from 'sum' to avoid shadowing the builtin
    sieve = [True] * n
    for p in range(2, n):
        if sieve[p]:
            total += p
            # Start crossing off at p*p: smaller multiples were already
            # removed by smaller prime factors.
            for i in range(p * p, n, p):
                sieve[i] = False
    return total
print(sumPrimes(2000000)) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: x.huang
# @date:28/05/19
import abc
import hashlib
import random
from enum import Enum
import requests
from pypay import err
from pypay.interfaces.GatewayApplicationInterface import GatewayApplicationInterface
from pypay.util import xml_et_cdatasection as ET
class WechatModeEnum(Enum):
    """Supported WeChat Pay gateway operating modes."""

    MODE_NORMAL = 'normal'    # production mode
    MODE_DEV = 'dev'          # sandbox mode
    MODE_HK = 'hk'            # Hong Kong wallet
    MODE_US = 'us'            # overseas mode
    MODE_SERVICE = 'service'  # service-provider mode
class WechatConfig:
    """Holds WeChat Pay merchant credentials and endpoint settings."""

    def __init__(self,
                 appid='',
                 app_id='',
                 miniapp_id='',
                 mch_id='',
                 key='',
                 notify_url='',
                 cert_client='',
                 cert_key='',
                 mode=WechatModeEnum.MODE_NORMAL.value,
                 ):
        self.appid = appid              # appid bound to the merchant account
        self.app_id = app_id            # official-account app_id
        self.miniapp_id = miniapp_id    # mini-program app_id
        self.mch_id = mch_id            # merchant id
        self.key = key                  # merchant payment secret key
        self.notify_url = notify_url    # callback notification URL
        self.cert_client = cert_client  # client certificate
        self.cert_key = cert_key        # certificate key
        self.mode = mode                # operating mode (a WechatModeEnum value)
class WechatPay(GatewayApplicationInterface):
    """WeChat Pay v2 gateway client.

    Builds the common request payload, signs requests with MD5, converts
    between dict and the XML wire format, and verifies response signatures.
    Concrete gateways override pay() and get_trade_type() per trade type.
    """

    # Gateway base URL per operating mode.
    URL = {
        WechatModeEnum.MODE_NORMAL.value: 'https://api.mch.weixin.qq.com/',
        WechatModeEnum.MODE_DEV.value: 'https://api.mch.weixin.qq.com/sandboxnew/',
        WechatModeEnum.MODE_HK.value: 'https://apihk.mch.weixin.qq.com/',
        WechatModeEnum.MODE_US.value: 'https://apius.mch.weixin.qq.com/',
        WechatModeEnum.MODE_SERVICE.value: 'https://api.mch.weixin.qq.com/',
    }

    def __init__(self, config: WechatConfig):
        self.config = config
        self.gateway = self.URL.get(self.config.mode)
        # Common fields sent with every API call; merged with per-call data.
        self.payload = {
            'appid': self.config.app_id,  # official-account app_id
            'mch_id': self.config.mch_id,
            'nonce_str': self.gen_nonce_str(),
            'sign_type': 'MD5',
            'notify_url': self.config.notify_url,
            'trade_type': self.get_trade_type(),
        }
        # NOTE: performs a network round-trip when running in sandbox mode.
        self.set_dev_key()

    def check_config(self, arg):
        """Raise if the named config attribute is missing or empty.

        Bug fix: the original tested ``arg not in self.config``, which raises
        TypeError on a plain object; use getattr instead.
        """
        if not getattr(self.config, arg, None):
            raise err.InvalidArgumentException(f'Missing Config -- [{arg}]')

    @staticmethod
    def get_trade_type():
        """Trade type identifier; overridden by concrete gateways."""
        return ''

    def pre_order(self, config_biz: dict):
        """Create a unified order; config_biz overrides the common payload."""
        biz = dict(self.payload, **config_biz)
        return self.request_api('pay/unifiedorder', biz)

    @abc.abstractmethod
    def pay(self, config_biz: dict):
        pass

    def find(self, out_trade_no: str):
        """Query an order by merchant trade number."""
        self.payload['out_trade_no'] = out_trade_no
        self.unset_trade_type_and_notify_url()
        return self.request_api('pay/orderquery', self.payload)

    def refund(self, order: dict):
        """Request a refund; defaults op_user_id to the merchant id."""
        new_order = dict(self.payload, **order)
        new_order['op_user_id'] = new_order.get('op_user_id') if 'op_user_id' in new_order else self.payload.get(
            'mch_id', '')
        self.unset_trade_type_and_notify_url()
        return self.request_api('secapi/pay/refund', new_order, cert=True)

    def cancel(self, order: dict):
        pass

    def close(self, out_trade_no: str):
        """Close an open order by merchant trade number."""
        self.payload['out_trade_no'] = out_trade_no
        self.unset_trade_type_and_notify_url()
        return self.request_api('pay/closeorder', self.payload)

    def verify(self, content: str, sign=None, sync=False):
        """Verify an XML payload's signature.

        Returns the parsed dict on success, False otherwise.
        Bug fix: compare against the computed MD5 signature (gen_sign), not
        the raw sorted key=value string (get_sign_content), matching the
        check done in processing_api_result().
        """
        data = self.from_xml(content)
        sign = data.get('sign') if sign is None else sign
        return data if self.gen_sign(data) == sign else False

    def success(self):
        """XML acknowledgment body WeChat expects after a notification."""
        ret_dict = {
            'return_code': 'SUCCESS',
            'return_msg': 'OK',
        }
        return self.to_xml(ret_dict)

    def set_dev_key(self):
        """Fetch and install the sandbox signing key (sandbox mode only)."""
        if self.config.mode == WechatModeEnum.MODE_DEV.value:
            data = {
                'mch_id': self.config.mch_id,
                'nonce_str': self.gen_nonce_str()
            }
            result = self.request_api('pay/getsignkey', data)
            self.config.key = result['sandbox_signkey']
        return self

    @staticmethod
    def gen_nonce_str(length=16):
        """Return a random alphanumeric nonce of the given length.

        Bug fix: the original used randint(0, len(chars) - 1 - 1), so the
        last character ('9') could never be chosen.
        """
        chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
        return ''.join(random.choice(chars) for _ in range(length))

    @staticmethod
    def from_xml(data: str):
        """Parse a flat WeChat XML document into a dict (None for empty input)."""
        if not data:
            return
        xml_element = ET.fromstring(data)
        dict_data = dict()
        for element_one in xml_element.iter():
            dict_data[element_one.tag] = element_one.text
        if 'xml' in dict_data:
            del dict_data['xml']  # drop the root element itself
        return dict_data

    @staticmethod
    def to_xml(data: dict) -> str:
        """Serialize a flat dict into WeChat's <xml> wire format.

        Scalars are embedded directly; other values are CDATA-wrapped.
        Bug fix: the CDATA section was never closed ("]]" instead of "]]>"),
        producing malformed XML.
        """
        xml_list = ['<xml>']
        for k, v in data.items():
            if isinstance(v, (str, float, int)):
                item = f'<{k}>{v}</{k}>'
            else:
                item = f'<{k}><![CDATA[{v}]]></{k}>'
            xml_list.append(item)
        xml_list.append('</xml>')
        return ''.join(xml_list)

    def gen_sign(self, data: dict):
        """MD5 signature over the sorted key=value string plus the secret key."""
        key = self.config.key
        if not key:
            raise err.InvalidArgumentException('Missing Wechat Config -- [key]')
        md5_str = hashlib.md5(f'{self.get_sign_content(data)}&key={key}'.encode('utf-8'))
        return md5_str.hexdigest().upper()

    @staticmethod
    def get_sign_content(data):
        """'k=v' pairs joined by '&', keys sorted, skipping 'sign' and empty values."""
        return '&'.join('%s=%s' % (key, data.get(key)) for key in sorted(data) if key != 'sign' and data.get(key))

    def request_api(self, endpoint, data, cert=False):
        """POST signed XML to the gateway and return the parsed, verified result.

        TODO: the client certificate (cert=True, required by refund) is not
        yet passed to requests; wire up config.cert_client / config.cert_key.
        """
        url = self.gateway + endpoint
        if not data.get('sign'):
            data['sign'] = self.gen_sign(data)
        response = requests.post(
            url,
            data=self.to_xml(data).encode(),
        )
        result = response.text if response.ok else ''
        result = self.from_xml(result)
        return self.processing_api_result(endpoint, result)

    def processing_api_result(self, endpoint, result):
        """Validate gateway/business status codes and the response signature."""
        if result.get('return_code', '') != 'SUCCESS':
            raise err.GatewayException(f'Get API Error: {result.get("return_msg", "")} {result}')
        if result.get('result_code') and result.get('result_code') != 'SUCCESS':
            raise err.BusinessException(f'Wechat Business Error: {result.get("err_code", "")}')
        # The sandbox getsignkey endpoint is the only unsigned response.
        if endpoint == 'pay/getsignkey' or self.gen_sign(result) == result.get('sign', ''):
            return result
        raise err.InvalidSignException('Wechat Sign Verify FAILED', result)

    def unset_trade_type_and_notify_url(self):
        """Remove order-creation-only fields before query/refund/close calls."""
        if 'notify_url' in self.payload:
            del self.payload['notify_url']
        if 'trade_type' in self.payload:
            del self.payload['trade_type']
        return True

    def unset_notify_url(self):
        """Remove only the notification URL from the common payload."""
        if 'notify_url' in self.payload:
            del self.payload['notify_url']
        return True
|
"""
Computes a TANGENT pulse shape with defined Start and End
Supply d, b, and sp to silence Interaction
W.T. Franks FMP Berlin
Arguments:
-sp:shaped pulse name
-d : delta (+/- d%)
-b : beta (curvature)
-p : number of points (default 1000)
-np: number of points (alias of -p)
-sc: scale in percent (default 100 - delta)
-name : specify name
"""
# TopSpin Jython utility: parses command-line flags and builds a tangent
# pulse shape via the Tangent module, either interactively (dialog) or
# fully from the supplied arguments. Relies on TopSpin globals
# ('root', 'PUTPAR') injected by the TopSpin Python runtime.
import sys
from sys import argv
sys.path.append(root.UtilPath.getTopspinHome()+ '/exp/stan/nmr/py/BioPY/modules/')
import Setup, Help
import Tangent
par=[]
d="0"        # delta (+/- d%)
b="0"        # beta (curvature)
np="1000"    # number of points
sc="100."    # scale; sentinel "100." means "derive from delta below"
nom="None"   # output shape name
sp="None"    # SPNAM parameter name
Interactive=1
cmds=argv
i=0
# Scan argv for flags; each flag's value is the token that follows it.
# NOTE(review): substring matching via find() can misfire if a VALUE token
# itself contains a flag string (e.g. a name containing '-d') -- confirm.
while i < len(cmds):
    #print cmds[i]
    if cmds[i].find('-d') >=0: d=cmds[i+1]
    if cmds[i].find('-b') >=0: b=cmds[i+1]
    if cmds[i].find('-p') >=0: np=cmds[i+1]
    if cmds[i].find('-np') >=0: np=cmds[i+1]
    if cmds[i].find('-sc') >=0: sc=cmds[i+1]
    if cmds[i].find('-name') >=0: nom=cmds[i+1]
    if cmds[i].find('-sp') >=0:
        sp=cmds[i+1]
        # NOTE(review): find("SPNAM") <= 0 also matches a value that already
        # STARTS with "SPNAM" (find == 0), producing "SPNAM SPNAM ..." --
        # looks like '< 0' was intended; confirm before changing.
        if sp.find("SPNAM") <= 0:
            sp="SPNAM "+sp
        else:
            sp="SPNAM "+sp[sp.find("M"):]
    i=i+1
# Default scale: remaining percentage after delta.
if sc=="100.":
    sc=100.-float(d)
# Non-interactive only when delta, beta and the SPNAM name were all given.
if d !="0" and b!="0" and sp != "None":
    Interactive=0
Shape=sc,np,d,b
if Interactive == 1:
    # Ask the user for the shape parameters via the Tangent dialog.
    Shape=Tangent.dialog()
    Name =Tangent.name(Shape,sp)
    Wave =Tangent.make(Name,Shape[0],Shape[1],Shape[2],Shape[3])
else:
    if nom == "None":
        nom=Tangent.name(Shape,sp)
    if sp != "None": PUTPAR(sp,nom)
    Wave =Tangent.make(nom,sc,np,d,b)
|
# Spell out each digit of a phone number; unknown characters become " ! ".
# Bug fix: the original input() call was missing its closing parenthesis
# (the ')' had slipped inside the prompt string), a syntax error.
phone = input("Enter Phone number: ")
digits_mapping = {
    "1": "One",
    "2": "Two",
    "3": "Three",
    "4": "Four",
    "5": "Five",
    "6": "Six",
    "7": "Seven",
    "8": "Eight",
    "9": "Nine",
    "0": "Zero"
}
output = ""
for ch in phone:
    # Non-digit characters (spaces, dashes, letters) map to " ! ".
    output += digits_mapping.get(ch, " ! ") + " "
print(output)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-12 15:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the PopularPerson model and order Season by descending number."""

    dependencies = [
        ('titles', '0004_auto_20171210_1512'),
    ]

    operations = [
        migrations.CreateModel(
            name='PopularPerson',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # One popularity snapshot per day: update_date is unique.
                ('update_date', models.DateField(unique=True)),
                ('persons', models.ManyToManyField(blank=True, related_name='popular', to='titles.Person')),
            ],
            options={
                'ordering': ('-update_date',),
            },
        ),
        migrations.AlterModelOptions(
            name='season',
            options={'ordering': ('-number',)},
        ),
    ]
|
""" LINATE module: Compute (Latent) Ideological Embedding """
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler
import os.path
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix, issparse
# the user can specify the ideological embedding computation library to use
from importlib import import_module
from sklearn import utils
class IdeologicalEmbedding(BaseEstimator, TransformerMixin):
ideological_embedding_class = 'CA'
default_ideological_embedding_engines = ['sklearn', 'auto', 'fbpca'] # by default use the 'prince' module
# to compute the ideological embedding
    def __init__(self, n_latent_dimensions = 2, n_iter = 10, check_input = True, random_state = None,
            engine = 'auto', in_degree_threshold = None, out_degree_threshold = None,
            force_bipartite = True, standardize_mean = True, standardize_std = False, force_full_rank = False):
        """Configure the estimator; parameters are stored one-to-one as attributes.

        'engine' selects the CA backend: one of the default engines (handled
        through the 'prince' module) or the name of any importable module
        exposing a compatible CA class.
        """
        self.random_state = random_state
        self.check_input = check_input # sklearn valid input check
        self.n_latent_dimensions = n_latent_dimensions # number of ideological embedding dimensions
        self.n_iter = n_iter # number of iteration in SVD computation
        self.engine = engine
        print('Using Ideological Embedding engine:', self.engine)
        self.in_degree_threshold = in_degree_threshold # nodes that are followed by less than this number
                                                       # (in the original graph) are taken out of the network
        self.out_degree_threshold = out_degree_threshold # nodes that follow less than this number
                                                         # (in the original graph) are taken out of the network
        self.force_bipartite = force_bipartite
        self.standardize_mean = standardize_mean
        self.standardize_std = standardize_std
        self.force_full_rank = force_full_rank
    def fit(self, X, y = None):
        """Fit the ideological embedding (correspondence analysis) on X.

        X may be an edge-list DataFrame with 'source'/'target' columns
        (optionally 'multiplicity') or an already-built (sparse) matrix.
        Results are exposed via the *_latent_dimensions_ attributes.
        """
        X = X.copy()
        # first try to load engine module (default engines go through 'prince')
        self.ideological_embedding_module_name = 'prince'
        if self.engine not in self.default_ideological_embedding_engines:
            self.ideological_embedding_module_name = self.engine
        #
        try:
            self.ideological_embedding_module = import_module(self.ideological_embedding_module_name)
        except ModuleNotFoundError:
            raise ValueError(self.ideological_embedding_module_name
                    + ' module is not installed; please install and make it visible if you want to use it')
        if self.force_full_rank:
            # Deduplicate sources that share the exact same follow pattern so
            # the source-target matrix becomes full rank; the mapping back to
            # all sources happens after the embedding is computed.
            if not isinstance(X, pd.DataFrame):
                raise ValueError('To force full rank X needs to be a dataframe...')
            if 'source' not in X.columns:
                raise ValueError('To force full rank X needs to have a \'source\' column...')
            if 'target' not in X.columns:
                raise ValueError('To force full rank X needs to have a \'target\' column...')
            # check if X is full rank
            follow_pattern_df = X.sort_values('target').groupby('source').target.agg(' '.join)
            unique_follow_pattern = follow_pattern_df.drop_duplicates(inplace = False)
            unique_follow_pattern = unique_follow_pattern.index.tolist()
            is_original_full_rank = False
            if len(X.index.tolist()) == len(unique_follow_pattern):
                is_original_full_rank = True
            else:
                # NOTE(review): original_X is assigned here but not used in the
                # rest of this method -- confirm whether it can be dropped.
                original_X = X.copy()
                X = X[X['source'].isin(unique_follow_pattern)]
                follow_pattern_df = follow_pattern_df.to_frame()
                follow_pattern_df = follow_pattern_df.reset_index()
        if isinstance(X, pd.DataFrame):
            X = self.__check_input_and_convert_to_matrix(X)
        # check input
        if self.check_input:
            utils.check_array(X, accept_sparse = True)
        # set source and target entity numbers
        if issparse(X):
            self.source_entity_no_ = X.get_shape()[0]
            self.target_entity_no_ = X.get_shape()[1]
        else:
            self.source_entity_no_ = X.shape[0]
            self.target_entity_no_ = X.shape[1]
        # and generate row and column IDs if needed (IDs already exist when X
        # came in as a DataFrame and was converted above)
        try:
            l = len(self.column_ids_)
        except AttributeError:
            self.column_ids_ = np.empty(self.target_entity_no_, dtype = object)
            for indx in range(self.target_entity_no_):
                self.column_ids_[indx] = 'target_' + str(indx)
            self.row_ids_ = np.empty(self.source_entity_no_, dtype = object)
            for indx in range(self.source_entity_no_):
                self.row_ids_[indx] = 'source_' + str(indx)
        # compute number of ideological embedding dimensions to keep
        # (cannot exceed min(#sources, #targets); negative request keeps all)
        n_latent_dimensions_tmp = self.source_entity_no_
        if n_latent_dimensions_tmp > self.target_entity_no_:
            n_latent_dimensions_tmp = self.target_entity_no_
        if self.n_latent_dimensions < 0:
            self.employed_n_latent_dimensions = n_latent_dimensions_tmp
        else:
            if self.n_latent_dimensions > n_latent_dimensions_tmp:
                self.employed_n_latent_dimensions = n_latent_dimensions_tmp
            else:
                self.employed_n_latent_dimensions = self.n_latent_dimensions
        #if np.isnan(X).any():
        #    return (self)
        #try:
        #    if np.isfinite(X).any():
        #        return (self)
        #except TypeError:
        #    return (self)
        # compute Ideological Embedding of a (sparse) matrix
        print('Computing Ideological Embedding...')
        ideological_embedding_class = getattr(self.ideological_embedding_module, self.ideological_embedding_class)
        self.ideological_embedding_model = None
        if self.engine in self.default_ideological_embedding_engines:
            self.ideological_embedding_model = ideological_embedding_class(n_components = self.employed_n_latent_dimensions,
                n_iter = self.n_iter, check_input = False, engine = self.engine, random_state = self.random_state)
            self.ideological_embedding_model.fit(X)
        else:
            # custom engine: assumed to accept only n_components
            self.ideological_embedding_model = ideological_embedding_class(n_components = self.employed_n_latent_dimensions)
            self.ideological_embedding_model.fit(X)
        # finally construct the results (default engines need X passed again)
        if self.engine in self.default_ideological_embedding_engines:
            self.ideological_embedding_source_latent_dimensions_ = self.ideological_embedding_model.row_coordinates(X)
        else:
            self.ideological_embedding_source_latent_dimensions_ = self.ideological_embedding_model.row_coordinates()
        if self.engine in self.default_ideological_embedding_engines:
            self.ideological_embedding_target_latent_dimensions_ = self.ideological_embedding_model.column_coordinates(X)
        else:
            self.ideological_embedding_target_latent_dimensions_ = self.ideological_embedding_model.column_coordinates()
        if self.standardize_mean:
            # Fit one scaler on sources+targets combined so both sides share
            # the same centering (and optional scaling).
            std_scaler = StandardScaler(with_mean = self.standardize_mean, with_std = self.standardize_std)
            std_scaler.fit(pd.concat([self.ideological_embedding_source_latent_dimensions_,
                self.ideological_embedding_target_latent_dimensions_], axis = 0))
            target_scaled_dim = pd.DataFrame(columns = self.ideological_embedding_target_latent_dimensions_.columns,
                data = std_scaler.transform(self.ideological_embedding_target_latent_dimensions_))
            self.ideological_embedding_target_latent_dimensions_ = target_scaled_dim
            source_scaled_dim = pd.DataFrame(columns = self.ideological_embedding_source_latent_dimensions_.columns,
                data = std_scaler.transform(self.ideological_embedding_source_latent_dimensions_))
            self.ideological_embedding_source_latent_dimensions_ = source_scaled_dim
        # rename columns to latent_dimension_<k> and index rows by entity ID
        column_names = self.ideological_embedding_source_latent_dimensions_.columns
        new_column_names = []
        for c in column_names:
            new_column_names.append('latent_dimension_' + str(c))
        self.ideological_embedding_source_latent_dimensions_.columns = new_column_names
        self.ideological_embedding_source_latent_dimensions_.index = self.row_ids_
        self.ideological_embedding_source_latent_dimensions_.index.name = 'source_id'
        if self.force_full_rank:
            if not is_original_full_rank:
                # Map the coordinates of each retained (unique-pattern) source
                # back onto every original source sharing that pattern.
                self.ideological_embedding_source_latent_dimensions_ = self.ideological_embedding_source_latent_dimensions_.reset_index()
                source_target_map_df = pd.merge(self.ideological_embedding_source_latent_dimensions_, follow_pattern_df,
                        left_on = 'source_id', right_on = 'source', how = 'left')
                source_target_map_df.drop(columns = 'source', inplace = True)
                self.ideological_embedding_source_latent_dimensions_ = pd.merge(follow_pattern_df,
                        source_target_map_df, on = 'target', how = 'inner')
                self.ideological_embedding_source_latent_dimensions_.drop(columns = ['target', 'source_id'], inplace = True)
                self.ideological_embedding_source_latent_dimensions_.rename(columns = {'source': 'source_id'}, inplace = True)
                self.ideological_embedding_source_latent_dimensions_.index.name = 'source_id'
                #print(self.ideological_embedding_source_latent_dimensions_)
        column_names = self.ideological_embedding_target_latent_dimensions_.columns
        new_column_names = []
        for c in column_names:
            new_column_names.append('latent_dimension_' + str(c))
        self.ideological_embedding_target_latent_dimensions_.columns = new_column_names
        self.ideological_embedding_target_latent_dimensions_.index = self.column_ids_
        self.ideological_embedding_target_latent_dimensions_.index.name = 'target_id'
        #print(self.ideological_embedding_target_latent_dimensions_)
        # cache inertia diagnostics read lazily by the properties below
        self.eigenvalues_ = self.ideological_embedding_model.eigenvalues_ # list
        #print('Eigenvalues: ', eigenvalues_)
        self.candidate_total_inertia_ = self.ideological_embedding_model.total_inertia_ # numpy.float64 or None
        #print('Total inertia: ', total_inertia_)
        self.candidate_explained_inertia_ = self.ideological_embedding_model.explained_inertia_ # list or None
        #print('Explained inertia: ', explained_inertia_)
        return self
def transform(self, X):
return (X)
def get_params(self, deep = True):
return {'random_state': self.random_state,
'check_input': self.check_input,
'n_latent_dimensions': self.n_latent_dimensions,
'n_iter': self.n_iter,
'engine': self.engine,
'in_degree_threshold': self.in_degree_threshold,
'out_degree_threshold': self.out_degree_threshold,
'force_bipartite': self.force_bipartite,
'standardize_mean': self.standardize_mean,
'standardize_std': self.standardize_std}
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self,parameter, value)
return self
def score(self, X, y):
return 1
    def compute_latent_embedding_distance(self, Y, use_target_ideological_embedding = True,
            ideological_dimension_mapping = None, error_aggregation_fun = None):
        """Aggregate distance between fitted latent dimensions and benchmark Y.

        Rows of the fitted dimensions (target or source side) are aligned
        with rows of Y by sorted entity ID, then a per-row Euclidean norm is
        aggregated (default: an RMSE-like value; 'MAE' or a callable may be
        supplied via error_aggregation_fun).
        """
        try:
            X = None
            if use_target_ideological_embedding:
                X = self.ideological_embedding_target_latent_dimensions_.copy()
                # sort to achieve correspondence with Y
                # NOTE(review): the index set in fit() is named 'target_id';
                # sort_values('target ID') looks like it would raise KeyError
                # (and KeyError is not caught below) -- confirm intended name.
                X = X.sort_values('target ID', ascending = True)
            else:
                X = self.ideological_embedding_source_latent_dimensions_.copy()
                # sort to achieve correspondence with Y
                # NOTE(review): same concern as above, index is 'source_id'.
                X = X.sort_values('source ID', ascending = True)
        except AttributeError:
            raise AttributeError('Ideological Embedding model has not been fitted.')
        if X.index.name is not None:
            X = X.reset_index()
        if isinstance(X, pd.DataFrame):
            if ideological_dimension_mapping is None:
                # delete first column which is the entity ID
                X = X.iloc[: , 1:]
            else:
                if 'X' in ideological_dimension_mapping.keys():
                    X = X[ideological_dimension_mapping['X']]
                else:
                    X = X.iloc[: , 1:]
            X = X.to_numpy()
        assert isinstance(X, np.ndarray)
        if isinstance(Y, pd.DataFrame):
            if 'entity' not in Y.columns:
                raise ValueError('Benchmark dimension data frame has to have an \'entity\' column.')
            # sort to achieve correspondence with X
            Y = Y.sort_values('entity', ascending = True)
            if ideological_dimension_mapping is None:
                # delete first column which is the entity ID
                Y = Y.iloc[: , 1:]
            else:
                if 'Y' in ideological_dimension_mapping.keys():
                    Y = Y[ideological_dimension_mapping['Y']]
                else:
                    Y = Y.iloc[: , 1:]
            Y = Y.to_numpy()
        assert isinstance(Y, np.ndarray)
        # X and Y should have the same dimensions
        if X.shape[0] != Y.shape[0]:
            raise ValueError('Dimension matrices should have the same shape.')
        if X.shape[1] != Y.shape[1]:
            raise ValueError('Dimension matrices should have the same shape.')
        # compute Euclidean norm of each row
        l2 = np.sqrt(np.power(X - Y, 2.0).sum(axis = 1))
        ideological_dim_distance = np.sqrt(np.power(l2, 2.0).sum()) / float(len(l2)) # default RMSE variant
        if error_aggregation_fun is not None:
            if error_aggregation_fun == 'MAE':
                ideological_dim_distance = np.absolute(l2).sum() / float(len(l2))
            else: # user defined
                ideological_dim_distance = error_aggregation_fun(l2)
        return (ideological_dim_distance)
    def load_benchmark_ideological_dimensions_from_file(self, path_benchmark_ideological_dimensions_data,
            benchmark_ideological_dimensions_data_header_names = None):
        """Load a benchmark-dimensions CSV into a DataFrame.

        The optional header-names dict maps the file's entity column (key
        'entity') and, optionally, the dimension columns (key 'dimensions')
        to the canonical names used internally. The 'entity' column is cast
        to str, every other kept column to float.
        """
        # check that benchmark ideological dimensions file is provided
        if path_benchmark_ideological_dimensions_data is None:
            raise ValueError('Benchmark ideological dimensions file name is not provided.')
        # check that benchmark ideological dimensions file exists
        if not os.path.isfile(path_benchmark_ideological_dimensions_data):
            raise ValueError('Benchmark ideological dimensions file does not exist.')
        # handles files with or without header
        header_df = pd.read_csv(path_benchmark_ideological_dimensions_data, nrows = 0)
        column_no = len(header_df.columns)
        if column_no < 2:
            raise ValueError('Benchmark ideological dimensions file has to have at least two columns.')
        # sanity checks in header
        if benchmark_ideological_dimensions_data_header_names is not None:
            if benchmark_ideological_dimensions_data_header_names['entity'] not in header_df.columns:
                raise ValueError('Benchmark ideological dimensions file has to have a '
                        + benchmark_ideological_dimensions_data_header_names['entity'] + ' column.')
        # load data
        input_df = None
        if benchmark_ideological_dimensions_data_header_names is None:
            input_df = pd.read_csv(path_benchmark_ideological_dimensions_data,
                    header = None).rename(columns = {0:'entity'})
        else:
            input_df = pd.read_csv(path_benchmark_ideological_dimensions_data).rename(columns =
                    {benchmark_ideological_dimensions_data_header_names['entity']:'entity'})
        if benchmark_ideological_dimensions_data_header_names is not None:
            if 'dimensions' in benchmark_ideological_dimensions_data_header_names.keys():
                cols = benchmark_ideological_dimensions_data_header_names['dimensions']
                # NOTE(review): this append mutates the caller's 'dimensions'
                # list in place -- confirm that is acceptable.
                cols.append('entity')
                input_df = input_df[cols]
        input_df['entity'] = input_df['entity'].astype(str)
        for c in input_df.columns:
            if c != 'entity':
                input_df[c] = input_df[c].astype(float)
        return (input_df)
    def load_input_from_file(self, path_to_network_data, network_file_header_names = None):
        """Load a network edge-list CSV into a DataFrame.

        Columns are normalized to 'source'/'target' (and optionally
        'multiplicity'); source and target are read as str. Files without a
        header row are supported when network_file_header_names is None.
        """
        # check that a network file is provided
        if path_to_network_data is None:
            raise ValueError('Network file name is not provided.')
        # check network file exists
        if not os.path.isfile(path_to_network_data):
            raise ValueError('Network file does not exist.')
        # handles files with or without header
        header_df = pd.read_csv(path_to_network_data, nrows = 0)
        column_no = len(header_df.columns)
        if column_no < 2:
            raise ValueError('Network file has to have at least two columns.')
        # sanity checks in header
        if network_file_header_names is not None:
            if network_file_header_names['source'] not in header_df.columns:
                raise ValueError('Network file has to have a ' + network_file_header_names['source'] + ' column.')
            if network_file_header_names['target'] not in header_df.columns:
                raise ValueError('Network file has to have a ' + network_file_header_names['target'] + ' column.')
        # load network data
        input_df = None
        if network_file_header_names is None:
            # headerless file: columns are positional (0=source, 1=target,
            # optional 2=multiplicity)
            if column_no == 2:
                input_df = pd.read_csv(path_to_network_data, header = None,
                        dtype = {0:str, 1:str}).rename(columns = {0:'source', 1:'target'})
            else:
                input_df = pd.read_csv(path_to_network_data, header = None,
                        dtype = {0:str, 1:str}).rename(columns = {0:'source', 1:'target', 2:'multiplicity'})
        else:
            if 'multiplicity' in network_file_header_names.keys():
                input_df = pd.read_csv(path_to_network_data, dtype = {network_file_header_names['source']:str,
                        network_file_header_names['target']:str}).rename(columns = {network_file_header_names['source']:'source',
                        network_file_header_names['target']:'target',
                        network_file_header_names['multiplicity']:'multiplicity'})
            else:
                input_df = pd.read_csv(path_to_network_data, dtype = {network_file_header_names['source']:str,
                        network_file_header_names['target']:str}).rename(columns = {network_file_header_names['source']:'source',
                        network_file_header_names['target']:'target'})
        #print(input_df.shape, len(input_df.target.unique()))
        #print()
        #input_df = self.__check_input_and_convert_to_matrix(input_df)
        print('Finished loading network..')
        return(input_df)
def save_ideological_embedding_source_latent_dimensions(self, path_to_file):
try:
self.ideological_embedding_source_latent_dimensions_.to_csv(path_to_file)
except AttributeError:
raise AttributeError('Source ideological embedding latent dimensions have not been computed.')
def save_ideological_embedding_target_latent_dimensions(self, path_to_file):
try:
self.ideological_embedding_target_latent_dimensions_.to_csv(path_to_file)
except AttributeError:
raise AttributeError('Target ideological embedding latent dimensions have not been computed.')
    @property
    def total_inertia_(self):
        """Total inertia of the fitted embedding (numpy.float64 or None).

        For the default engines the value cached during fit() is returned;
        otherwise it is fetched lazily from the engine model and cached.
        Raises AttributeError if fit() has not been called.
        """
        try:
            if self.engine in self.default_ideological_embedding_engines:
                return (self.candidate_total_inertia_)
            self.candidate_total_inertia_ = self.ideological_embedding_model.get_total_inertia()
            return (self.candidate_total_inertia_)
        except AttributeError:
            raise AttributeError('Ideological Embedding model has not been fitted.')
@property
def explained_inertia_(self):
try:
if self.engine in self.default_ideological_embedding_engines:
return (self.candidate_explained_inertia_)
self.candidate_total_inertia_ = self.ideological_embedding_model.get_explained_inertia()
return (self.candidate_total_inertia_)
except AttributeError:
raise AttributeError('Ideological Embedding model has not been fitted.')
    def __check_input_and_convert_to_matrix(self, input_df):
        """Validate an edge-list dataframe and convert it to a source/target matrix.

        Expects columns 'source' and 'target' (and optionally 'multiplicity');
        drops NA edges, optionally enforces bipartiteness and degree thresholds,
        then builds a (source x target) count matrix for the embedding engine.

        NOTE(review): this mutates the caller's dataframe in place (dropna /
        astype on `input_df`) — confirm callers do not rely on the original.
        """
        # first perform validity checks over the input
        if not isinstance(input_df, pd.DataFrame):
            raise ValueError('Input should be a pandas dataframe.')
        if 'source' not in input_df.columns:
            raise ValueError('Input dataframe should have a source column.')
        if 'target' not in input_df.columns:
            raise ValueError('Input dataframe should have a target column.')
        # remove NAs from input data
        input_df.dropna(subset = ['source', 'target'], inplace = True)
        # convert to 'str'
        input_df['source'] = input_df['source'].astype(str)
        input_df['target'] = input_df['target'].astype(str)
        # the file should either have repeated edges or a multiplicity column but not both
        #has_more_columns = True if input_df.columns.size > 2 else False
        has_repeated_edges = True if input_df.duplicated(subset = ['source', 'target']).sum() > 0 else False
        if ('multiplicity' in input_df.columns) and has_repeated_edges:
            raise ValueError('There cannot be repeated edges AND a 3rd column with edge multiplicities.')
        # if there is a third column, it must containt integers
        #if has_more_columns:
        #    if 'multiplicity' not in input_df.columns:
        #        raise ValueError('Input dataframe should have a multiplicity column.')
        if 'multiplicity' in input_df.columns:
            input_df['multiplicity'] = input_df['multiplicity'].astype(int) # will fail if missing element, or cannot convert
        # checking if final network is bipartite:
        # a network is bipartite here iff no node appears both as source and target
        common_nodes_np = np.intersect1d(input_df['source'], input_df['target'])
        self.is_bipartite_ = common_nodes_np.size == 0
        if not self.is_bipartite_:
            if self.force_bipartite:
                # NOTE(review): edges whose source also appears as a target are
                # dropped, but is_bipartite_ is left False — confirm intended.
                input_df = input_df[~input_df['source'].isin(common_nodes_np)]
        print('Bipartite network:', self.is_bipartite_)
        # remove nodes with small degree if needed
        # NOTE(review): degree is computed with count(), i.e. number of edge
        # rows — the 'multiplicity' values are NOT summed into the degree.
        degree_per_target = None
        if self.in_degree_threshold is not None:
            degree_per_target = input_df.groupby('target').count()
        degree_per_source = None
        if self.out_degree_threshold is not None:
            degree_per_source = input_df.groupby('source').count()
        if degree_per_target is not None:
            if 'multiplicity' in degree_per_target.columns:
                degree_per_target.drop('multiplicity', axis = 1, inplace = True)
            # keep only targets whose in-degree meets the threshold, then
            # inner-join to filter the edge list down to those targets
            degree_per_target = degree_per_target[degree_per_target >= self.in_degree_threshold].dropna().reset_index()
            degree_per_target.drop('source', axis = 1, inplace = True)
            input_df = pd.merge(input_df, degree_per_target, on = ['target'], how = 'inner')
        if degree_per_source is not None:
            if 'multiplicity' in degree_per_source.columns:
                degree_per_source.drop('multiplicity', axis = 1, inplace = True)
            # symmetric filtering on source out-degree
            degree_per_source = degree_per_source[degree_per_source >= self.out_degree_threshold].dropna().reset_index()
            degree_per_source.drop('target', axis = 1, inplace = True)
            input_df = pd.merge(input_df, degree_per_source, on = ['source'], how = 'inner')
        # and then assemble the matrices to be fed to Ideological embedding
        ntwrk_df = input_df[['source', 'target']]
        # factorize maps each label to a dense integer code; the uniques (r, c)
        # preserve the label <-> row/column correspondence for later lookup
        n_i, r = ntwrk_df['target'].factorize()
        #self.target_entity_no_ = len(np.unique(n_i))
        self.column_ids_ = r.values
        n_j, c = ntwrk_df['source'].factorize()
        assert len(n_i) == len(n_j)
        #self.source_entity_no_ = len(np.unique(n_j))
        self.row_ids_ = c.values
        network_edge_no = len(n_i)
        # collapse duplicate (source, target) pairs; bincount gives the edge
        # multiplicity per unique pair, which becomes the matrix entry
        n_in_j, tups = pd.factorize(list(zip(n_j, n_i)))
        ntwrk_csr = csr_matrix((np.bincount(n_in_j), tuple(zip(*tups)))) # COO might be faster
        #print('shape', ntwrk_csr.get_shape())
        if self.engine in self.default_ideological_embedding_engines:
            # default engines expect a dense array
            ntwrk_np = ntwrk_csr.toarray()
            return (ntwrk_np)
        return (ntwrk_csr)
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import pins
from esphome.const import (
CONF_CLOCK_PIN,
CONF_DATA_PIN,
CONF_ID,
CONF_NUM_CHANNELS,
CONF_NUM_CHIPS,
)
# The SM16716 driver exposes PWM channels, so the output component is required.
AUTO_LOAD = ["output"]
sm16716_ns = cg.esphome_ns.namespace("sm16716")
SM16716 = sm16716_ns.class_("SM16716", cg.Component)
# Allow several sm16716 hubs in one YAML configuration.
MULTI_CONF = True
# Validation schema: data/clock GPIOs are mandatory; channel/chip counts are
# bounded by the chip's addressing limits (each chip drives 3 channels).
CONFIG_SCHEMA = cv.Schema(
    {
        cv.GenerateID(): cv.declare_id(SM16716),
        cv.Required(CONF_DATA_PIN): pins.gpio_output_pin_schema,
        cv.Required(CONF_CLOCK_PIN): pins.gpio_output_pin_schema,
        cv.Optional(CONF_NUM_CHANNELS, default=3): cv.int_range(min=3, max=255),
        cv.Optional(CONF_NUM_CHIPS, default=1): cv.int_range(min=1, max=85),
    }
).extend(cv.COMPONENT_SCHEMA)
async def to_code(config):
    """Generate the C++ setup code for an SM16716 hub from a validated config."""
    component = cg.new_Pvariable(config[CONF_ID])
    await cg.register_component(component, config)
    data_pin = await cg.gpio_pin_expression(config[CONF_DATA_PIN])
    cg.add(component.set_data_pin(data_pin))
    clock_pin = await cg.gpio_pin_expression(config[CONF_CLOCK_PIN])
    cg.add(component.set_clock_pin(clock_pin))
    cg.add(component.set_num_channels(config[CONF_NUM_CHANNELS]))
    cg.add(component.set_num_chips(config[CONF_NUM_CHIPS]))
|
"""Dicionário
Escreva uma list comprehension que crie um dicionário em que cada chave
corresponde a um outro dicionário, contendo nome e a nota do aluno.
NOTA: O nome e a nota podem ter valor None.
"""
# One entry per student; each value is a fresh dict, so editing one
# student's record never leaks into another's.
d = {f'student_{i}': {'name': None, 'score': None} for i in range(3)}
print(d)
|
"""Odata library."""
from .odata import (
    Blueprint,
    Odata,
    OdataMixin,
    OdataSchema,
)
# Re-export the public API at package level so users can do
# `from odata import Odata` without knowing the submodule layout.
__all__ = [
    "Blueprint",
    "Odata",
    "OdataMixin",
    "OdataSchema",
]
|
import argparse
import time
import yaml
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from utils.profile import count_params
from utils.data_aug import ColorAugmentation
import os
from torch.autograd.variable import Variable
import models
# Discover every model constructor exposed by the local `models` package
# (lowercase callables, excluding dunders) for the --arch choices.
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))
# Command-line interface; values here can be overridden by the YAML config
# loaded in main() (see the 'common' section handling there).
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
                    choices=model_names,
                    help='models architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: resnet18)')
parser.add_argument('--config', default='cfgs/local_test.yaml')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=32, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate models on validation set')
parser.add_argument('--train_image_list', default='', type=str, help='path to train image list')
parser.add_argument('--input_size', default=224, type=int, help='img crop size')
parser.add_argument('--image_size', default=256, type=int, help='ori img size')
parser.add_argument('--model_name', default='', type=str, help='name of the models')
# Best top-1 validation accuracy seen so far; updated per epoch in main().
best_prec1 = 0
# NOTE: a second, byte-identical computation of `model_names` stood here;
# it duplicated the definition at the top of this script and was removed.
# Use the GPU whenever CUDA is available.
USE_GPU = torch.cuda.is_available()
def main():
    """Entry point: parse CLI + YAML config, build model/data/optimizer,
    optionally resume from a checkpoint, then train or evaluate."""
    global args, best_prec1, USE_GPU
    args = parser.parse_args()
    with open(args.config) as f:
        # safe_load: yaml.load() without an explicit Loader is unsafe and
        # raises TypeError on PyYAML >= 6.
        config = yaml.safe_load(f)
    # YAML 'common' section overrides the argparse defaults
    for k, v in config['common'].items():
        setattr(args, k, v)
    # create models
    if args.input_size != 224 or args.image_size != 256:
        image_size = args.image_size
        input_size = args.input_size
    else:
        image_size = 256
        input_size = 224
    print("Input image size: {}, test size: {}".format(image_size, input_size))
    # optional 'model' section supplies constructor kwargs
    if "model" in config.keys():
        model = models.__dict__[args.arch](**config['model'])
    else:
        model = models.__dict__[args.arch]()
    if USE_GPU:
        model = model.cuda()
        model = torch.nn.DataParallel(model)
    count_params(model)
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    img_size = args.input_size
    ratio = 224.0 / float(img_size)
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(img_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            ColorAugmentation(),
            normalize,
        ]))
    val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(int(256 * ratio)),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
        normalize,
    ]))
    # if args.distributed:
    #     train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    #     val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
    # else:
    train_sampler = None
    val_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=(train_sampler is None), sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
                                             num_workers=args.workers, pin_memory=True, sampler=val_sampler)
    if args.evaluate:
        validate(val_loader, model, criterion)
        return
    for epoch in range(args.start_epoch, args.epochs):
        # if args.distributed:
        #     train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)
        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        # makedirs + exist_ok: handles nested save paths and avoids a
        # race between the exists() check and mkdir()
        os.makedirs(args.save_path, exist_ok=True)
        save_name = '{}/{}_{}_best.pth.tar'.format(args.save_path, args.model_name, epoch) if is_best else\
            '{}/{}_{}.pth.tar'.format(args.save_path, args.model_name, epoch)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, filename=save_name)
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch over `train_loader`, logging loss/accuracy
    every `args.print_freq` batches to logs/<time_stp>_<arch>.log.

    NOTE(review): relies on module-level globals `args` and `time_stp`
    (the latter is only set under the __main__ guard).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # pytorch 0.4.0 compatible
        if '0.4.' in torch.__version__:
            if USE_GPU:
                input_var = torch.cuda.FloatTensor(input.cuda())
                target_var = torch.cuda.LongTensor(target.cuda())
            else:
                input_var = torch.FloatTensor(input)
                target_var = torch.LongTensor(target)
        else:  # pytorch 0.3.1 or less compatible
            if USE_GPU:
                input = input.cuda()
                # BUG FIX: `async=True` is a SyntaxError since Python 3.7
                # (`async` became a keyword); the supported spelling is
                # `non_blocking=True`.
                target = target.cuda(non_blocking=True)
            input_var = Variable(input)
            target_var = Variable(target)
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)
        prec1, prec5 = accuracy(output.data, target_var, topk=(1, 5))
        # measure accuracy and record loss
        reduced_prec1 = prec1.clone()
        reduced_prec5 = prec5.clone()
        top1.update(reduced_prec1[0])
        top5.update(reduced_prec5[0])
        reduced_loss = loss.data.clone()
        losses.update(reduced_loss)
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        # check whether the network is well connected
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            with open('logs/{}_{}.log'.format(time_stp, args.arch), 'a+') as flog:
                line = 'Epoch: [{0}][{1}/{2}]\t ' \
                       'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                       'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                       'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' \
                       'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader),
                                                                      batch_time=batch_time, loss=losses, top1=top1, top5=top5)
                print(line)
                flog.write('{}\n'.format(line))
def validate(val_loader, model, criterion):
    """Evaluate `model` on `val_loader`; returns the average top-1 accuracy.

    Logs per-batch metrics every `args.print_freq` batches to the same log
    file as train().
    """
    global time_stp
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        # pytorch 0.4.0 compatible
        if '0.4.' in torch.__version__:
            with torch.no_grad():
                if USE_GPU:
                    input_var = torch.cuda.FloatTensor(input.cuda())
                    target_var = torch.cuda.LongTensor(target.cuda())
                else:
                    input_var = torch.FloatTensor(input)
                    target_var = torch.LongTensor(target)
        else:  # pytorch 0.3.1 or less compatible
            if USE_GPU:
                input = input.cuda()
                # BUG FIX: `async=True` is a SyntaxError since Python 3.7;
                # use the supported `non_blocking=True` keyword instead.
                target = target.cuda(non_blocking=True)
            input_var = Variable(input, volatile=True)
            target_var = Variable(target, volatile=True)
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target_var, topk=(1, 5))
        losses.update(loss.data, input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            line = 'Test: [{0}/{1}]\t' \
                   'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                   'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                   'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' \
                   'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(val_loader), batch_time=batch_time,
                                                                  loss=losses, top1=top1, top5=top5)
            with open('logs/{}_{}.log'.format(time_stp, args.arch), 'a+') as flog:
                flog.write('{}\n'.format(line))
                print(line)
    return top1.avg
def save_checkpoint(state, filename='checkpoint.pth.tar'):
    """Serialize a training-state dict (epoch, weights, optimizer, ...) to disk."""
    torch.save(state, filename)
class AverageMeter(object):
    """Tracks the latest value and a running (optionally weighted) average."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    decayed_lr = args.lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, one per k, with accuracy in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # BUG FIX: correct[:k] is non-contiguous after the transpose above,
        # so .view(-1) raises in modern PyTorch for k > 1; reshape(-1)
        # handles both cases.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
if __name__ == '__main__':
    # Timestamp used to name the per-run log files in train()/validate().
    time_stp = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
    main()
|
# Generated by Django 2.1.13 on 2020-01-14 19:54
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow a channel's member list to be empty (adds blank=True to the
    Channel.members many-to-many field)."""
    dependencies = [
        ('nablaforum', '0005_auto_20200104_1336'),
    ]
    operations = [
        migrations.AlterField(
            model_name='channel',
            name='members',
            field=models.ManyToManyField(blank=True, related_name='members', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: create the AccessAttempt login-audit table."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='AccessAttempt',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('user_agent', models.CharField(max_length=255)),
                ('ip_address', models.GenericIPAddressField(null=True, verbose_name='IP Address')),
                ('username', models.CharField(max_length=255, null=True)),
                ('http_accept', models.CharField(max_length=1025, verbose_name='HTTP Accept')),
                ('path_info', models.CharField(max_length=255, verbose_name='Path')),
                ('attempt_time', models.DateTimeField(auto_now_add=True)),
                ('login_valid', models.BooleanField(default=False)),
            ],
            options={
                # newest attempts first
                'ordering': ['-attempt_time'],
            },
            bases=(models.Model,),
        ),
    ]
|
#!/usr/bin/env python
# DON'T USE THIS. IMPORT OMNI OMNI.OMNI_TO_SWMF.
import datetime as dt
import numpy as np
from spacepy.pybats import ImfInput
raise Exception("DON'T USE THIS YOU IDIOT.")
def fix(arr, flag):
    '''
    Linearly interpolate over every entry of 'arr' equal to the bad-data
    flag 'flag'; all other entries pass through unchanged.
    '''
    idx = np.arange(arr.size)
    good = np.not_equal(arr, flag)
    return np.interp(idx, idx[good], arr[good])
# NOTE(review): everything below is unreachable — this module raises an
# Exception unconditionally at import time (see the raise statement above).
raw = np.loadtxt('./omni_min_31756.lst')
# Convert time.
# Column 0 is the year; day-of-year, hour and minute follow in columns 1-3.
start = dt.datetime(int(raw[0,0]), 1,1,0,0,0)
time = np.zeros(raw.shape[0], dtype=object)
for i in range(raw.shape[0]):
    time[i] = start+dt.timedelta(days =int(raw[i,1])-1,
        hours=int(raw[i,2]), minutes=int(raw[i,3]))
# Create imf object:
imf = ImfInput(filename='imf_input.dat', load=False)
# Populate:
imf['time'] = time
# Interpolate across OMNI bad-data fill values (9999.99 etc.) per field.
imf['bx'], imf['by'], imf['bz'] = \
    fix(raw[:,5],9999.99), fix(raw[:,6],9999.99), fix(raw[:,7],9999.99)
imf['ux'], imf['uy'], imf['uz'] = \
    fix(raw[:,9],99999.9), fix(raw[:,10],99999.9), fix(raw[:,11],99999.9)
imf['rho'] = fix(raw[:,-2], 999.99)
imf['temp'] = fix(raw[:,-1], 9999999.)
# presumably 'pram' is derived by the consumer, so it is dropped — verify
imf.pop('pram')
imf.write()
|
from fttcrypt import FTTCryptor
# Demo of the FTTCryptor API: symmetric key shared by all calls below.
key = "SecretKeyGoesHere"
text="Secured to to be encrypted"
# Encrypt a plain Text to Cipher
cipherText = FTTCryptor.encryptText(text, key)
print(cipherText)
# Decrypt Cipher to Plain Text
decryptedText = FTTCryptor.decryptText(cipherText, key)
print(decryptedText)
# Encrypt a file to Cipher
cipherText = FTTCryptor.encryptFile("file.jpg", key)
print(cipherText)
# Decrypt Cipher to File
file = FTTCryptor.decryptFile(cipherText, key)
print(file)
|
import click
from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor
from .processor import OlaHdClientProcessor
@click.command()
@ocrd_cli_options
def cli(*args, **kwargs):
    # Thin OCR-D command-line entry point: argument handling and processor
    # execution are delegated to the shared OCR-D CLI wrapper.
    return ocrd_cli_wrap_processor(OlaHdClientProcessor, *args, **kwargs)
|
from django.core.files.uploadedfile import SimpleUploadedFile
from rest_framework.test import APIClient
from testing.testcases import TestCase
from tweets.models import Tweet, TweetPhoto
from utils.paginations import EndlessPagination
# NOTE: the trailing '/' is required; without it Django answers with a 301 redirect
TWEET_LIST_API = '/api/tweets/'
TWEET_CREATE_API = '/api/tweets/'
TWEET_RETRIEVE_API = '/api/tweets/{}/'
class TweetApiTests(TestCase):
    """API tests for listing, creating and retrieving tweets, including
    photo uploads and endless pagination."""
    def setUp(self):
        # two users: user1 (authenticated client) with 3 tweets,
        # user2 with 2 tweets
        super(TweetApiTests, self).setUp()
        self.user1 = self.create_user('user1', 'user1@jiuzhang.com')
        self.tweets1 = [
            self.create_tweet(self.user1)
            for i in range(3)
        ]
        self.user1_client = APIClient()
        self.user1_client.force_authenticate(self.user1)
        self.user2 = self.create_user('user2', 'user2@jiuzhang.com')
        self.tweets2 = [
            self.create_tweet(self.user2)
            for i in range(2)
        ]
    def test_list_api(self):
        # the user_id parameter is required
        response = self.anonymous_client.get(TWEET_LIST_API)
        self.assertEqual(response.status_code, 400)
        # a normal request
        response = self.anonymous_client.get(TWEET_LIST_API, {'user_id': self.user1.id})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data['results']), 3)
        response = self.anonymous_client.get(TWEET_LIST_API, {'user_id': self.user2.id})
        self.assertEqual(len(response.data['results']), 2)
        # verify the ordering: newest tweets come first
        self.assertEqual(response.data['results'][0]['id'], self.tweets2[1].id)
        self.assertEqual(response.data['results'][1]['id'], self.tweets2[0].id)
    def test_create_api(self):
        # must be logged in
        response = self.anonymous_client.post(TWEET_CREATE_API)
        self.assertEqual(response.status_code, 403)
        # content is required
        response = self.user1_client.post(TWEET_CREATE_API)
        self.assertEqual(response.status_code, 400)
        # content must not be too short
        response = self.user1_client.post(TWEET_CREATE_API, {'content': '1'})
        self.assertEqual(response.status_code, 400)
        # content must not be too long
        response = self.user1_client.post(TWEET_CREATE_API, {
            'content': '0' * 141
        })
        self.assertEqual(response.status_code, 400)
        # a normal post
        tweets_count = Tweet.objects.count()
        response = self.user1_client.post(TWEET_CREATE_API, {
            'content': 'Hello World, this is my first tweet!'
        })
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.data['user']['id'], self.user1.id)
        self.assertEqual(Tweet.objects.count(), tweets_count + 1)
    def test_create_with_files(self):
        # upload an empty file list
        response = self.user1_client.post(TWEET_CREATE_API, {
            'content': 'a selfie',
            'files': [],
        })
        self.assertEqual(response.status_code, 201)
        self.assertEqual(TweetPhoto.objects.count(), 0)
        # upload a single file
        # content must be bytes, so convert it with str.encode
        file = SimpleUploadedFile(
            name='selfie.jpg',
            content=str.encode('a fake image'),
            content_type='image/jpeg',
        )
        response = self.user1_client.post(TWEET_CREATE_API, {
            'content': 'a selfie',
            'files': [file],
        })
        self.assertEqual(response.status_code, 201)
        self.assertEqual(TweetPhoto.objects.count(), 1)
        # test uploading multiple files
        file1 = SimpleUploadedFile(
            name='selfie1.jpg',
            content=str.encode('selfie 1'),
            content_type='image/jpeg',
        )
        file2 = SimpleUploadedFile(
            name='selfie2.jpg',
            content=str.encode('selfie 2'),
            content_type='image/jpeg',
        )
        response = self.user1_client.post(TWEET_CREATE_API, {
            'content': 'two selfies',
            'files': [file1, file2],
        })
        self.assertEqual(response.status_code, 201)
        self.assertEqual(TweetPhoto.objects.count(), 3)
        # make sure the retrieve API response includes the photo urls
        retrieve_url = TWEET_RETRIEVE_API.format(response.data['id'])
        response = self.user1_client.get(retrieve_url)
        self.assertEqual(len(response.data['photo_urls']), 2)
        self.assertEqual('selfie1' in response.data['photo_urls'][0], True)
        self.assertEqual('selfie2' in response.data['photo_urls'][1], True)
        # uploading more than 9 files should fail
        files = [
            SimpleUploadedFile(
                name=f'selfie{i}.jpg',
                content=str.encode(f'selfie{i}'),
                content_type='image/jpeg',
            )
            for i in range(10)
        ]
        response = self.user1_client.post(TWEET_CREATE_API, {
            'content': 'failed due to number of photos exceeded limit',
            'files': files,
        })
        self.assertEqual(response.status_code, 400)
        self.assertEqual(TweetPhoto.objects.count(), 3)
    def test_retrieve(self):
        # tweet with id=-1 does not exist
        url = TWEET_RETRIEVE_API.format(-1)
        response = self.anonymous_client.get(url)
        self.assertEqual(response.status_code, 404)
        # retrieving a tweet also returns its comments
        tweet = self.create_tweet(self.user1)
        url = TWEET_RETRIEVE_API.format(tweet.id)
        response = self.anonymous_client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data['comments']), 0)
        self.create_comment(self.user2, tweet, 'holly s***')
        self.create_comment(self.user1, tweet, 'hmm...')
        response = self.anonymous_client.get(url)
        self.assertEqual(len(response.data['comments']), 2)
        # the tweet includes the user's nickname and avatar
        profile = self.user1.profile
        self.assertEqual(response.data['user']['nickname'], profile.nickname)
        self.assertEqual(response.data['user']['avatar_url'], None)
    def test_pagination(self):
        page_size = EndlessPagination.page_size
        # create page_size * 2 tweets
        # we have created self.tweets1 in setUp
        for i in range(page_size * 2 - len(self.tweets1)):
            self.tweets1.append(self.create_tweet(self.user1, 'tweet{}'.format(i)))
        tweets = self.tweets1[::-1]
        # pull the first page
        response = self.user1_client.get(TWEET_LIST_API, {'user_id': self.user1.id})
        self.assertEqual(response.data['has_next_page'], True)
        self.assertEqual(len(response.data['results']), page_size)
        self.assertEqual(response.data['results'][0]['id'], tweets[0].id)
        self.assertEqual(response.data['results'][1]['id'], tweets[1].id)
        self.assertEqual(response.data['results'][page_size - 1]['id'], tweets[page_size - 1].id)
        # pull the second page
        response = self.user1_client.get(TWEET_LIST_API, {
            'created_at__lt': tweets[page_size - 1].created_at,
            'user_id': self.user1.id,
        })
        self.assertEqual(response.data['has_next_page'], False)
        self.assertEqual(len(response.data['results']), page_size)
        self.assertEqual(response.data['results'][0]['id'], tweets[page_size].id)
        self.assertEqual(response.data['results'][1]['id'], tweets[page_size + 1].id)
        self.assertEqual(response.data['results'][page_size - 1]['id'], tweets[2 * page_size - 1].id)
        # pull latest newsfeeds
        response = self.user1_client.get(TWEET_LIST_API, {
            'created_at__gt': tweets[0].created_at,
            'user_id': self.user1.id,
        })
        self.assertEqual(response.data['has_next_page'], False)
        self.assertEqual(len(response.data['results']), 0)
        new_tweet = self.create_tweet(self.user1, 'a new tweet comes in')
        response = self.user1_client.get(TWEET_LIST_API, {
            'created_at__gt': tweets[0].created_at,
            'user_id': self.user1.id,
        })
        self.assertEqual(response.data['has_next_page'], False)
        self.assertEqual(len(response.data['results']), 1)
        self.assertEqual(response.data['results'][0]['id'], new_tweet.id)
|
from django.apps import AppConfig
class EnergySaverConfig(AppConfig):
    """Django application configuration for the energy_saver app."""
    name = 'energy_saver'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.